max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
routeros_api/api_communicator/async_decorator.py | davidc/RouterOS-api | 183 | 11151867 | class AsyncApiCommunicator(object):
def __init__(self, inner):
self.inner = inner
def call(self, *args, **kwargs):
tag = self.inner.send(*args, **kwargs)
return ResponsePromise(self.inner, tag)
class ResponsePromise(object):
def __init__(self, receiver, tag):
self.receiver = receiver
self.tag = tag
self.response = None
def get(self):
if self.response is None:
self.response = self.receiver.receive(self.tag)
return self.response
def __iter__(self):
return self.receiver.receive_iterator(self.tag)
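# Minimal usage sketch (not part of the original module): `inner` stands in for
# any communicator object exposing send/receive/receive_iterator, such as the
# synchronous communicator this class wraps.
def _example_usage(inner):
    async_api = AsyncApiCommunicator(inner)
    promise = async_api.call('/ip/address/print')  # send() returns a tag immediately
    return promise.get()  # blocks until the response for that tag is received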
|
external/gprof2dot/hotshotmain.py | sean-v8/RMG-Py | 1,348 | 11151877 | #!/usr/bin/env python
#
# Copyright 2007 <NAME>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
def run(statement, filename=None, sort=-1):
import os, tempfile, hotshot, hotshot.stats
logfd, logfn = tempfile.mkstemp()
prof = hotshot.Profile(logfn)
try:
try:
prof = prof.run(statement)
except SystemExit:
pass
prof.close()
finally:
stats = hotshot.stats.load(logfn)
stats.strip_dirs()
stats.sort_stats(sort)
if filename is not None:
result = stats.dump_stats(filename)
else:
result = stats.print_stats()
os.unlink(logfn)
return result
def main():
import os, sys
from optparse import OptionParser
usage = "hotshotmain.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-o', '--outfile', dest="outfile",
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class", default=-1)
if not sys.argv[1:]:
parser.print_usage()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if (len(sys.argv) > 0):
sys.path.insert(0, os.path.dirname(sys.argv[0]))
run('execfile(%r)' % (sys.argv[0],), options.outfile, options.sort)
else:
parser.print_usage()
return parser
if __name__ == "__main__":
main()
|
modoboa/limits/migrations/0006_auto_20170216_1112.py | HarshCasper/modoboa | 1,602 | 11151879 | # Generated by Django 1.10.5 on 2017-02-16 10:12
from django.db import migrations
def create_quota_limits(apps, schema_editor):
"""Create quota limits."""
User = apps.get_model("core", "User")
if not User.objects.exists():
return
ContentType = apps.get_model("contenttypes", "ContentType")
UserObjectLimit = apps.get_model("limits", "UserObjectLimit")
to_create = []
ct = ContentType.objects.get(app_label="admin", model="domain")
for u in User.objects.all():
to_create.append(
UserObjectLimit(user=u, name="quota", max_value=0, content_type=ct)
)
UserObjectLimit.objects.bulk_create(to_create)
class Migration(migrations.Migration):
dependencies = [
('limits', '0005_auto_20160415_1654'),
]
operations = [
migrations.RunPython(create_quota_limits)
]
|
archai/common/model_summary.py | shatadru99/archai | 344 | 11151885 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Iterable, Mapping, Sized, Sequence
import math
import torch
import torch.nn as nn
from collections import OrderedDict
import numpy as np
from numbers import Number
def summary(model, input_size):
result, params_info = summary_string(model, input_size)
print(result)
return params_info
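# Usage sketch (illustrative only; the toy network and input size are made up):
#   import torch.nn as nn
#   net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Flatten(), nn.Linear(8 * 30 * 30, 10))
#   total_params, trainable_params = summary(net, (1, 3, 32, 32))
# Note that `input_size` must include the batch dimension, since it is passed
# directly to torch.rand() inside summary_string().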
def is_scaler(o):
return isinstance(o, Number) or isinstance(o, str) or o is None
def get_tensor_stat(tensor):
assert isinstance(tensor, torch.Tensor)
# some pytorch low-level memory management constant
# the minimal allocate memory size (Byte)
PYTORCH_MIN_ALLOCATE = 2 ** 9
# the minimal cache memory size (Byte)
PYTORCH_MIN_CACHE = 2 ** 20
numel = tensor.numel()
element_size = tensor.element_size()
fact_numel = tensor.storage().size()
fact_memory_size = fact_numel * element_size
# since pytorch allocate at least 512 Bytes for any tensor, round
# up to a multiple of 512
memory_size = math.ceil(fact_memory_size / PYTORCH_MIN_ALLOCATE) \
* PYTORCH_MIN_ALLOCATE
# tensor.storage should be the actual object related to memory
# allocation
data_ptr = tensor.storage().data_ptr()
size = tuple(tensor.size())
# torch scalar has empty size
if not size:
size = (1,)
return ([size], numel, memory_size)
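# Worked example: a float32 tensor of shape (3,) has numel = 3 and
# element_size = 4, so fact_memory_size = 12 bytes; rounding up to the
# 512-byte allocation block gives memory_size = 512, and the function
# returns ([(3,)], 3, 512).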
def get_all_tensor_stats(o):
if is_scaler(o):
return ([[]], 0, 0)
elif isinstance(o, torch.Tensor):
return get_tensor_stat(o)
elif isinstance(o, Mapping):
return get_all_tensor_stats(o.values())
elif isinstance(o, Iterable): # tuple, list, maps
stats = [[]], 0, 0
for oi in o:
tz = get_all_tensor_stats(oi)
stats = tuple(x+y for x,y in zip(stats, tz))
return stats
elif hasattr(o, '__dict__'):
return get_all_tensor_stats(o.__dict__)
else:
return ([[]], 0, 0)
def get_shape(o):
if is_scaler(o):
return str(o)
elif hasattr(o, 'shape'):
return f'shape{o.shape}'
elif hasattr(o, 'size'):
return f'size{o.size()}'
elif isinstance(o, Sequence):
if len(o)==0:
return 'seq[]'
elif is_scaler(o[0]):
return f'seq[{len(o)}]'
return f'seq{[get_shape(oi) for oi in o]}'
elif isinstance(o, Mapping):
if len(o)==0:
return 'map[]'
elif is_scaler(next(iter(o))):
return f'map[{len(o)}]'
arr = [(get_shape(ki), get_shape(vi)) for ki, vi in o.items()]
return f'map{arr}'
else:
return 'N/A'
def summary_string(model, input_size, dtype=torch.float32):
summary_str = ''
# create properties
summary = OrderedDict()
hooks = []
def register_hook(module):
def hook(module, input, output):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
module_idx = len(summary)
m_key = "%s-%i" % (class_name, module_idx + 1)
summary[m_key] = OrderedDict()
summary[m_key]["input"] = get_all_tensor_stats(input)
summary[m_key]["output"] = get_all_tensor_stats(output)
params = 0
if hasattr(module, "weight") and hasattr(module.weight, "size"):
params += torch.prod(torch.LongTensor(list(module.weight.size()))).item()
summary[m_key]["trainable"] = module.weight.requires_grad
if hasattr(module, "bias") and hasattr(module.bias, "size"):
params += torch.prod(torch.LongTensor(list(module.bias.size()))).item()
summary[m_key]["nb_params"] = params
if (
not isinstance(module, nn.Sequential)
and not isinstance(module, nn.ModuleList)
):
hooks.append(module.register_forward_hook(hook))
# input_size must include the batch dimension (use a batch of at least 2 if the model contains batchnorm)
x = torch.rand(input_size, dtype=dtype,
device=next(model.parameters()).device)
# register hook
model.apply(register_hook)
# make a forward pass
# print(x.shape)
model(x)
# remove these hooks
for h in hooks:
h.remove()
summary_str += "----------------------------------------------------------------" + "\n"
line_new = "{:>20} {:>25} {:>15}".format(
"Layer (type)", "Output (elements, mem)", "Param #")
summary_str += line_new + "\n"
summary_str += "================================================================" + "\n"
total_params = 0
total_input = get_tensor_stat(x)
total_output = ([[], 0, 0])
trainable_params = 0
for layer in summary:
# input_shape, output_shape, trainable, nb_params
line_new = "{:>20} {:>25} {:>15}".format(
layer,
str(summary[layer]["output"][1:]),
"{0:,}".format(summary[layer]["nb_params"]),
)
total_params += summary[layer]["nb_params"]
total_output = tuple(x+y for x,y in zip(total_output, summary[layer]["output"]))
if "trainable" in summary[layer]:
if summary[layer]["trainable"] == True:
trainable_params += summary[layer]["nb_params"]
summary_str += line_new + "\n"
total_numel = total_params + total_output[1] + total_input[1]
summary_str += "================================================================" + "\n"
summary_str += "Total params: {0:,}".format(total_params) + "\n"
summary_str += "Trainable params: {0:,}".format(trainable_params) + "\n"
summary_str += "Non-trainable params: {0:,}".format(total_params -
trainable_params) + "\n"
summary_str += "----------------------------------------------------------------" + "\n"
summary_str += f"Input Elements: {total_input[1]:.4e}\n"
summary_str += f"Input Mem: {total_input[2]:.4e}\n"
summary_str += f"Layer Output Elements: {total_output[1]:.4e}\n"
summary_str += f"Layer Output Mem: {total_output[2]:.4e}\n"
summary_str += f"Params {total_params:.4e}\n"
summary_str += f"Total Elements {total_numel:.4e}\n"
summary_str += "----------------------------------------------------------------" + "\n"
# return summary
return summary_str, (total_params, trainable_params) |
tests/common/test_op/ascend/lstm_rnn_grad.py | tianjiashuo/akg | 286 | 11151905 | # Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""lstm_rnn_grad"""
import akg.topi
import akg.tvm
from akg.utils.format_transform import get_shape
from akg.ops.math.ascend import Tanh
from tests.common.test_op.ascend.dense import dense
from akg.ops.array.ascend import Concat
from akg.ops.array.ascend import Split
from tests.common.test_op.ascend.sigmoid import sigmoid
def lstmcell_grad_h(input, hx, cx, w_ih, w_hh, b_ih, b_hh, dh, dc, target="cce"):
"""
Computes dh w.r.t. dw, db, dcx, dhx, dx.
Args:
input: akg.tvm.Tensor of type float16, float32.
hx: akg.tvm.Tensor for hidden variable from previous cell.
cx: akg.tvm.Tensor for state variable from previous cell.
w_ih: akg.tvm.Tensor for input weights.
w_hh: akg.tvm.Tensor for hidden weights.
b_ih: akg.tvm.Tensor for input bias.
b_hh: akg.tvm.Tensor for hidden bias.
dh: akg.tvm.Tensor, gradient w.r.t. the cell's hidden-state output.
dc: akg.tvm.Tensor, gradient w.r.t. the cell's cell-state output.
Returns:
dw_ih: akg.tvm.Tensor for dh/dw_ih.
dw_hh: akg.tvm.Tensor for dh/dw_hh.
db_ih: akg.tvm.Tensor for dh/db_ih.
db_hh: akg.tvm.Tensor for dh/db_hh.
dcx: akg.tvm.Tensor for dh/dcx.
dhx: akg.tvm.Tensor for dh/dhx.
dx: akg.tvm.Tensor for dh/dx.
"""
# things from fwd
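# Forward-pass recap (the quantities the backward pass below differentiates);
# [h, x] denotes the concatenation of hx and input:
#   i = sigmoid(W_i [h, x] + b_i)    f = sigmoid(W_f [h, x] + b_f)
#   g = tanh(W_c [h, x] + b_c)       o = sigmoid(W_o [h, x] + b_o)
#   cy = f * cx + i * g              hy = o * tanh(cy)
# e.g. d(hy)/d(o) = tanh(cy) and d(hy)/d(cy) = o * (1 - tanh(cy)^2), which is
# what doutgate and dc compute below.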
batch, input_size = get_shape(input)
_, hidden_size = get_shape(hx)
xh = akg.topi.concatenate((hx, input), 1)
whl = [w_ih, w_hh]
W = Concat(whl, 1) # [4*hidden_size, input_size+hidden_size]
gates = dense(input, w_ih, b_ih, True) + dense(hx, w_hh, b_hh, True)
ingate_in, forgetgate_in, cellgate_in, outgate_in = Split(gates, 4, 1)
ingate = sigmoid(ingate_in)
forgetgate = sigmoid(forgetgate_in)
cellgate = Tanh(cellgate_in)
outgate = sigmoid(outgate_in)
cy = (forgetgate * cx) + (ingate * cellgate)
tanh_cy = Tanh(cy)
#hy = outgate * tanh_cy
# starts bwd
# head * dh/do shape [n,]
doutgate = dh * tanh_cy
doutgate_in = outgate * (1 - outgate) * doutgate
kk = akg.tvm.reduce_axis((0, batch))
dWo = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:
akg.tvm.sum(xh[kk, j] * doutgate_in(kk, i), axis=kk), name="dWo")
dtanh_cy = dh * outgate
dc = (1 - tanh_cy * tanh_cy) * dtanh_cy
dingate = cellgate * dc
dingate_in = ingate * (1 - ingate) * dingate
kk3 = akg.tvm.reduce_axis((0, batch))
dWi = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:
akg.tvm.sum(xh[kk3, j] * dingate_in(kk3, i), axis=kk3), name="dWi")
dforgetgate = dc * cx
dforgetgate_in = forgetgate * (1 - forgetgate) * dforgetgate
kk2 = akg.tvm.reduce_axis((0, batch))
dWf = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:
akg.tvm.sum(xh[kk2, j] * dforgetgate_in(kk2, i), axis=kk2), name="dWf")
dcellgate = ingate * dc
dcellgate_in = (1 - cellgate * cellgate) * dcellgate
kk4 = akg.tvm.reduce_axis((0, batch))
dWc = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:
akg.tvm.sum(xh[kk4, j] * dcellgate_in(kk4, i), axis=kk4), name="dWc")
dW = akg.topi.concatenate((dWi, dWf, dWc, dWo))
db = akg.topi.concatenate((dingate_in, dforgetgate_in, dcellgate_in, doutgate_in), 1)
kk5 = akg.tvm.reduce_axis((0, 4 * hidden_size))
dxh = akg.tvm.compute((batch, hidden_size + input_size), lambda i, j:
akg.tvm.sum(W[kk5, j] * db[i, kk5], axis=kk5), name="dxh")
dhx = akg.tvm.compute((batch, hidden_size), lambda i, j: dxh[i, j], name="dhx")
dx = akg.tvm.compute((batch, input_size), lambda i, j: dxh[i, j + hidden_size], name="dx")
dcx = forgetgate * dc
dw_ih = akg.tvm.compute(w_ih.shape, lambda i, j: dW[i, j])
#dw_hh = akg.tvm.compute(w_hh.shape, lambda i, j: dW[i, j + input_size])
bhr = akg.tvm.reduce_axis((0, batch))
db_ih = akg.tvm.compute((4 * hidden_size,), lambda i: akg.tvm.sum(db[i, bhr], axis=bhr), name="dbih")
bir = akg.tvm.reduce_axis((0, batch))
db_hh = akg.tvm.compute((4 * hidden_size,), lambda i: akg.tvm.sum(db[i, bir], axis=bir), name="dbhh")
return dw_ih, w_hh, db_ih, db_hh, dcx, dhx, dx
def lstmcell_grad_c(input, hx, cx, w_ih, w_hh, b_ih, b_hh, dc, target="cce"):
"""
Computes dc w.r.t. dw, db, dcx, dhx, dx.
Args:
input: akg.tvm.Tensor of type float16, float32.
hx: akg.tvm.Tensor for hidden variable from previous cell.
cx: akg.tvm.Tensor for state variable from previous cell.
w_ih: akg.tvm.Tensor for input weights.
w_hh: akg.tvm.Tensor for hidden weights.
b_ih: akg.tvm.Tensor for input bias.
b_hh: akg.tvm.Tensor for hidden bias.
dc: akg.tvm.Tensor, gradient w.r.t. the cell's cell-state output.
Returns:
dw_ih: akg.tvm.Tensor for dc/dw_ih.
dw_hh: akg.tvm.Tensor for dc/dw_hh.
db_ih: akg.tvm.Tensor for dc/db_ih.
db_hh: akg.tvm.Tensor for dc/db_hh.
dcx: akg.tvm.Tensor for dc/dcx.
dhx: akg.tvm.Tensor for dc/dhx.
dx: akg.tvm.Tensor for dc/dx.
"""
# things from fwd
whl = [w_ih, w_hh]
W = Concat(whl, 1) # [4*hidden_size, input_size+hidden_size]
b = b_ih + b_hh
batch, input_size = get_shape(input)
_, hidden_size = get_shape(hx)
xh = akg.topi.concatenate((hx, input), 1)
t = akg.topi.nn.dense(xh, W, b)
temp_i = akg.tvm.compute((batch, hidden_size), lambda i, j: t(i, j), name="temp_i")
i = sigmoid(temp_i)
temp_f = akg.tvm.compute((batch, hidden_size), lambda i, j: t(i, j + hidden_size), name="temp_f")
f = sigmoid(temp_f)
temp_c_ = akg.tvm.compute((batch, hidden_size), lambda i, j: t(i, j + 2 * hidden_size), name="temp_c")
c_ = Tanh(temp_c_)
# starts bwd
# head * dh/do shape [n,]
dtemp_o = akg.tvm.compute((batch, hidden_size), lambda *i: 0)
dWo = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j: 0, name="dWo")
df = dc * cx
dtemp_f = f * (1 - f) * df
kk2 = akg.tvm.reduce_axis((0, batch))
dWf = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:
akg.tvm.sum(xh[kk2, j] * dtemp_f(kk2, i), axis=kk2), name="dWf")
di = c_ * dc
dtemp_i = i * (1 - i) * di
kk3 = akg.tvm.reduce_axis((0, batch))
dWi = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:
akg.tvm.sum(xh[kk3, j] * dtemp_i(kk3, i), axis=kk3), name="dWi")
dc_ = i * dc
dtemp_c_ = (1 - c_ * c_) * dc_
kk4 = akg.tvm.reduce_axis((0, batch))
dWc = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:
akg.tvm.sum(xh[kk4, j] * dtemp_c_(kk4, i), axis=kk4), name="dWc")
dW = akg.topi.concatenate((dWi, dWf, dWc, dWo))
db = akg.topi.concatenate((dtemp_i, dtemp_f, dtemp_c_, dtemp_o), 1)
kk5 = akg.tvm.reduce_axis((0, 4 * hidden_size))
dxh = akg.tvm.compute((batch, hidden_size + input_size), lambda i, j:
akg.tvm.sum(W[kk5, j] * db[i, kk5], axis=kk5), name="dxh")
dhx = akg.tvm.compute((batch, hidden_size), lambda i, j: dxh[i, j], name="dhx")
dx = akg.tvm.compute((batch, input_size), lambda i, j: dxh[i, j + hidden_size], name="dx")
dcx = f * dc
dw_ih = akg.tvm.compute(w_ih.shape, lambda i, j: dW[i, j])
#dw_hh = akg.tvm.compute(w_hh.shape, lambda i, j: dW[i, j + input_size])
bhr = akg.tvm.reduce_axis((0, batch))
db_ih = akg.tvm.compute((4 * hidden_size,), lambda i: akg.tvm.sum(db[i, bhr], axis=bhr), name="dbih")
bir = akg.tvm.reduce_axis((0, batch))
db_hh = akg.tvm.compute((4 * hidden_size,), lambda i: akg.tvm.sum(db[i, bir], axis=bir), name="dbhh")
return dw_ih, w_hh, db_ih, db_hh, dcx, dhx, dx
def rnn_tanh_cell_grad(input, hidden, w_ih, w_hh, b_ih, b_hh, grad, target="cce"):
"""
Computes dgrad w.r.t. dinput (di), dhidden_input (dhid), dweights (dWih, dWhh), dbias (db).
Args:
input: akg.tvm.Tensor of type float16, float32 with shape [batch, input_size].
hidden: akg.tvm.Tensor for hidden variable from previous cell with shape [batch, hidden_size].
w_ih: akg.tvm.Tensor for input weights with shape [hidden_size, input_size].
w_hh: akg.tvm.Tensor for hidden weights with shape [hidden_size, hidden_size].
b_ih: akg.tvm.Tensor for input bias with shape [hidden_size].
b_hh: akg.tvm.Tensor for hidden bias with shape [hidden_size].
grad: akg.tvm.Tensor representing dy with shape [batch, hidden_size].
Returns:
di: akg.tvm.Tensor for dy/di.
dhid: akg.tvm.Tensor for dy/dhid.
dWih: akg.tvm.Tensor for dy/dWih (input weights).
dWhh: akg.tvm.Tensor for dy/dWhh (hidden weights).
db: akg.tvm.Tensor for dy/db.
"""
batch, input_size = get_shape(input)
_, hidden_size = get_shape(hidden)
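# Math implemented below: the forward pass is
#   h = tanh(input @ w_ih.T + b_ih + hidden @ w_hh.T + b_hh)
# and the backward pass uses
#   dh = (1 - h * h) * grad
#   dWih[i, j] = sum_k input[k, j] * dh[k, i]   (dWhh analogously with hidden)
#   di = dh @ w_ih, dhid = dh @ w_hh, db = sum of dh over the batch axis.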
igates = akg.topi.nn.dense(input, w_ih, b_ih)
hgates = akg.topi.nn.dense(hidden, w_hh, b_hh)
h = Tanh(igates + hgates)
dh = (1 - h * h) * grad
kk = akg.tvm.reduce_axis((0, batch))
dWih = akg.tvm.compute((hidden_size, input_size), lambda i, j:
akg.tvm.sum(input[kk, j] * dh(kk, i), axis=kk), name="dWih")
kk2 = akg.tvm.reduce_axis((0, batch))
dWhh = akg.tvm.compute((hidden_size, hidden_size), lambda i, j:
akg.tvm.sum(hidden[kk2, j] * dh(kk2, i), axis=kk2), name="dWhh")
kk3 = akg.tvm.reduce_axis((0, hidden_size))
di = akg.tvm.compute((batch, input_size), lambda i, j: akg.tvm.sum(w_ih[kk3, j] * dh[i, kk3], axis=kk3), name="di")
kk4 = akg.tvm.reduce_axis((0, hidden_size))
dhid = akg.tvm.compute((batch, hidden_size), lambda i, j: akg.tvm.sum(w_hh[kk4, j] * dh[i, kk4], axis=kk4), name="dhid")
db = akg.topi.sum(dh, 0)
return di, dhid, dWih, dWhh, db
# dbih/dbhh are the same and returning it twice causes CCEbuild to fail due to some SSA error
# return di, dhid, dWih, dWhh, db, db
def rnn_relu_cell_grad(input, hidden, w_ih, w_hh, b_ih, b_hh, grad, target="cce"):
"""
Computes dgrad w.r.t. dinput (di), dhidden_input (dhi), dweights (dWih, dWhh), dbias (db).
Args:
input: akg.tvm.Tensor of type float16, float32 with shape [batch, input_size].
hidden: akg.tvm.Tensor for hidden variable from previous cell with shape [batch, hidden_size].
w_ih: akg.tvm.Tensor for input weights with shape [hidden_size, input_size].
w_hh: akg.tvm.Tensor for hidden weights with shape [hidden_size, hidden_size].
b_ih: akg.tvm.Tensor for input bias with shape [hidden_size].
b_hh: akg.tvm.Tensor for hidden bias with shape [hidden_size].
grad: akg.tvm.Tensor representing dy with shape [batch, hidden_size].
Returns:
di: akg.tvm.Tensor for dy/di.
dhi: akg.tvm.Tensor for dy/dhi.
dWih: akg.tvm.Tensor for dy/dWih (input weights).
dWhh: akg.tvm.Tensor for dy/dWhh (hidden weights).
db: akg.tvm.Tensor for dy/db.
"""
batch, input_size = get_shape(input)
_, hidden_size = get_shape(hidden)
igates = akg.topi.nn.dense(input, w_ih, b_ih)
hgates = akg.topi.nn.dense(hidden, w_hh, b_hh)
h = akg.topi.nn.relu(igates + hgates)
dh = akg.tvm.compute((batch, hidden_size), lambda *i: grad(*i) * akg.tvm.expr.Select(h(*i) >= 0, 1, 0), name="dh")
kk = akg.tvm.reduce_axis((0, batch))
dWih = akg.tvm.compute((hidden_size, input_size), lambda i, j:
akg.tvm.sum(input[kk, j] * dh(kk, i), axis=kk), name="dWih")
kk2 = akg.tvm.reduce_axis((0, batch))
dWhh = akg.tvm.compute((hidden_size, hidden_size), lambda i, j:
akg.tvm.sum(hidden[kk2, j] * dh(kk2, i), axis=kk2), name="dWhh")
kk3 = akg.tvm.reduce_axis((0, hidden_size))
di = akg.tvm.compute((batch, input_size), lambda i, j:
akg.tvm.sum(w_ih[kk3, j] * dh[i, kk3], axis=kk3), name="di")
kk4 = akg.tvm.reduce_axis((0, hidden_size))
dhi = akg.tvm.compute((batch, hidden_size), lambda i, j:
akg.tvm.sum(w_hh[kk4, j] * dh[i, kk4], axis=kk4), name="dhi")
db = akg.topi.sum(dh, 0)
return di, dhi, dWih, dWhh, db
# dbih/dbhh are the same and returning it twice causes CCEbuild to fail due to some SSA error
# return di, dhi, dWih, dWhh, db, db
|
angr/engines/soot/values/paramref.py | Kyle-Kyle/angr | 6,132 | 11151912 | <filename>angr/engines/soot/values/paramref.py
from .base import SimSootValue
class SimSootValue_ParamRef(SimSootValue):
__slots__ = [ 'id', 'index', 'type' ]
def __init__(self, index, type_):
self.id = "param_%d" % index
self.index = index
self.type = type_
def __repr__(self):
return self.id
@classmethod
def from_sootvalue(cls, soot_value, state):
return cls(soot_value.index, soot_value.type)
|
src/test/python/test_exec.py | tschoonj/jep | 698 | 11151958 | import unittest
from jep_pipe import jep_pipe
from jep_pipe import build_java_process_cmd
class TestRunScript(unittest.TestCase):
def test_compiledScript(self):
jep_pipe(build_java_process_cmd('jep.test.TestExec'))
|
tests/testnode/addons.py | BabisK/microk8s | 6,286 | 11151965 | <reponame>BabisK/microk8s
#!/bin/env python3
from pathlib import Path
import datetime
import time
from jinja2 import Template
import requests
class Addon:
"""
Base class for testing Microk8s addons.
Validation requires a Kubernetes instance on the node
"""
name = None
def __init__(self, node):
self.node = node
def enable(self):
return self.node.microk8s.enable([self.name])
def disable(self):
return self.node.microk8s.disable([self.name])
def apply_template(self, template, context={}, yml=False):
# Create manifest
cwd = Path(__file__).parent
template = cwd / "templates" / template
with template.open() as f:
rendered = Template(f.read()).render(context)
render_path = f"/tmp/{template.stem}.yaml"
self.node.write(render_path, rendered)
return self.node.microk8s.kubectl.apply(["-f", render_path], yml=yml)
def delete_template(self, template, context={}, yml=False):
path = Path(template)
render_path = f"/tmp/{path.stem}.yaml"
return self.node.microk8s.kubectl.delete(["-f", render_path], yml=yml)
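# Sketch of how a new addon test would build on the Addon base class above; the
# addon name, namespace and label are placeholders, not a real MicroK8s addon.
class ExampleAddon(Addon):
    """Illustrative addon subclass used only as a usage example."""
    name = "example-addon"

    def validate(self):
        self.node.kubernetes.wait_containers_ready(
            "example-namespace", label="app=example", timeout=120
        )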
class Dns(Addon):
"""Microk8s dns addon"""
name = "dns"
def validate(self):
self.node.kubernetes.wait_containers_ready(
"kube-system", label="k8s-app=kube-dns", timeout=120
)
class Dashboard(Addon):
"""Dashboard addon"""
name = "dashboard"
def validate(self):
self.node.kubernetes.wait_containers_ready(
"kube-system",
label="k8s-app=kubernetes-dashboard",
timeout=90,
)
self.node.kubernetes.wait_containers_ready(
"kube-system", label="k8s-app=dashboard-metrics-scraper"
)
name = "https:kubernetes-dashboard:"
result = self.node.kubernetes.get_service_proxy(name=name, namespace="kube-system")
assert "Kubernetes Dashboard" in result
class Storage(Addon):
"""Storage addon"""
name = "storage"
def validate(self):
self.node.kubernetes.wait_containers_ready(
"kube-system", label="k8s-app=hostpath-provisioner"
)
claim = self.node.kubernetes.create_pvc(
"testpvc", "kube-system", storage_class="microk8s-hostpath", wait=True
)
assert claim.spec.storage_class_name == "microk8s-hostpath"
self.node.kubernetes.delete_pvc("testpvc", "kube-system")
class Ingress(Addon):
"""Ingress addon"""
name = "ingress"
def validate(self):
# TODO: Is this still needed?
# self.node.kubernetes.wait_containers_ready("default", label="app=default-http-backend")
# self.node.kubernetes.wait_containers_ready("default", label="name=nginx-ingress-microk8s")
self.node.kubernetes.wait_containers_ready("ingress", label="name=nginx-ingress-microk8s")
# Create manifest
context = {
"arch": "amd64",
"address": self.node.get_primary_address(),
}
self.apply_template("ingress.j2", context)
self.node.kubernetes.wait_containers_ready("default", label="app=microbot")
nip_addresses = self.node.kubernetes.wait_ingress_ready("microbot-ingress-nip", "default")
assert "127.0.0.1" in nip_addresses[0].ip
deadline = datetime.datetime.now() + datetime.timedelta(seconds=30)
while True:
resp = requests.get(f"http://microbot.{context['address']}.nip.io/")
if resp.status_code == 200 or datetime.datetime.now() > deadline:
break
time.sleep(1)
assert resp.status_code == 200
assert "microbot.png" in resp.content.decode("utf8")
self.delete_template("ingress.j2", context)
class Gpu(Addon):
"""Gpu addon"""
name = "gpu"
def validate(self):
self.node.kubernetes.wait_containers_ready(
"kube-system", label="name=nvidia-device-plugin-ds"
)
# Create manifest
context = {}
self.apply_template("cuda-add.j2", context)
# TODO: Finish validator on hardware with GPU
self.delete_template("cuda-add.j2", context)
class Registry(Addon):
"""Registry addon"""
name = "registry"
def validate(self):
self.node.kubernetes.wait_containers_ready(
"container-registry", label="app=registry", timeout=300
)
claim = self.node.kubernetes.wait_pvc_phase("registry-claim", "container-registry")
assert "20Gi" in claim.status.capacity["storage"]
self.node.docker.cmd(["pull", "busybox"])
self.node.docker.cmd(["tag", "busybox", "localhost:32000/my-busybox"])
self.node.docker.cmd(["push", "localhost:32000/my-busybox"])
context = {"image": "localhost:32000/my-busybox"}
self.apply_template("bbox.j2", context)
self.node.kubernetes.wait_containers_ready("default", field="metadata.name=busybox")
pods = self.node.kubernetes.get_pods("default", field="metadata.name=busybox")
assert pods[0].spec.containers[0].image == "localhost:32000/my-busybox"
self.delete_template("bbox.j2", context)
class MetricsServer(Addon):
name = "metrics-server"
def validate(self):
self.node.kubernetes.wait_containers_ready("kube-system", label="k8s-app=metrics-server")
metrics_uri = "/apis/metrics.k8s.io/v1beta1/pods"
reply = self.node.kubernetes.get_raw_api(metrics_uri)
assert reply["kind"] == "PodMetricsList"
class Fluentd(Addon):
name = "fluentd"
def validate(self):
self.node.kubernetes.wait_containers_ready(
"kube-system", field="metadata.name=elasticsearch-logging-0", timeout=300
)
self.node.kubernetes.wait_containers_ready(
"kube-system", label="k8s-app=fluentd-es", timeout=300
)
self.node.kubernetes.wait_containers_ready(
"kube-system", label="k8s-app=kibana-logging", timeout=300
)
class Jaeger(Addon):
name = "jaeger"
def validate(self):
self.node.kubernetes.wait_containers_ready("default", label="name=jaeger-operator")
self.node.kubernetes.wait_ingress_ready("simplest-query", "default", timeout=180)
class Metallb(Addon):
name = "metallb"
def enable(self, ip_ranges=None):
if not ip_ranges:
return self.node.microk8s.enable([self.name])
else:
return self.node.microk8s.enable([f"{self.name}:{ip_ranges}"])
def validate(self, ip_ranges=None):
context = {}
self.apply_template("load-balancer.j2", context)
ip = self.node.kubernetes.wait_load_balancer_ip("default", "example-service")
if ip_ranges:
assert ip in ip_ranges
self.delete_template("load-balancer.j2", context)
|
rocketqa/predict/dual_encoder.py | procedure2012/RocketQA | 210 | 11151968 | <filename>rocketqa/predict/dual_encoder.py
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finetuning on classification tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import json
import multiprocessing
import numpy as np
# NOTE(paddle-dev): All of these flags should be
# set before `import paddle`. Otherwise, it would
# not take any effect.
os.environ['FLAGS_eager_delete_tensor_gb'] = '0' # enable gc
import paddle.fluid as fluid
from rocketqa.reader import reader_de_predict
from rocketqa.model.ernie import ErnieConfig
from rocketqa.model.dual_encoder_model import create_model
from rocketqa.utils.args import print_arguments, check_cuda, prepare_logger
from rocketqa.utils.init import init_pretraining_params, init_checkpoint
from rocketqa.utils.finetune_args import parser
class DualEncoder(object):
def __init__(self, conf_path, use_cuda=False, device_id=0, batch_size=1, **kwargs):
if "model_path" in kwargs:
args = self._parse_args(conf_path, model_path=kwargs["model_path"])
else:
args = self._parse_args(conf_path)
if "model_name" in kwargs:
args.model_name = kwargs["model_name"].replace('/', '-')
else:
args.model_name = "my_de"
args.use_cuda = use_cuda
ernie_config = ErnieConfig(args.ernie_config_path)
#ernie_config.print_config()
self.batch_size = batch_size
if args.use_cuda:
dev_list = fluid.cuda_places()
place = dev_list[device_id]
dev_count = len(dev_list)
else:
place = fluid.CPUPlace()
dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
self.exe = fluid.Executor(place)
self.reader = reader_de_predict.DEPredictorReader(
vocab_path=args.vocab_path,
label_map_config=args.label_map_config,
q_max_seq_len=args.q_max_seq_len,
p_max_seq_len=args.p_max_seq_len,
do_lower_case=args.do_lower_case,
in_tokens=args.in_tokens,
random_seed=args.random_seed,
tokenizer=args.tokenizer,
for_cn=args.for_cn,
task_id=args.task_id)
startup_prog = fluid.Program()
self.test_prog = fluid.Program()
with fluid.program_guard(self.test_prog, startup_prog):
with fluid.unique_name.guard():
self.test_pyreader, self.graph_vars = create_model(
args,
pyreader_name=args.model_name + '_test_reader',
ernie_config=ernie_config,
is_prediction=True,
share_parameter=args.share_parameter)
self.test_prog = self.test_prog.clone(for_test=True)
self.exe = fluid.Executor(place)
self.exe.run(startup_prog)
if not args.init_checkpoint:
raise ValueError("args 'init_checkpoint' should be set if "
"only doing validation or testing!")
init_checkpoint(
self.exe,
args.init_checkpoint,
main_program=startup_prog)
def _parse_args(self, conf_path, model_path=''):
args, unknown = parser.parse_known_args()
with open(conf_path, 'r', encoding='utf8') as json_file:
config_dict = json.load(json_file)
args.do_train = False
args.do_val = False
args.do_test = True
args.use_fast_executor = True
args.q_max_seq_len = config_dict['q_max_seq_len']
args.p_max_seq_len = config_dict['p_max_seq_len']
args.ernie_config_path = model_path + config_dict['model_conf_path']
args.vocab_path = model_path + config_dict['model_vocab_path']
args.init_checkpoint = model_path + config_dict['model_checkpoint_path']
if 'share_parameter' in config_dict:
args.share_parameter = config_dict['share_parameter']
else:
args.share_parameter = 0
return args
def encode_query(self, query):
data = []
for q in query:
data.append(q + '\t-\t-')
self.test_pyreader.decorate_tensor_provider(
self.reader.data_generator(
data,
self.batch_size,
shuffle=False))
self.test_pyreader.start()
fetch_list = [self.graph_vars["q_rep"]]
embs = []
while True:
try:
q_rep = self.exe.run(program=self.test_prog,
fetch_list=fetch_list)
embs.append(q_rep[0])
except fluid.core.EOFException:
self.test_pyreader.reset()
break
return np.concatenate(embs)[:len(data)]
def encode_para(self, para, title=[]):
data = []
if len(title) != 0:
assert (len(para) == len(title)), "The input para(List) and title(List) should be the same length"
for t, p in zip(title, para):
data.append('-\t' + t + '\t' + p)
else:
for p in para:
data.append('-\t-\t' + p)
self.test_pyreader.decorate_tensor_provider(
self.reader.data_generator(
data,
self.batch_size,
shuffle=False))
self.test_pyreader.start()
fetch_list = [self.graph_vars["p_rep"]]
embs = []
while True:
try:
p_rep = self.exe.run(program=self.test_prog,
fetch_list=fetch_list)
embs.append(p_rep[0])
except fluid.core.EOFException:
self.test_pyreader.reset()
break
return np.concatenate(embs)[:len(data)]
def matching(self, query, para, title=[]):
data = []
assert (len(para) == len(query)), "The input query(List) and para(List) should be the same length"
if len(title) != 0:
assert (len(para) == len(title)), "The input para(List) and title(List) should be the same length"
for q, t, p in zip(query, title, para):
data.append(q + '\t' + t + '\t' + p)
else:
for q, p in zip(query, para):
data.append(q + '\t-\t' + p)
self.test_pyreader.decorate_tensor_provider(
self.reader.data_generator(
data,
self.batch_size,
shuffle=False))
self.test_pyreader.start()
fetch_list = [self.graph_vars["probs"]]
inner_probs = []
while True:
try:
probs = self.exe.run(program=self.test_prog,
fetch_list=fetch_list)
inner_probs.extend(probs[0].tolist())
except fluid.core.EOFException:
self.test_pyreader.reset()
break
return inner_probs
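# Usage sketch (illustrative; the config path and texts are placeholders, not
# shipped RocketQA assets):
#   de = DualEncoder(conf_path='path/to/de_config.json', use_cuda=False, batch_size=8)
#   q_embs = de.encode_query(['what is dense retrieval'])
#   p_embs = de.encode_para(['Dense retrieval encodes queries and passages ...'])
#   scores = de.matching(['what is dense retrieval'],
#                        ['Dense retrieval encodes queries and passages ...'])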
|
iotbx/regression/tst_symmetry.py | dperl-sol/cctbx_project | 155 | 11152029 | <filename>iotbx/regression/tst_symmetry.py
from __future__ import absolute_import, division, print_function
import iotbx.symmetry
from cctbx import sgtbx, uctbx
from libtbx.test_utils import Exception_expected
from libtbx.utils import Sorry
from six.moves import cStringIO as StringIO
def exercise():
m = iotbx.symmetry.manager(prefer_pdb_space_group=True)
(uc_mismatch, sg_mismatch) = m.add_reflections_file(
file_name="data.mtz",
space_group=sgtbx.space_group_info("P222"),
unit_cell=uctbx.unit_cell("50 60 70 90 90 90"))
assert (m.get_current_as_strings() == ('P 2 2 2', '50 60 70 90 90 90'))
(uc_mismatch, sg_mismatch) = m.add_pdb_file(
file_name="model.pdb",
space_group=sgtbx.space_group_info("P212121"),
unit_cell=uctbx.unit_cell("50 60 70 90 90 90"))
assert (not (uc_mismatch or sg_mismatch))
(uc_mismatch, sg_mismatch) = m.add_pdb_file(
file_name="reference_model.pdb",
space_group=sgtbx.space_group_info("P63"),
unit_cell=uctbx.unit_cell("40 40 75 90 90 120"))
assert ((uc_mismatch, sg_mismatch) == (True, True))
assert (m.get_current_as_strings() == ('P 21 21 21', '50 60 70 90 90 90'))
(uc_mismatch, sg_mismatch) = m.add_reflections_file(
file_name="data_neutron.mtz",
space_group=sgtbx.space_group_info("P222"),
unit_cell=uctbx.unit_cell("50.1 60 70.1 90 90 90"))
assert (not (uc_mismatch or sg_mismatch))
(uc_mismatch, sg_mismatch) = m.add_reflections_file(
file_name="data_rfree.hkl",
space_group=None,
unit_cell=None)
assert (not (uc_mismatch or sg_mismatch))
assert (m.get_current_as_strings() == ('P 21 21 21', '50 60 70 90 90 90'))
assert (m.check_cell_compatibility("phenix.refine"))
symm_choices = m.get_symmetry_choices()
assert (symm_choices.space_group_files == [('model.pdb', 'P 21 21 21'),
('reference_model.pdb', 'P 63'), ('data.mtz', 'P 2 2 2'),
('data_neutron.mtz', 'P 2 2 2')])
assert (symm_choices.unit_cell_files == [
('model.pdb', '(50, 60, 70, 90, 90, 90)'),
('reference_model.pdb', '(40, 40, 75, 90, 90, 120)'),
('data.mtz', '(50, 60, 70, 90, 90, 90)'),
('data_neutron.mtz', '(50.1, 60, 70.1, 90, 90, 90)')])
m.set_current_as_strings("P63", "50 60 70 90 90 90")
try :
m.check_cell_compatibility(
program_name="phenix.refine",
raise_error_if_incomplete=True)
except Sorry :
pass
else :
raise Exception_expected
out = StringIO()
m.show(out=out)
assert (out.getvalue() == """\
model.pdb: (50, 60, 70, 90, 90, 90) P 21 21 21
reference_model.pdb: (40, 40, 75, 90, 90, 120) P 63
data.mtz: (50, 60, 70, 90, 90, 90) P 2 2 2
data_neutron.mtz: (50.1, 60, 70.1, 90, 90, 90) P 2 2 2
data_rfree.hkl: None None
""")
if (__name__ == "__main__"):
exercise()
print("OK")
|
test/config_test.py | tamirzb/logcat-color | 373 | 11152041 | from common import *
from logcatcolor.column import *
from logcatcolor.config import *
from logcatcolor.layout import *
from logcatcolor.profile import *
from logcatcolor.reader import *
import unittest
this_dir = os.path.abspath(os.path.dirname(__file__))
configs_dir = os.path.join(this_dir, "configs")
def config_test(config_file, wrap=None, stay_connected=None):
def run_config_test(fn):
def wrapped(self):
path = os.path.join(configs_dir, config_file)
options = MockObject(config=path,
wrap=wrap,
stay_connected=stay_connected)
fn(self, LogcatColorConfig(options))
return wrapped
return run_config_test
class ConfigTest(unittest.TestCase):
def setUp(self):
self.tag_column = MockObject(NAME="tag", DEFAULT_WIDTH=20)
@config_test("")
def test_default_config(self, config):
self.assertEqual(config.get_default_layout(), config.DEFAULT_LAYOUT)
self.assertEqual(config.get_column_width(self.tag_column), 20)
self.assertEqual(config.get_wrap(), config.DEFAULT_WRAP)
self.assertEqual(config.get_adb(), config.DEFAULT_ADB)
self.assertEqual(config.get_stay_connected(), config.DEFAULT_STAY_CONNECTED)
@config_test("simple_config")
def test_simple_config(self, config):
self.assertEqual(config.get_default_layout(), "test")
self.assertEqual(config.get_column_width(self.tag_column), 1)
self.assertFalse(config.get_wrap())
self.assertEqual(config.get_adb(), "/path/to/adb")
self.assertEqual(config.get_stay_connected(), True)
@config_test("simple_config", wrap=True, stay_connected=True)
def test_simple_config_overrides(self, config):
self.assertTrue(config.get_wrap())
self.assertTrue(config.get_stay_connected())
|
research/src/get_family_names.py | vbarceloscs/serenata-de-amor | 3,001 | 11152043 | import os
import re
import datetime
import requests
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
DATE = datetime.date.today().strftime('%Y-%m-%d')
DATA_DIR = 'data'
PROCESSED_DATA_FILE = '{}-congressperson-relatives.xz'
PROCESSED_DATA_PATH = os.path.join(DATA_DIR, PROCESSED_DATA_FILE).format(DATE)
RAW_DATA_FILE = '{}-congressperson-relatives-raw.xz'
RAW_DATA_PATH = os.path.join(DATA_DIR, RAW_DATA_FILE).format(DATE)
write_csv_params = {
'compression': 'xz',
'encoding': 'utf-8',
'index': False}
def format_string(s):
return s.strip().replace(':', '')
def extract_contents_from_div(div):
return list(map(format_string,
filter(lambda x: '\n' not in x,
div[0].strings)))
def convert_to_dict(contents):
return dict(zip(contents[1:-2:2],
contents[2:-1:2]))
def is_single_word(s):
return (' ' not in s)
def fix_when_theres_a_single_surname_after_the_split(names):
pairs = [(i, name) for i, name
in enumerate(names) if is_single_word(name)]
while pairs:
i, name = pairs.pop(0)
if i == 0:
continue
names_to_join = names[i - 1:i + 1]
name = ' e '.join(names_to_join)
for n in names_to_join:
names.remove(n)
names.append(name)
pairs = [(i, name) for i, name
in enumerate(names) if is_single_word(name)]
return names
def split_names(s):
names = s.split(' e ')
names = fix_when_theres_a_single_surname_after_the_split(names)
return names
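# Worked example of the splitting logic (names are fictitious):
#   split_names('JOAO DA SILVA e MARIA PEREIRA') -> ['JOAO DA SILVA', 'MARIA PEREIRA']
#   split_names('MARIA DE SOUZA e SILVA') first splits into
#   ['MARIA DE SOUZA', 'SILVA']; since 'SILVA' is a lone surname it is joined
#   back, giving ['MARIA DE SOUZA e SILVA'] (a single parent name).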
def create_one_row_per_parent(df):
result = []
dict_df = df.to_dict(orient='records')[0]
for name in dict_df['parents_list']:
result.append({'id': dict_df['id'],
'relative_name': name,
'relationship': 'son_of'})
return pd.DataFrame(data=result)
def get_all_congress_people_ids():
print('Fetching all congresspeople ids', end='\r')
ids_series = [read_csv(name)['congressperson_id']
for name in ['current-year', 'last-year', 'previous-years']]
return list(pd.concat(ids_series).unique())
def find_newest_file(name):
"""
Assuming the files are named in the form yyyy-mm-dd-type_of_file.xz, find
the newest file for the given name based on the date prefix; if the file
for a date doesn't exist, fall back to earlier dates until all dates are
exhausted.
"""
date_regex = re.compile(r'\d{4}-\d{2}-\d{2}')
matches = (date_regex.findall(f) for f in os.listdir(DATA_DIR))
dates = sorted(set([l[0] for l in matches if l]), reverse=True)
for date in dates:
filename = os.path.join(DATA_DIR, '{}-{}.xz'.format(date, name))
if os.path.isfile(filename):
return filename
return None
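# Illustrative example: with data/2016-08-08-current-year.xz and
# data/2016-12-06-current-year.xz on disk, find_newest_file('current-year')
# returns the 2016-12-06 path; if no matching file exists it returns None.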
def read_csv(name):
filename = find_newest_file(name)
if filename is None:
raise TypeError('could not find the dataset for {}.'.format(name))
return pd.read_csv(filename,
parse_dates=[16],
dtype={'document_id': np.str,
'congressperson_id': np.str,
'congressperson_document': np.str,
'term_id': np.str,
'cnpj_cpf': np.str,
'reimbursement_number': np.str})
def write_formatted_data(df):
people_with_two_or_less_parents = (
df
[df.parents_list.apply(len) <= 2]
[['id', 'parents_list']])
final = (
people_with_two_or_less_parents
.groupby('id')
.apply(create_one_row_per_parent)
.reset_index(drop=True))
final.rename(inplace=True, columns={'id': 'congressperson_id'})
final.to_csv(PROCESSED_DATA_PATH, **write_csv_params)
def write_raw_data(df):
people_with_more_than_two_parents = (
df
[df.parents_list.apply(len) > 2]
[['id', 'Filiação']])
people_with_more_than_two_parents.rename(inplace=True, columns={
'id': 'congressperson_id',
'Filiação': 'parents'})
if len(people_with_more_than_two_parents):
people_with_more_than_two_parents.to_csv(
RAW_DATA_PATH,
**write_csv_params
)
def get_congresspeople_parents_names():
url = 'http://www2.camara.leg.br/deputados/pesquisa/layouts_deputados_' \
'biografia?pk={}'
ids = get_all_congress_people_ids()
dicts = []
total = len(ids)
for i, id in enumerate(ids):
id = str(id).replace('\n', '').strip()
try:
data = requests.get(url.format(id)).content.decode('utf8')
soup = BeautifulSoup(str(data), 'html.parser')
bio_details = soup.findAll('div', {'class': 'bioDetalhes'})
contents_bio_details = extract_contents_from_div(bio_details)
dict_bio_details = convert_to_dict(contents_bio_details)
dict_bio_details['id'] = id
dicts.append(dict_bio_details)
except IndexError:
print('Could not parse data')
msg = 'Processed {} out of {} ({:.2f}%)'
print(msg.format(i, total, i / total * 100), end='\r')
df = pd.DataFrame(data=dicts)
df = df[df['Filiação'].notnull()]
df = df[~(df['Filiação'].str.contains('Escolaridade'))]
df['parents_list'] = df['Filiação'].apply(split_names)
write_formatted_data(df)
write_raw_data(df)
if __name__ == '__main__':
get_congresspeople_parents_names()
|
homeassistant/components/unifi_direct/__init__.py | domwillcode/home-assistant | 30,023 | 11152072 | """The unifi_direct component."""
|
Vecihi/Backend/vecihi/users/migrations/0002_auto_20171201_2311.py | developertqw2017/migrationDjango | 220 | 11152078 | <filename>Vecihi/Backend/vecihi/users/migrations/0002_auto_20171201_2311.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-01 23:11
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='user',
managers=[
],
),
migrations.RemoveField(
model_name='user',
name=b'name',
),
migrations.AddField(
model_name='user',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='user',
name='image',
field=models.ImageField(blank=True, default='Images/None/No-img.jpg', null=True, upload_to='Images/'),
),
migrations.AddField(
model_name='user',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='user',
name=b'email',
field=models.EmailField(max_length=255, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='user',
name=b'username',
field=models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and ./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.+-]+$', 'Enter a valid username jebal.', 'invalid')], verbose_name='username'),
),
]
|
crits/signatures/signature.py | dutrow/crits | 738 | 11152086 | <reponame>dutrow/crits
import uuid
try:
from django_mongoengine import Document
except ImportError:
from mongoengine import Document
from mongoengine import EmbeddedDocument
from mongoengine import StringField, ListField
from mongoengine import UUIDField
from mongoengine import IntField, BooleanField
from django.conf import settings
from crits.core.crits_mongoengine import CritsBaseAttributes, CritsDocumentFormatter
from crits.core.crits_mongoengine import CritsSourceDocument, CritsDocument, CritsSchemaDocument
from crits.core.crits_mongoengine import CommonAccess, CritsActionsDocument
class SignatureDependency(CritsDocument, CritsSchemaDocument, Document):
"""
Signature dependency class.
"""
meta = {
"collection": settings.COL_SIGNATURE_DEPENDENCY,
"auto_create_index": False,
"crits_type": 'SignatureDependency',
"latest_schema_version": 1,
"schema_doc": {
'name': 'The name of this data dependency',
'active': 'Enabled in the UI (on/off)'
},
}
name = StringField()
active = StringField(default="on")
class SignatureType(CritsDocument, CritsSchemaDocument, Document):
"""
Signature type class.
"""
meta = {
"collection": settings.COL_SIGNATURE_TYPES,
"auto_create_index": False,
"crits_type": 'SignatureType',
"latest_schema_version": 1,
"schema_doc": {
'name': 'The name of this data type',
'active': 'Enabled in the UI (on/off)'
},
}
name = StringField()
active = StringField(default="on")
class Signature(CritsBaseAttributes, CritsSourceDocument, CritsActionsDocument,
Document):
"""
Signature class.
"""
meta = {
"collection": settings.COL_SIGNATURES,
"auto_create_index": False,
"crits_type": 'Signature',
"latest_schema_version": 1,
"schema_doc": {
},
"jtable_opts": {
'details_url': 'crits-signatures-views-signature_detail',
'details_url_key': 'id',
'default_sort': "modified DESC",
'searchurl': 'crits-signatures-views-signatures_listing',
'fields': [ "title", "data_type", "data_type_min_version",
"data_type_max_version",
"data_type_dependency", "version",
"modified", "source", "campaign",
"id", "status"],
'jtopts_fields': [ "details",
"title",
"data_type",
"data_type_min_version",
"data_type_max_version",
"data_type_dependency",
"version",
"modified",
"source",
"campaign",
"status",
"favorite",
"id"],
'hidden_fields': [],
'linked_fields': ["source", "campaign"],
'details_link': 'details',
'no_sort': ['details']
}
}
data_type = StringField()
data_type_min_version = StringField()
data_type_max_version = StringField()
data_type_dependency = ListField()
data = StringField()
link_id = UUIDField(binary=True, required=True, default=uuid.uuid4)
md5 = StringField()
title = StringField()
version = IntField()
class SignatureAccess(EmbeddedDocument, CritsDocumentFormatter, CommonAccess):
"""
ACL for Signatures.
"""
upload_new_version = BooleanField(default=False)
data_type_read = BooleanField(default=False)
data_type_edit = BooleanField(default=False)
data_type_min_version_read = BooleanField(default=False)
data_type_min_version_edit = BooleanField(default=False)
data_type_max_version_read = BooleanField(default=False)
data_type_max_version_edit = BooleanField(default=False)
data_read = BooleanField(default=False)
data_edit = BooleanField(default=False)
dependencies_read = BooleanField(default=False)
dependencies_edit = BooleanField(default=False)
|
爬虫小demo/04 fileHandler.py | lb2281075105/Python-Spider | 713 | 11152122 | # -*- coding:utf-8 -*-
import csv
# 1. Text files
file = open('file.txt', 'r+')
# Read the entire file
print file.read()
file.write("你好")
# Read all lines into a list (seek back to the start first, since
# read() above already consumed the whole file)
file.seek(0)
print file.readlines()
# Read the first line
file.seek(0)
print file.readline()
# 2. Read and write CSV files
writer = csv.writer(open('test.csv','wb'))
writer.writerow(['col1','col2','col3'])
data = [range(3) for i in range(3)]
for item in data:
writer.writerow(item)
filelist = csv.reader(open('./test.csv','r'))
for item in filelist:
print item
# 3. Read XML files
from xml.dom import minidom
# minidom.parse opens and parses the XML file
dom = minidom.parse("info.xml")
# Get the root element
root = dom.documentElement
print root.nodeName
print root.nodeValue
print root.nodeType
print root.ELEMENT_NODE
print "--" * 8
province = root.getElementsByTagName("province")
print province[0].tagName
print province[0].getAttribute("username")
print province[0].firstChild.data
|
lldb/test/API/lang/cpp/diamond/TestDiamond.py | acidburn0zzz/llvm-project | 2,338 | 11152139 | <gh_stars>1000+
"""
Tests that virtual base classes (diamond inheritance) work
"""
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class CPPTestDiamondInheritance(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test_with_run_command(self):
"""Test that virtual base classes work in when SBValue objects are used to explore the variable value"""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.set_breakpoint(line_number('main.cpp', '// breakpoint 1'))
self.set_breakpoint(line_number('main.cpp', '// breakpoint 2'))
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
self.assertIsNotNone(thread)
frame = thread.GetFrameAtIndex(0)
j1 = frame.FindVariable("j1")
j1_Derived1 = j1.GetChildAtIndex(0)
j1_Derived2 = j1.GetChildAtIndex(1)
j1_Derived1_VBase = j1_Derived1.GetChildAtIndex(0)
j1_Derived2_VBase = j1_Derived2.GetChildAtIndex(0)
j1_Derived1_VBase_m_value = j1_Derived1_VBase.GetChildAtIndex(0)
j1_Derived2_VBase_m_value = j1_Derived2_VBase.GetChildAtIndex(0)
self.assertEqual(
j1_Derived1_VBase.GetLoadAddress(), j1_Derived2_VBase.GetLoadAddress(),
"ensure virtual base class is the same between Derived1 and Derived2")
self.assertTrue(j1_Derived1_VBase_m_value.GetValueAsUnsigned(
1) == j1_Derived2_VBase_m_value.GetValueAsUnsigned(2), "ensure m_value in VBase is the same")
self.assertTrue(frame.FindVariable("d").GetChildAtIndex(0).GetChildAtIndex(
0).GetValueAsUnsigned(0) == 12345, "ensure Derived2 from j1 is correct")
thread.StepOver()
self.assertTrue(frame.FindVariable("d").GetChildAtIndex(0).GetChildAtIndex(
0).GetValueAsUnsigned(0) == 12346, "ensure Derived2 from j2 is correct")
def set_breakpoint(self, line):
# Some compilers (for example GCC 4.4.7 and 4.6.1) emit multiple locations for the statement with the ternary
# operator in the test program, while others emit only 1.
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", line, num_expected_locations=-1, loc_exact=False)
|
RecoMET/METFilters/python/hcalLaserBadEvents_2011.py | ckamtsikis/cmssw | 852 | 11152145 | # File last updated on 16:58:23 28 Nov 2011
# A total of 281 bad events
badEvents=[
160957,146483131,
160957,146483132,
160957,367078426,
163289,120704451,
163289,120704452,
163332,300924904,
163587,5705088,
163588,86700074,
163659,269761831,
163659,379050220,
165415,696548170,
165415,696548171,
165617,295894671,
165617,295894672,
165993,120876169,
165993,120876170,
166011,58123616,
166011,58123617,
166380,833988349,
166380,833988350,
166380,874735805,
166380,874735806,
166380,915050480,
166380,1037024294,
166512,1222721981,
166512,1222721982,
166563,299342294,
166563,299431306,
166563,299431307,
166563,299645965,
166699,908134746,
166699,908134747,
167281,115904577,
167282,286707493,
167282,286707494,
167282,286766119,
167282,286766120,
167284,44118160,
167284,44118161,
167551,365086623,
167551,365086624,
167674,59067344,
167674,59067345,
167675,227610655,
167675,227610656,
167754,73011356,
167754,73011357,
167807,1202030368,
167807,1202030369,
167898,568063754,
167898,568063755,
167898,718530727,
167969,3462839,
167969,3462840,
167969,9442755,
167969,9442756,
167969,11435992,
167969,11435993,
170255,83361834,
170255,83361835,
170304,57541359,
170304,57541360,
170854,291050200,
170854,291050201,
170854,329611102,
170854,329611103,
170899,39787119,
170899,39787120,
171091,9021397,
171091,9021398,
171091,97261559,
171091,97261560,
171156,369378017,
171156,369378018,
171897,353709470,
172033,412685841,
172033,412685842,
172033,885328695,
172033,982705197,
172033,982705198,
172163,530358965,
172389,45660388,
172389,45660389,
172411,173742880,
172411,173742881,
172478,53762243,
172478,53762244,
172478,54053290,
172478,54092625,
172478,54092626,
172478,54092948,
172478,98093904,
172478,98093905,
172485,424192588,
172485,424192589,
172791,966404647,
172802,464891113,
172802,464891114,
172802,464892883,
172802,464892884,
172819,81201593,
172822,1074244840,
172822,2836941609,
172868,393947631,
172868,393947632,
172868,1421063049,
172868,1421063050,
172868,1421076216,
172868,1421076217,
172868,2012432054,
172868,2012432055,
172868,2137890207,
172868,2137890208,
173198,741435194,
173198,741435195,
173198,1009198868,
173198,1009198869,
173226,781573,
173226,781574,
173241,746837625,
173241,746837626,
173380,21324672,
173380,21324673,
173659,128113809,
173659,128113810,
173662,10511470,
173662,10511471,
173692,755803939,
173692,2597438478,
173692,2597438479,
174809,777532,
174809,777533,
175560,2368923,
175560,2368924,
175560,7580776,
175834,105072341,
175834,105072342,
175866,343429213,
175866,343429214,
175875,182390835,
175875,182390836,
175888,49192628,
175888,49192629,
175888,128999776,
175888,128999777,
175973,122366434,
175973,122366435,
175976,80421989,
175976,80421990,
175990,6376426,
175990,6376427,
175990,75007084,
175990,75007085,
175990,146437701,
175990,146437702,
176161,15560079,
176161,15560080,
176202,119772380,
176202,119772381,
176202,324604001,
176202,324604002,
176309,233512658,
176309,233512659,
176309,935495115,
176309,935495116,
176309,1331935829,
176309,1331935830,
176309,2496631352,
176697,403510,
176697,403511,
176701,73573187,
176701,73573188,
176702,11693970,
176702,11693971,
176702,67569367,
176702,67569368,
176801,410530622,
176929,460082240,
176929,460082241,
176954,138469,
176954,138470,
177053,327815438,
177053,327815439,
177074,154911610,
177074,154911611,
177140,785923000,
177317,72936561,
177317,72936562,
177317,73219012,
177317,73219013,
177449,275466422,
177449,275466423,
177452,226991391,
177452,226991392,
177509,99081581,
177509,99081582,
177509,314204437,
177509,314204438,
177509,314319381,
177509,314319382,
177515,291757022,
177515,291757023,
177515,1103056195,
177515,1103056196,
177515,1534353246,
177515,1534353247,
177718,890704274,
177718,890704275,
177719,294071879,
177719,294071880,
177730,1850737398,
177730,1850737399,
177730,2007600403,
177730,2007600404,
177730,2563818242,
177730,2563818243,
177790,507968788,
177790,507968789,
177790,772640382,
177790,772640383,
177791,89470582,
177791,89470583,
177875,647616276,
177875,647616277,
178041,783372,
178041,783394,
178041,783395,
178041,784044,
178041,784045,
178041,784499,
178041,784500,
178041,784551,
178041,784552,
178041,786438,
178041,786439,
178041,786770,
178041,786771,
178041,787142,
178041,787143,
178041,787202,
178100,898633273,
178100,1566052885,
178100,1566052886,
178116,453801141,
178116,453801142,
178424,630485076,
178424,630485077,
178667,494184,
178667,494185,
178667,51504048,
178667,51504049,
178825,149780,
178825,149781,
178866,410333501,
178866,410333502,
178866,651783943,
178866,651783944,
178871,236100751,
178970,660540406,
178970,660540407,
178985,85355292,
178985,85355293,
179547,21999275,
179547,21999276,
179563,143108913,
179563,143108914,
179563,391201547,
179563,391201548,
180163,7578238,
180163,7578239,
180222,6076323,
180222,24642472,
180241,500046589,
180241,500562971,
180241,500562972,
180250,371542986,
180250,371542987,
180275,10578469,
180275,10578470,
180275,10583104,
180275,10583105,
]
|
library/oci_suppression_facts.py | slmjy/oci-ansible-modules | 106 | 11152171 | <gh_stars>100-1000
#!/usr/bin/python
# Copyright (c) 2018, 2019, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_suppression_facts
short_description: Fetches details of OCI Suppression.
description:
- Fetches details of all OCI Suppression in a compartment or a specific OCI Suppression
version_added: "2.5"
options:
compartment_id:
description: Identifier of the compartment from which details of all Suppressions must be fetched.
required: false
suppression_id:
description: Identifier of the Suppression whose details need to be fetched.
required: false
aliases: ['id']
email_address:
description: A filter to only return Suppressions that match the given Email Address.
required: false
time_created_greater_than_or_equal_to:
description: Search for suppressions that were created within a specific date range,
using this parameter to specify the earliest creation date for the returned
list (inclusive). Specifying this parameter without the corresponding
time_created_less_than parameter will retrieve suppressions created from the
given time_created_greater_than_or_equal_to to the current time, in
"YYYY-MM-ddThh:mmZ" format with a Z offset, as defined by RFC 3339.
required: false
time_created_less_than:
description: Search for suppressions that were created within a specific date range,
using this parameter to specify the latest creation date for the returned
list (exclusive). Specifying this parameter without the corresponding
time_created_greater_than_or_equal_to parameter will retrieve all suppressions
created before the specified end date, in "YYYY-MM-ddThh:mmZ" format with
a Z offset, as defined by RFC 3339.
required: false
author:
- "<NAME>(@debayan_gupta)"
extends_documentation_fragment: [ oracle ]
"""
EXAMPLES = """
# Fetch Suppression
- name: List all Suppressions in a compartment
oci_suppression_facts:
compartment_id: 'ocid1.compartment..xxxxxEXAMPLExxxxx'
# Fetch Suppression with specific Email Address
- name: List suppressions in a compartment, filtered by Email Address
oci_suppression_facts:
compartment_id: 'ocid1.compartment..xxxxxEXAMPLExxxxx'
email_address: '<EMAIL>'
# Fetch Suppression filtered by time_created_greater_than_or_equal_to
- name: List Suppressions in a compartment, filtered by time_created_greater_than_or_equal_to
oci_suppression_facts:
compartment_id: 'ocid1.compartment..xxxxxEXAMPLExxxxx'
time_created_greater_than_or_equal_to: '2018-10-31T09:27:14Z'
# Fetch Suppression filtered by time_created_less_than
- name: List Suppressions in a compartment, filtered by time_created_less_than
oci_suppression_facts:
compartment_id: 'ocid1.compartment..xxxxxEXAMPLExxxxx'
time_created_less_than: '2018-10-31T09:27:14Z'
# Fetch Suppression filtered by time_created_greater_than_or_equal_to and time_created_less_than
- name: List Suppressions in a compartment, filtered by time_created_greater_than_or_equal_to and time_created_less_than
oci_suppression_facts:
compartment_id: 'ocid1.compartment..xxxxxEXAMPLExxxxx'
time_created_greater_than_or_equal_to: '2018-10-31T09:25:14Z'
time_created_less_than: '2018-10-31T09:27:14Z'
# Fetch a specific Suppression
- name: List a specific Suppression
oci_suppression_facts:
suppression_id: 'ocid1.emailsuppression.oc1..xxxxxEXAMPLExxxxx..qndq'
"""
RETURN = """
suppressions:
description: Attributes of the Fetched Suppression.
returned: success
type: complex
contains:
email_address:
description: Email Address of the Suppression
returned: always
type: string
sample: <EMAIL>
reason:
description: The reason that the email address was suppressed.
returned: always
type: string
sample: MANUAL
time_created:
description: Date and time when the Suppression was created, in
the format defined by RFC3339
returned: always
type: datetime
sample: 2016-08-25T21:10:29.600Z
id:
description: The identifier of the Suppression
returned: always
type: string
sample: ocid1.emailsuppression.oc1.xzvf..xxxxxEXAMPLExxxxx
sample: [{
"email_address":"<EMAIL>",
"id":"ocid1.emailsuppression.oc1.iad.xxxxxEXAMPLExxxxx",
"reason":"MANUAL",
"time_created":"2018-10-31T09:20:52.245000+00:00"
},
{
"email_address":"<EMAIL>",
"id":"ocid1.emailsuppression.oc1.iad.xxxxxEXAMPLExxxxx",
"reason":"MANUAL",
"time_created":"2018-10-31T09:25:52.245000+00:00"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
from oci.email.email_client import EmailClient
from oci.exceptions import ServiceError
from oci.util import to_dict
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
logger = None
def list_suppressions(email_client, module):
result = dict(suppressions="")
compartment_id = module.params.get("compartment_id")
    suppression_id = module.params.get("suppression_id")
    existing_suppressions = []
try:
if compartment_id:
get_logger().debug(
"Listing all suppressions under compartment %s", compartment_id
)
optional_list_method_params = [
"email_address",
"time_created_greater_than_or_equal_to",
"time_created_less_than",
]
optional_kwargs = dict(
(param, module.params[param])
for param in optional_list_method_params
if module.params.get(param) is not None
)
existing_suppressions_summary = to_dict(
oci_utils.list_all_resources(
email_client.list_suppressions,
compartment_id=compartment_id,
**optional_kwargs
)
)
existing_suppressions = [
oci_utils.call_with_backoff(
email_client.get_suppression, suppression_id=suppression["id"]
).data
for suppression in existing_suppressions_summary
]
elif suppression_id:
get_logger().debug("Listing suppression %s", suppression_id)
response = oci_utils.call_with_backoff(
email_client.get_suppression, suppression_id=suppression_id
)
existing_suppressions = [response.data]
except ServiceError as ex:
get_logger().error("Unable to list suppressions due to %s", ex.message)
module.fail_json(msg=ex.message)
result["suppressions"] = to_dict(existing_suppressions)
return result
def set_logger(input_logger):
global logger
logger = input_logger
def get_logger():
return logger
def main():
logger = oci_utils.get_logger("oci_suppression_facts")
set_logger(logger)
module_args = oci_utils.get_common_arg_spec()
module_args.update(
dict(
compartment_id=dict(type="str", required=False),
suppression_id=dict(type="str", required=False, aliases=["id"]),
time_created_greater_than_or_equal_to=dict(type="str", required=False),
time_created_less_than=dict(type="str", required=False),
            email_address=dict(type="str", required=False),
)
)
module = AnsibleModule(
argument_spec=module_args,
mutually_exclusive=[["compartment_id", "suppression_id"]],
)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module")
email_client = oci_utils.create_service_client(module, EmailClient)
result = list_suppressions(email_client, module)
module.exit_json(**result)
if __name__ == "__main__":
main()
|
L1Trigger/L1TMuonBarrel/test/kalmanTools/validation.py | ckamtsikis/cmssw | 852 | 11152186 | <reponame>ckamtsikis/cmssw<filename>L1Trigger/L1TMuonBarrel/test/kalmanTools/validation.py
from __future__ import print_function
import ROOT,itertools,math #
from array import array #
from DataFormats.FWLite import Events, Handle
ROOT.FWLiteEnabler.enable()
#
tag='output'
##A class to keep BMTF data
###Common methods############
def fetchStubsOLD(event,ontime=False,isData=True):
phiSeg = Handle ('L1MuDTChambPhContainer')
if not isData:
event.getByLabel('simTwinMuxDigis',phiSeg)
else:
event.getByLabel('bmtfDigis',phiSeg)
if ontime:
filtered=filter(lambda x: x.bxNum()==0, phiSeg.product().getContainer())
return filtered
else:
return phiSeg.product().getContainer()
def fetchStubs(event,ontime=True):
phiSeg2 = Handle ('std::vector<L1MuKBMTCombinedStub>')
event.getByLabel('simKBmtfStubs',phiSeg2)
if ontime:
filtered=filter(lambda x: x.bxNum()==0, phiSeg2.product())
return filtered
else:
return phiSeg2.product()
def globalBMTFPhi(muon):
temp=muon.processor()*48+muon.hwPhi()
temp=temp*2*math.pi/576.0-math.pi*15.0/180.0;
if temp>math.pi:
temp=temp-2*math.pi;
K=1.0/muon.hwPt()
if muon.hwSign()>0:
K=-1.0/muon.hwPt()
return temp+5.740*K
def fetchKMTF(event,etaMax,collection):
kbmtfH = Handle ('BXVector<l1t::RegionalMuonCand>')
event.getByLabel(collection,kbmtfH)
kbmtf=kbmtfH.product()
kbmtfMuons={}
for bx in [-3,-2,-1,0,1,2,3]:
kbmtfMuons[bx]=[]
for bx in range(kbmtf.getFirstBX(),kbmtf.getLastBX()+1):
for j in range(0,kbmtf.size(bx)):
mu = kbmtf.at(bx,j)
kbmtfMuons[bx].append(mu)
# kbmtfMuons[bx]=sorted(kbmtfMuons[bx],key=lambda x: x.hwPt(),reverse=True)
return kbmtfMuons
def curvResidual(a,b):
return (a.charge()/a.pt()-b.charge()/b.pt())*b.pt()/b.charge()
def ptResidual(a,b):
return (a.pt()-b.pt())/b.pt()
def curvResidualSTA(a,b):
return (a.charge()/a.ptUnconstrained()-b.charge()/b.pt())*b.pt()/b.charge()
def deltaPhi( p1, p2):
'''Computes delta phi, handling periodic limit conditions.'''
res = p1 - p2
while res > math.pi:
res -= 2*math.pi
while res < -math.pi:
res += 2*math.pi
return res
def deltaR( *args ):
return math.sqrt( deltaR2(*args) )
def deltaR2( e1, p1, e2, p2):
de = e1 - e2
dp = deltaPhi(p1, p2)
return de*de + dp*dp
def log(event,counter,mystubs,kmtf,bmtf):
print("--------EVENT"+str(counter)+"------------")
print('RUN={run} LUMI={lumi} EVENT={event}'.format(run=event.eventAuxiliary().id().run(),lumi=event.eventAuxiliary().id().luminosityBlock(),event=event.eventAuxiliary().id().event()))
print("-----------------------------")
print("-----------------------------")
print('Stubs:')
for stub in mystubs:
print('wheel={w} sector={sc} station={st} high/low={ts} phi={phi} phiB={phiB} qual={qual} BX={BX}'.format(w=stub.whNum(),sc=stub.scNum(),st=stub.stNum(),ts=stub.Ts2Tag(),phi=stub.phi(),phiB=stub.phiB(),qual=stub.code(),BX=stub.bxNum()))
print('EMU:')
for g in bmtf :
print("EMU sector={sector} pt={pt} eta={eta} phi={phi} qual={qual} dxy={dxy} pt2={pt2} hasFineEta={HF}".format(sector=g.processor(), pt=g.hwPt(),eta=g.hwEta(),phi=g.hwPhi(),qual=g.hwQual(),dxy=g.hwDXY(),pt2=g.hwPtUnconstrained(),HF=g.hwHF()))
print('DATA:')
for g in kmtf :
print("DATA sector={sector} pt={pt} eta={eta} phi={phi} qual={qual} dxy={dxy} pt2={pt2} hasFineEta={HF}".format(sector=g.processor(),pt=g.hwPt(),eta=g.hwEta(),phi=g.hwPhi(),qual=g.hwQual(),dxy=g.hwDXY(),pt2=g.hwPtUnconstrained(),HF=g.hwHF()))
print("-----------------------------")
print("-----------------------------")
print("c + enter to continue")
import pdb;pdb.set_trace()
###############################
#########Histograms#############
histos={}
histos['fw']={}
histos['fw']['pt1']=ROOT.TH1D("fw_pt1","HW p_{T}",512,0,511)
histos['fw']['eta1']=ROOT.TH1D("fw_eta1","HW #eta",256,-127,128)
histos['fw']['phi1']=ROOT.TH1D("fw_phi1","HW #phi",256,-127,128)
histos['fw']['HF1']=ROOT.TH1D("fw_HF1","HW HF",256,-127,128)
histos['fw']['qual1']=ROOT.TH1D("fw_qual1","HW qual",16,0,16)
histos['fw']['dxy1']=ROOT.TH1D("fw_dxy1","HW DXY",4,0,4)
histos['fw']['ptSTA1']=ROOT.TH1D("fw_ptSTA1","HW STA PT",256,0,255)
histos['fw']['pt2']=ROOT.TH1D("fw_pt2","HW p_{T}",512,0,511)
histos['fw']['eta2']=ROOT.TH1D("fw_eta2","HW #eta",256,-127,128)
histos['fw']['phi2']=ROOT.TH1D("fw_phi2","HW #phi",256,-127,128)
histos['fw']['HF2']=ROOT.TH1D("fw_HF2","HW HF",256,-127,128)
histos['fw']['qual2']=ROOT.TH1D("fw_qual2","HW qual",16,0,16)
histos['fw']['dxy2']=ROOT.TH1D("fw_dxy2","HW DXY",4,0,4)
histos['fw']['ptSTA2']=ROOT.TH1D("fw_ptSTA2","HW STA PT",256,0,255)
histos['fw']['pt3']=ROOT.TH1D("fw_pt3","HW p_{T}",512,0,511)
histos['fw']['eta3']=ROOT.TH1D("fw_eta3","HW #eta",256,-127,128)
histos['fw']['phi3']=ROOT.TH1D("fw_phi3","HW #phi",256,-127,128)
histos['fw']['HF3']=ROOT.TH1D("fw_HF3","HW HF",256,-127,128)
histos['fw']['qual3']=ROOT.TH1D("fw_qual3","HW qual",16,0,16)
histos['fw']['dxy3']=ROOT.TH1D("fw_dxy3","HW DXY",4,0,4)
histos['fw']['ptSTA3']=ROOT.TH1D("fw_ptSTA3","HW STA PT",256,0,255)
histos['emu']={}
histos['emu']['pt1']=ROOT.TH1D("emu_pt1","HW p_{T}",512,0,511)
histos['emu']['eta1']=ROOT.TH1D("emu_eta1","HW #eta",256,-127,128)
histos['emu']['phi1']=ROOT.TH1D("emu_phi1","HW #phi",256,-127,128)
histos['emu']['HF1']=ROOT.TH1D("emu_HF1","HW HF",256,-127,128)
histos['emu']['qual1']=ROOT.TH1D("emu_qual1","HW qual",16,0,16)
histos['emu']['dxy1']=ROOT.TH1D("emu_dxy1","HW DXY",4,0,4)
histos['emu']['ptSTA1']=ROOT.TH1D("emu_ptSTA1","HW STA PT",256,0,255)
histos['emu']['pt2']=ROOT.TH1D("emu_pt2","HW p_{T}",512,0,511)
histos['emu']['eta2']=ROOT.TH1D("emu_eta2","HW #eta",256,-127,128)
histos['emu']['phi2']=ROOT.TH1D("emu_phi2","HW #phi",256,-127,128)
histos['emu']['HF2']=ROOT.TH1D("emu_HF2","HW HF",256,-127,128)
histos['emu']['qual2']=ROOT.TH1D("emu_qual2","HW qual",16,0,16)
histos['emu']['dxy2']=ROOT.TH1D("emu_dxy2","HW DXY",4,0,4)
histos['emu']['ptSTA2']=ROOT.TH1D("emu_ptSTA2","HW STA PT",256,0,255)
histos['emu']['pt3']=ROOT.TH1D("emu_pt3","HW p_{T}",512,0,511)
histos['emu']['eta3']=ROOT.TH1D("emu_eta3","HW #eta",256,-127,128)
histos['emu']['phi3']=ROOT.TH1D("emu_phi3","HW #phi",256,-127,128)
histos['emu']['HF3']=ROOT.TH1D("emu_HF3","HW HF",256,-127,128)
histos['emu']['qual3']=ROOT.TH1D("emu_qual3","HW qual",16,0,16)
histos['emu']['dxy3']=ROOT.TH1D("emu_dxy3","HW DXY",4,0,4)
histos['emu']['ptSTA3']=ROOT.TH1D("emu_ptSTA3","HW STA PT",256,0,255)
for key,histo in histos['fw'].iteritems():
histo.Sumw2()
def fill(info,mu):
if len(mu)>0:
info['pt1'].Fill(mu[0].hwPt())
info['eta1'].Fill(mu[0].hwEta())
info['phi1'].Fill(mu[0].hwPhi())
info['HF1'].Fill(mu[0].hwHF())
info['qual1'].Fill(mu[0].hwQual())
info['dxy1'].Fill(mu[0].hwDXY())
info['ptSTA1'].Fill(mu[0].hwPtUnconstrained())
else:
info['pt1'].Fill(0)
info['eta1'].Fill(0)
info['phi1'].Fill(0)
info['HF1'].Fill(0)
info['qual1'].Fill(0)
info['dxy1'].Fill(0)
info['ptSTA1'].Fill(0)
if len(mu)>1:
info['pt2'].Fill(mu[1].hwPt())
info['eta2'].Fill(mu[1].hwEta())
info['phi2'].Fill(mu[1].hwPhi())
info['HF2'].Fill(mu[1].hwHF())
info['qual2'].Fill(mu[1].hwQual())
info['dxy2'].Fill(mu[1].hwDXY())
info['ptSTA2'].Fill(mu[1].hwPtUnconstrained())
else:
info['pt2'].Fill(0)
info['eta2'].Fill(0)
info['phi2'].Fill(0)
info['HF2'].Fill(0)
info['qual2'].Fill(0)
info['dxy2'].Fill(0)
info['ptSTA2'].Fill(0)
if len(mu)>2:
info['pt3'].Fill(mu[2].hwPt())
info['eta3'].Fill(mu[2].hwEta())
info['phi3'].Fill(mu[2].hwPhi())
info['HF3'].Fill(mu[2].hwHF())
info['qual3'].Fill(mu[2].hwQual())
info['dxy3'].Fill(mu[2].hwDXY())
info['ptSTA3'].Fill(mu[2].hwPtUnconstrained())
else:
info['pt3'].Fill(0)
info['eta3'].Fill(0)
info['phi3'].Fill(0)
info['HF3'].Fill(0)
info['qual3'].Fill(0)
info['dxy3'].Fill(0)
info['ptSTA3'].Fill(0)
##############################
BUNCHES=[0]
events=Events([tag+'.root'])
counter=-1
for event in events:
counter=counter+1
#fetch stubs
stubs=fetchStubsOLD(event,True)
unpacker=fetchKMTF(event,100.0,'bmtfDigis:kBMTF')
emulator=fetchKMTF(event,100.0,'simKBmtfDigis:BMTF')
for processor in range(0,12):
for bx in BUNCHES:
emu=filter(lambda x: x.processor()==processor,emulator[bx])
data=filter(lambda x: x.processor()==processor,unpacker[bx])
if (len(emu)+len(data))>0:
fill(histos['emu'],emu)
fill(histos['fw'],data)
# if len(emu)!=0 and len(data)==0:
# log(event,counter,stubs,data,emu)
# import pdb;pdb.set_trace()
f=ROOT.TFile("validationResults.root","RECREATE")
for key,histo in histos['fw'].iteritems():
histo.SetMarkerStyle(7)
histo.Write()
for key,histo in histos['emu'].iteritems():
histo.SetLineColor(ROOT.kRed)
histo.Write()
#make fancy plots
histonames=['pt1','eta1','phi1','HF1','qual1','dxy1','ptSTA1']
for h in histonames:
c=ROOT.TCanvas(h)
c.cd()
histos['emu'][h].Draw("HIST")
histos['emu'][h].GetXaxis().SetTitle(histos['emu'][h].GetTitle())
histos['emu'][h].GetYaxis().SetTitle("events")
histos['fw'][h].Draw("SAME")
c.SetLogy()
l=ROOT.TLegend(0.6,0.6,0.9,0.8)
l.AddEntry(histos['emu'][h],"emulator","l")
l.AddEntry(histos['fw'][h],"data","p")
l.Draw()
c.Write("plot_"+h)
f.Close()
|
code/baseline_ptbdb_transfer_fullupdate.py | Dr-Anonymous/ECG_Heartbeat_Classification | 123 | 11152202 | <filename>code/baseline_ptbdb_transfer_fullupdate.py
import pandas as pd
import numpy as np
from keras import optimizers, losses, activations, models
from keras.callbacks import ModelCheckpoint, EarlyStopping, LearningRateScheduler, ReduceLROnPlateau
from keras.layers import Dense, Input, Dropout, Convolution1D, MaxPool1D, GlobalMaxPool1D, GlobalAveragePooling1D, \
concatenate
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
df_1 = pd.read_csv("../input/ptbdb_normal.csv", header=None)
df_2 = pd.read_csv("../input/ptbdb_abnormal.csv", header=None)
df = pd.concat([df_1, df_2])
df_train, df_test = train_test_split(df, test_size=0.2, random_state=1337, stratify=df[187])
Y = np.array(df_train[187].values).astype(np.int8)
X = np.array(df_train[list(range(187))].values)[..., np.newaxis]
Y_test = np.array(df_test[187].values).astype(np.int8)
X_test = np.array(df_test[list(range(187))].values)[..., np.newaxis]
def get_model():
nclass = 1
inp = Input(shape=(187, 1))
img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(inp)
img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding="valid")(img_1)
img_1 = MaxPool1D(pool_size=2)(img_1)
img_1 = Dropout(rate=0.1)(img_1)
img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
img_1 = MaxPool1D(pool_size=2)(img_1)
img_1 = Dropout(rate=0.1)(img_1)
img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
img_1 = MaxPool1D(pool_size=2)(img_1)
img_1 = Dropout(rate=0.1)(img_1)
img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding="valid")(img_1)
img_1 = GlobalMaxPool1D()(img_1)
img_1 = Dropout(rate=0.2)(img_1)
dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
dense_1 = Dense(nclass, activation=activations.sigmoid, name="dense_3_ptbdb")(dense_1)
model = models.Model(inputs=inp, outputs=dense_1)
opt = optimizers.Adam(0.001)
model.compile(optimizer=opt, loss=losses.binary_crossentropy, metrics=['acc'])
model.summary()
return model
model = get_model()
file_path = "baseline_cnn_ptbdb_transfer_fullupdate.h5"
checkpoint = ModelCheckpoint(file_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
early = EarlyStopping(monitor="val_acc", mode="max", patience=5, verbose=1)
redonplat = ReduceLROnPlateau(monitor="val_acc", mode="max", patience=3, verbose=2)
callbacks_list = [checkpoint, early, redonplat] # early
model.load_weights("baseline_cnn_mitbih.h5", by_name=True)
model.fit(X, Y, epochs=1000, verbose=2, callbacks=callbacks_list, validation_split=0.1)
model.load_weights(file_path)
pred_test = model.predict(X_test)
pred_test = (pred_test>0.5).astype(np.int8)
f1 = f1_score(Y_test, pred_test)
print("Test f1 score : %s "% f1)
acc = accuracy_score(Y_test, pred_test)
print("Test accuracy score : %s "% acc) |
setup.py | songhongxiang/symoro | 109 | 11152225 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
BIN_FOLDER = 'bin'
def readme():
with open('README.md') as f:
return f.read()
def apply_folder_join(item):
return os.path.join(BIN_FOLDER, item)
if os.name == 'nt':
bin_scripts = ['symoro-bin.py']
else:
bin_scripts = ['symoro-bin']
bin_scripts = map(apply_folder_join, bin_scripts)
setup(
name='symoro',
version='0.2',
    description='SYmbolic MOdelling of RObots software package',
url='http://github.com/symoro/symoro',
license='MIT',
scripts=bin_scripts,
packages=find_packages(exclude=['*.tests', '*.tests.*', 'tests.*', 'tests']),
install_requires=[
'sympy==0.7.3',
'numpy>=1.6.1',
'wxPython>=2.8.11',
'PyOpenGL>=3.0.1b2'
],
dependency_links=[
'https://github.com/sympy/sympy/archive/sympy-0.7.3.zip'
],
zip_safe=False
)
|
lib/django-0.96/django/middleware/gzip.py | MiCHiLU/google_appengine_sdk | 790 | 11152231 | <reponame>MiCHiLU/google_appengine_sdk<gh_stars>100-1000
import re
from django.utils.text import compress_string
from django.utils.cache import patch_vary_headers
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
"""
This middleware compresses content if the browser allows gzip compression.
It sets the Vary header accordingly, so that caches will base their storage
on the Accept-Encoding header.
"""
def process_response(self, request, response):
patch_vary_headers(response, ('Accept-Encoding',))
# Avoid gzipping if we've already got a content-encoding or if the
# content-type is Javascript (silly IE...)
is_js = "javascript" in response.headers.get('Content-Type', '').lower()
if response.has_header('Content-Encoding') or is_js:
return response
ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
if not re_accepts_gzip.search(ae):
return response
response.content = compress_string(response.content)
response['Content-Encoding'] = 'gzip'
response['Content-Length'] = str(len(response.content))
return response
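# --- Editor's usage note (illustrative, not part of the original module) ---
# In this Django generation the middleware is enabled by listing it in the project
# settings, for example:
#
#   MIDDLEWARE_CLASSES = (
#       'django.middleware.gzip.GZipMiddleware',
#       # ... the rest of the middleware stack ...
#   )
#
# process_response() then gzips any response whose request advertised "gzip" in
# Accept-Encoding, and the added Vary: Accept-Encoding header keeps caches from
# serving compressed bodies to clients that cannot decode them.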
|
tests/api/test_deploy_queue.py | sobolevn/paasta | 1,711 | 11152245 | <reponame>sobolevn/paasta
# Copyright 2015-2020 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from zake.fake_client import FakeClient
from paasta_tools.api import settings
from paasta_tools.api.views import deploy_queue
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.utils import SystemPaastaConfig
@mock.patch("paasta_tools.api.views.deploy_queue.KazooClient", autospec=True)
@mock.patch("paasta_tools.api.views.deploy_queue.ZKDelayDeadlineQueue", autospec=True)
def test_list_deploy_queue(mock_delay_deadline_queue_class, mock_kazoo_client):
mock_request = mock.Mock()
settings.system_paasta_config = mock.create_autospec(SystemPaastaConfig)
mock_kazoo_client.return_value = FakeClient()
available_service_instance = ServiceInstance(
service="fake_service1",
instance="fake_instance1",
watcher="worker0",
bounce_by=1577952000,
wait_until=1577952000,
enqueue_time=1577952000,
bounce_start_time=1577952000,
failures=1,
processed_count=2,
)
unavailable_service_instance = ServiceInstance(
service="fake_service2",
instance="fake_instance2",
watcher="worker1",
bounce_by=1577952100,
wait_until=1577952200,
enqueue_time=1577952100,
bounce_start_time=1577952100,
failures=2,
processed_count=3,
)
mock_delay_deadline_queue = mock_delay_deadline_queue_class.return_value
mock_delay_deadline_queue.get_available_service_instances.return_value = [
(mock.Mock(), available_service_instance)
]
mock_delay_deadline_queue.get_unavailable_service_instances.return_value = [
(mock.Mock(), mock.Mock(), unavailable_service_instance)
]
output = deploy_queue.list_deploy_queue(mock_request)
assert output == {
"available_service_instances": [
{
"service": "fake_service1",
"instance": "fake_instance1",
"watcher": "worker0",
"bounce_by": 1577952000,
"wait_until": 1577952000,
"enqueue_time": 1577952000,
"bounce_start_time": 1577952000,
"failures": 1,
"processed_count": 2,
}
],
"unavailable_service_instances": [
{
"service": "fake_service2",
"instance": "fake_instance2",
"watcher": "worker1",
"bounce_by": 1577952100,
"wait_until": 1577952200,
"enqueue_time": 1577952100,
"bounce_start_time": 1577952100,
"failures": 2,
"processed_count": 3,
}
],
}
|
vendor-local/lib/python/south/tests/brokenapp/models.py | glogiotatidis/affiliates | 285 | 11152253 | # -*- coding: UTF-8 -*-
from django.db import models
from django.contrib.auth.models import User as UserAlias
def default_func():
return "yays"
# An empty case.
class Other1(models.Model): pass
# Nastiness.
class HorribleModel(models.Model):
"A model to test the edge cases of model parsing"
ZERO, ONE = range(2)
# First, some nice fields
name = models.CharField(max_length=255)
short_name = models.CharField(max_length=50)
slug = models.SlugField(unique=True)
# A ForeignKey, to a model above, and then below
o1 = models.ForeignKey(Other1)
o2 = models.ForeignKey('Other2')
# Now to something outside
user = models.ForeignKey(UserAlias, related_name="horribles")
# Unicode!
code = models.CharField(max_length=25, default="↑↑↓↓←→←→BA")
# Odd defaults!
class_attr = models.IntegerField(default=ZERO)
func = models.CharField(max_length=25, default=default_func)
# Time to get nasty. Define a non-field choices, and use it
choices = [('hello', '1'), ('world', '2')]
choiced = models.CharField(max_length=20, choices=choices)
class Meta:
db_table = "my_fave"
verbose_name = "Dr. Strangelove," + \
"""or how I learned to stop worrying
and love the bomb"""
# Now spread over multiple lines
multiline = \
models.TextField(
)
# Special case.
class Other2(models.Model):
# Try loading a field without a newline after it (inspect hates this)
close_but_no_cigar = models.PositiveIntegerField(primary_key=True) |
tests/trac/test-trac-0207.py | eLBati/pyxb | 123 | 11152257 | # -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb
import pyxb.utils.utility
import pyxb.binding.datatypes as xsd
from pyxb.utils.six.moves import cPickle as pickle
import unittest
class TestTrac0207 (unittest.TestCase):
def testDuration (self):
dur = xsd.duration("P10675199DT2H48M5.4775807S")
self.assertEqual(dur.days, 10675199)
self.assertEqual(dur.seconds, 10085)
self.assertEqual(dur.microseconds, 477580)
serialized = pickle.dumps(dur)
xdur = pickle.loads(serialized)
self.assertEqual(dur, xdur)
def testDateTime (self):
now = xsd.dateTime.now()
serialized = pickle.dumps(now)
xnow = pickle.loads(serialized)
self.assertEqual(now, xnow)
if __name__ == '__main__':
unittest.main()
|
setup.py | Hamz-a/frida-ios-hook | 354 | 11152274 | <filename>setup.py
import os
from setuptools import setup, find_packages
def _package_files(directory: str, suffix: str) -> list:
"""
Get all of the file paths in the directory specified by suffix.
:param directory:
:return:
"""
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
if filename.endswith(suffix):
paths.append(os.path.join('..', path, filename))
return paths
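# Editor's note (illustrative only): a call such as _package_files("frida-ios-hook", ".js")
# returns entries like "../frida-ios-hook/frida-scripts/<script>.js", which is how the
# package_data mapping further below is assembled.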
with open ("README.md", "r") as fh:
long_description = fh.read()
path = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(path, 'requirements.txt'), 'r') as f:
requirements = f.readlines()
setup(
name='Frida-iOS-Hook',
version='3.4',
description='Trace Class/Function & Modify Return Value',
author='noobpk',
author_email='<EMAIL>',
long_description =long_description,
long_description_content_type="text/markdown",
url='https://github.com/noobpk/frida-ios-hook/',
packages=find_packages(),
# include other files
package_data={
'': _package_files(os.path.join(path, 'frida-ios-hook'), '.js') +
_package_files(os.path.join(path, 'frida-ios-hook/frida-scripts'), '.js') +
_package_files(os.path.join(path, 'frida-ios-hook/methods'), '.js')
},
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3 :: Only",
'Natural Language :: English',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
python_requires='>=3.0',
entry_points={
'console_scripts': [
'ioshook=core.hook.run:run',
],
},
)
|
alipay/aop/api/domain/AlipayEcoPrinterStatusNotifyModel.py | antopen/alipay-sdk-python-all | 213 | 11152289 | <filename>alipay/aop/api/domain/AlipayEcoPrinterStatusNotifyModel.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoPrinterStatusNotifyModel(object):
def __init__(self):
self._eprint_sign = None
self._machine_code = None
self._oauth_type = None
self._online = None
self._push_time = None
@property
def eprint_sign(self):
return self._eprint_sign
@eprint_sign.setter
def eprint_sign(self, value):
self._eprint_sign = value
@property
def machine_code(self):
return self._machine_code
@machine_code.setter
def machine_code(self, value):
self._machine_code = value
@property
def oauth_type(self):
return self._oauth_type
@oauth_type.setter
def oauth_type(self, value):
self._oauth_type = value
@property
def online(self):
return self._online
@online.setter
def online(self, value):
self._online = value
@property
def push_time(self):
return self._push_time
@push_time.setter
def push_time(self, value):
self._push_time = value
def to_alipay_dict(self):
params = dict()
if self.eprint_sign:
if hasattr(self.eprint_sign, 'to_alipay_dict'):
params['eprint_sign'] = self.eprint_sign.to_alipay_dict()
else:
params['eprint_sign'] = self.eprint_sign
if self.machine_code:
if hasattr(self.machine_code, 'to_alipay_dict'):
params['machine_code'] = self.machine_code.to_alipay_dict()
else:
params['machine_code'] = self.machine_code
if self.oauth_type:
if hasattr(self.oauth_type, 'to_alipay_dict'):
params['oauth_type'] = self.oauth_type.to_alipay_dict()
else:
params['oauth_type'] = self.oauth_type
if self.online:
if hasattr(self.online, 'to_alipay_dict'):
params['online'] = self.online.to_alipay_dict()
else:
params['online'] = self.online
if self.push_time:
if hasattr(self.push_time, 'to_alipay_dict'):
params['push_time'] = self.push_time.to_alipay_dict()
else:
params['push_time'] = self.push_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoPrinterStatusNotifyModel()
if 'eprint_sign' in d:
o.eprint_sign = d['eprint_sign']
if 'machine_code' in d:
o.machine_code = d['machine_code']
if 'oauth_type' in d:
o.oauth_type = d['oauth_type']
if 'online' in d:
o.online = d['online']
if 'push_time' in d:
o.push_time = d['push_time']
return o
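# --- Editor's usage sketch (values are illustrative, not part of the original module) ---
# The model round-trips between plain dicts and objects via from_alipay_dict /
# to_alipay_dict:
if __name__ == "__main__":
    payload = {
        "eprint_sign": "abc123",
        "machine_code": "PRINTER-001",
        "oauth_type": "aliyun",
        "online": 1,
        "push_time": "2020-01-01 12:00:00",
    }
    model = AlipayEcoPrinterStatusNotifyModel.from_alipay_dict(payload)
    # every truthy field is copied back out, so the round trip is lossless here
    assert model.to_alipay_dict() == payload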
|
netpyne/network/__init__.py | adamjhn/netpyne | 120 | 11152297 | """
Package for dealing with network models
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from .network import Network
from .pop import Pop
|
samcli/lib/utils/lock_distributor.py | praneetap/aws-sam-cli | 2,285 | 11152299 | """LockDistributor for creating and managing a set of locks"""
import threading
import multiprocessing
import multiprocessing.managers
from typing import Dict, Set, Optional, cast
from enum import Enum, auto
class LockChain:
"""Wrapper class for acquiring multiple locks in the same order to prevent dead locks
Can be used with `with` statement"""
def __init__(self, lock_mapping: Dict[str, threading.Lock]):
"""
Parameters
----------
lock_mapping : Dict[str, threading.Lock]
            Dictionary of locks whose keys are used to generate a reproducible order for acquiring and releasing the locks.
"""
self._locks = [value for _, value in sorted(lock_mapping.items())]
def acquire(self) -> None:
"""Aquire all locks in the LockChain"""
for lock in self._locks:
lock.acquire()
def release(self) -> None:
"""Release all locks in the LockChain"""
for lock in self._locks:
lock.release()
def __enter__(self) -> "LockChain":
self.acquire()
return self
def __exit__(self, exception_type, exception_value, traceback) -> None:
self.release()
class LockDistributorType(Enum):
"""Types of LockDistributor"""
THREAD = auto()
PROCESS = auto()
class LockDistributor:
"""Dynamic lock distributor that supports threads and processes.
    In the case of processes, either a manager (server process) or shared memory can be used.
"""
_lock_type: LockDistributorType
_manager: Optional[multiprocessing.managers.SyncManager]
_dict_lock: threading.Lock
_locks: Dict[str, threading.Lock]
def __init__(
self,
lock_type: LockDistributorType = LockDistributorType.THREAD,
manager: Optional[multiprocessing.managers.SyncManager] = None,
):
"""[summary]
Parameters
----------
lock_type : LockDistributorType, optional
Whether locking with threads or processes, by default LockDistributorType.THREAD
manager : Optional[multiprocessing.managers.SyncManager], optional
            Optional process sync manager for creating proxy locks, by default None
"""
self._lock_type = lock_type
self._manager = manager
self._dict_lock = self._create_new_lock()
self._locks = (
self._manager.dict()
if self._lock_type == LockDistributorType.PROCESS and self._manager is not None
else dict()
)
def _create_new_lock(self) -> threading.Lock:
"""Create a new lock based on lock type
Returns
-------
threading.Lock
Newly created lock
"""
if self._lock_type == LockDistributorType.THREAD:
return threading.Lock()
return self._manager.Lock() if self._manager is not None else cast(threading.Lock, multiprocessing.Lock())
def get_lock(self, key: str) -> threading.Lock:
"""Retrieve a lock associating with the key
If the lock does not exist, a new lock will be created.
Parameters
----------
        key : str
            Key for retrieving the lock
Returns
-------
threading.Lock
Lock associated with the key
"""
with self._dict_lock:
if key not in self._locks:
self._locks[key] = self._create_new_lock()
return self._locks[key]
def get_locks(self, keys: Set[str]) -> Dict[str, threading.Lock]:
"""Retrieve a list of locks associating with keys
Parameters
----------
keys : Set[str]
Set of keys for retrieving the locks
Returns
-------
Dict[str, threading.Lock]
Dictionary mapping keys to locks
"""
lock_mapping = dict()
for key in keys:
lock_mapping[key] = self.get_lock(key)
return lock_mapping
def get_lock_chain(self, keys: Set[str]) -> LockChain:
"""Similar to get_locks, but retrieves a LockChain object instead of a dictionary
Parameters
----------
keys : Set[str]
Set of keys for retrieving the locks
Returns
-------
LockChain
LockChain object containing all the locks associated with keys
"""
return LockChain(self.get_locks(keys))
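# --- Editor's usage sketch (not part of the original module; resource names are illustrative) ---
# LockChain acquires its locks in sorted-key order, so any two workers that request
# overlapping key sets always take the shared locks in the same order and cannot
# deadlock on each other.
if __name__ == "__main__":  # pragma: no cover
    distributor = LockDistributor(LockDistributorType.THREAD)
    def _worker(keys: Set[str]) -> None:
        # all locks for `keys` are acquired together and released together
        with distributor.get_lock_chain(keys):
            pass  # critical section touching every resource named in `keys`
    threads = [
        threading.Thread(target=_worker, args=({"function-A", "layer-X"},)),
        threading.Thread(target=_worker, args=({"function-B", "layer-X"},)),
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()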
|
src/py-opentimelineio/opentimelineio/console/otiocat.py | desruie/OpenTimelineIO | 1,021 | 11152353 | #!/usr/bin/env python
#
# Copyright Contributors to the OpenTimelineIO project
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
"""Print the contents of an OTIO file to stdout."""
import argparse
import sys
import opentimelineio as otio
def _parsed_args():
""" parse commandline arguments with argparse """
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'filepath',
type=str,
nargs='+',
help='files to print the contents of'
)
parser.add_argument(
'-a',
'--adapter-arg',
type=str,
default=[],
action='append',
help='Extra arguments to be passed to input adapter in the form of '
'key=value. Values are strings, numbers or Python literals: True, '
'False, etc. Can be used multiple times: -a burrito="bar" -a taco=12.'
)
parser.add_argument(
'-m',
'--media-linker',
type=str,
default="Default",
help=(
"Specify a media linker. 'Default' means use the "
"$OTIO_DEFAULT_MEDIA_LINKER if set, 'None' or '' means explicitly "
"disable the linker, and anything else is interpreted as the name"
" of the media linker to use."
)
)
parser.add_argument(
'-H',
'--hook-function-arg',
type=str,
default=[],
action='append',
help='Extra arguments to be passed to the hook functions in the form of '
'key=value. Values are strings, numbers or Python literals: True, '
'False, etc. Can be used multiple times: -H burrito="bar" -H taco=12.'
)
parser.add_argument(
'-M',
'--media-linker-arg',
type=str,
default=[],
action='append',
help='Extra arguments to be passed to the media linker in the form of '
'key=value. Values are strings, numbers or Python literals: True, '
'False, etc. Can be used multiple times: -M burrito="bar" -M taco=12.'
)
return parser.parse_args()
def _otio_compatible_file_to_json_string(
fpath,
media_linker_name,
hooks_args,
media_linker_argument_map,
adapter_argument_map
):
"""Read the file at fpath with the default otio adapter and return the json
as a string.
"""
adapter = otio.adapters.from_name("otio_json")
return adapter.write_to_string(
otio.adapters.read_from_file(
fpath,
hook_function_argument_map=hooks_args,
media_linker_name=media_linker_name,
media_linker_argument_map=media_linker_argument_map,
**adapter_argument_map
)
)
def main():
"""Parse arguments and call _otio_compatible_file_to_json_string."""
args = _parsed_args()
media_linker_name = otio.console.console_utils.media_linker_name(
args.media_linker
)
try:
read_adapter_arg_map = otio.console.console_utils.arg_list_to_map(
args.adapter_arg,
"adapter"
)
hooks_args = otio.console.console_utils.arg_list_to_map(
args.hook_function_arg,
"hook function"
)
media_linker_argument_map = otio.console.console_utils.arg_list_to_map(
args.media_linker_arg,
"media linker"
)
except ValueError as exc:
sys.stderr.write("\n" + str(exc) + "\n")
sys.exit(1)
for fpath in args.filepath:
print(
_otio_compatible_file_to_json_string(
fpath,
media_linker_name,
hooks_args,
media_linker_argument_map,
read_adapter_arg_map
)
)
if __name__ == '__main__':
main()
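# --- Editor's usage sketch (file names are illustrative, not part of the original module) ---
# Installed as the `otiocat` console script, this module is normally invoked from the
# shell; the flags map directly onto _parsed_args() above:
#
#   otiocat timeline.otio
#   otiocat timeline.otio -m None
#   otiocat timeline.otio -a burrito="bar" -a taco=12 -M linker_arg=True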
|
mac/pyobjc-framework-Quartz/PyObjCTest/test_cifiltergenerator.py | albertz/music-player | 132 | 11152389 |
from PyObjCTools.TestSupport import *
from Quartz.QuartzCore import *
try:
unicode
except NameError:
unicode = str
class TestCIFilterGenerator (TestCase):
def testConstants(self):
self.assertIsInstance(kCIFilterGeneratorExportedKey, unicode)
self.assertIsInstance(kCIFilterGeneratorExportedKeyTargetObject, unicode)
self.assertIsInstance(kCIFilterGeneratorExportedKeyName, unicode)
def testMethods(self):
self.assertResultIsBOOL(CIFilterGenerator.writeToURL_atomically_)
self.assertArgIsBOOL(CIFilterGenerator.writeToURL_atomically_, 1)
if __name__ == "__main__":
main()
|
accounting/libs/templatetags/url_tags.py | Abdur-rahmaanJ/django-accounting | 127 | 11152411 | <filename>accounting/libs/templatetags/url_tags.py
# encoding: utf-8
from django import template
from django.http import QueryDict
from classytags.core import Tag, Options
from classytags.arguments import MultiKeywordArgument, MultiValueArgument
register = template.Library()
class QueryParameters(Tag):
name = 'query'
options = Options(
MultiKeywordArgument('kwa'),
)
def render_tag(self, context, kwa):
q = QueryDict('').copy()
q.update(kwa)
return q.urlencode()
register.tag(QueryParameters)
class GetParameters(Tag):
"""
{% get_parameters [except_field, ] %}
"""
name = 'get_parameters'
options = Options(
MultiValueArgument('except_fields', required=False),
)
def render_tag(self, context, except_fields):
try:
# If there's an exception (500), default context_processors may not
# be called.
request = context['request']
except KeyError:
return context
getvars = request.GET.copy()
for field in except_fields:
if field in getvars:
del getvars[field]
return getvars.urlencode()
register.tag(GetParameters)
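# --- Editor's usage sketch (illustrative template only, not part of the original module) ---
# In a template these tags are typically used to rebuild the current query string,
# e.g. pagination links that must preserve the active filters:
#
#   {% load url_tags %}
#   <a href="?{% get_parameters 'page' %}&page={{ page_obj.next_page_number }}">next</a>
#
# while `query` simply urlencodes literal keyword arguments:
#
#   <a href="?{% query page=2 sort='name' %}">sorted by name</a>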
|
helper/linear_regression.py | dahjeong/gittest_ssh | 639 | 11152457 | <filename>helper/linear_regression.py
import tensorflow as tf
import numpy as np
import scipy.io as sio
import scipy.optimize as opt
import pandas as pd
import matplotlib.pyplot as plt
from helper import general as general
# support functions ------------------------------------------------------------
def load_data():
"""for ex5
d['X'] shape = (12, 1)
pandas has trouble taking this 2d ndarray to construct a dataframe, so I ravel
the results
"""
d = sio.loadmat('ex5data1.mat')
return map(np.ravel, [d['X'], d['y'], d['Xval'], d['yval'], d['Xtest'], d['ytest']])
def poly_features(x, power, as_ndarray=False):
data = {'f{}'.format(i): np.power(x, i) for i in range(1, power + 1)}
df = pd.DataFrame(data)
return df.as_matrix() if as_ndarray else df
def prepare_poly_data(*args, power):
"""
args: keep feeding in X, Xval, or Xtest
will return in the same order
"""
def prepare(x):
# expand feature
df = poly_features(x, power=power)
# normalization
ndarr = general.normalize_feature(df).as_matrix()
# add intercept term
return np.insert(ndarr, 0, np.ones(ndarr.shape[0]), axis=1)
return [prepare(x) for x in args]
def plot_learning_curve(X, y, Xval, yval, l=0):
training_cost, cv_cost = [], []
m = X.shape[0]
for i in range(1, m + 1):
# regularization applies here for fitting parameters
res = linear_regression_np(X[:i, :], y[:i], l=l)
# remember, when you compute the cost here, you are computing
# non-regularized cost. Regularization is used to fit parameters only
tc = cost(res.x, X[:i, :], y[:i])
cv = cost(res.x, Xval, yval)
training_cost.append(tc)
cv_cost.append(cv)
plt.plot(np.arange(1, m + 1), training_cost, label='training cost')
plt.plot(np.arange(1, m + 1), cv_cost, label='cv cost')
plt.legend(loc=1)
# linear regression functions --------------------------------------------------
def cost(theta, X, y):
"""
X: R(m*n), m records, n features
y: R(m)
theta : R(n), linear regression parameters
"""
m = X.shape[0]
inner = X @ theta - y # R(m*1)
# 1*m @ m*1 = 1*1 in matrix multiplication
    # but numpy does not transpose 1d arrays, so this is just a
    # vector inner product with itself
square_sum = inner.T @ inner
cost = square_sum / (2 * m)
return cost
def regularized_cost(theta, X, y, l=1):
m = X.shape[0]
regularized_term = (l / (2 * m)) * np.power(theta[1:], 2).sum()
return cost(theta, X, y) + regularized_term
def gradient(theta, X, y):
m = X.shape[0]
inner = X.T @ (X @ theta - y) # (m,n).T @ (m, 1) -> (n, 1)
return inner / m
def regularized_gradient(theta, X, y, l=1):
m = X.shape[0]
regularized_term = theta.copy() # same shape as theta
regularized_term[0] = 0 # don't regularize intercept theta
regularized_term = (l / m) * regularized_term
return gradient(theta, X, y) + regularized_term
def batch_gradient_decent(theta, X, y, epoch, alpha=0.01):
"""fit the linear regression, return the parameter and cost
    epoch: how many passes to run through the whole batch
"""
cost_data = [cost(theta, X, y)]
_theta = theta.copy() # don't want to mess up with original theta
for _ in range(epoch):
_theta = _theta - alpha * gradient(_theta, X, y)
cost_data.append(cost(_theta, X, y))
return _theta, cost_data
def linear_regression_np(X, y, l=1):
"""linear regression
args:
        X: feature matrix, (m, n+1) # with intercept x0=1
y: target vector, (m, )
l: lambda constant for regularization
return: trained parameters
"""
# init theta
theta = np.ones(X.shape[1])
# train it
res = opt.minimize(fun=regularized_cost,
x0=theta,
args=(X, y, l),
method='TNC',
jac=regularized_gradient,
options={'disp': True})
return res
def linear_regression(X_data, y_data, alpha, epoch, optimizer=tf.train.GradientDescentOptimizer):
"""tensorflow implementation"""
# placeholder for graph input
X = tf.placeholder(tf.float32, shape=X_data.shape)
y = tf.placeholder(tf.float32, shape=y_data.shape)
# construct the graph
with tf.variable_scope('linear-regression'):
W = tf.get_variable("weights",
(X_data.shape[1], 1),
initializer=tf.constant_initializer()) # n*1
y_pred = tf.matmul(X, W) # m*n @ n*1 -> m*1
loss = 1 / (2 * len(X_data)) * tf.matmul((y_pred - y), (y_pred - y), transpose_a=True) # (m*1).T @ m*1 = 1*1
opt = optimizer(learning_rate=alpha)
opt_operation = opt.minimize(loss)
# run the session
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
loss_data = []
for i in range(epoch):
_, loss_val, W_val = sess.run([opt_operation, loss, W], feed_dict={X: X_data, y: y_data})
loss_data.append(loss_val[0, 0]) # because every loss_val is 1*1 ndarray
if len(loss_data) > 1 and np.abs(loss_data[-1] - loss_data[-2]) < 10 ** -9: # early break when it's converged
# print('Converged at epoch {}'.format(i))
break
# clear the graph
tf.reset_default_graph()
return {'loss': loss_data, 'parameters': W_val} # just want to return in row vector format
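# --- Editor's usage sketch (not part of the original helper; data below is synthetic) ---
# cost() and gradient() implement J(theta) = (1/2m) * sum((X @ theta - y)^2) and its
# derivative; this just exercises batch_gradient_decent on the line y = 1 + 2x.
if __name__ == "__main__":
    x_raw = np.linspace(0, 1, 20)
    X_demo = np.column_stack([np.ones_like(x_raw), x_raw])  # prepend intercept column
    y_demo = 1.0 + 2.0 * x_raw
    theta_fit, cost_history = batch_gradient_decent(np.zeros(2), X_demo, y_demo,
                                                    epoch=5000, alpha=0.5)
    print("initial cost:", cost_history[0])
    print("final cost:", cost_history[-1])
    print("fitted theta (expect roughly [1, 2]):", theta_fit)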
|
release/stubs.min/Autodesk/Revit/DB/Lighting.py | htlcnn/ironpython-stubs | 182 | 11152463 | # encoding: utf-8
# module Autodesk.Revit.DB.Lighting calls itself Lighting
# from RevitAPI,Version=17.0.0.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class LossFactor(object,IDisposable):
""" This class is the base class for calculating lighting loss factor. """
def Clone(self):
"""
Clone(self: LossFactor) -> LossFactor
Creates a copy of the LossFactor derived object.
"""
pass
def Dispose(self):
""" Dispose(self: LossFactor) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LossFactor,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: LossFactor) -> bool
"""
LossFactorValue=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The calculated loss factor value
Get: LossFactorValue(self: LossFactor) -> float
"""
class AdvancedLossFactor(LossFactor,IDisposable):
"""
This class encapsulates advanced lighting loss factor calculation.
AdvancedLossFactor(other: AdvancedLossFactor)
AdvancedLossFactor(ballastLossFactorIn: float,lampLumenDepreciationIn: float,lampTiltLossFactorIn: float,luminaireDirtDepreciationIn: float,surfaceDepreciationLossFactorIn: float,temperatureLossFactorIn: float,voltageLossFactorIn: float)
AdvancedLossFactor()
"""
def Dispose(self):
""" Dispose(self: LossFactor,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LossFactor,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: AdvancedLossFactor)
__new__(cls: type,ballastLossFactorIn: float,lampLumenDepreciationIn: float,lampTiltLossFactorIn: float,luminaireDirtDepreciationIn: float,surfaceDepreciationLossFactorIn: float,temperatureLossFactorIn: float,voltageLossFactorIn: float)
__new__(cls: type)
"""
pass
BallastLossFactor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The ballast loss factor.
Get: BallastLossFactor(self: AdvancedLossFactor) -> float
Set: BallastLossFactor(self: AdvancedLossFactor)=value
"""
LampLumenDepreciation=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The lamp lumen depreciation loss factor.
Get: LampLumenDepreciation(self: AdvancedLossFactor) -> float
Set: LampLumenDepreciation(self: AdvancedLossFactor)=value
"""
LampTiltLossFactor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The lamp tilt loss factor.
Get: LampTiltLossFactor(self: AdvancedLossFactor) -> float
Set: LampTiltLossFactor(self: AdvancedLossFactor)=value
"""
LuminaireDirtDepreciation=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The luminaire dirt depreciation loss factor.
Get: LuminaireDirtDepreciation(self: AdvancedLossFactor) -> float
Set: LuminaireDirtDepreciation(self: AdvancedLossFactor)=value
"""
SurfaceDepreciationLossFactor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The surface depreciation loss factor.
Get: SurfaceDepreciationLossFactor(self: AdvancedLossFactor) -> float
Set: SurfaceDepreciationLossFactor(self: AdvancedLossFactor)=value
"""
TemperatureLossFactor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The temperature loss factor.
Get: TemperatureLossFactor(self: AdvancedLossFactor) -> float
Set: TemperatureLossFactor(self: AdvancedLossFactor)=value
"""
VoltageLossFactor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The voltage loss factor.
Get: VoltageLossFactor(self: AdvancedLossFactor) -> float
Set: VoltageLossFactor(self: AdvancedLossFactor)=value
"""
class BasicLossFactor(LossFactor,IDisposable):
"""
This class encapsulates basic lighting loss factor calculation.
BasicLossFactor(other: BasicLossFactor)
BasicLossFactor(lossFactorIn: float)
BasicLossFactor()
"""
def Dispose(self):
""" Dispose(self: LossFactor,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LossFactor,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: BasicLossFactor)
__new__(cls: type,lossFactorIn: float)
__new__(cls: type)
"""
pass
LossFactor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The loss factor.
Get: LossFactor(self: BasicLossFactor) -> float
Set: LossFactor(self: BasicLossFactor)=value
"""
class LightShape(object,IDisposable):
""" This class is the base class for specifying light shape. """
def Clone(self):
"""
Clone(self: LightShape) -> LightShape
Creates a copy of the LightShape derived object.
"""
pass
def Dispose(self):
""" Dispose(self: LightShape) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightShape,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: LightShape) -> bool
"""
class CircleLightShape(LightShape,IDisposable):
"""
This class encapsulates a circle light shape.
CircleLightShape(other: CircleLightShape)
CircleLightShape(emitDiameter: float)
CircleLightShape()
"""
def Dispose(self):
""" Dispose(self: LightShape,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightShape,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: CircleLightShape)
__new__(cls: type,emitDiameter: float)
__new__(cls: type)
"""
pass
EmitDiameter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The emit diameter.
Get: EmitDiameter(self: CircleLightShape) -> float
Set: EmitDiameter(self: CircleLightShape)=value
"""
class ColorPreset(Enum,IComparable,IFormattable,IConvertible):
"""
Preset values of initial colors for specific lighting types
enum ColorPreset,values: D50 (1),D65 (0),FluorescentCool (7),FluorescentDayLight (9),FluorescentLightWhite (10),FluorescentWarm (6),FluorescentWhite (8),Halogen (2),HighPressureSodium (12),Incandescent (3),LowPressureSodium (13),Mercury (14),MetalHalide (11),PhosphorMercury (15),Quartz (5),Xenon (4)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
D50=None
D65=None
FluorescentCool=None
FluorescentDayLight=None
FluorescentLightWhite=None
FluorescentWarm=None
FluorescentWhite=None
Halogen=None
HighPressureSodium=None
Incandescent=None
LowPressureSodium=None
Mercury=None
MetalHalide=None
PhosphorMercury=None
Quartz=None
value__=None
Xenon=None
class InitialColor(object,IDisposable):
""" This class is the base class for calculating initial light color. """
def Clone(self):
"""
Clone(self: InitialColor) -> InitialColor
Creates a copy of the InitialColor derived object.
"""
pass
def Dispose(self):
""" Dispose(self: InitialColor) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: InitialColor,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: InitialColor) -> bool
"""
TemperatureValue=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The light color temperature value in Kelvins.
Get: TemperatureValue(self: InitialColor) -> float
"""
class CustomInitialColor(InitialColor,IDisposable):
"""
This class encapsulates a custom initial lighting color.
CustomInitialColor(other: CustomInitialColor)
CustomInitialColor(temperature: float)
"""
def Dispose(self):
""" Dispose(self: InitialColor,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: InitialColor,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: CustomInitialColor)
__new__(cls: type,temperature: float)
"""
pass
Temperature=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The custom color temperature value.
Get: Temperature(self: CustomInitialColor) -> float
Set: Temperature(self: CustomInitialColor)=value
"""
class LightDistribution(object,IDisposable):
""" This class is the base class for specifying light distribution. """
def Clone(self):
"""
Clone(self: LightDistribution) -> LightDistribution
Creates a copy of the LightDistribution derived object.
"""
pass
def Dispose(self):
""" Dispose(self: LightDistribution) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightDistribution,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: LightDistribution) -> bool
"""
class HemisphericalLightDistribution(LightDistribution,IDisposable):
"""
This class encapsulates a hemispherical light distribution.
HemisphericalLightDistribution(other: HemisphericalLightDistribution)
HemisphericalLightDistribution()
"""
def Dispose(self):
""" Dispose(self: LightDistribution,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightDistribution,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,other=None):
"""
__new__(cls: type,other: HemisphericalLightDistribution)
__new__(cls: type)
"""
pass
class InitialIntensity(object,IDisposable):
""" This class is the base class for calculating lighting initial intensity. """
def Clone(self):
"""
Clone(self: InitialIntensity) -> InitialIntensity
Creates a copy of the InitialIntensity derived object.
"""
pass
def Dispose(self):
""" Dispose(self: InitialIntensity) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: InitialIntensity,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
InitialIntensityValue=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The calculated initial intensity value.
Get: InitialIntensityValue(self: InitialIntensity) -> float
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: InitialIntensity) -> bool
"""
class InitialFluxIntensity(InitialIntensity,IDisposable):
"""
This class encapsulates initial flux intensity calculation.
InitialFluxIntensity(other: InitialFluxIntensity)
InitialFluxIntensity(flux: float)
"""
def Dispose(self):
""" Dispose(self: InitialIntensity,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: InitialIntensity,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: InitialFluxIntensity)
__new__(cls: type,flux: float)
"""
pass
Flux=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The flux intensity value.
Get: Flux(self: InitialFluxIntensity) -> float
Set: Flux(self: InitialFluxIntensity)=value
"""
class InitialIlluminanceIntensity(InitialIntensity,IDisposable):
"""
This class encapsulates initial illuminance intensity calculation.
InitialIlluminanceIntensity(other: InitialIlluminanceIntensity)
InitialIlluminanceIntensity(distance: float,illuminance: float)
"""
def Dispose(self):
""" Dispose(self: InitialIntensity,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: InitialIntensity,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: InitialIlluminanceIntensity)
__new__(cls: type,distance: float,illuminance: float)
"""
pass
Distance=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The illuminance intensity distance value.
Get: Distance(self: InitialIlluminanceIntensity) -> float
Set: Distance(self: InitialIlluminanceIntensity)=value
"""
Illuminance=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The illuminance intensity value.
Get: Illuminance(self: InitialIlluminanceIntensity) -> float
Set: Illuminance(self: InitialIlluminanceIntensity)=value
"""
class InitialLuminousIntensity(InitialIntensity,IDisposable):
"""
This class encapsulates initial luminous intensity calculation.
InitialLuminousIntensity(other: InitialLuminousIntensity)
InitialLuminousIntensity(luminosity: float)
"""
def Dispose(self):
""" Dispose(self: InitialIntensity,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: InitialIntensity,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: InitialLuminousIntensity)
__new__(cls: type,luminosity: float)
"""
pass
Luminosity=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The luminosity value.
Get: Luminosity(self: InitialLuminousIntensity) -> float
Set: Luminosity(self: InitialLuminousIntensity)=value
"""
class InitialWattageIntensity(InitialIntensity,IDisposable):
"""
This class encapsulates initial wattage intensity calculation.
InitialWattageIntensity(other: InitialWattageIntensity)
InitialWattageIntensity(efficacy: float,wattage: float)
"""
def Dispose(self):
""" Dispose(self: InitialIntensity,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: InitialIntensity,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: InitialWattageIntensity)
__new__(cls: type,efficacy: float,wattage: float)
"""
pass
Efficacy=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The efficacy value.
Get: Efficacy(self: InitialWattageIntensity) -> float
Set: Efficacy(self: InitialWattageIntensity)=value
"""
Wattage=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The wattage value.
Get: Wattage(self: InitialWattageIntensity) -> float
Set: Wattage(self: InitialWattageIntensity)=value
"""
class LightDimmingColor(Enum,IComparable,IFormattable,IConvertible):
"""
Tags for specific light dimming colors
enum LightDimmingColor,values: Incandescent (1),None (0)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Incandescent=None
None=None
value__=None
class LightDistributionStyle(Enum,IComparable,IFormattable,IConvertible):
"""
Tags for specific light distribution styles
enum LightDistributionStyle,values: Hemispherical (1),PhotometricWeb (3),Spherical (0),Spot (2)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Hemispherical=None
PhotometricWeb=None
Spherical=None
Spot=None
value__=None
class LightFamily(object,IDisposable):
""" This class encapsulates light family information. """
def Dispose(self):
""" Dispose(self: LightFamily) """
pass
def GetLightDistributionStyle(self):
"""
GetLightDistributionStyle(self: LightFamily) -> LightDistributionStyle
Returns a LightDistributionStyle value for the light distribution
"""
pass
@staticmethod
def GetLightFamily(document):
"""
GetLightFamily(document: Document) -> LightFamily
Creates a light family object from the given family document
document: The family document
Returns: The newly created LightFamily object
"""
pass
def GetLightShapeStyle(self):
"""
GetLightShapeStyle(self: LightFamily) -> LightShapeStyle
Returns a LightShapeStyle value for the light shape
"""
pass
def GetLightSourceTransform(self):
"""
GetLightSourceTransform(self: LightFamily) -> Transform
Returns a Transform value for the transform of light source.
Returns: The light source transform.
"""
pass
def GetLightType(self,index):
"""
GetLightType(self: LightFamily,index: int) -> LightType
Return a LightType object for the light type at the given index
index: The index of the light type
Returns: A LightType object for the light type at the given index
"""
pass
def GetLightTypeName(self,index):
"""
GetLightTypeName(self: LightFamily,index: int) -> str
Return the name for the light type at the given index
index: The index of the light type
Returns: The name of the light type at the given index
"""
pass
def GetNumberOfLightTypes(self):
"""
GetNumberOfLightTypes(self: LightFamily) -> int
Return the number of light types contained in this light family
Returns: The number of light types contained in this light family
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightFamily,disposing: bool) """
pass
def SetLightDistributionStyle(self,lightDistributionStyle):
"""
SetLightDistributionStyle(self: LightFamily,lightDistributionStyle: LightDistributionStyle)
Set the light distribution style to the given shape distribution
lightDistributionStyle: The light distribution style to set the light distribution type to
"""
pass
def SetLightShapeStyle(self,lightShapeStyle):
"""
SetLightShapeStyle(self: LightFamily,lightShapeStyle: LightShapeStyle)
Set the light shape style to the given shape style
lightShapeStyle: The light shape style value to set the light shape style to
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: LightFamily) -> bool
"""
class LightGroup(object,IDisposable):
""" This class represents a set of lights grouped together for easier management of various lighting scenarios """
def AddLight(self,lightId):
"""
AddLight(self: LightGroup,lightId: ElementId)
Add a new light instance to the group
lightId: The ID of the light instance to add to the group
"""
pass
def Dispose(self):
""" Dispose(self: LightGroup) """
pass
def GetLights(self):
"""
GetLights(self: LightGroup) -> ICollection[ElementId]
Get the set of contained light instances
The set of light instances
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightGroup,disposing: bool) """
pass
def RemoveLight(self,lightId):
"""
RemoveLight(self: LightGroup,lightId: ElementId)
Remove the given light instance from the set of light instances in this group
lightId: The light instance to remove
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Id=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The ElementId of the LightGroup
Get: Id(self: LightGroup) -> ElementId
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: LightGroup) -> bool
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The name of the LightGroup
Get: Name(self: LightGroup) -> str
Set: Name(self: LightGroup)=value
"""
class LightGroupManager(object,IDisposable):
""" This class represents a set of light groups that are used for easier management of various lighting scenarios """
def CreateGroup(self,name):
"""
CreateGroup(self: LightGroupManager,name: str) -> LightGroup
Create a new LightGroup object with the given name
name: The name to use for the new LightGroup object
Returns: The new LightGroup object that was created
"""
pass
def DeleteGroup(self,groupId):
"""
DeleteGroup(self: LightGroupManager,groupId: ElementId)
Remove the given LightGroup object from the set of LightGroup objects
groupId: The Id of the LightGroup object to remove
"""
pass
def Dispose(self):
""" Dispose(self: LightGroupManager) """
pass
def GetGroups(self):
"""
GetGroups(self: LightGroupManager) -> IList[LightGroup]
Get the set of contained LightGroup objects
The set of LightGroup objects
"""
pass
def GetLightDimmer(self,viewId,lightId):
"""
GetLightDimmer(self: LightGroupManager,viewId: ElementId,lightId: ElementId) -> float
Gets the dimmer value for the given light for rendering the given view
viewId: The Id of the view
lightId: The Id of the light to turn on or off
"""
pass
@staticmethod
def GetLightGroupManager(document):
"""
GetLightGroupManager(document: Document) -> LightGroupManager
Creates a light group manager object from the given document
document: The document the manager is from
Returns: The newly created Light group manager object
"""
pass
def IsLightGroupOn(self,viewId,groupId):
"""
IsLightGroupOn(self: LightGroupManager,viewId: ElementId,groupId: ElementId) -> bool
Returns true if the given light group is on
viewId: The Id of the view
groupId: The Id of the light group
"""
pass
def IsLightOn(self,viewId,lightId):
"""
IsLightOn(self: LightGroupManager,viewId: ElementId,lightId: ElementId) -> bool
Returns true if the given light is on for rendering the given view
viewId: The Id of the view
lightId: The Id of the light
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightGroupManager,disposing: bool) """
pass
def SetLightDimmer(self,viewId,lightId,dimmingValue):
"""
SetLightDimmer(self: LightGroupManager,viewId: ElementId,lightId: ElementId,dimmingValue: float)
Sets the dimmer value for the given light for rendering the given view
viewId: The Id of the view
lightId: The Id of the light to turn on or off
  dimmingValue: The dimmer value to set in the range of [0.0,1.0]
"""
pass
def SetLightGroupOn(self,viewId,groupId,turnOn):
"""
SetLightGroupOn(self: LightGroupManager,viewId: ElementId,groupId: ElementId,turnOn: bool)
Turns the given light group on or off for rendering the given view depending on
the bool argument
viewId: The Id of the view
groupId: The Id of the light group
turnOn: Turns the light group on if true,off if false
"""
pass
def SetLightOn(self,viewId,lightId,turnOn):
"""
SetLightOn(self: LightGroupManager,viewId: ElementId,lightId: ElementId,turnOn: bool)
Turns the given light on or off for rendering the given view depending on the
bool argument
viewId: The Id of the view
lightId: The Id of the light to turn on or off
turnOn: Turns the light on if true,off if false
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: LightGroupManager) -> bool
"""
class LightShapeStyle(Enum,IComparable,IFormattable,IConvertible):
"""
Tags for specific light shape styles
enum LightShapeStyle,values: Circle (3),Line (1),Point (0),Rectangle (2)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Circle=None
Line=None
Point=None
Rectangle=None
value__=None
class LightType(object,IDisposable):
""" This class encapsulates light information. """
def Dispose(self):
""" Dispose(self: LightType) """
pass
def GetInitialColor(self):
"""
GetInitialColor(self: LightType) -> InitialColor
Return a copy of an object derived from InitialColor
"""
pass
def GetInitialIntensity(self):
"""
GetInitialIntensity(self: LightType) -> InitialIntensity
Return a copy of an object derived from InitialIntensity
"""
pass
def GetLightDistribution(self):
"""
GetLightDistribution(self: LightType) -> LightDistribution
Return a copy of an object derived from LightDistribution
"""
pass
def GetLightShape(self):
"""
GetLightShape(self: LightType) -> LightShape
Return a copy of an object derived from LightShape
"""
pass
@staticmethod
def GetLightType(document,typeId):
"""
GetLightType(document: Document,typeId: ElementId) -> LightType
Creates a light type object from the given document and family type ID
document: The document the typeId is from
typeId: The ID of the light family type
Returns: The newly created LightType object
"""
pass
@staticmethod
def GetLightTypeFromInstance(document,instanceId):
"""
GetLightTypeFromInstance(document: Document,instanceId: ElementId) -> LightType
Creates a light type object from the given document and element ID
document: The document the instanceId is from
instanceId: The ID of the light fixture instance
Returns: The newly created LightType object
"""
pass
def GetLossFactor(self):
"""
GetLossFactor(self: LightType) -> LossFactor
Return a copy of an object derived from LossFactor
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightType,disposing: bool) """
pass
def SetInitialColor(self,initialColor):
"""
SetInitialColor(self: LightType,initialColor: InitialColor)
Replace the current initial color object with the given object
initialColor: An object derived from an InitialColor object
The object pointed to is
cloned internally
"""
pass
def SetInitialIntensity(self,initialIntensity):
"""
SetInitialIntensity(self: LightType,initialIntensity: InitialIntensity)
Replace the current initial intensity object with the given object
initialIntensity: An object derived from an InitialIntensity object
"""
pass
def SetLightDistribution(self,lightDistribution):
"""
SetLightDistribution(self: LightType,lightDistribution: LightDistribution)
Replace the current LightDistribution object with the given object
lightDistribution: An instance of an object derived from LightDistribution
"""
pass
def SetLightShape(self,lightShape):
"""
SetLightShape(self: LightType,lightShape: LightShape)
Replace the current LightShape object with the given object
lightShape: An instance of an object derived from LightShape
"""
pass
def SetLossFactor(self,lossFactor):
"""
SetLossFactor(self: LightType,lossFactor: LossFactor)
Replace the current loss factor object with the given object
lossFactor: An object derived from a LossFactor object
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
ColorFilter=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The light filter color.
Get: ColorFilter(self: LightType) -> Color
Set: ColorFilter(self: LightType)=value
"""
DimmingColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The dimming temperature value in Kelvins.
Get: DimmingColor(self: LightType) -> LightDimmingColor
Set: DimmingColor(self: LightType)=value
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: LightType) -> bool
"""
class LineLightShape(LightShape,IDisposable):
"""
This class encapsulates a line light shape.
LineLightShape(other: LineLightShape)
LineLightShape(emitLength: float)
LineLightShape()
"""
def Dispose(self):
""" Dispose(self: LightShape,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightShape,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: LineLightShape)
__new__(cls: type,emitLength: float)
__new__(cls: type)
"""
pass
EmitLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The emit length.
Get: EmitLength(self: LineLightShape) -> float
Set: EmitLength(self: LineLightShape)=value
"""
class PhotometricWebLightDistribution(LightDistribution,IDisposable):
"""
This class encapsulates a photometric web light distribution.
PhotometricWebLightDistribution(other: PhotometricWebLightDistribution)
PhotometricWebLightDistribution(photometricWebFile: str,tiltAngle: float)
"""
def Dispose(self):
""" Dispose(self: LightDistribution,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightDistribution,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: PhotometricWebLightDistribution)
__new__(cls: type,photometricWebFile: str,tiltAngle: float)
"""
pass
PhotometricWebFile=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The filename of an IES photometric web file.
Get: PhotometricWebFile(self: PhotometricWebLightDistribution) -> str
Set: PhotometricWebFile(self: PhotometricWebLightDistribution)=value
"""
TiltAngle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The tilt angle.
Get: TiltAngle(self: PhotometricWebLightDistribution) -> float
Set: TiltAngle(self: PhotometricWebLightDistribution)=value
"""
class PointLightShape(LightShape,IDisposable):
"""
This class encapsulates a point light shape.
PointLightShape(other: PointLightShape)
PointLightShape()
"""
def Dispose(self):
""" Dispose(self: LightShape,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightShape,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,other=None):
"""
__new__(cls: type,other: PointLightShape)
__new__(cls: type)
"""
pass
class PresetInitialColor(InitialColor,IDisposable):
"""
This class encapsulates a preset initial lighting color.
PresetInitialColor(other: PresetInitialColor)
PresetInitialColor(presetIn: ColorPreset)
"""
def Dispose(self):
""" Dispose(self: InitialColor,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: InitialColor,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: PresetInitialColor)
__new__(cls: type,presetIn: ColorPreset)
"""
pass
Preset=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The preset value
Get: Preset(self: PresetInitialColor) -> ColorPreset
Set: Preset(self: PresetInitialColor)=value
"""
class RectangleLightShape(LightShape,IDisposable):
"""
This class encapsulates a rectangle light shape.
RectangleLightShape(other: RectangleLightShape)
RectangleLightShape(emitLength: float,emitWidth: float)
RectangleLightShape()
"""
def Dispose(self):
""" Dispose(self: LightShape,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightShape,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: RectangleLightShape)
__new__(cls: type,emitLength: float,emitWidth: float)
__new__(cls: type)
"""
pass
EmitLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The emit length.
Get: EmitLength(self: RectangleLightShape) -> float
Set: EmitLength(self: RectangleLightShape)=value
"""
EmitWidth=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The emit width.
Get: EmitWidth(self: RectangleLightShape) -> float
Set: EmitWidth(self: RectangleLightShape)=value
"""
class SphericalLightDistribution(LightDistribution,IDisposable):
"""
This class encapsulates a spherical light distribution.
SphericalLightDistribution(other: SphericalLightDistribution)
SphericalLightDistribution()
"""
def Dispose(self):
""" Dispose(self: LightDistribution,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightDistribution,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,other=None):
"""
__new__(cls: type,other: SphericalLightDistribution)
__new__(cls: type)
"""
pass
class SpotLightDistribution(LightDistribution,IDisposable):
"""
This class encapsulates a spot light distribution.
SpotLightDistribution(other: SpotLightDistribution)
SpotLightDistribution(spotBeamAngle: float,spotFieldAngle: float,tiltAngle: float)
SpotLightDistribution()
"""
def Dispose(self):
""" Dispose(self: LightDistribution,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: LightDistribution,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__(cls: type,other: SpotLightDistribution)
__new__(cls: type,spotBeamAngle: float,spotFieldAngle: float,tiltAngle: float)
__new__(cls: type)
"""
pass
SpotBeamAngle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The spot beam angle.
Get: SpotBeamAngle(self: SpotLightDistribution) -> float
Set: SpotBeamAngle(self: SpotLightDistribution)=value
"""
SpotFieldAngle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The spot field angle.
Get: SpotFieldAngle(self: SpotLightDistribution) -> float
Set: SpotFieldAngle(self: SpotLightDistribution)=value
"""
TiltAngle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The tilt angle.
Get: TiltAngle(self: SpotLightDistribution) -> float
Set: TiltAngle(self: SpotLightDistribution)=value
"""
|
tests/test_helm.py | onecommons/giterop | 109 | 11152467 | <gh_stars>100-1000
import os
import os.path
import sys
import threading
import unittest
from functools import partial
import shutil
import urllib.request
from click.testing import CliRunner
from unfurl.job import JobOptions, Runner
from unfurl.yamlmanifest import YamlManifest
from unfurl.localenv import LocalEnv
from .utils import init_project
# http://localhost:8010/fixtures/helmrepo
@unittest.skipIf("helm" in os.getenv("UNFURL_TEST_SKIP", ""), "UNFURL_TEST_SKIP set")
class HelmTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
path = os.path.join(
os.path.dirname(__file__), "examples", "helm-simple-ensemble.yaml"
)
with open(path) as f:
self.manifest = f.read()
server_address = ("", 8010)
directory = os.path.dirname(__file__)
try:
if sys.version_info[0] >= 3:
from http.server import HTTPServer, SimpleHTTPRequestHandler
handler = partial(SimpleHTTPRequestHandler, directory=directory)
self.httpd = HTTPServer(server_address, handler)
else: # for python 2.7
import urllib
import SocketServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class RootedHTTPRequestHandler(SimpleHTTPRequestHandler):
def translate_path(self, path):
path = os.path.normpath(urllib.unquote(path))
words = path.split("/")
words = filter(None, words)
path = directory
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
return path
self.httpd = SocketServer.TCPServer(
server_address, RootedHTTPRequestHandler
)
except: # address might still be in use
self.httpd = None
return
t = threading.Thread(name="http_thread", target=self.httpd.serve_forever)
t.daemon = True
t.start()
def tearDown(self):
if self.httpd:
self.httpd.socket.close()
def test_deploy(self):
# make sure this works
f = urllib.request.urlopen("http://localhost:8010/fixtures/helmrepo/index.yaml")
f.close()
runner = Runner(YamlManifest(self.manifest))
run1 = runner.run(JobOptions(verbose=3, startTime=1))
assert not run1.unexpectedAbort, run1.unexpectedAbort.get_stack_trace()
mysql_release = runner.manifest.rootResource.find_resource("mysql_release")
query = ".::.requirements::[.name=host]::.target::name"
res = mysql_release.query(query)
assert res == "unfurl-helm-unittest"
summary = run1.json_summary()
# print(runner.manifest.statusSummary())
# print(run1.jsonSummary(True))
# print(run1._jsonPlanSummary(True))
if sys.version_info[0] < 3:
return # task order not guaranteed in python 2.7
self.assertEqual(
summary,
{
"job": {
"id": "A01110000000",
"status": "ok",
"total": 4,
"ok": 4,
"error": 0,
"unknown": 0,
"skipped": 0,
"changed": 4,
},
"outputs": {},
"tasks": [
{
"status": "ok",
"target": "stable_repo",
"operation": "create",
"template": "stable_repo",
"type": "unfurl.nodes.HelmRepository",
"targetStatus": "ok",
"targetState": "created",
"changed": True,
"configurator": "unfurl.configurators.shell.ShellConfigurator",
"priority": "required",
"reason": "add",
},
{
"status": "ok",
"target": "k8sNamespace",
"operation": "configure",
"template": "k8sNamespace",
"type": "unfurl.nodes.K8sNamespace",
"targetStatus": "ok",
"targetState": "configured",
"changed": True,
"configurator": "unfurl.configurators.k8s.ResourceConfigurator",
"priority": "required",
"reason": "add",
},
{
"status": "ok",
"target": "mysql_release",
"operation": "configure",
"template": "mysql_release",
"type": "unfurl.nodes.HelmRelease",
"targetStatus": "ok",
"targetState": "configured",
"changed": True,
"configurator": "unfurl.configurators.DelegateConfigurator",
"priority": "required",
"reason": "add",
},
{
"status": "ok",
"target": "mysql_release",
"operation": "execute",
"template": "mysql_release",
"type": "unfurl.nodes.HelmRelease",
"targetStatus": "ok",
"targetState": "configuring",
"changed": True,
"configurator": "unfurl.configurators.shell.ShellConfigurator",
"priority": "required",
"reason": "subtask: for add: Standard.configure",
},
],
},
)
assert all(task["targetStatus"] == "ok" for task in summary["tasks"]), summary[
"tasks"
]
# runner.manifest.dump()
def test_undeploy(self):
###########################################
#### WARNING test_undeploy() will not succeed if called individually because test_deploy
        #### installs the chart repo in tox's tmp directory, which is deleted after the test run
#### so test_undeploy will not find it
###########################################
# note! if tests fail may need to run:
# helm uninstall mysql-test -n unfurl-helm-unittest
# and kubectl delete namespace unfurl-helm-unittest
cli_runner = CliRunner()
with cli_runner.isolated_filesystem():
src_path = os.path.join(
os.path.dirname(__file__), "examples", "helm-simple-ensemble.yaml"
)
path = init_project(cli_runner, src_path)
# init_project creates home at "./unfurl_home"
runner = Runner(
YamlManifest(localEnv=LocalEnv(path, homePath="./unfurl_home"))
)
run = runner.run(JobOptions(workflow="check", startTime=2))
summary = run.json_summary()
assert not run.unexpectedAbort, run.unexpectedAbort.get_stack_trace()
# print("check")
# print(runner.manifest.statusSummary())
# print(run.jsonSummary(True))
summary = run.json_summary()
self.assertEqual(
{
"external_jobs": [
{
"ensemble": summary["external_jobs"][0]["ensemble"],
"job": {
"id": "A01120000000",
"status": "ok",
"total": 2,
"ok": 2,
"error": 0,
"unknown": 0,
"skipped": 0,
"changed": 2,
},
"outputs": {},
"tasks": [
{
"changed": True,
"configurator": "unfurl.configurators.DelegateConfigurator",
"operation": "configure",
"priority": "required",
"reason": "add",
"status": "ok",
"target": "__artifact__helm-artifacts--helm",
"targetState": "configured",
"targetStatus": "ok",
"template": "__artifact__helm-artifacts--helm",
"type": "unfurl.nodes.ArtifactInstaller",
},
{
"changed": True,
"configurator": "unfurl.configurators.shell.ShellConfigurator",
"operation": "configure",
"priority": "required",
"reason": "subtask: for add: Standard.configure",
"status": "ok",
"target": "install",
"targetState": None,
"targetStatus": "unknown",
"template": "install",
"type": "artifact.AsdfTool",
},
],
}
],
"job": {
"id": "A01120000000",
"status": "ok",
"total": 4,
"ok": 4,
"error": 0,
"unknown": 0,
"skipped": 0,
"changed": 4,
},
"outputs": {},
"tasks": [
{
"status": "ok",
"target": "stable_repo",
"operation": "check",
"template": "stable_repo",
"type": "unfurl.nodes.HelmRepository",
"targetStatus": "ok",
"targetState": "started",
"changed": True,
"configurator": "unfurl.configurators.shell.ShellConfigurator",
"priority": "required",
"reason": "check",
},
{
"status": "ok",
"target": "k8sNamespace",
"operation": "check",
"template": "k8sNamespace",
"type": "unfurl.nodes.K8sNamespace",
"targetStatus": "ok",
"targetState": "started",
"changed": True,
"configurator": "unfurl.configurators.k8s.ResourceConfigurator",
"priority": "required",
"reason": "check",
},
{
"status": "ok",
"target": "mysql_release",
"operation": "check",
"template": "mysql_release",
"type": "unfurl.nodes.HelmRelease",
"targetStatus": "ok",
"targetState": "started",
"changed": True,
"configurator": "unfurl.configurators.DelegateConfigurator",
"priority": "required",
"reason": "check",
},
{
"status": "ok",
"target": "mysql_release",
"operation": "execute",
"template": "mysql_release",
"type": "unfurl.nodes.HelmRelease",
"targetStatus": "ok",
"targetState": None,
"changed": True,
"configurator": "unfurl.configurators.shell.ShellConfigurator",
"priority": "required",
"reason": "subtask: for check: Install.check",
},
],
},
summary,
)
# reuse the same runner because the manifest's status has been updated
run2 = runner.run(
JobOptions(workflow="undeploy", startTime=3, destroyunmanaged=True)
)
assert not run2.unexpectedAbort, run2.unexpectedAbort.get_stack_trace()
# print("undeploy")
# print(runner.manifest.status_summary())
# print(run2.json_summary(True))
summary2 = run2.json_summary()
            # note: this test relies on stable_repo being placed in the helm cache by test_deploy()
# comment out the repository requirement to run this test standalone
# assert all(
# task["targetStatus"] == "absent" for task in summary2["tasks"]
# ), list(summary2["tasks"])
self.assertEqual(
{
"job": {
"id": "A01130000000",
"status": "ok",
"total": 3,
"ok": 3,
"error": 0,
"unknown": 0,
"skipped": 0,
"changed": 3,
},
"outputs": {},
"tasks": [
{
"status": "ok",
"target": "mysql_release",
"operation": "delete",
"template": "mysql_release",
"type": "unfurl.nodes.HelmRelease",
"targetStatus": "absent",
"targetState": "deleted",
"changed": True,
"configurator": "unfurl.configurators.shell.ShellConfigurator",
"priority": "required",
"reason": "undeploy",
},
{
"status": "ok",
"target": "stable_repo",
"operation": "delete",
"template": "stable_repo",
"type": "unfurl.nodes.HelmRepository",
"targetStatus": "absent",
"targetState": "deleted",
"changed": True,
"configurator": "unfurl.configurators.shell.ShellConfigurator",
"priority": "required",
"reason": "undeploy",
},
{
"status": "ok",
"target": "k8sNamespace",
"operation": "delete",
"template": "k8sNamespace",
"type": "unfurl.nodes.K8sNamespace",
"targetStatus": "absent",
"targetState": "deleted",
"changed": True,
"configurator": "unfurl.configurators.k8s.ResourceConfigurator",
"priority": "required",
"reason": "undeploy",
},
],
},
summary2,
)
|
HLTrigger/special/test/test_AsymFilters_cfg.py | ckamtsikis/cmssw | 852 | 11152495 | <filename>HLTrigger/special/test/test_AsymFilters_cfg.py
#
# This python script is the basis for MIB HLT path testing
#
# Only the developed paths are run on the RAW data sample
#
# We are using the GRun_data version of the HLT menu
#
# SV (<EMAIL>): 18/01/2011
#
import FWCore.ParameterSet.Config as cms
process = cms.Process('HLT2')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryIdeal_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('HLTrigger.Configuration.HLT_GRun_data_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('Configuration.EventContent.EventContent_cff')
# To be adapted to the release
useGlobalTag = 'GR_R_310_V3::All'
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.options = cms.untracked.PSet(
SkipEvent = cms.untracked.vstring('ProductNotFound')
)
# Input source (a raw data file from the Commissioning dataset)
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('rfio:/castor/cern.ch/cms/store/data/Run2010B/Commissioning/RAW/v1/000/147/043/0C1114D5-E5CD-DF11-8FF5-001D09F2546F.root')
)
# Output module (keep only the stuff necessary to the timing module)
process.output = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
outputCommands = cms.untracked.vstring( 'drop *', 'keep HLTPerformanceInfo_*_*_*'),
fileName = cms.untracked.string('HLT.root'),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('')
)
)
# Timer
process.PathTimerService = cms.Service( "PathTimerService" )
process.hltTimer = cms.EDProducer( "PathTimerInserter" )
# Then we define the info necessary to the paths
process.HLTBeginSequence = cms.Sequence( process.hltTriggerType + process.HLTL1UnpackerSequence )
process.HLTDoLocalPixel = cms.Sequence( process.hltSiPixelDigis + process.hltSiPixelClusters)
process.HLTDoLocalHF = cms.Sequence( process.hltHcalDigis + process.hltHfreco )
process.hltPixelAsymmetryFilter = cms.EDFilter( "HLTPixelAsymmetryFilter",
inputTag = cms.InputTag( "hltSiPixelClusters" ),
MinAsym = cms.double( 0. ), # minimum asymmetry
MaxAsym = cms.double( 1. ), # maximum asymmetry
MinCharge = cms.double( 4000. ), # minimum charge for a cluster to be selected (in e-)
MinBarrel = cms.double( 10000. ), # minimum average charge in the barrel (bpix, in e-)
)
process.hltHFAsymmetryFilter = cms.EDFilter( "HLTHFAsymmetryFilter",
ECut_HF = cms.double( 3.0 ), # minimum energy for a cluster to be selected
OS_Asym_max = cms.double( 0.2 ), # Opposite side asymmetry maximum value
SS_Asym_min = cms.double( 0.8 ), # Same side asymmetry minimum value
HFHitCollection = cms.InputTag( "hltHfreco" )
)
# The test paths
process.HLT_L1_BSC_BeamGas = cms.Path( process.HLTBeginSequence + process.hltL1sL1BptxXORBscMinBiasOR + process.HLTDoLocalPixel + process.hltPixelAsymmetryFilter + process.HLTEndSequence )
process.HLT_L1_HF_BeamGas = cms.Path( process.HLTBeginSequence + process.hltL1sL1BptxXORBscMinBiasOR + process.HLTDoLocalHF + process.hltHFAsymmetryFilter + process.HLTEndSequence )
process.m_HLTSchedule = cms.Schedule( *(process.HLTriggerFirstPath, process.HLT_L1_BSC_BeamGas, process.HLT_L1_HF_BeamGas, process.HLTriggerFinalPath, process.HLTAnalyzerEndpath ))
#Deal with the global tag
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.connect = 'frontier://FrontierProd/CMS_COND_31X_GLOBALTAG'
process.GlobalTag.pfnPrefix = cms.untracked.string('frontier://FrontierProd/')
process.GlobalTag.globaltag = useGlobalTag
# Path and EndPath definitions
process.endjob_step = cms.Path(process.endOfProcess)
process.out_step = cms.EndPath( process.hltTimer + process.output)
# Schedule definition
process.schedule = cms.Schedule(process.m_HLTSchedule)
process.schedule.extend([process.endjob_step,process.out_step])
|
tests/signal_handler.py | riddopic/opta | 595 | 11152525 | # type: ignore
import os
import signal
import sys
import time
def signal_handler(sig, frame):
print("You pressed Ctrl+C!")
time.sleep(1)
with open(
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"tests",
"signal_gracefully_terminated",
),
"w",
) as f:
f.write("blah")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
print("Press Ctrl+C")
signal.pause()
|
ipytest/_impl.py | chmp/ipytest | 218 | 11152530 | <gh_stars>100-1000
from __future__ import print_function, division, absolute_import
import ast
import contextlib
import fnmatch
import importlib
import os
import pathlib
import shlex
import sys
import tempfile
import threading
import packaging.version
import pytest
from IPython import get_ipython
from ._config import current_config, default_clean
def run(*args, module=None, plugins=()):
"""Execute all tests in the passed module (defaults to __main__) with pytest.
:param args:
additional commandline options passed to pytest
:param module:
the module containing the tests. If not given, `__main__` will be used.
:param filename:
the filename of the file containing the tests. It has to be a real
file, e.g., a notebook name, since itts existence will be checked by
pytest. If not given, the `__file__` attribute of the passed module
will be used.
:param plugins:
additional plugins passed to pytest.
"""
import ipytest
run = run_in_thread if current_config["run_in_thread"] else run_direct
ipytest.exit_code = run(
_run_impl,
*args,
module=module,
plugins=plugins,
)
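# Hedged usage sketch (not called anywhere in this module): how a notebook cell
# might drive ``run`` directly instead of the ``%%ipytest`` magic. In a real
# notebook the statements below live at the top level of a cell; they are wrapped
# in a function here only so that importing this module stays side-effect free.
# The test name and the "-qq" pytest option are illustrative assumptions.
def _example_run_from_notebook():
    import ipytest
    def test_addition():
        assert 1 + 1 == 2
    # Collect and run every test defined in __main__; ``run`` also stores the
    # pytest exit code on ``ipytest.exit_code``.
    return ipytest.run("-qq")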
def pytest_magic(line, cell):
"""IPython magic function running pytest"""
if current_config["clean"] is not False:
clean_tests(current_config["clean"])
try:
get_ipython().run_cell(cell)
except TypeError as e:
if "raw_cell" in str(e):
raise RuntimeError(
"The ipytest magic cannot evaluate the cell. Most likely you "
"are running a modified ipython version. Consider using "
"`ipytest.run` and `ipytest.clean_tests` directly."
) from e
else:
raise e
run(*shlex.split(line))
def clean_tests(pattern=default_clean, items=None):
"""Delete tests with names matching the given pattern.
In IPython the results of all evaluations are kept in global variables
unless explicitly deleted. This behavior implies that when tests are renamed
the previous definitions will still be found if not deleted. This method
    aims to simplify this process.
    An effective pattern is to start the cell containing tests with a call
    to `clean_tests`, then define all test cases, and finally call `run_tests`.
This way renaming tests works as expected.
**Arguments:**
- `pattern`: a glob pattern used to match the tests to delete.
- `items`: the globals object containing the tests. If `None` is given, the
globals object is determined from the call stack.
"""
if items is None:
import __main__
items = vars(__main__)
to_delete = [key for key in items.keys() if fnmatch.fnmatchcase(key, pattern)]
for key in to_delete:
del items[key]
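# Hedged sketch of the clean/define/run pattern described in the docstring above.
# In a notebook the three steps live at the top level of a single cell; they are
# wrapped in a function here only to keep importing this module side-effect free.
# The glob pattern and the test body are illustrative assumptions.
def _example_clean_define_run():
    import ipytest
    ipytest.clean_tests("test_*")      # drop stale test_* definitions first
    def test_rename_me():              # (re)define the tests for this cell
        assert "a".upper() == "A"
    return ipytest.run()               # finally collect and run what is defined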
def reload(*mods):
"""Reload all modules passed as strings.
This function may be useful, when mixing code in external modules and
notebooks.
Usage::
reload("ipytest._util", "ipytest")
"""
for mod in mods:
importlib.reload(importlib.import_module(mod))
def _run_impl(*args, module, plugins):
with _prepared_env(module) as filename:
full_args = _build_full_args(args, filename)
return pytest.main(full_args, plugins=[*plugins, FixProgramNamePlugin()])
def _build_full_args(args, filename):
def _fmt(arg):
return arg.format(MODULE=filename)
return [
*(_fmt(arg) for arg in current_config["addopts"]),
*(_fmt(arg) for arg in args),
*([filename] if current_config["defopts"] else []),
]
@contextlib.contextmanager
def _prepared_env(module):
if module is None: # pragma: no cover
import __main__ as module
with tempfile.NamedTemporaryFile(dir=".", suffix=".py") as f:
path = pathlib.Path(f.name)
module_name = path.stem
if not is_valid_module_name(module_name):
raise RuntimeError(
f"Invalid module name {module_name!r} generated by tempfile. "
"This should not happen, please open an issue at "
"'https://github.com/chmp/ipytest/issues' to report a bug."
)
if module_name in sys.modules:
raise RuntimeError(
f"Cannot register module with name {module_name!r}. It would "
"override an existing module. This should not happen. Please "
"report a bug at 'https://github.com/chmp/ipytest/issues'."
)
with patch(module, "__file__", str(path)):
with register_module(module, module_name):
with patched_columns():
yield str(path)
class RewriteAssertTransformer(ast.NodeTransformer):
def register_with_shell(self, shell):
shell.ast_transformers.append(self)
def unregister_with_shell(self, shell):
shell.ast_transformers[:] = [
transformer
for transformer in shell.ast_transformers
if transformer is not self
]
def visit(self, node):
from _pytest.assertion.rewrite import rewrite_asserts
pytest_version = get_pytest_version()
if pytest_version.release[0] >= 5:
# TODO: re-create a pseudo code to include the asserts?
rewrite_asserts(node, b"")
else:
rewrite_asserts(node)
return node
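# Hedged sketch (not executed on import): wiring the transformer above into the
# running IPython shell so that ``assert`` statements typed in cells go through
# pytest's assertion rewriting. ``get_ipython()`` returns None outside IPython,
# so a caller would guard for that case.
def _example_enable_assert_rewriting():
    shell = get_ipython()
    transformer = RewriteAssertTransformer()
    transformer.register_with_shell(shell)
    return transformer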
class FixProgramNamePlugin:
def pytest_addoption(self, parser):
# Explanation:
#
# - the prog instance variable is defined, but never overwritten [1]
        # - this variable is passed to the underlying argparse Parser [2]
# via [3]
# - with a `None` value argparse uses sys.argv array to determine the
# program name
#
# [1]: https://github.com/pytest-dev/pytest/blob/6d6bc97231f2d9a68002f1d191828fd3476ca8b8/src/_pytest/config/argparsing.py#L41
# [2]: https://github.com/pytest-dev/pytest/blob/6d6bc97231f2d9a68002f1d191828fd3476ca8b8/src/_pytest/config/argparsing.py#L397
# [3]: https://github.com/pytest-dev/pytest/blob/6d6bc97231f2d9a68002f1d191828fd3476ca8b8/src/_pytest/config/argparsing.py#L119
#
parser.prog = "%%ipytest"
def get_pytest_version():
return packaging.version.parse(pytest.__version__)
@contextlib.contextmanager
def patch(obj, attr, val):
had_attr = hasattr(obj, attr)
prev_val = getattr(obj, attr, None)
setattr(obj, attr, val)
try:
yield
finally:
if not had_attr:
delattr(obj, attr)
else:
setattr(obj, attr, prev_val)
@contextlib.contextmanager
def register_module(obj, name):
if name in sys.modules:
raise RuntimeError(f"Cannot overwrite existing module {name}")
sys.modules[name] = obj
try:
yield
finally:
del sys.modules[name]
@contextlib.contextmanager
def patched_columns():
display_columns = current_config["display_columns"]
if not display_columns:
yield
return
# NOTE: since values have to be strings, None identifies unset values
prev_columns = os.environ.get("COLUMNS")
os.environ["COLUMNS"] = str(display_columns)
yield
if prev_columns is not None:
os.environ["COLUMNS"] = prev_columns
else:
del os.environ["COLUMNS"]
def run_direct(func, *args, **kwargs):
return func(*args, **kwargs)
def run_in_thread(func, *args, **kwargs):
res = None
def _thread():
nonlocal res
res = func(*args, **kwargs)
t = threading.Thread(target=_thread)
t.start()
t.join()
return res
def is_valid_module_name(name):
return all(c not in name for c in ".- ")
|
armi/materials/b4c.py | keckler/armi | 162 | 11152564 | <reponame>keckler/armi<filename>armi/materials/b4c.py
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Boron carbide; a very typical reactor control material."""
from armi.materials import material
from armi.nucDirectory import nuclideBases
from armi import runLog
from armi.utils.units import getTc
DEFAULT_THEORETICAL_DENSITY_FRAC = 0.90
DEFAULT_MASS_DENSITY = 2.52
class B4C(material.Material):
name = "B4C"
enrichedNuclide = "B10"
def applyInputParams(
self, B10_wt_frac=None, theoretical_density=None, TD_frac=None, *args, **kwargs
):
if B10_wt_frac is not None:
# we can't just use the generic enrichment adjustment here because the
# carbon has to change with enrich.
self.adjustMassEnrichment(B10_wt_frac)
if theoretical_density is not None:
runLog.warning(
"The 'threoretical_density' material modification for B4C will be "
"deprecated. Update your inputs to use 'TD_frac' instead.",
single=True,
)
if TD_frac is not None:
runLog.warning(
"Both 'theoretical_density' and 'TD_frac' are specified "
f"for {self}. 'TD_frac' will be used."
)
else:
self.updateTD(theoretical_density)
if TD_frac is not None:
self.updateTD(TD_frac)
def updateTD(self, TD):
self.p.theoreticalDensityFrac = TD
self.clearCache()
def setNewMassFracsFromMassEnrich(self, massEnrichment):
r"""
        Calculate the mass fractions for a given mass enrichment and set them on any parent.
Parameters
----------
massEnrichment : float
The mass enrichment as a fraction.
Returns
-------
boron10MassGrams, boron11MassGrams, carbonMassGrams : float
The resulting mass of each nuclide/element
Notes
-----
B-10: 10.012 g/mol
B-11: 11.009 g/mol
Carbon: 12.0107 g/mol
4 moles of boron/1 mole of carbon
grams of boron-10 = 10.012 g/mol* 4 mol * 0.199 = 7.969552 g
grams of boron-11 = 11.009 g/mol* 4 mol * 0.801 = 35.272836 g
grams of carbon= 12.0107 g/mol * 1 mol = 12.0107 g
from number enrichment mi:
mB10 = nB10*AB10 /(nB10*AB10 + nB11*AB11)
"""
if massEnrichment < 0 or massEnrichment > 1:
raise ValueError(
"massEnrichment {} is unphysical for B4C".format(massEnrichment)
)
b10AtomicMass = nuclideBases.byName["B10"].weight
b11AtomicMass = nuclideBases.byName["B11"].weight
b10NumEnrich = (massEnrichment / b10AtomicMass) / (
massEnrichment / b10AtomicMass + (1 - massEnrichment) / b11AtomicMass
)
b11NumEnrich = 1.0 - b10NumEnrich
cAtomicMass = nuclideBases.byName["C"].weight
boron10MassGrams = b10AtomicMass * b10NumEnrich * 4.0
boron11MassGrams = b11AtomicMass * b11NumEnrich * 4.0
carbonMassGrams = cAtomicMass
gTotal = boron10MassGrams + boron11MassGrams + carbonMassGrams
boron10MassGrams /= gTotal
boron11MassGrams /= gTotal
carbonMassGrams /= gTotal
if self.parent:
self.parent.setMassFracs(
{"B10": boron10MassGrams, "B11": boron11MassGrams, "C": carbonMassGrams}
)
return boron10MassGrams, boron11MassGrams, carbonMassGrams
def setDefaultMassFracs(self):
r"""B4C mass fractions. Using Natural B4C. 19.9% B-10/ 80.1% B-11
Boron: 10.811 g/mol
Carbon: 12.0107 g/mol
4 moles of boron/1 mole of carbon
grams of boron-10 = 10.01 g/mol* 4 mol * 0.199 = 7.96796 g
grams of boron-11 = 11.01 g/mol* 4 mol * 0.801 = 35.27604 g
grams of carbon= 12.0107 g/mol * 1 mol = 12.0107 g
total=55.2547 g.
Mass fractions are computed from this.
"""
massEnrich = self.getMassEnrichmentFromNumEnrich(naturalB10NumberFraction=0.199)
gBoron10, gBoron11, gCarbon = self.setNewMassFracsFromMassEnrich(
massEnrichment=massEnrich
)
self.setMassFrac("B10", gBoron10)
self.setMassFrac("B11", gBoron11)
self.setMassFrac("C", gCarbon)
self.p.refDens = DEFAULT_MASS_DENSITY
        # TD reference: <NAME>, "Absorber Materials for control rod systems of fast breeder reactors",
        # Journal of Nuclear Materials, 124, 185-194 (1984).
self.p.theoreticalDensityFrac = (
            DEFAULT_THEORETICAL_DENSITY_FRAC  # normally around 0.88-0.93
)
@staticmethod
def getMassEnrichmentFromNumEnrich(naturalB10NumberFraction):
b10AtomicMass = nuclideBases.byName["B10"].weight
b11AtomicMass = nuclideBases.byName["B11"].weight
return (
naturalB10NumberFraction
* b10AtomicMass
/ (
naturalB10NumberFraction * b10AtomicMass
+ (1.0 - naturalB10NumberFraction) * b11AtomicMass
)
)
def density(self, Tk=None, Tc=None):
"""
        Return the mass density in g/cc, scaled by the theoretical density fraction.
"""
density = material.Material.density(self, Tk, Tc)
theoreticalDensityFrac = self.p.theoreticalDensityFrac
if theoreticalDensityFrac is None:
theoreticalDensityFrac = 1.0
runLog.warning(
"Assumption: 100% theoretical density",
label="Assumption: B4C is at 100% theoretical density",
single=True,
)
return density * theoreticalDensityFrac # g/cc
def linearExpansionPercent(self, Tk=None, Tc=None):
"""Boron carbide expansion. Very preliminary"""
Tc = getTc(Tc, Tk)
self.checkTempRange(25, 500, Tc, "linear expansion percent")
deltaT = Tc - 25
dLL = deltaT * 4.5e-6 * 100 # percent
return dLL
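# Illustrative usage sketch (comments only; not part of the original ARMI source, and
# the direct construction below is an assumption -- in a real run the material
# modifications normally come from blueprint inputs):
#
#     b4c = B4C()
#     b4c.applyInputParams(B10_wt_frac=0.90, TD_frac=0.95)
#     rho = b4c.density(Tc=25)  # roughly 2.52 * 0.95 g/cc at the reference temperature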
|
setup.py | wyddmw/ViT-pytorch-1 | 311 | 11152567 | <gh_stars>100-1000
# Copyright 2021 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python 3
"""Setup for pip package."""
from setuptools import find_namespace_packages
from setuptools import setup
def _parse_requirements(requirements_txt_path):
with open(requirements_txt_path) as fp:
return fp.read().splitlines()
_VERSION = '0.1.0'
setup(
name='ml_collections',
version=_VERSION,
author='ML Collections Authors',
author_email='<EMAIL>',
    description='ML Collections is a library of Python collections designed for ML use cases.',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/google/ml_collections',
license='Apache 2.0',
# Contained modules and scripts.
packages=find_namespace_packages(exclude=['*_test.py']),
install_requires=_parse_requirements('requirements.txt'),
tests_require=_parse_requirements('requirements-test.txt'),
python_requires='>=2.6',
include_package_data=True,
zip_safe=False,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
tests/test_parse.py | tgragnato/geneva | 1,182 | 11152603 | <reponame>tgragnato/geneva
import pytest
import sys
# Include the root of the project
sys.path.append("..")
import actions.strategy
import actions.utils
import library
EDGE_CASES = [
"[TCP:flags:A]-| \/",
"\/ [TCP:flags:A]-|",
"[TCP:flags:A]-duplicate(duplicate(duplicate(duplicate,),),)-| \/",
"[IP:version:4]-| \/",
"[TCP:flags:A]-duplicate(tamper{TCP:flags:corrupt}(duplicate(duplicate,),),)-| \/",
"[TCP:flags:A]-tamper{TCP:flags:replace:S}(duplicate,)-| \/",
# --- Tamper value tests ---
    # Tamper value should be an empty string
"[IP:frag:0]-fragment{tcp:-1:False}(drop,tamper{TCP:options-altchksum:replace:})-| \/",
# Tamper value should be "074" and be a string
"[IP:ihl:0]-fragment{tcp:-1:True}(duplicate,tamper{IP:load:replace:074})-| \/"
]
def get_tests():
"""
    Returns a list of strategy strings drawn from the solution library plus the edge cases above.
"""
tests = []
for solution in library.LAB_STRATEGIES:
tests.append(solution["strategy"])
for strategy in EDGE_CASES:
tests.append(strategy)
return tests
@pytest.mark.parametrize("solution", get_tests())
def test_library(solution, logger):
"""
    Pulls each solution from the solution library and the edge cases above and
    confirms that it can be parsed and re-serialized without changes.
"""
# Parse the string representation of the solution
strat = actions.utils.parse(solution, logger)
logger.info("Parsed strategy %s" % (str(strat)))
# Confirm the parsing was correct
assert str(strat).strip() == solution, "Failed to correctly parse given strategy"
def test_quotes(logger):
"""
    Tests that it properly handles strategies with leading/trailing quotes.
"""
assert "\/" == str(actions.utils.parse("\"\/\"", logger)).strip()
assert "\/ [TCP:flags:A]-drop-|" == str(actions.utils.parse("\"\/ [TCP:flags:A]-drop-|\"", logger)).strip()
def test_failures(logger):
"""
    Tests that parsing properly fails for malformed strategies.
"""
with pytest.raises(actions.tree.ActionTreeParseError):
actions.utils.parse("asdfasdf", logger)
with pytest.raises(actions.tree.ActionTreeParseError):
actions.utils.parse("[]-asdfasdf", logger)
# Field doesn't exist
with pytest.raises(AssertionError):
actions.utils.parse("[TCP:thing:1]-nooooooope", logger)
assert actions.utils.parse("", logger) is not None
assert " \/ " == str(actions.utils.parse("", logger))
|
python/lambda-triggered-by-existing-kinesis-stream/lambda-handler.py | marclyo/aws-cdk-examples | 2,941 | 11152615 | <reponame>marclyo/aws-cdk-examples
def main(event, context):
print("I'm running!")
|
examples/webapps/_local_path.py | timgates42/txmongo | 122 | 11152663 | # coding: utf-8
# Copyright 2009-2014 The txmongo authors. All rights reserved.
# Use of this source code is governed by the Apache License that can be
# found in the LICENSE file.
'''
Adds the local txmongo path to sys.path
'''
import os.path
import sys
root = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
)
)
sys.path.insert(0, root)
|
rootpy/extern/byteplay3/wbyteplay.py | masonproffitt/rootpy | 146 | 11152672 | # byteplay: CPython assembler/disassembler
# Copyright (C) 2006 <NAME> | Version: http://code.google.com/p/byteplay
# Rewritten 2009 <NAME> | Version: http://github.com/serprex/byteplay
# Screwed the style over, modified stack logic to be more flexible, updated to Python 3
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
__version__ = '1.0'
__all__ = [
'opmap',
'opname',
'opcodes',
'hasflow',
'stack_effect',
'cmp_op',
'hasarg',
'hasname',
'hasjrel',
'hasjabs',
'hasjump',
'haslocal',
'hascompare',
'hasfree',
'hasconst',
'hascode',
'Opcode',
'SetLineno',
'Label',
'isopcode',
'Code']
from sys import version_info
if version_info < (3, 6):
raise NotImplementedError("Currently only Python versions >3.5 are supported!")
import opcode
from dis import findlabels
from types import CodeType
from enum import Enum
class Opcode(int):
__str__ = __repr__ = lambda s: opname[s]
opmap = {name.replace('+', '_'): Opcode(code) for name, code in opcode.opmap.items()}
opname = {code: name for name, code in opmap.items()}
opcodes = set(opname)
for cmp_op, hasname in opmap.items():
globals()[cmp_op] = hasname
__all__.append(cmp_op)
cmp_op = opcode.cmp_op
hasarg = {x for x in opcodes if x >= opcode.HAVE_ARGUMENT}
hasconst = {Opcode(x) for x in opcode.hasconst}
hasname = {Opcode(x) for x in opcode.hasname}
hasjrel = {Opcode(x) for x in opcode.hasjrel}
hasjabs = {Opcode(x) for x in opcode.hasjabs}
hasjump = hasjabs | hasjrel
haslocal = {Opcode(x) for x in opcode.haslocal}
hascompare = {Opcode(x) for x in opcode.hascompare}
hasfree = {Opcode(x) for x in opcode.hasfree}
hascode = {MAKE_FUNCTION}
STOP_CODE = -1
import dis
# Fix bug in Python 3.6.0 (fixed in 3.6.1)
if (3, 6, 0) <= version_info < (3, 6, 1):
def stack_effect(o, arg):
return (dis.stack_effect(o, arg) if o != CALL_FUNCTION_EX else
-2 if arg else -1)
else:
stack_effect = dis.stack_effect
hasflow = hasjump | {
POP_BLOCK,
END_FINALLY,
BREAK_LOOP,
RETURN_VALUE,
RAISE_VARARGS,
STOP_CODE,
POP_EXCEPT,
WITH_CLEANUP_START,
WITH_CLEANUP_FINISH,
SETUP_ASYNC_WITH}
coroutine_opcodes = {GET_AWAITABLE, GET_AITER, GET_ANEXT, BEFORE_ASYNC_WITH, SETUP_ASYNC_WITH}
class Label:
pass
class SetLinenoType:
def __repr__(self):
return 'SetLineno'
SetLineno = SetLinenoType()
def isopcode(x):
return x is not SetLineno and not isinstance(x, Label)
# Flags for codeobject.co_flags, taken from Include/code.h, other flags are no longer used
CO_OPTIMIZED = 0x0001
CO_NEWLOCALS = 0x0002
CO_VARARGS = 0x0004
CO_VARKEYWORDS = 0x0008
CO_NESTED = 0x0010
CO_GENERATOR = 0x0020
CO_NOFREE = 0x0040
CO_COROUTINE = 0x0080
CO_ITERABLE_COROUTINE = 0x0100
CO_ASYNC_GENERATOR = 0x0200
CO_FUTURE_BARRY_AS_BDFL = 0x40000
CO_FUTURE_GENERATOR_STOP = 0x80000
class Code(object):
"""An object which holds all the information which a Python code object
holds, but in an easy-to-play-with representation
The attributes are:
Affecting action
code - list of 2-tuples: the code
freevars - list of strings: the free vars of the code (those are names
of variables created in outer functions and used in the function)
args - list of strings: the arguments of the code
kwonly - number of keyword only arguments
varargs - boolean: Does args end with a '*args' argument
varkwargs - boolean: Does args end with a '**kwargs' argument
newlocals - boolean: Should a new local namespace be created
(True in functions, False for module and exec code)
force_generator - set CO_GENERATOR in co_flags for generator Code objects without generator-specific code
Python 3.5:
force_coroutine - set CO_COROUTINE in co_flags for coroutine Code objects (native coroutines) without coroutine-specific code
force_iterable_coroutine - set CO_ITERABLE_COROUTINE in co_flags for generator-based coroutine Code objects
future_generator_stop - set CO_FUTURE_GENERATOR_STOP flag (see PEP-479)
Python 3.6:
force_async_generator - set CO_ASYNC_GENERATOR in co_flags
Not affecting action
name - string: the name of the code (co_name)
filename - string: the file name of the code (co_filename)
firstlineno - int: the first line number (co_firstlineno)
docstring - string or None: the docstring (the first item of co_consts,
if it's str)
code is a list of 2-tuples. The first item is an opcode, or SetLineno, or a
Label instance. The second item is the argument, if applicable, or None"""
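    # Illustrative sketch (an assumption for clarity, not taken from the original
    # byteplay documentation): for `def f(): return 42`, the `code` list would look
    # roughly like
    #     [(SetLineno, 1), (LOAD_CONST, 42), (RETURN_VALUE, None)]
    # i.e. pseudo-instructions (SetLineno, Label instances) interleaved with real opcodes.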
def __init__(self, code, freevars, args, kwonly, varargs, varkwargs, newlocals,
name, filename, firstlineno, docstring,
force_generator=False,
*, force_coroutine=False, force_iterable_coroutine=False,
force_async_generator=False, future_generator_stop=False):
self.code = code
self.freevars = freevars
self.args = args
self.kwonly = kwonly
self.varargs = varargs
self.varkwargs = varkwargs
self.newlocals = newlocals
self.name = name
self.filename = filename
self.firstlineno = firstlineno
self.docstring = docstring
self.force_generator = force_generator
self.force_coroutine = force_coroutine
self.force_iterable_coroutine = force_iterable_coroutine
self.force_async_generator = force_async_generator
self.future_generator_stop = future_generator_stop
@staticmethod
def _findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source
Generate pairs offset,lineno as described in Python/compile.c
        This is a modified version of dis.findlinestarts, which allows multiple linestarts
with the same line number"""
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(code.co_lnotab[0::2], code.co_lnotab[1::2]):
if byte_incr:
yield addr, lineno
addr += byte_incr
lineno += line_incr
yield addr, lineno
@classmethod
def from_code(cls, co):
"""Disassemble a Python code object into a Code object"""
free_cell_isection = set(co.co_cellvars) & set(co.co_freevars)
if free_cell_isection:
print(co.co_name + ': has non-empty co.co_cellvars & co.co_freevars', free_cell_isection)
return None
co_code = co.co_code
labels = {addr: Label() for addr in findlabels(co_code)}
linestarts = dict(cls._findlinestarts(co))
cellfree = co.co_cellvars + co.co_freevars
code = []
extended_arg = 0
is_generator = False
is_coroutine = False
for i in range(0, len(co_code), 2):
if i in labels:
code.append((labels[i], None))
if i in linestarts:
code.append((SetLineno, linestarts[i]))
op = Opcode(co_code[i])
arg = co_code[i+1] | extended_arg
if op in hascode:
lastop, lastarg = code[-2]
if lastop != LOAD_CONST:
raise ValueError("%s should be preceded by LOAD_CONST" % op)
sub_code = Code.from_code(lastarg)
if sub_code is None:
print(co.co_name + ': has unexpected subcode block')
return None
code[-2] = (LOAD_CONST, sub_code)
if op == opcode.EXTENDED_ARG:
extended_arg = arg << 8
else:
if op not in hasarg:
code.append((op, None))
continue
extended_arg = 0
byteplay_arg = co.co_consts[arg] if op in hasconst else \
co.co_names[arg] if op in hasname else \
labels[arg] if op in hasjabs else \
labels[i + 2 + arg] if op in hasjrel else \
co.co_varnames[arg] if op in haslocal else \
cmp_op[arg] if op in hascompare else \
cellfree[arg] if op in hasfree else \
arg
code.append((op, byteplay_arg))
if op == YIELD_VALUE or op == YIELD_FROM:
is_generator = True
if op in coroutine_opcodes:
is_coroutine = True
varargs = not not co.co_flags & CO_VARARGS
varkwargs = not not co.co_flags & CO_VARKEYWORDS
force_coroutine = not is_coroutine and (co.co_flags & CO_COROUTINE)
force_iterable_coroutine = co.co_flags & CO_ITERABLE_COROUTINE
force_async_generator = co.co_flags & CO_ASYNC_GENERATOR
is_generator = False if force_async_generator else is_generator
force_generator = not is_generator and (co.co_flags & CO_GENERATOR)
assert not (force_coroutine and force_iterable_coroutine)
assert not (force_coroutine and force_async_generator)
assert not (force_iterable_coroutine and force_async_generator)
future_generator_stop = co.co_flags & CO_FUTURE_GENERATOR_STOP
return cls(code=code,
freevars=co.co_freevars,
args=co.co_varnames[:co.co_argcount + varargs + varkwargs + co.co_kwonlyargcount],
kwonly=co.co_kwonlyargcount,
varargs=varargs,
varkwargs=varkwargs,
newlocals=not not co.co_flags & CO_NEWLOCALS,
name=co.co_name,
filename=co.co_filename,
firstlineno=co.co_firstlineno,
docstring=co.co_consts[0] if co.co_consts and isinstance(co.co_consts[0], str) else None,
force_generator=force_generator,
force_coroutine=force_coroutine,
force_iterable_coroutine=force_iterable_coroutine,
force_async_generator=force_async_generator,
future_generator_stop=future_generator_stop)
def __eq__(self, other):
try:
if (self.freevars != other.freevars or
self.args != other.args or
self.kwonly != other.kwonly or
self.varargs != other.varargs or
self.varkwargs != other.varkwargs or
self.newlocals != other.newlocals or
self.name != other.name or
self.filename != other.filename or
self.firstlineno != other.firstlineno or
self.docstring != other.docstring or
self.force_generator != other.force_generator or
len(self.code) != len(other.code)):
return False
else:
if (self.force_coroutine != other.force_coroutine or
self.force_iterable_coroutine != other.force_iterable_coroutine or
self.future_generator_stop != other.future_generator_stop or
self.force_async_generator != other.force_async_generator):
return False
# This isn't trivial due to labels
lmap = {}
for (op1, arg1), (op2, arg2) in zip(self.code, other.code):
if isinstance(op1, Label):
if lmap.setdefault(arg1, arg2) is not arg2:
return False
else:
if op1 != op2:
return False
if op1 in hasjump:
if lmap.setdefault(arg1, arg2) is not arg2:
return False
elif arg1 != arg2:
return False
return True
except:
return False
def _compute_stacksize(self, logging=False):
code = self.code
label_pos = {op[0]: pos for pos, op in enumerate(code) if isinstance(op[0], Label)}
# sf_targets are the targets of SETUP_FINALLY opcodes. They are recorded
# because they have special stack behaviour. If an exception was raised
# in the block pushed by a SETUP_FINALLY opcode, the block is popped
# and 3 objects are pushed. On return or continue, the block is popped
# and 2 objects are pushed. If nothing happened, the block is popped by
# a POP_BLOCK opcode and 1 object is pushed by a (LOAD_CONST, None)
# operation
# Our solution is to record the stack state of SETUP_FINALLY targets
# as having 3 objects pushed, which is the maximum. However, to make
# stack recording consistent, the get_next_stacks function will always
# yield the stack state of the target as if 1 object was pushed, but
# this will be corrected in the actual stack recording
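        # Illustrative sketch (an assumption for clarity only): for source such as
        #     try:
        #         f()
        #     finally:
        #         g()
        # the SETUP_FINALLY target is the label at the start of the finally body;
        # treating that target as the worst case keeps the computed stack size safe
        # even when the block is entered with a live exception on the stack.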
sf_targets = {label_pos[arg] for op, arg in code
if (op == SETUP_FINALLY or op == SETUP_WITH or op == SETUP_ASYNC_WITH)}
states = [None] * len(code)
maxsize = 0
class BlockType(Enum):
DEFAULT = 0,
TRY_FINALLY = 1,
TRY_EXCEPT = 2,
LOOP_BODY = 3,
WITH_BLOCK = 4,
EXCEPTION = 5,
SILENCED_EXCEPTION_BLOCK = 6,
class State:
def __init__(self, pos=0, stack=(0,), block_stack=(BlockType.DEFAULT,), log=[]):
self._pos = pos
self._stack = stack
self._block_stack = block_stack
self._log = log
@property
def pos(self):
return self._pos
@property
def stack(self):
return self._stack
@stack.setter
def stack(self, val):
self._stack = val
def newstack(self, n):
if self._stack[-1] < -n:
raise ValueError("Popped a non-existing element at %s %s" %
(self._pos, code[self._pos - 4: self._pos + 3]))
return self._stack[:-1] + (self._stack[-1] + n,)
@property
def block_stack(self):
return self._block_stack
@property
def log(self):
return self._log
def newlog(self, msg):
if not logging:
return None
log_msg = str(self._pos) + ": " + msg
if self._stack:
log_msg += " (on stack: "
log_depth = 2
log_depth = min(log_depth, len(self._stack))
for pos in range(-1, -log_depth, -1):
log_msg += str(self._stack[pos]) + ", "
log_msg += str(self._stack[-log_depth])
log_msg += ")"
else:
log_msg += " (empty stack)"
return [log_msg] + self._log
op = [State()]
while op:
cur_state = op.pop()
o = sum(cur_state.stack)
if o > maxsize:
maxsize = o
o, arg = code[cur_state.pos]
if isinstance(o, Label):
if cur_state.pos in sf_targets:
cur_state.stack = cur_state.newstack(5)
if states[cur_state.pos] is None:
states[cur_state.pos] = cur_state
elif states[cur_state.pos].stack != cur_state.stack:
check_pos = cur_state.pos + 1
while code[check_pos][0] not in hasflow:
check_pos += 1
if code[check_pos][0] not in (RETURN_VALUE, RAISE_VARARGS, STOP_CODE):
if cur_state.pos not in sf_targets:
raise ValueError("Inconsistent code at %s %s %s\n%s" %
(cur_state.pos, cur_state.stack, states[cur_state.pos].stack,
code[cur_state.pos - 5:cur_state.pos + 4]))
else:
# SETUP_FINALLY target inconsistent code!
#
                            # Since Python 3.2 the assigned exception is cleared at the end of
                            # the except clause (named exception handler).
                            # To perform this, CPython (checked in version 3.4.3) adds special
                            # bytecode in the exception handler which currently breaks the 'regularity' of the bytecode.
                            # The exception handler is wrapped in a try/finally block and a POP_EXCEPT opcode
                            # is inserted before END_FINALLY; as a result the cleanup-finally block is executed
                            # outside the except handler. It's not a bug, as it doesn't cause any problems during
                            # execution, but it breaks 'regularity' and we can't check for inconsistency here.
                            # Maybe an issue should be posted to the Python bug tracker.
pass
continue
else:
continue
if o not in (BREAK_LOOP, RETURN_VALUE, RAISE_VARARGS, STOP_CODE):
next_pos = cur_state.pos + 1
if not isopcode(o):
op += State(next_pos, cur_state.stack, cur_state.block_stack, cur_state.log),
elif o not in hasflow:
if o in hasarg and not isinstance(arg, int):
se = stack_effect(o, 0)
else:
se = stack_effect(o, arg)
log = cur_state.newlog("non-flow command (" + str(o) + ", se = " + str(se) + ")")
op += State(next_pos, cur_state.newstack(se), cur_state.block_stack, log),
elif o == FOR_ITER:
inside_for_log = cur_state.newlog("FOR_ITER (+1)")
op += State(label_pos[arg], cur_state.newstack(-1), cur_state.block_stack, cur_state.log),\
State(next_pos, cur_state.newstack(1), cur_state.block_stack, inside_for_log)
elif o in (JUMP_FORWARD, JUMP_ABSOLUTE):
after_jump_log = cur_state.newlog(str(o))
op += State(label_pos[arg], cur_state.stack, cur_state.block_stack, after_jump_log),
elif o in (JUMP_IF_FALSE_OR_POP, JUMP_IF_TRUE_OR_POP):
after_jump_log = cur_state.newlog(str(o) + ", jumped")
log = cur_state.newlog(str(o) + ", not jumped (-1)")
op += State(label_pos[arg], cur_state.stack, cur_state.block_stack, after_jump_log),\
State(next_pos, cur_state.newstack(-1), cur_state.block_stack, log)
elif o in {POP_JUMP_IF_TRUE, POP_JUMP_IF_FALSE}:
after_jump_log = cur_state.newlog(str(o) + ", jumped (-1)")
log = cur_state.newlog(str(o) + ", not jumped (-1)")
op += State(label_pos[arg], cur_state.newstack(-1), cur_state.block_stack, after_jump_log),\
State(next_pos, cur_state.newstack(-1), cur_state.block_stack, log)
elif o == CONTINUE_LOOP:
next_stack, next_block_stack = cur_state.stack, cur_state.block_stack
last_popped_block = None
while next_block_stack[-1] != BlockType.LOOP_BODY:
last_popped_block = next_block_stack[-1]
next_stack, next_block_stack = next_stack[:-1], next_block_stack[:-1]
if next_stack != cur_state.stack:
log = cur_state.newlog("CONTINUE_LOOP, from non-loop block")
else:
log = cur_state.newlog("CONTINUE_LOOP")
jump_to_pos = label_pos[arg]
if last_popped_block == BlockType.WITH_BLOCK:
next_stack = next_stack[:-1] + (next_stack[-1] - 1,)
op += State(jump_to_pos, next_stack, next_block_stack, log),
elif o == SETUP_LOOP:
inside_loop_log = cur_state.newlog("SETUP_LOOP (+block)")
op += State(label_pos[arg], cur_state.stack, cur_state.block_stack, cur_state.log),\
State(next_pos, cur_state.stack + (0,), cur_state.block_stack + (BlockType.LOOP_BODY,), inside_loop_log)
elif o == SETUP_EXCEPT:
inside_except_log = cur_state.newlog("SETUP_EXCEPT, exception (+6, +block)")
inside_try_log = cur_state.newlog("SETUP_EXCEPT, try-block (+block)")
op += State(label_pos[arg], cur_state.stack + (6,), cur_state.block_stack + (BlockType.EXCEPTION,), inside_except_log),\
State(next_pos, cur_state.stack + (0,), cur_state.block_stack + (BlockType.TRY_EXCEPT,), inside_try_log)
elif o == SETUP_FINALLY:
inside_finally_block = cur_state.newlog("SETUP_FINALLY (+1)")
inside_try_log = cur_state.newlog("SETUP_FINALLY try-block (+block)")
op += State(label_pos[arg], cur_state.newstack(1), cur_state.block_stack, inside_finally_block),\
State(next_pos, cur_state.stack + (0,), cur_state.block_stack + (BlockType.TRY_FINALLY,), inside_try_log)
elif o == POP_BLOCK:
log = cur_state.newlog("POP_BLOCK (-block)")
op += State(next_pos, cur_state.stack[:-1], cur_state.block_stack[:-1], log),
elif o == POP_EXCEPT:
log = cur_state.newlog("POP_EXCEPT (-block)")
op += State(next_pos, cur_state.stack[:-1], cur_state.block_stack[:-1], log),
elif o == END_FINALLY:
if cur_state.block_stack[-1] == BlockType.SILENCED_EXCEPTION_BLOCK:
log = cur_state.newlog("END_FINALLY pop silenced exception block (-block)")
op += State(next_pos, cur_state.stack[:-1], cur_state.block_stack[:-1], log),
elif cur_state.block_stack[-1] == BlockType.EXCEPTION:
# Reraise exception
pass
else:
log = cur_state.newlog("END_FINALLY (-6)")
op += State(next_pos, cur_state.newstack(-6), cur_state.block_stack, log),
elif o == SETUP_WITH or o == SETUP_ASYNC_WITH:
inside_with_block = cur_state.newlog("SETUP_WITH, with-block (+1, +block)")
inside_finally_block = cur_state.newlog("SETUP_WITH, finally (+1)")
op += State(label_pos[arg], cur_state.newstack(1), cur_state.block_stack, inside_finally_block),\
State(next_pos, cur_state.stack + (1,), cur_state.block_stack + (BlockType.WITH_BLOCK,), inside_with_block)
elif o == WITH_CLEANUP_START:
                # There is a special case when the 'with' __exit__ function returns True:
                # that is the signal to silence the exception. In this case an additional element is pushed
                # and the next END_FINALLY command won't reraise the exception.
                # Emulate this situation on WITH_CLEANUP_START by creating a special block which will be
                # handled differently by WITH_CLEANUP_FINISH and will cause END_FINALLY not to reraise the exception.
log = cur_state.newlog("WITH_CLEANUP_START (+1)")
silenced_exception_log = cur_state.newlog("WITH_CLEANUP_START silenced_exception (+block)")
op += State(next_pos, cur_state.newstack(1), cur_state.block_stack, log),\
State(next_pos, cur_state.newstack(-7) + (9,), cur_state.block_stack + (BlockType.SILENCED_EXCEPTION_BLOCK,), silenced_exception_log)
elif o == WITH_CLEANUP_FINISH:
if cur_state.block_stack[-1] == BlockType.SILENCED_EXCEPTION_BLOCK:
# See comment in WITH_CLEANUP_START handler
log = cur_state.newlog("WITH_CLEANUP_FINISH silenced_exception (-1)")
op += State(next_pos, cur_state.newstack(-1), cur_state.block_stack, log),
else:
log = cur_state.newlog("WITH_CLEANUP_FINISH (-2)")
op += State(next_pos, cur_state.newstack(-2), cur_state.block_stack, log),
else:
raise ValueError("Unhandled opcode %s" % o)
return maxsize + 6 # for exception raise in deepest place
def to_code(self, from_function=False):
"""Assemble a Python code object from a Code object"""
num_fastnames = sum(1 for op, arg in self.code if isopcode(op) and op in haslocal)
is_function = self.newlocals or num_fastnames > 0 or len(self.args) > 0
nested = is_function and from_function
co_flags = {op[0] for op in self.code}
if not self.force_async_generator:
is_generator = (self.force_generator or
(YIELD_VALUE in co_flags or YIELD_FROM in co_flags)
)
else:
is_generator = False
no_free = (not self.freevars) and (not co_flags & hasfree)
is_native_coroutine = bool(self.force_coroutine or (co_flags & coroutine_opcodes))
assert not (is_native_coroutine and self.force_iterable_coroutine)
assert not (is_native_coroutine and self.force_async_generator)
co_flags =\
(not(STORE_NAME in co_flags or LOAD_NAME in co_flags or DELETE_NAME in co_flags)) |\
(self.newlocals and CO_NEWLOCALS) |\
(self.varargs and CO_VARARGS) |\
(self.varkwargs and CO_VARKEYWORDS) |\
(is_generator and CO_GENERATOR) |\
(no_free and CO_NOFREE) |\
(nested and CO_NESTED)
co_flags |= (is_native_coroutine and CO_COROUTINE) |\
(self.force_iterable_coroutine and CO_ITERABLE_COROUTINE) |\
(self.future_generator_stop and CO_FUTURE_GENERATOR_STOP) |\
(self.force_async_generator and CO_ASYNC_GENERATOR)
co_consts = [self.docstring]
co_names = []
co_varnames = list(self.args)
co_freevars = tuple(self.freevars)
# Find all cellvars beforehand for two reasons
# Need the number of them to construct the numeric arg for ops in hasfree
# Need to put args which are cells in the beginning of co_cellvars
cellvars = {arg for op, arg in self.code
if isopcode(op) and op in hasfree
and arg not in co_freevars}
co_cellvars = [jumps for jumps in self.args if jumps in cellvars]
def index(seq, item, eq=True, can_append=True):
for i, x in enumerate(seq):
if x == item if eq else x is item:
return i
if can_append:
seq.append(item)
return len(seq) - 1
else:
raise IndexError("Item not found")
jumps = []
label_pos = {}
lastlineno = self.firstlineno
lastlinepos = 0
co_code = bytearray()
co_lnotab = bytearray()
for i, (op, arg) in enumerate(self.code):
if isinstance(op, Label):
label_pos[op] = len(co_code)
elif op is SetLineno:
incr_lineno = arg - lastlineno
incr_pos = len(co_code) - lastlinepos
lastlineno = arg
lastlinepos += incr_pos
if incr_lineno != 0 or incr_pos != 0:
while incr_pos > 255:
co_lnotab += b"\xFF\0"
incr_pos -= 255
while incr_lineno > 255:
co_lnotab += bytes((incr_pos, 255))
incr_pos = 0
incr_lineno -= 255
if incr_pos or incr_lineno:
co_lnotab += bytes((incr_pos, incr_lineno))
elif op == opcode.EXTENDED_ARG:
self.code[i + 1][1] |= 1 << 32
else:
if op in hasconst:
if (isinstance(arg, Code) and
i + 2 < len(self.code) and
self.code[i + 2][0] in hascode):
arg = arg.to_code(from_function=is_function)
assert arg is not None
arg = index(co_consts, arg, 0)
elif op in hasname:
arg = index(co_names, arg)
elif op in hasjump:
jumps.append((len(co_code), arg))
co_code += bytes((0x90, 0, op, 0))
continue
elif op in haslocal:
arg = index(co_varnames, arg)
elif op in hascompare:
arg = index(cmp_op, arg, can_append=False)
elif op in hasfree:
try:
arg = index(co_freevars, arg, can_append=False) + len(cellvars)
except IndexError:
arg = index(co_cellvars, arg)
if arg is None:
arg = 0
if arg > 0xFFFFFF:
co_code += (opcode.EXTENDED_ARG | (arg >> 16 & 0xFF00)).to_bytes(2, "little")
if arg > 0xFFFF:
co_code += (opcode.EXTENDED_ARG | (arg >> 8 & 0xFF00)).to_bytes(2, "little")
if arg > 0xFF:
co_code += (opcode.EXTENDED_ARG | (arg & 0xFF00)).to_bytes(2, "little")
co_code += (op | (arg & 0xFF) << 8).to_bytes(2, "little")
for pos, label in jumps:
jump = label_pos[label]
if co_code[pos+2] in hasjrel:
jump -= pos + 4
if jump > 0xFFFF:
raise NotImplementedError("Multiple EXTENDED_ARG jumps not implemented")
co_code[pos + 3] = jump & 0xFF
co_code[pos + 1] = jump >> 8 & 0xFF
co_argcount = len(self.args) - self.varargs - self.varkwargs - self.kwonly
co_stacksize = self._compute_stacksize()
return CodeType(co_argcount, self.kwonly, len(co_varnames), co_stacksize, co_flags,
bytes(co_code), tuple(co_consts), tuple(co_names), tuple(co_varnames),
self.filename, self.name, self.firstlineno, bytes(co_lnotab), co_freevars,
tuple(co_cellvars))
|
test/shared/test_utils.py | roock/cloudsplaining | 1,411 | 11152673 | <reponame>roock/cloudsplaining
import unittest
from cloudsplaining.shared.utils import remove_wildcard_only_actions, remove_read_level_actions
class TestUtils(unittest.TestCase):
def test_remove_wildcard_only_actions(self):
actions = [
            # 2 wildcard-only actions
"secretsmanager:getrandompassword",
"secretsmanager:listsecrets",
# This one is wildcard OR "secret"
"secretsmanager:putsecretvalue",
]
results = remove_wildcard_only_actions(actions)
# print(results)
self.assertListEqual(results, ["secretsmanager:PutSecretValue"])
def test_remove_read_level_actions(self):
actions = [
"ssm:GetParameters",
"ecr:PutImage"
]
result = remove_read_level_actions(actions)
expected_result = ['ecr:PutImage']
self.assertListEqual(result, expected_result)
|
electrumsv/gui/qt/table_widgets.py | electrumsv/electrumsv | 136 | 11152674 | from typing import Callable, Optional
from PyQt5.QtCore import pyqtSignal, Qt
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QHBoxLayout, QToolButton, QWidget
from electrumsv.i18n import _
from .util import KeyEventLineEdit, read_QIcon
class ButtonLayout(QHBoxLayout):
def __init__(self, parent: Optional[QWidget]=None) -> None:
# NOTE(typing) The checker does not have signatures for the parent being an explicit None.
super().__init__(parent) # type: ignore
# The offset to insert the next button at.
self._button_index = 0
self.setSpacing(2)
self.setContentsMargins(0, 2, 0, 2)
def _create_button(self, icon_name: str, on_click: Callable[[], None], tooltip: str) \
-> QToolButton:
button = QToolButton()
button.setIcon(read_QIcon(icon_name))
button.setToolTip(tooltip)
button.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))
button.clicked.connect(on_click)
return button
def add_button(self, icon_name: str, on_click: Callable[[], None], tooltip: str,
position: Optional[int]=None) -> QToolButton:
button = self._create_button(icon_name, on_click, tooltip)
if position is None:
position = self._button_index
self._button_index += 1
self.insertWidget(position, button)
return button
class TableTopButtonLayout(ButtonLayout):
add_signal = pyqtSignal()
refresh_signal = pyqtSignal()
filter_signal = pyqtSignal(str)
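    # Illustrative wiring sketch (an assumption, not part of the original module):
    # a table pane would typically connect these signals to its own handlers, e.g.
    #     top_layout = TableTopButtonLayout(filter_placeholder_text=_("Filter keys.."))
    #     top_layout.refresh_signal.connect(self._on_refresh)   # hypothetical slot
    #     top_layout.filter_signal.connect(self._on_filter)     # hypothetical slot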
def __init__(self, parent: Optional[QWidget]=None, filter_placeholder_text: str="",
enable_filter: bool=True) -> None:
super().__init__(parent)
# The offset to insert the next button at.
self._button_index = 0
self._filter_button: Optional[QToolButton] = None
self._filter_box = KeyEventLineEdit(override_events={Qt.Key.Key_Escape})
# When the focus is in the search box, if the user presses Escape the filtering exits.
self._filter_box.key_event_signal.connect(self._on_search_override_key_press_event)
# As text in the search box changes, the filter updates in real time.
self._filter_box.textChanged.connect(self._on_search_text_changed)
if not filter_placeholder_text:
            filter_placeholder_text = _("Your filter text...")
self._filter_box.setPlaceholderText(filter_placeholder_text)
self._filter_box.hide()
self.setSpacing(2)
self.setContentsMargins(0, 2, 0, 2)
self.add_refresh_button()
if enable_filter:
self._filter_button = self.add_filter_button()
self.addWidget(self._filter_box, 1)
self.addStretch(1)
# Find the stretch QSpacerItem and hold a reference so we can add and remove it.
# The reason we do this is that otherwise the stretch item prevents the search box from
# expanding.
self._stretch_item = self.takeAt(self.count()-1)
self.addItem(self._stretch_item)
def add_create_button(self, tooltip: Optional[str]=None) -> QToolButton:
if tooltip is None:
tooltip = _("Add a new entry.")
return self.add_button("icons8-add-new-96-windows.png", self.add_signal.emit, tooltip)
def add_refresh_button(self, tooltip: Optional[str]=None) -> QToolButton:
if tooltip is None:
tooltip = _("Refresh the list.")
return self.add_button("refresh_win10_16.png", self.refresh_signal.emit, tooltip)
def add_filter_button(self, tooltip: Optional[str]=None) -> QToolButton:
if tooltip is None:
tooltip = _("Toggle list searching/filtering (Control+F).")
return self.add_button("icons8-filter-edit-32-windows.png", self.on_toggle_filter,
tooltip)
def _on_search_text_changed(self, text: str) -> None:
if self._filter_box.isHidden():
return
self.filter_signal.emit(text)
def _on_search_override_key_press_event(self, event_key: int) -> None:
if event_key == Qt.Key.Key_Escape:
self.on_toggle_filter()
# Call externally to toggle the filter.
def on_toggle_filter(self) -> None:
assert self._filter_button is not None
if self._filter_box.isHidden():
# Activate filtering and show the text field.
self._filter_button.setIcon(read_QIcon("icons8-clear-filters-32-windows.png"))
self._filter_box.show()
self.removeItem(self._stretch_item)
self._filter_box.setFocus()
else:
self.addItem(self._stretch_item)
# Deactivate filtering and hide the text field.
self._filter_button.setIcon(read_QIcon("icons8-filter-edit-32-windows.png"))
self._filter_box.setText('')
self._filter_box.hide()
self.filter_signal.emit('')
|
exampleapp/dinosaurs/views.py | jupiterFierce31/A-simple-example-of-a-Django-REST-app-Angular2 | 163 | 11152677 | from rest_framework import viewsets
from dinosaurs.serializers import DinosaurSerializer
from dinosaurs.models import Dinosaur
class DinosaurViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows dinosaurs to be viewed or edited.
"""
queryset = Dinosaur.objects.all()
serializer_class = DinosaurSerializer
|
tests/test_prefix.py | cclauss/personfinder | 561 | 11152698 | # Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for prefix.py module."""
from google.appengine.ext import db
import prefix
import unittest
class PersonForTest(db.Model):
name = db.StringProperty()
class PrefixTests(unittest.TestCase):
def test_normalize(self):
assert prefix.normalize(u'hi there') == u'HI THERE'
assert prefix.normalize(u'salut l\xe0') == u'SALUT LA'
assert prefix.normalize(
u'L\xf2ng Str\xefng w\xedth l\xf4ts \xf6f \xc3cc\xebnts') == \
u'LONG STRING WITH LOTS OF ACCENTS'
def test_add_prefix_properties(self):
prefix_properties = ['name']
prefix_types = ['_n_', '_n1_', '_n2_']
all_properties = ['name', 'name_n1_', 'name_n2_', 'name_n_']
prefix.add_prefix_properties(PersonForTest, 'name')
# Test the list of prefix properties was recorded
assert PersonForTest._prefix_properties == prefix_properties
# Test all prefix properties have been added
for prefix_type in prefix_types:
assert hasattr(PersonForTest, 'name' + prefix_type)
# Test that the model class was updated
assert sorted(PersonForTest.properties()) == all_properties
def test_update_prefix_properties(self):
prefix.add_prefix_properties(PersonForTest, 'name')
test_person = PersonForTest(name='John')
prefix.update_prefix_properties(test_person)
assert test_person.name_n_ == 'JOHN'
assert test_person.name_n1_ == 'J'
assert test_person.name_n2_ == 'JO'
def test_filter_prefix(self):
# Test 1-char prefix filter
test_query = PersonForTest.all()
test_criteria = {'name': 'b'}
prefix.filter_prefix(test_query, **test_criteria)
assert test_query._get_query() == {'name_n1_ =': u'B'}
# Test 2-char prefix filter
test_query = PersonForTest.all()
test_criteria = {'name': 'bryan'}
prefix.filter_prefix(test_query, **test_criteria)
assert test_query._get_query() == {'name_n2_ =': u'BR'}
def test_get_prefix_matches(self):
db.put(PersonForTest(name='Bryan'))
db.put(PersonForTest(name='Bruce'))
db.put(PersonForTest(name='Benny'))
db.put(PersonForTest(name='Lenny'))
test_query = PersonForTest.all().order('name')
# Test full string match
test_criteria = {'name': 'bryan'}
test_people = list(person.name for person in prefix.get_prefix_matches(
test_query, 100, **test_criteria))
assert test_people == ['Bryan']
# Test 2-char prefix match
test_criteria = {'name': 'br'}
test_people = set(person.name for person in prefix.get_prefix_matches(
test_query, 100, **test_criteria))
assert test_people == set(['Bruce', 'Bryan'])
# Test 1-char prefix match
test_criteria = {'name': 'b'}
test_people = set(person.name for person in prefix.get_prefix_matches(
test_query, 100, **test_criteria))
assert test_people == set(['Benny', 'Bruce', 'Bryan'])
# Test limit
test_criteria = {'name': 'b'}
test_people = set(person.name for person in prefix.get_prefix_matches(
test_query, 1, **test_criteria))
assert test_people == set(['Benny'])
if __name__ == '__main__':
unittest.main()
|
setup.py | amcgavin/react-render | 138 | 11152706 | from setuptools import setup, find_packages
VERSION = '1.3.2'
setup(
name='react-render-client',
version=VERSION,
packages=find_packages(exclude=['example', 'tests']),
install_requires=[
'django>=1.6',
'requests>=2,<3',
],
description='Render and bundle React components from a Django application',
long_description='Documentation at https://github.com/mic159/react-render',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/mic159/react-render',
download_url='https://github.com/mic159/react-render/tarball/{}'.format(VERSION),
keywords=['react', 'reactjs', 'django', 'isomorphic', 'templates'],
license='MIT',
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
]
)
|
pvlib/spa_c_files/setup.py | sjanzou/pvlib-python | 695 | 11152713 | # setup.py
import os
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
DIRNAME = os.path.dirname(__file__)
# patch spa.c
with open(os.path.join(DIRNAME, 'spa.c'), 'rb') as f:
SPA_C = f.read()
# replace timezone with time_zone to avoid nameclash with the function
# __timezone which is defined by a MACRO in pyconfig.h as timezone
# see https://bugs.python.org/issue24643
SPA_C = SPA_C.replace(b'timezone', b'time_zone')
with open(os.path.join(DIRNAME, 'spa.c'), 'wb') as f:
f.write(SPA_C)
# patch spa.h
with open(os.path.join(DIRNAME, 'spa.h'), 'rb') as f:
SPA_H = f.read()
# replace timezone with time_zone to avoid nameclash with the function
# __timezone which is defined by a MACRO in pyconfig.h as timezone
# see https://bugs.python.org/issue24643
SPA_H = SPA_H.replace(b'timezone', b'time_zone')
with open(os.path.join(DIRNAME, 'spa.h'), 'wb') as f:
f.write(SPA_H)
SPA_SOURCES = [os.path.join(DIRNAME, src) for src in ['spa_py.pyx', 'spa.c']]
setup(
ext_modules=cythonize([Extension('spa_py', SPA_SOURCES)])
)
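# Typical invocation for a Cython extension like this one (an assumption based on the
# standard distutils/Cython workflow; it is not documented in this file):
#
#     python setup.py build_ext --inplace
#
# which cythonizes spa_py.pyx and links the result against the patched spa.c.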
|
pandas_ta/trend/decay.py | ryanrussell/pandas-ta | 2,298 | 11152726 | <gh_stars>1000+
# -*- coding: utf-8 -*-
from numpy import exp as npExp
from pandas import DataFrame
from pandas_ta.utils import get_offset, verify_series
def decay(close, kind=None, length=None, mode=None, offset=None, **kwargs):
"""Indicator: Decay"""
# Validate Arguments
length = int(length) if length and length > 0 else 5
mode = mode.lower() if isinstance(mode, str) else "linear"
close = verify_series(close, length)
offset = get_offset(offset)
if close is None: return
# Calculate Result
_mode = "L"
if mode == "exp" or kind == "exponential":
_mode = "EXP"
diff = close.shift(1) - npExp(-length)
else: # "linear"
diff = close.shift(1) - (1 / length)
diff[0] = close[0]
tdf = DataFrame({"close": close, "diff": diff, "0": 0})
ld = tdf.max(axis=1)
# Offset
if offset != 0:
ld = ld.shift(offset)
# Handle fills
if "fillna" in kwargs:
ld.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
ld.fillna(method=kwargs["fill_method"], inplace=True)
# Name and Categorize it
ld.name = f"{_mode}DECAY_{length}"
ld.category = "trend"
return ld
decay.__doc__ = \
"""Decay
Creates a decay moving forward from prior signals like crosses. The default is
"linear". Exponential is optional as "exponential" or "exp".
Sources:
https://tulipindicators.org/decay
Calculation:
Default Inputs:
length=5, mode=None
if mode == "exponential" or mode == "exp":
max(close, close[-1] - exp(-length), 0)
else:
max(close, close[-1] - (1 / length), 0)
Args:
close (pd.Series): Series of 'close's
    length (int): Its period. Default: 5
mode (str): If 'exp' then "exponential" decay. Default: 'linear'
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
|
test/test_orm_ext/apps/Test/models.py | timgates42/uliweb | 202 | 11152731 | from uliweb.orm import *
class User(Model):
username = Field(str)
def print_name(self):
return 'print_name' |
test/programytest/dialog/test_conversation.py | cdoebler1/AIML2 | 345 | 11152785 | <reponame>cdoebler1/AIML2
import unittest
from programy.bot import Bot
from programy.config.bot.bot import BotConfiguration
from programy.context import ClientContext
from programy.dialog.conversation import Conversation
from programy.dialog.question import Question
from programy.dialog.sentence import Sentence
from programy.parser.pattern.match import Match
from programy.parser.pattern.matchcontext import MatchContext
from programy.parser.pattern.nodes.word import PatternWordNode
from programytest.client import TestClient
class ConversationTests(unittest.TestCase):
def test_pop_methods(self):
client = TestClient()
client_context = ClientContext(client, "testid")
client_context.bot = Bot(BotConfiguration(), client)
client_context.bot.configuration.conversations._max_histories = 3
client_context.brain = client_context.bot.brain
conversation = Conversation(client_context)
question1 = Question.create_from_text(client_context, "Hello There")
conversation.record_dialog(question1)
self.assertEquals(1, len(conversation.questions))
question2 = Question.create_from_text(client_context, "How are you")
conversation.record_dialog(question2)
self.assertEquals(2, len(conversation.questions))
conversation.pop_dialog()
self.assertEquals(1, len(conversation.questions))
conversation.pop_dialog()
self.assertEquals(0, len(conversation.questions))
conversation.pop_dialog()
self.assertEquals(0, len(conversation.questions))
def test_properties(self):
client = TestClient()
client_context = ClientContext(client, "testid")
client_context.bot = Bot(BotConfiguration(), client)
client_context.bot.configuration.conversations._max_histories = 3
client_context.brain = client_context.bot.brain
conversation = Conversation(client_context)
conversation.set_property("name1", "value1")
self.assertEquals("value1", conversation.property("name1"))
conversation.set_property("name2", "value2")
self.assertEquals("value2", conversation.property("name2"))
conversation.set_property("name2", "value3")
self.assertEquals("value3", conversation.property("name2"))
self.assertEquals(None, conversation.property("name3"))
def test_topic_pattern(self):
client = TestClient()
client_context = ClientContext(client, "testid")
client_context.bot = Bot(BotConfiguration(), client)
client_context.bot.configuration.conversations._max_histories = 3
client_context.brain = client_context.bot.brain
conversation = Conversation(client_context)
self.assertEquals("*", conversation.get_topic_pattern(client_context))
conversation.set_property("topic", "TOPIC1")
self.assertEquals("TOPIC1", conversation.get_topic_pattern(client_context))
def test_topic_pattern_topic_none(self):
client = TestClient()
client_context = ClientContext(client, "testid")
client_context.bot = Bot(BotConfiguration(), client)
client_context.bot.configuration.conversations._max_histories = 3
client_context.brain = client_context.bot.brain
conversation = Conversation(client_context)
conversation.set_property("topic", None)
self.assertEquals("*", conversation.get_topic_pattern(client_context))
conversation.set_property("topic", "TOPIC1")
self.assertEquals("TOPIC1", conversation.get_topic_pattern(client_context))
    def test_parse_last_sentences_from_response_basic(self):
client = TestClient()
client_context = ClientContext(client, "testid")
client_context.bot = Bot(BotConfiguration(), client)
client_context.bot.configuration.conversations._max_histories = 3
client_context.brain = client_context.bot.brain
conversation = Conversation(client_context)
self.assertEquals("*", conversation.parse_last_sentences_from_response(""))
self.assertEquals("*", conversation.parse_last_sentences_from_response("."))
self.assertEquals("HELLO", conversation.parse_last_sentences_from_response("HELLO"))
self.assertEquals("HELLO THERE", conversation.parse_last_sentences_from_response("HELLO THERE"))
self.assertEquals("THERE", conversation.parse_last_sentences_from_response("HELLO. THERE"))
self.assertEquals("THERE", conversation.parse_last_sentences_from_response("HELLO. THERE!"))
def test_that_pattern(self):
client = TestClient()
client_context = ClientContext(client, "testid")
client_context.bot = Bot(BotConfiguration(), client)
client_context.bot.configuration.conversations._max_histories = 3
client_context.brain = client_context.bot.brain
conversation = Conversation(client_context)
self.assertEquals("*", conversation.parse_last_sentences_from_response(""))
self.assertEquals("HELLO", conversation.parse_last_sentences_from_response("HELLO"))
self.assertEquals("HELLO", conversation.parse_last_sentences_from_response(".HELLO"))
self.assertEquals("HELLO", conversation.parse_last_sentences_from_response("HELLO."))
self.assertEquals("HELLO", conversation.parse_last_sentences_from_response(".HELLO."))
self.assertEquals("HELLO THERE", conversation.parse_last_sentences_from_response("HELLO THERE"))
self.assertEquals("THERE", conversation.parse_last_sentences_from_response("HELLO. THERE"))
self.assertEquals("THERE", conversation.parse_last_sentences_from_response("HELLO. THERE!"))
def test_conversation(self):
client = TestClient()
client_context = ClientContext(client, "testid")
client_context.bot = Bot(BotConfiguration(), client)
client_context.bot.configuration.conversations._max_histories = 3
client_context.brain = client_context.bot.brain
conversation = Conversation(client_context)
self.assertIsNotNone(conversation)
self.assertEqual(0, len(conversation._questions))
self.assertEqual(3, conversation._max_histories)
self.assertEqual(1, len(conversation._properties))
with self.assertRaises(Exception):
conversation.current_question()
with self.assertRaises(Exception):
conversation.previous_nth_question(0)
question1 = Question.create_from_text(client_context, "Hello There")
conversation.record_dialog(question1)
self.assertEqual(question1, conversation.current_question())
with self.assertRaises(Exception):
conversation.previous_nth_question(1)
question2 = Question.create_from_text(client_context, "Hello There Again")
conversation.record_dialog(question2)
self.assertEqual(question2, conversation.current_question())
self.assertEqual(question1, conversation.previous_nth_question(1))
with self.assertRaises(Exception):
conversation.previous_nth_question(3)
question3 = Question.create_from_text(client_context, "Hello There Again Again")
conversation.record_dialog(question3)
self.assertEqual(question3, conversation.current_question())
self.assertEqual(question2, conversation.previous_nth_question(1))
with self.assertRaises(Exception):
conversation.previous_nth_question(4)
# Max Histories for this test is 3
# Therefore we should see the first question, pop of the stack
question4 = Question.create_from_text(client_context, "Hello There Again Again Again")
conversation.record_dialog(question4)
self.assertEqual(question4, conversation.current_question())
self.assertEqual(question3, conversation.previous_nth_question(1))
with self.assertRaises(Exception):
conversation.previous_nth_question(5)
def test_parse_last_sentences_from_response(self):
client = TestClient()
client_context = ClientContext(client, "testid")
client_context.bot = Bot(BotConfiguration(), client)
client_context.bot.configuration.conversations._max_histories = 3
client_context.brain = client_context.bot.brain
conversation = Conversation(client_context)
self.assertIsNotNone(conversation)
response = "Hello World"
that = conversation.parse_last_sentences_from_response(response)
self.assertEqual("Hello World", that)
response = "Hello World. Second sentence"
that = conversation.parse_last_sentences_from_response(response)
self.assertEqual("Second sentence", that)
response = "Hello World. Second sentence. Third Sentence"
that = conversation.parse_last_sentences_from_response(response)
self.assertEqual("Third Sentence", that)
def test_to_json(self):
client = TestClient()
client_context = ClientContext(client, "testid")
client_context.bot = Bot(BotConfiguration(), client)
client_context.bot.configuration.conversations._max_histories = 3
client_context.brain = client_context.bot.brain
conversation1 = Conversation(client_context)
conversation1.properties["convo1"] = "value1"
matched_context = MatchContext(100, 100)
matched_context.matched_nodes.append(Match(Match.WORD, PatternWordNode("Hello"), "Hello"))
sentence = Sentence(client_context, text="Hi", response="Hello there", matched_context=matched_context)
question1 = Question.create_from_sentence(sentence)
question1.properties['quest1'] = "value2"
conversation1.record_dialog(question1)
json_data = conversation1.to_json()
self.assertIsNotNone(json_data)
self.assertEqual("testclient", json_data['client_context']['clientid'])
self.assertEqual("testid", json_data['client_context']['userid'])
self.assertEqual("bot", json_data['client_context']['botid'])
self.assertEqual("brain", json_data['client_context']['brainid'])
self.assertEqual(0, json_data['client_context']['depth'])
conversation2 = Conversation.from_json(client_context, json_data)
self.assertEqual(conversation1._client_context.client.id, conversation2._client_context.client.id)
self.assertEqual(conversation1._client_context.userid, conversation2._client_context.userid)
self.assertEqual(conversation1._client_context.bot.id, conversation2._client_context.bot.id)
self.assertEqual(conversation1._client_context.brain.id, conversation2._client_context.brain.id)
self.assertEqual(conversation1._client_context._question_start_time,
conversation2._client_context._question_start_time)
self.assertEqual(conversation1._client_context._question_depth, conversation2._client_context._question_depth)
self.assertEqual(conversation1._client_context._id, conversation2._client_context._id)
self.assertEqual(conversation1.properties, conversation2.properties)
self.assertEqual(conversation1.max_histories, conversation2.max_histories)
self.assertNotEquals(0, len(conversation1.questions))
self.assertNotEquals(0, len(conversation2.questions))
self.assertEqual(len(conversation1.questions), len(conversation2.questions))
for i in range(len(conversation2.questions)):
q1 = conversation1.questions[i]
q2 = conversation2.questions[i]
self.assertEqual(q1.srai, q2.srai)
self.assertEqual(q1._current_sentence_no, q2._current_sentence_no)
self.assertEqual(q1.properties, q2.properties)
self.assertNotEquals(0, len(q1.sentences))
self.assertNotEquals(0, len(q2.sentences))
self.assertEqual(len(q1.sentences), len(q2.sentences))
for j in range(len(q2.sentences)):
s1 = q1.sentences[j]
s2 = q2.sentences[j]
self.assertEqual(s1.words, s2.words)
self.assertEqual(s1.response, s2.response)
self.assertEqual(s1.positivity, s2.positivity)
self.assertEqual(s1.subjectivity, s2.subjectivity)
mc1 = s1.matched_context
mc2 = s2.matched_context
self.assertEquals(mc1.template_node, mc2.template_node)
self.assertEquals(mc1.max_search_depth, mc2.max_search_depth)
self.assertEquals(mc1.max_search_timeout, mc2.max_search_timeout)
time1 = mc1._total_search_start.strftime("%d/%m/%Y, %H:%M:%S")
time2 = mc2._total_search_start.strftime("%d/%m/%Y, %H:%M:%S")
self.assertEquals(time1, time2)
self.assertNotEquals(0, len(mc1.matched_nodes))
self.assertNotEquals(0, len(mc2.matched_nodes))
self.assertEquals(len(mc1.matched_nodes), len(mc2.matched_nodes))
for k in range(len(mc1.matched_nodes)):
mn1 = mc1.matched_nodes[k]
mn2 = mc2.matched_nodes[k]
self.assertEquals(mn1._matched_node_str, mn2._matched_node_str)
self.assertEquals(mn1._matched_node_type, mn2._matched_node_type)
self.assertEquals(mn1._matched_node_multi_word, mn2._matched_node_multi_word)
self.assertEquals(mn1._matched_node_wildcard, mn2._matched_node_wildcard)
self.assertEquals(mn1._matched_node_words, mn2._matched_node_words)
|
tests/st/func/datavisual/constants.py | fapbatista/mindinsight | 216 | 11152803 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Constants for st."""
import tempfile
SUMMARY_BASE_DIR = tempfile.NamedTemporaryFile().name
SUMMARY_DIR_PREFIX = "summary"
SUMMARY_DIR_NUM_FIRST = 5
SUMMARY_DIR_NUM_SECOND = 11
RESERVOIR_DIR_NAME = "reservoir_dir"
RESERVOIR_TRAIN_ID = "./%s" % RESERVOIR_DIR_NAME
RESERVOIR_STEP_NUM = 15
RESERVOIR_DIR_NUM = 1
MULTIPLE_DIR_NAME = "multiple_dir"
MULTIPLE_TRAIN_ID = "./%s" % MULTIPLE_DIR_NAME
MULTIPLE_LOG_NUM = 3
MULTIPLE_DIR_NUM = 1
# Please make sure SUMMARY_DIR_NUM is greater than `MAX_DATA_LOADER_SIZE`.
# Mainly used to test caching.
SUMMARY_DIR_NUM = SUMMARY_DIR_NUM_FIRST\
+ SUMMARY_DIR_NUM_SECOND\
+ RESERVOIR_DIR_NUM\
+ MULTIPLE_DIR_NUM
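# Illustrative check (an assumption; MAX_DATA_LOADER_SIZE lives in the datavisual
# settings, whose exact import path is not shown in this file):
#
#     assert SUMMARY_DIR_NUM > MAX_DATA_LOADER_SIZE  # 5 + 11 + 1 + 1 = 18 directories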
|
mul_034/script_util.py | cibu/language-resources | 177 | 11152804 | <gh_stars>100-1000
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import os.path
import sys
# Non-hermetic dependency on PyICU. Install this in one of several ways:
# * pip install PyICU
# * apt-get install python-pyicu python3-icu
import icu
from utils import utf8
STOPWORDS = frozenset([
'ACCENT',
'CHARACTER',
'COMBINING',
'CONSONANT',
'CURRENCY',
'DIGIT',
'FRACTION',
'INDEPENDENT',
'INHERENT',
'LETTER',
'LOGOGRAM',
'LOGOTYPE',
'MARK',
'MEDIAL',
'MODIFIER',
'NUMBER',
'PUNCTUATION',
'SARA',
'SEMIVOWEL',
'SIGN',
'STRESS',
'SUBJOINED',
'SYLLABLE',
'SYMBOL',
'TONE',
'VOWEL',
])
DEPENDENT = (
'COMBINING',
'CONSONANT SIGN',
'MARK',
'MEDIAL',
'SARA',
'SUBJOINED',
'VOWEL SIGN',
)
DIGITS = {
'DIGIT ZERO': '0',
'DIGIT ONE': '1',
'DIGIT TWO': '2',
'DIGIT THREE': '3',
'DIGIT FOUR': '4',
'DIGIT FIVE': '5',
'DIGIT SIX': '6',
'DIGIT SEVEN': '7',
'DIGIT EIGHT': '8',
'DIGIT NINE': '9',
}
EXCEPTIONS = {
0x09F2: 'taka',
0x09F3: 'bangladeshi_taka',
0x0BFA: 'number',
0x0D4E: 'reph',
0x109E: 'symbol_shan_one',
0xA8EB: '-letter_u',
0x1107F: 'number_joiner',
0x110BD: 'number',
0x11131: '-o_mark',
0x11132: '-au_mark',
0x111CB: 'vowel_modifier',
}
ALIASES = {
# % NORMATIVE ALIASES / corrections from NameAliases
0x0CDE: 'KANNADA LETTER LLLA',
0x0E9D: 'LAO LETTER FO FON',
0x0E9F: 'LAO LETTER FO FAY',
0x0EA3: 'LAO LETTER RO',
0x0EA5: 'LAO LETTER LO',
0x0FD0: 'TIBETAN MARK BKA- SHOG GI MGO RGYAN',
# = informative aliases / Sanskritic names for Sinhala characters
0x0D82: 'sinhala sign anusvara',
0x0D83: 'sinhala sign visarga',
0x0D85: 'sinhala letter a',
0x0D86: 'sinhala letter aa',
0x0D87: 'sinhala letter ae',
0x0D88: 'sinhala letter aae',
0x0D89: 'sinhala letter i',
0x0D8a: 'sinhala letter ii',
0x0D8b: 'sinhala letter u',
0x0D8c: 'sinhala letter uu',
0x0D8d: 'sinhala letter vocalic r',
0x0D8e: 'sinhala letter vocalic rr',
0x0D8f: 'sinhala letter vocalic l',
0x0D90: 'sinhala letter vocalic ll',
0x0D91: 'sinhala letter e',
0x0D92: 'sinhala letter ee',
0x0D93: 'sinhala letter ai',
0x0D94: 'sinhala letter o',
0x0D95: 'sinhala letter oo',
0x0D96: 'sinhala letter au',
0x0D9a: 'sinhala letter ka',
0x0D9b: 'sinhala letter kha',
0x0D9c: 'sinhala letter ga',
0x0D9d: 'sinhala letter gha',
0x0D9e: 'sinhala letter nga',
0x0D9F: 'sinhala letter nnga',
0x0DA0: 'sinhala letter ca',
0x0DA1: 'sinhala letter cha',
0x0DA2: 'sinhala letter ja',
0x0DA3: 'sinhala letter jha',
0x0DA4: 'sinhala letter nya',
0x0DA5: 'sinhala letter jnya',
0x0DA6: 'sinhala letter nyja',
0x0DA7: 'sinhala letter tta',
0x0DA8: 'sinhala letter ttha',
0x0DA9: 'sinhala letter dda',
0x0DAA: 'sinhala letter ddha',
0x0DAB: 'sinhala letter nna',
0x0DAC: 'sinhala letter nndda',
0x0DAD: 'sinhala letter ta',
0x0DAE: 'sinhala letter tha',
0x0DAF: 'sinhala letter da',
0x0DB0: 'sinhala letter dha',
0x0DB1: 'sinhala letter na',
0x0DB3: 'sinhala letter nda',
0x0DB4: 'sinhala letter pa',
0x0DB5: 'sinhala letter pha',
0x0DB6: 'sinhala letter ba',
0x0DB7: 'sinhala letter bha',
0x0DB8: 'sinhala letter ma',
0x0DB9: 'sinhala letter mba',
0x0DBA: 'sinhala letter ya',
0x0DBB: 'sinhala letter ra',
0x0DBD: 'sinhala letter la',
0x0DC0: 'sinhala letter va',
0x0DC1: 'sinhala letter sha',
0x0DC2: 'sinhala letter ssa',
0x0DC3: 'sinhala letter sa',
0x0DC4: 'sinhala letter ha',
0x0DC5: 'sinhala letter lla',
0x0DC6: 'sinhala letter fa',
0x0DCA: 'sinhala sign virama',
0x0DCF: 'sinhala vowel sign aa',
0x0DD0: 'sinhala vowel sign ae',
0x0DD1: 'sinhala vowel sign aae',
0x0DD2: 'sinhala vowel sign i',
0x0DD3: 'sinhala vowel sign ii',
0x0DD4: 'sinhala vowel sign u',
0x0DD6: 'sinhala vowel sign uu',
0x0DD8: 'sinhala vowel sign vocalic r',
0x0DD9: 'sinhala vowel sign e',
0x0DDA: 'sinhala vowel sign ee',
0x0DDB: 'sinhala vowel sign ai',
0x0DDC: 'sinhala vowel sign o',
0x0DDD: 'sinhala vowel sign oo',
0x0DDE: 'sinhala vowel sign au',
0x0DDF: 'sinhala vowel sign vocalic l',
0x0DF2: 'sinhala vowel sign vocalic rr',
0x0DF3: 'sinhala vowel sign vocalic ll',
}
BRAHMIC_OFFICIAL_INDIA = [
# South and Central Asia-I: Official Scripts of India
'Deva',
'Beng',
'Guru',
'Gujr',
'Orya',
'Taml',
'Telu',
'Knda',
'Mlym',
]
BRAHMIC_OTHER = [
# South and Central Asia-II: Other Modern Scripts
'Sinh',
'Newa',
'Tibt',
'Limb',
'Mtei',
'Cakm',
'Lepc',
'Saur',
# South and Central Asia-III: Ancient Scripts
'Brah',
'Bhks',
'Phag',
'Marc',
# South and Central Asia-IV: Other Historic Scripts
'Sylo',
'Kthi',
'Shrd',
'Takr',
'Sidd',
'Mahj',
'Khoj',
'Sind',
'Mult',
'Tirh',
'Modi',
'Gran',
'Ahom',
# Southeast Asia
'Thai',
'Laoo',
'Mymr',
'Khmr',
'Tale',
'Talu',
'Lana',
'Tavt',
'Cham',
# Indonesia and Oceania
'Tglg',
'Hano',
'Buhd',
'Tagb',
'Bugi',
'Bali',
'Java',
'Rjng',
'Batk',
'Sund',
]
EPSILON = '<epsilon>'
def CharToCodepoint(char):
if len(char) == 1:
return ord(char)
elif len(char) == 2:
hi = ord(char[0])
lo = ord(char[1])
if hi & 0xFC00 == 0xD800 and lo & 0xFC00 == 0xDC00:
hi &= 0x3FF
lo &= 0x3FF
return 0x10000 + (hi << 10) + lo
raise TypeError('CharToCodepoint() expected a character or surrogate pair')
def CharName(c):
cp = CharToCodepoint(c)
if cp in ALIASES:
return ALIASES[cp].upper()
return icu.Char.charName(c)
def RemovePrefix(s, prefix):
if s.startswith(prefix):
return s[len(prefix):]
else:
return s
def ScriptSymbols(script, include_script_code=False):
"""Yields short symbol names for all characters in the given script."""
script_chars = icu.UnicodeSet(r'[\p{%s}\u200C\u200D]' % script.getName())
script_name = script.getName().replace('_', ' ')
utf8.stderr.write('Found %d characters specific to %s (%s)\n' %
(len(script_chars), script_name, script.getShortName()))
prefix = script_name.upper()
for c in script_chars:
label = CharToCodepoint(c)
if label in EXCEPTIONS:
symbol_name = EXCEPTIONS[label]
else:
name = CharName(c)
if not name:
utf8.stderr.write('Warning: No Unicode name for %04X\n' % label)
continue
name = RemovePrefix(name, prefix)
name = RemovePrefix(name, 'ZERO WIDTH')
assert name
for old, new in DIGITS.items():
name = name.replace(old, new)
components = [t for t in name.split() if t not in STOPWORDS]
symbol_name = '_'.join(components).lower()
assert symbol_name, ('Empty symbol name for %04X (%s)' % (label, name))
if symbol_name.startswith('-'):
symbol_name = "'%s" % symbol_name[1:]
if any(substr in name for substr in DEPENDENT):
symbol_name = '-%s' % symbol_name
if include_script_code:
symbol_name = '%s:%s' % (script.getShortName(), symbol_name)
yield symbol_name, label
return
def GetScript(raw_script_name):
"""Returns an icu Script object for a given script name."""
script_codes = icu.Script.getCode(raw_script_name)
if len(script_codes) != 1:
utf8.stderr.write('Error: Unknown or ambiguous script: %s\n' %
raw_script_name)
return None
return icu.Script(script_codes[0])
def IsBijectiveMapping(symbols_and_labels):
"""Checks if the given symbol-label mapping is bijective."""
ok = True
symbol2label = {EPSILON: 0}
label2symbol = {0: EPSILON}
for symbol, label in symbols_and_labels:
if symbol in symbol2label:
utf8.stderr.write('Symbol "%s" is associated with labels %d and %d\n' %
(symbol, symbol2label[symbol], label))
ok = False
else:
symbol2label[symbol] = label
if label in label2symbol:
utf8.stderr.write('Label %d is associated with symbols "%s" and "%s"\n' %
(label, label2symbol[label], symbol))
ok = False
else:
label2symbol[label] = symbol
return ok
def WriteOpenFstSymbolTable(writer, symbols_and_labels):
"""Writes an OpenFst SymbolTable in text format to the given writer."""
writer.write('%s\t0\n' % EPSILON)
for symbol_label in symbols_and_labels:
writer.write('%s\t%d\n' % symbol_label)
writer.flush()
return
def SymbolsToFile(filename, symbols_and_labels, print_info=False):
with utf8.open(filename, mode='w') as writer:
WriteOpenFstSymbolTable(writer, symbols_and_labels)
if print_info:
utf8.stderr.write('Wrote %s with %d symbols\n' %
(filename, len(symbols_and_labels)))
return
def ReadGraphemeDataFromFile(path):
with utf8.open(path, mode='r') as reader:
label = 0xF000
for line in reader:
line = line.strip('\n')
if not line or line.startswith('#'):
continue
fields = line.split('\t')
assert len(fields) >= 2
if len(fields) > 2:
codepoints = fields[2]
else:
codepoints = ''
yield fields[0], fields[1], codepoints, label
label += 1
return
def ReadGraphemeDataDefault(sibling=sys.argv[0]):
path = os.path.join(os.path.dirname(sibling), 'indic_graphemes.tsv')
for row in ReadGraphemeDataFromFile(path):
yield row
return
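# Hedged usage sketch (added for illustration, not part of the original
# module): demonstrates CharToCodepoint on a UTF-16 surrogate pair and writes
# a tiny symbol table with WriteOpenFstSymbolTable. The demo symbols are
# made-up assumptions; sys.stdout stands in for the project's utf8 writer so
# the sketch stays self-contained.
if __name__ == '__main__':
    # U+10000 expressed as the surrogate pair D800 DC00.
    assert CharToCodepoint('\ud800\udc00') == 0x10000
    demo_symbols = [('letter_a', 0xF000), ('-vowel_sign_i', 0xF001)]
    if IsBijectiveMapping(demo_symbols):
        WriteOpenFstSymbolTable(sys.stdout, demo_symbols)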
|
tests/conftest.py | ivoupton/sheet2dict | 208 | 11152806 | import pytest
import sys
from pathlib import Path
sys.path.append(str(Path(".").absolute().parent))
from sheet2dict import Worksheet
@pytest.fixture(scope="module")
def worksheet():
return Worksheet()
|
trakt/objects/person.py | ruinernin/trakt.py | 147 | 11152821 |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import from_iso8601_datetime
from trakt.objects.core.helpers import update_attributes
class Person(object):
def __init__(self, client, keys=None, index=None):
self._client = client
self.keys = keys
"""
:type: :class:`~python:list` of :class:`~python:tuple`
Keys (for imdb, tvdb, etc..), defined as:
..code-block::
[
(<service>, <id>)
]
"""
self.index = index
"""
:type: :class:`~python:int`
Playlist item index
"""
self.name = None
"""
:type: :class:`~python:str`
Name
"""
# Timestamps
self.listed_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this item was added to the list (or `None`)
"""
@property
def pk(self):
"""Retrieve the primary key (unique identifier for the item).
Provides the following identifiers (by media type):
- **movie:** imdb
- **show:** tvdb
- **season:** tvdb
- **episode:** tvdb
- **custom_list:** trakt
- **person:** tmdb
:return: :code:`(<service>, <value>)` or :code:`None` if no primary key is available
:rtype: :class:`~python:tuple`
"""
if not self.keys:
return None
return self.keys[0]
def _update(self, info=None, **kwargs):
if not info:
return
update_attributes(self, info, [
'name'
])
# Set timestamps
if 'listed_at' in info:
self.listed_at = from_iso8601_datetime(info.get('listed_at'))
@classmethod
def _construct(cls, client, keys, info=None, index=None, **kwargs):
person = cls(client, keys, index=index)
person._update(info, **kwargs)
return person
def __getstate__(self):
state = self.__dict__
if hasattr(self, '_client'):
del state['_client']
return state
def __repr__(self):
if self.name:
return '<Person %r>' % self.name
return '<Person>'
def __str__(self):
return self.__repr__()
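# Hedged usage sketch (added for illustration, not part of the original
# module): constructs a Person directly from a keys list as described in the
# class docstring. The client argument and the tmdb id are placeholder
# assumptions used only for demonstration.
if __name__ == '__main__':
    demo_person = Person._construct(
        None, [('tmdb', '287')], info={'name': 'Example Actor'})
    print(demo_person.pk)  # -> ('tmdb', '287')
    print(demo_person)     # -> <Person 'Example Actor'>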
|
tests/test_password_change.py | meals-app/django-graphql-auth | 290 | 11152822 | from django.contrib.auth import get_user_model
from graphql_jwt.refresh_token.models import RefreshToken
from .testCases import RelayTestCase, DefaultTestCase
from graphql_auth.utils import revoke_user_refresh_token
from graphql_auth.constants import Messages
from graphql_auth.utils import get_token, get_token_payload
class PasswordChangeTestCaseMixin:
def setUp(self):
self.user = self.register_user(
email="<EMAIL>", username="gaa", verified=True
)
self.old_pass = self.user.password
def test_password_change(self):
"""
change password
"""
variables = {"user": self.user}
executed = self.make_request(self.get_query(), variables)
self.assertEqual(executed["success"], True)
self.assertEqual(executed["errors"], None)
self.assertTrue(executed["token"])
self.assertTrue(executed["refreshToken"])
self.user.refresh_from_db()
self.assertFalse(self.old_pass == self.user.password)
def test_mismatch_passwords(self):
"""
wrong inputs
"""
variables = {"user": self.user}
executed = self.make_request(self.get_query("wrong"), variables)
self.assertEqual(executed["success"], False)
self.assertTrue(executed["errors"]["newPassword2"])
self.assertFalse(executed["token"])
self.assertFalse(executed["refreshToken"])
self.user.refresh_from_db()
self.assertTrue(self.old_pass == self.user.password)
def test_passwords_validation(self):
"""
easy password
"""
variables = {"user": self.user}
executed = self.make_request(self.get_query("123", "123"), variables)
self.assertEqual(executed["success"], False)
self.assertTrue(executed["errors"]["newPassword2"])
self.assertFalse(executed["token"])
self.assertFalse(executed["refreshToken"])
def test_revoke_refresh_tokens_on_password_change(self):
executed = self.make_request(self.get_login_query())
self.user.refresh_from_db()
refresh_tokens = self.user.refresh_tokens.all()
for token in refresh_tokens:
self.assertFalse(token.revoked)
variables = {"user": self.user}
executed = self.make_request(self.get_query(), variables)
self.assertEqual(executed["success"], True)
self.assertEqual(executed["errors"], None)
self.assertTrue(executed["token"])
self.assertTrue(executed["refreshToken"])
self.user.refresh_from_db()
self.assertFalse(self.old_pass == self.user.password)
refresh_tokens = self.user.refresh_tokens.all()
revoke_user_refresh_token(self.user)
self.user.refresh_from_db()
refresh_tokens = self.user.refresh_tokens.all()
for token in refresh_tokens:
self.assertTrue(token.revoked)
class PasswordChangeTestCase(PasswordChangeTestCaseMixin, DefaultTestCase):
def get_login_query(self):
return """
mutation {
tokenAuth(
username: "username",
                password: "%s",
)
{ success, errors, refreshToken }
}
""" % (
self.default_password,
)
def get_query(self, new_password1="<PASSWORD>", new_password2="<PASSWORD>"):
return """
mutation {
passwordChange(
                oldPassword: "%s",
                newPassword1: "%s",
                newPassword2: "%s"
)
{ success, errors, token, refreshToken }
}
""" % (
self.default_password,
new_password1,
new_password2,
)
class PasswordChangeRelayTestCase(PasswordChangeTestCaseMixin, RelayTestCase):
def get_login_query(self):
return """
mutation {
tokenAuth(
input: {
username: "username",
                    password: "%s",
}
)
{ success, errors, refreshToken }
}
""" % (
self.default_password,
)
def get_query(self, new_password1="<PASSWORD>", new_password2="<PASSWORD>"):
return """
mutation {
passwordChange(
input: {
                    oldPassword: "%s",
                    newPassword1: "%s",
newPassword2: "%s"
})
{ success, errors, token, refreshToken }
}
""" % (
self.default_password,
            new_password1,
            new_password2,
)
|
tests/test_nse_option_chain.py | sentashani/stolgo | 125 | 11152832 |
from stolgo.nse_data import NseData
def main():
nse_data = NseData()
nse_data.get_option_chain_excel('BANKNIFTY','30APR2020')
if __name__ == "__main__":
main()
|
tests/cmdline/repl_inspect.py | sebastien-riou/micropython | 4,538 | 11152862 | # cmdline: -c print("test") -i
# -c option combined with -i option results in REPL
|
utils/__init__.py | aditiapratama/blender-tools | 311 | 11152872 |
"""Useful utilities and constants for the addon."""
from importlib import reload
from inspect import isclass
from os.path import basename, dirname, normpath
import sys
import bpy
# This is the addon's directory name, by default "embark_blender_tools", but anyone can change the folder name...
# We use dirname() twice to go up two levels in the file system and then basename to get the folder name.
# Thanks to https://github.com/LuxCoreRender/BlendLuxCore/ for the example of solving the same issue!
ADDON_NAME = basename(dirname(dirname(__file__)))
def get_current_version():
"""Returns the current version of the loaded addon."""
mod = sys.modules[ADDON_NAME]
current_version = mod.bl_info.get("version", (0, 0, 1))
return '.'.join([str(num) for num in current_version])
def get_preferences():
"""Returns the addon's Preferences object."""
return bpy.context.preferences.addons[ADDON_NAME].preferences
def get_source_path():
"""Returns the Embark Addon's Project source path."""
return normpath(get_preferences().source_path)
def reload_addon():
"""Reloads the Embark Addon and all of its modules."""
_addon_name = ADDON_NAME
pref_items = get_preferences().items()
bpy.ops.preferences.addon_disable(module=_addon_name)
# reloadable = [mod for mod in sys.modules.values() if getattr(mod, '__name__', "").startswith(_addon_name)]
# for module in reloadable:
# try:
# print(f"\tReloading {module.__name__}...")
# reload(module)
# except Exception as ex: # pylint: disable=broad-except
# print(f"Error: Failed to reload module '{module.__name__}', reason: {ex}")
bpy.ops.preferences.addon_enable(module=_addon_name)
# Reset the previous preference items onto the reloaded preferences
get_preferences().set_items(pref_items)
def register_recursive(objects):
"""Registers classes with Blender recursively from modules."""
for obj in objects:
if isclass(obj):
bpy.utils.register_class(obj)
elif hasattr(obj, "register"):
obj.register()
elif hasattr(obj, "REGISTER_CLASSES"):
register_recursive(obj.REGISTER_CLASSES)
else:
print(f"Warning: Failed to find anything to register for '{obj}'")
def unregister_recursive(objects):
"""Unregisters classes from Blender recursively from modules."""
for obj in reversed(objects):
if isclass(obj):
bpy.utils.unregister_class(obj)
elif hasattr(obj, "unregister"):
obj.unregister()
elif hasattr(obj, "REGISTER_CLASSES"):
unregister_recursive(obj.REGISTER_CLASSES)
else:
print(f"Warning: Failed to find anything to unregister for '{obj}'")
|
docs_src/options/name/tutorial003.py | madkinsz/typer | 7,615 | 11152873 | import typer
def main(user_name: str = typer.Option(..., "-n")):
typer.echo(f"Hello {user_name}")
if __name__ == "__main__":
typer.run(main)
|
migrations/versions/2dfac13a4c78_add_logsource_unique.py | vault-the/changes | 443 | 11152886 |
"""Add LogSource unique constraint on name
Revision ID: 2dfac13a4c78
Revises: 5896e31725d
Create Date: 2013-12-06 10:56:15.727933
"""
from __future__ import absolute_import, print_function
# revision identifiers, used by Alembic.
revision = '2dfac13a4c78'
down_revision = '5896e31725d'
from alembic import op
from sqlalchemy.sql import table, select
import sqlalchemy as sa
def upgrade():
connection = op.get_bind()
logsources_table = table(
'logsource',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('build_id', sa.GUID(), nullable=False),
sa.Column('name', sa.String(64), nullable=True),
)
logchunks_table = table(
'logchunk',
sa.Column('source_id', sa.GUID(), nullable=False),
)
done = set()
for logsource in connection.execute(logsources_table.select()):
        # deduplicate LogSource rows that share (build_id, name)
key = (logsource.build_id, logsource.name)
if key in done:
continue
print("Checking LogSource %s - %s" % (
logsource.build_id, logsource.name))
query = logchunks_table.delete().where(
            logchunks_table.c.source_id.in_(select([logsources_table.c.id]).where(
sa.and_(
logsources_table.c.build_id == logsource.build_id,
logsources_table.c.name == logsource.name,
logsources_table.c.id != logsource.id,
),
))
)
connection.execute(query)
query = logsources_table.delete().where(
sa.and_(
logsources_table.c.build_id == logsource.build_id,
logsources_table.c.name == logsource.name,
logsources_table.c.id != logsource.id,
)
)
connection.execute(query)
done.add(key)
op.create_unique_constraint(
'unq_logsource_key', 'logsource', ['build_id', 'name'])
op.drop_index('idx_logsource_build_id', 'logsource')
def downgrade():
op.drop_constraint('unq_logsource_key', 'logsource')
op.create_index('idx_logsource_build_id', 'logsource', ['build_id'])
|
qdtrack/models/roi_heads/track_heads/__init__.py | mageofboy/qdtrack | 271 | 11152902 |
from .quasi_dense_embed_head import QuasiDenseEmbedHead
__all__ = ['QuasiDenseEmbedHead'] |
wooey/migrations/0011_script_versioning_cleanup.py | fridmundklaus/wooey | 1,572 | 11152908 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wooey', '0010_script_versioning_data_migration'),
]
operations = [
migrations.RemoveField(
model_name='scriptparameter',
name='script',
),
migrations.RemoveField(
model_name='scriptparametergroup',
name='script',
),
migrations.RemoveField(
model_name='wooeyjob',
name='script',
),
migrations.RemoveField(
model_name='script',
name='script_path',
),
migrations.RemoveField(
model_name='script',
name='script_version',
),
migrations.AlterField(
model_name='scriptversion',
name='script',
field=models.ForeignKey(related_name='script_version', to='wooey.Script', on_delete=models.PROTECT),
),
migrations.AlterField(
model_name='scriptparameter',
name='script_version',
field=models.ForeignKey(to='wooey.ScriptVersion', on_delete=models.PROTECT),
),
migrations.AlterField(
model_name='scriptparametergroup',
name='script_version',
field=models.ForeignKey(to='wooey.ScriptVersion', on_delete=models.PROTECT),
),
migrations.AlterField(
model_name='wooeyjob',
name='script_version',
field=models.ForeignKey(to='wooey.ScriptVersion', on_delete=models.PROTECT),
),
]
|
utils/adamw.py | dingmyu/HR-NAS | 122 | 11152912 | import math
import torch
import logging
from torch.optim.optimizer import Optimizer
from utils.common import index_tensor_in
from utils.common import check_tensor_in
class AdamW(Optimizer):
r"""Implements AdamW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=1e-2, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
# Perform stepweight decay
p.mul_(1 - group['lr'] * group['weight_decay'])
# Perform optimization step
grad = p.grad
if grad.is_sparse:
raise RuntimeError('AdamW does not support sparse gradients')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
step_size = group['lr'] / bias_correction1
p.addcdiv_(exp_avg, denom, value=-step_size)
return loss
def compress_mask(self, info, verbose=False):
"""Adjust parameters values by masks for dynamic network shrinkage."""
var_old = info['var_old']
var_new = info['var_new']
mask_hook = info['mask_hook']
mask = info['mask']
if verbose:
            logging.info('AdamW compress: {} -> {}'.format(
info['var_old_name'], info['var_new_name']))
found = False
for group in self.param_groups:
index = index_tensor_in(var_old, group['params'], raise_error=False)
found = index is not None
if found:
if check_tensor_in(var_old, self.state):
state = self.state.pop(var_old)
if len(state) != 0: # generate new state
new_state = {'step': state['step']}
for key in ['exp_avg', 'exp_avg_sq', 'max_exp_avg_sq']:
if key in state:
new_state[key] = torch.zeros_like(
var_new.data, device=var_old.device)
mask_hook(new_state[key], state[key], mask)
new_state[key].to(state[key].device)
self.state[var_new] = new_state
# update group
del group['params'][index]
group['params'].append(var_new)
break
        assert found, 'Var: {} not in AdamW'.format(info['var_old_name'])
def compress_drop(self, info, verbose=False):
"""Remove unused parameters for dynamic network shrinkage."""
var_old = info['var_old']
if verbose:
            logging.info('AdamW drop: {}'.format(info['var_old_name']))
assert info['type'] == 'variable'
found = False
for group in self.param_groups:
index = index_tensor_in(var_old, group['params'], raise_error=False)
found = index is not None
if found:
if check_tensor_in(var_old, self.state):
self.state.pop(var_old)
del group['params'][index]
        assert found, 'Var: {} not in AdamW'.format(info['var_old_name'])
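# Hedged usage sketch (added for illustration, not part of the original
# module): builds a tiny model and takes one optimization step with this
# AdamW implementation. The model, data, and hyper-parameters below are
# assumptions chosen only for demonstration.
if __name__ == '__main__':
    demo_model = torch.nn.Linear(4, 2)
    demo_opt = AdamW(demo_model.parameters(), lr=1e-3, weight_decay=1e-2)
    demo_x = torch.randn(8, 4)
    demo_y = torch.randint(0, 2, (8,))
    demo_opt.zero_grad()
    demo_loss = torch.nn.functional.cross_entropy(demo_model(demo_x), demo_y)
    demo_loss.backward()
    demo_opt.step()
    logging.info('demo loss after one step: %f', float(demo_loss))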
|
etl/parsers/etw/Microsoft_Windows_WMI.py | IMULMUL/etl-parser | 104 | 11152915 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-WMI
GUID : 1edeee53-0afe-4609-b846-d8c0b2075b1f
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("1edeee53-0afe-4609-b846-d8c0b2075b1f"), event_id=67, version=0)
class Microsoft_Windows_WMI_67_0(Etw):
pattern = Struct(
"BackupFile" / WString
)
@declare(guid=guid("1edeee53-0afe-4609-b846-d8c0b2075b1f"), event_id=68, version=0)
class Microsoft_Windows_WMI_68_0(Etw):
pattern = Struct(
"BackupFile" / WString,
"Error" / WString
)
|
basisnet/personalization/centralized_emnist/trainer.py | xxdreck/google-research | 23,901 | 11152932 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs centralied training and personalization on EMNIST."""
import collections
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from basisnet.personalization.centralized_emnist import data_processing
from basisnet.personalization.centralized_emnist import emnist_models
from basisnet.personalization.centralized_emnist import training_specs
# Training hyperparameters
flags.DEFINE_integer('client_datasets_random_seed', 1,
'Random seed for client sampling.')
flags.DEFINE_float('client_learning_rate', 1e-3,
'learning rate for client training.')
# Training loop configuration
flags.DEFINE_string(
'experiment_name', 'test',
    'The name of this experiment. Will be appended to '
'--root_output_dir to separate experiment results.')
flags.mark_flag_as_required('experiment_name')
flags.DEFINE_string('root_output_dir', '/tmp/basisnet/centralized_emnist',
'Root directory for writing experiment output.')
flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
flags.DEFINE_integer(
'rounds_per_eval', 100,
'How often to evaluate the global model on the validation dataset.')
flags.DEFINE_integer('rounds_per_checkpoint', 100,
'How often to checkpoint the global model.')
flags.DEFINE_string('modeldir', '', 'The dir for saving checkpoints and logs.')
flags.DEFINE_bool('debug', False, 'If true, reduce batch size and do not use '
                  'tf_function.')
# For personalization
flags.DEFINE_integer(
    'fine_tune_epoch', 20,
    'Number of epochs for fine-tuning on the held-out test clients.')
flags.DEFINE_integer('num_basis', 4,
                     'Number of bases to learn; 1 = original model.')
flags.DEFINE_float(
'num_filters_expand', 1,
    'Expansion factor for the Conv channel sizes.')
flags.DEFINE_float(
    'temp', 1.0, 'Temperature for the softmax used to generate the client embedding.')
_SUPPORTED_EMBEDDING_TYPE = ['lookup']
flags.DEFINE_enum('embedding_type', 'lookup', _SUPPORTED_EMBEDDING_TYPE,
'The type of the client embedding.')
flags.DEFINE_boolean('run_sweep', False, 'Whether to'
' run hyper parameter tunning with sweep.')
flags.DEFINE_boolean('digit_only', False, 'digit_only for emnist')
flags.DEFINE_boolean('global_embedding', False,
'train with global_embedding only')
flags.DEFINE_boolean('with_dist', False, 'use label distribution as the inputs')
FLAGS = flags.FLAGS
def main(argv):
tf.compat.v2.enable_v2_behavior()
# necessary to enable hyperparameter explorations.
# xm.setup_work_unit()
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=FLAGS.digit_only)
if 'test' in FLAGS.experiment_name:
logging.info('Test run ...')
num_client = 20
num_test_client = 20
epochs = 1
else:
num_client = 2500
num_test_client = 900
epochs = 40
train_batch_size = 256
cliend_encodings = {}
for i, idx in enumerate(emnist_train.client_ids):
cliend_encodings[idx] = i
all_client_ids = np.array(emnist_train.client_ids)
np.random.shuffle(all_client_ids)
train_client_ids = all_client_ids[:num_client]
test_client_ids = all_client_ids[num_client:num_client + num_test_client]
train_tuple, _, test_tuple = data_processing.parse_data(
emnist_train,
emnist_test,
train_client_ids,
cliend_encodings,
with_dist=FLAGS.with_dist)
ft_train_tuple, ft_sp_train_tuple, ft_test_tuple = data_processing.parse_data(
emnist_train,
emnist_test,
test_client_ids,
cliend_encodings,
with_dist=FLAGS.with_dist)
dataset = data_processing.pack_dataset(
train_tuple, mode='train', with_dist=FLAGS.with_dist)
val_dataset = data_processing.pack_dataset(
test_tuple, mode='test', with_dist=FLAGS.with_dist)
if len(argv) > 1:
raise app.UsageError('Expected no command-line arguments, '
'got: {}'.format(argv))
task_spec = training_specs.TaskSpec(
fine_tune_epoch=FLAGS.fine_tune_epoch,
num_basis=FLAGS.num_basis,
num_filters_expand=FLAGS.num_filters_expand,
temp=FLAGS.temp,
embedding_type=FLAGS.embedding_type)
model_builder = emnist_models.get_model_builder(
task_spec,
only_digits=FLAGS.digit_only,
batch_size=train_batch_size,
with_dist=FLAGS.with_dist,
global_embedding_only=FLAGS.global_embedding)
basisnet = model_builder()
basisnet.summary()
learning_rate = FLAGS.client_learning_rate
logging.info(learning_rate)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
basisnet.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy'])
basisnet.fit(
dataset, epochs=epochs, validation_data=val_dataset, verbose=2)
results = basisnet.evaluate(val_dataset)
acc = results[1]
logging.info(acc)
checkpoint_path = FLAGS.modeldir + 'emnist_basis_%d_lr%f_%s.ckpt' % (
FLAGS.num_basis, FLAGS.client_learning_rate, FLAGS.experiment_name)
basisnet.save_weights(checkpoint_path)
# Personalization
per_batch_size = 20
def eval_per_acc(preds, dataset):
pred_cls = np.argmax(preds, -1)
dataset = dataset.unbatch()
per_acc_dict = collections.OrderedDict()
    for y_hat, (x, y) in zip(pred_cls, dataset):
clnt_id = str(x['input_id'])
if clnt_id not in per_acc_dict:
per_acc_dict[clnt_id] = {'cnt': 0, 'correct': 0}
per_acc_dict[clnt_id]['cnt'] += 1
per_acc_dict[clnt_id]['correct'] += int(y_hat == y.numpy())
per_acc_list = [d['correct'] / d['cnt'] for d in per_acc_dict.values()]
return per_acc_list
def finetuning(mode,
ft_dataset,
ft_dataset_test,
train_size=1,
fix_basis=True,
global_exp=False):
logging.info('==============')
logging.info(mode)
logging.info(train_size)
logging.info('Bases fixed' if fix_basis else 'Bases not fixed')
logging.info(
'Global experiment' if global_exp else 'Personalized experiment')
logging.info('==============')
per_model_builder = emnist_models.get_model_builder(
task_spec,
only_digits=FLAGS.digit_only,
batch_size=per_batch_size,
with_dist=FLAGS.with_dist,
global_embedding_only=global_exp)
local_basisnet = per_model_builder()
local_basisnet.summary()
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
local_basisnet.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy'])
local_basisnet.set_weights(basisnet.get_weights())
if fix_basis:
if FLAGS.global_embedding or FLAGS.num_basis == 1:
# local fine-tune the whole network
pass
else:
# only fine-tune the embedding
logging.info('Fix basis')
for layer in local_basisnet.layers:
if layer.name != 'embedding':
layer.trainable = False
preds = local_basisnet.predict(ft_dataset_test)
per_acc_list = eval_per_acc(preds, ft_dataset_test)
logging.info('Before fine-tuning')
logging.info(np.nanmean(per_acc_list))
logging.info(per_acc_list)
for ep in range(FLAGS.fine_tune_epoch):
local_basisnet.fit(
ft_dataset, epochs=1, verbose=0, validation_data=ft_dataset_test)
preds = local_basisnet.predict(ft_dataset_test)
post_acc_list = eval_per_acc(preds, ft_dataset_test)
logging.info('Fine-tune epoch%d', ep)
logging.info(np.nanmean(post_acc_list))
logging.info(post_acc_list)
return local_basisnet
ft_dataset = data_processing.pack_dataset(
ft_train_tuple,
mode='train',
batch_size=per_batch_size,
with_dist=FLAGS.with_dist)
sp_ft_dataset = data_processing.pack_dataset(
ft_sp_train_tuple,
mode='train',
batch_size=per_batch_size,
with_dist=FLAGS.with_dist)
ft_val_dataset = data_processing.pack_dataset(
ft_test_tuple,
mode='test',
batch_size=per_batch_size,
with_dist=FLAGS.with_dist)
# Not fix bases
finetuning(
mode='test',
ft_dataset=ft_dataset,
ft_dataset_test=ft_val_dataset,
fix_basis=False)
finetuning(
mode='test',
ft_dataset=sp_ft_dataset,
ft_dataset_test=ft_val_dataset,
fix_basis=False,
train_size=0.1)
if FLAGS.num_basis == 1:
return
# Fix bases
finetuning(mode='test', ft_dataset=ft_dataset, ft_dataset_test=ft_val_dataset)
finetuning(
mode='test',
ft_dataset=sp_ft_dataset,
ft_dataset_test=ft_val_dataset,
train_size=0.1)
# Global Acc
local_basisnet = finetuning(
mode='test',
ft_dataset=ft_dataset,
ft_dataset_test=ft_val_dataset,
global_exp=True)
finetuning(
mode='test',
ft_dataset=sp_ft_dataset,
ft_dataset_test=ft_val_dataset,
train_size=0.1,
global_exp=True)
global_embedding = local_basisnet.get_layer('embedding').get_weights()[0][0]
new_embedding = np.tile(global_embedding, (3402, 1))
basisnet.get_layer('embedding').set_weights([new_embedding])
finetuning(mode='test', ft_dataset=ft_dataset, ft_dataset_test=ft_val_dataset)
finetuning(
mode='test',
ft_dataset=sp_ft_dataset,
ft_dataset_test=ft_val_dataset,
train_size=0.1)
if __name__ == '__main__':
app.run(main)
|
ecosystem_tools/mindconverter/mindconverter/graph_based_converter/mapper/onnx/nn/lstm_mapper.py | mindspore-ai/mindinsight | 216 | 11152935 |
# Copyright 2021 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mapper module."""
import numpy as np
from mindconverter.graph_based_converter.mapper.base import ONNXToMindSporeMapper
from mindconverter.graph_based_converter.constant import ExchangeMessageKeywords, TemplateKeywords, \
WeightType
class LSTMMapper(ONNXToMindSporeMapper):
"""LSTM mapper."""
@staticmethod
def _operation_name_in_ms(*args, **kwargs):
return "nn.LSTM"
@staticmethod
def _convert_params(**kwargs):
"""convert params"""
weights = kwargs["weights"]
input_weights = LSTMMapper._find_val_by_index(0, weights)
embed_dim = input_weights.shape[2]
params = kwargs['params']
output_shape_list = kwargs.get("params").get("output_shape")
output_shape = output_shape_list[0].node_output_shape
        # `output_shape[1]` determines whether the LSTM is bidirectional:
        # `1` means unidirectional, `2` means bidirectional.
if output_shape[1] == 2:
return {
"input_size": embed_dim,
"hidden_size": params["hidden_size"],
"bidirectional": True
}
return {
"input_size": embed_dim,
"hidden_size": params["hidden_size"]
}
@staticmethod
def _convert_trained_weights(**kwargs):
weights = kwargs["weights"]
weight_ih = LSTMMapper._find_val_by_index(0, weights)
weight_hh = LSTMMapper._find_val_by_index(1, weights)
bias = LSTMMapper._find_val_by_index(2, weights)
init_h = LSTMMapper._find_val_by_index(3, weights)
init_c = LSTMMapper._find_val_by_index(4, weights)
ih_shape = weight_ih.shape
gpu_weight_ih, ascend_weight_ih = LSTMMapper._get_gpu_ascend_weight(weight_ih)
gpu_weight_hh, ascend_weight_hh = LSTMMapper._get_gpu_ascend_weight(weight_hh)
bidirectional = bool(ih_shape[0] > 1)
gpu_weights = list()
converted_gpu_weights = dict()
converted_ascend_weights = dict()
converted_weights = dict()
gpu_weights.append(gpu_weight_ih[0].reshape(-1, 1, 1))
gpu_weights.append(gpu_weight_hh[0].reshape(-1, 1, 1))
weight_fw0 = np.concatenate([ascend_weight_ih[0], ascend_weight_hh[0]], axis=1)
converted_ascend_weights['weight_fw0'] = {
'data': weight_fw0.transpose(1, 0).astype(np.float16)
}
if bidirectional:
gpu_weights.append(gpu_weight_ih[1].reshape(-1, 1, 1))
gpu_weights.append(gpu_weight_hh[1].reshape(-1, 1, 1))
weight_bw0 = np.concatenate([ascend_weight_ih[1], ascend_weight_hh[1]], axis=1)
converted_ascend_weights['weight_bw0'] = {
'data': weight_bw0.transpose(1, 0).astype(np.float16)
}
if bias is not None:
bias_shape = bias.shape
bias = bias.reshape(bias_shape[0], 2, 4, -1)
gpu_bias = bias[:, :, [0, 2, 3, 1], :]
gpu_bias = gpu_bias.reshape(bias_shape)
gpu_weights.append(gpu_bias.reshape(-1, 1, 1))
ascend_bias = bias[:, :, [0, 3, 2, 1], :]
ascend_bias = (ascend_bias[:, 0, :, :] + ascend_bias[:, 1, :, :]).reshape(bias_shape[0], -1)
converted_ascend_weights['bias_fw0'] = {'data': ascend_bias[0].astype(np.float16)}
if bidirectional:
converted_ascend_weights['bias_bw0'] = {'data': ascend_bias[1].astype(np.float16)}
gpu_weights = np.concatenate(gpu_weights, axis=0)
converted_gpu_weights['weight'] = {'data': gpu_weights}
converted_weights.update(converted_gpu_weights)
converted_weights.update(converted_ascend_weights)
if init_h is not None:
converted_weights['init_h'] = {'data': init_h, 'type': WeightType.PARAMETER.value}
converted_weights['init_c'] = {'data': init_c, 'type': WeightType.PARAMETER.value}
return converted_weights
@staticmethod
def _get_gpu_ascend_weight(weight):
"""Get GPU and Ascend weight."""
weight_shape = weight.shape
weight = weight.reshape(weight_shape[0], 4, -1, weight_shape[-1])
gpu_weight = weight[:, [0, 2, 3, 1], :, :]
gpu_weight = gpu_weight.reshape(weight_shape)
ascend_weight = weight[:, [0, 3, 2, 1], :, :].reshape(weight_shape)
return gpu_weight, ascend_weight
@staticmethod
def _generate_snippet_template(**kwargs):
"""generate snippet template"""
op = kwargs.get("operation")
args = kwargs.get("converted_params", dict())
weights = kwargs.get("weights")
init_h = LSTMMapper._find_val_by_index(3, weights)
output_shape_list = kwargs.get("raw_params").get("output_shape")
output_shape = output_shape_list[0].node_output_shape
output_reshape = (output_shape[0], output_shape[2], output_shape[1], output_shape[3])
trainable_params = kwargs.get("trainable_params", dict())
if not op:
raise ValueError("Can not get MindSpore operation name.")
variable_slot = "var_0"
init_template = f"self.{{{variable_slot}}} = {op}({', '.join(['%s={%s}' % (p, p) for p in args])})"
init_params, construct_params = LSTMMapper._get_init_construct_params(variable_slot, output_reshape)
if init_h is not None:
h_shape = init_h.shape
h_dtype = init_h.dtype
args['h_shape'] = h_shape
args['h_dtype'] = h_dtype
template = LSTMMapper._get_template_with_init_h(variable_slot, init_template, init_params, construct_params)
else:
construct_template = f"opt_{{{variable_slot}}}, (opt_{{{variable_slot}}}_h, " \
f"opt_{{{variable_slot}}}_c) = self.{{{variable_slot}}}" \
f"({{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}})"
template = {
variable_slot: {
TemplateKeywords.INIT.value: [init_template, init_params["cast"], init_params["reshape"],
init_params["transpose"]],
TemplateKeywords.CONSTRUCT.value: [construct_template,
construct_params["cast"],
construct_params["reshape"],
construct_params["transpose"]]
}
}
exchange_msg = LSTMMapper._get_exchange_msg(variable_slot, op, args, weights, trainable_params)
outputs_list = [f"opt_{{{variable_slot}}}", f"opt_{{{variable_slot}}}_h", f"opt_{{{variable_slot}}}_c"]
outputs_mapping = ((0, 0), (1, 1), (2, 2),)
return template, exchange_msg, outputs_list, outputs_mapping
@staticmethod
def _get_init_construct_params(variable_slot, output_reshape):
"""Get init and construct codes for parameters."""
init_reshape = f"self.{{{variable_slot}}}_reshape = P.Reshape()"
init_transpose = f"self.{{{variable_slot}}}_transpose = P.Transpose()"
init_cast = f"self.{{{variable_slot}}}_cast = P.Cast()"
construct_cast = f"opt_{{{variable_slot}}} = " \
f"self.{{{variable_slot}}}_cast(opt_{{{variable_slot}}}, mindspore.float32)"
construct_reshape = f"opt_{{{variable_slot}}} = " \
f"self.{{{variable_slot}}}_reshape(opt_{{{variable_slot}}}, {output_reshape})"
construct_transpose = f"opt_{{{variable_slot}}} = " \
f"self.{{{variable_slot}}}_transpose(opt_{{{variable_slot}}}, (0, 2, 1, 3))"
init_codes = {"cast": init_cast, "reshape": init_reshape, "transpose": init_transpose}
construct_codes = {"cast": construct_cast, "reshape": construct_reshape, "transpose": construct_transpose}
return init_codes, construct_codes
@staticmethod
def _get_template_with_init_h(variable_slot, init_template, init_params, construct_params):
"""Get template with init_h."""
init_h_param = f"self.{{{variable_slot}}}_init_h = " \
f"Parameter(Tensor(np.zeros({{h_shape}}).astype(np.{{h_dtype}})), " \
f"name=None, requires_grad=False)"
init_c_param = f"self.{{{variable_slot}}}_init_c = " \
f"Parameter(Tensor(np.zeros({{h_shape}}).astype(np.{{h_dtype}})), " \
f"name=None, requires_grad=False)"
construct_template = f"opt_{{{variable_slot}}}, (opt_{{{variable_slot}}}_h, " \
f"opt_{{{variable_slot}}}_c) = self.{{{variable_slot}}}" \
f"({{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}}," \
f"(self.{{{variable_slot}}}_init_h, self.{{{variable_slot}}}_init_c))"
template = {
variable_slot: {
TemplateKeywords.INIT.value: [init_h_param, init_c_param, init_template, init_params["cast"],
init_params["reshape"], init_params["transpose"]],
TemplateKeywords.CONSTRUCT.value: [construct_template,
construct_params["cast"],
construct_params["reshape"],
construct_params["transpose"]]
}
}
return template
@staticmethod
def _get_exchange_msg(variable_slot, op, args, weights, trainable_params):
"""Get exchange msg for mapper."""
exchange_msg = {
variable_slot: {
ExchangeMessageKeywords.VariableScope.value.OPERATION.value: op,
ExchangeMessageKeywords.VariableScope.value.VARIABLE_NAME.value: None,
ExchangeMessageKeywords.VariableScope.value.OUTPUT_TYPE.value:
ExchangeMessageKeywords.VariableScope.value.TSR_TYPE.value,
ExchangeMessageKeywords.VariableScope.value.INPUTS.value: [],
ExchangeMessageKeywords.VariableScope.value.GROUP_INPUTS.value: [(1, 2)],
ExchangeMessageKeywords.VariableScope.value.ARGS.value: args,
ExchangeMessageKeywords.VariableScope.value.WEIGHTS.value: weights,
ExchangeMessageKeywords.VariableScope.value.TRAINABLE_PARAMS.value: trainable_params
}
}
return exchange_msg
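# Hedged sketch (added for illustration, not part of the original mapper):
# shows the gate reordering done by `_get_gpu_ascend_weight` on a tiny fake
# ONNX LSTM weight with num_directions=1, hidden_size=1, input_size=1, so each
# of the four gate blocks is a single scalar and the permutation is directly
# visible. The input values are made up for demonstration only.
if __name__ == '__main__':
    demo_weight = np.arange(4, dtype=np.float32).reshape(1, 4, 1)
    demo_gpu, demo_ascend = LSTMMapper._get_gpu_ascend_weight(demo_weight)
    print(demo_gpu.reshape(-1))     # [0. 2. 3. 1.] -> gate order [0, 2, 3, 1]
    print(demo_ascend.reshape(-1))  # [0. 3. 2. 1.] -> gate order [0, 3, 2, 1]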
|
circuit_training/environment/coordinate_descent_placer_main.py | taylormcnally/circuit_training | 280 | 11152941 | <reponame>taylormcnally/circuit_training
# coding=utf-8
# Copyright 2021 The Circuit Training Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A placer that implements coordinate descent algorithm.
The placer can start from a scratch (i.e., empty grid), or from an existing node
locations specified by --init_placement.
The algorithm runs for a given number of epochs (iterations).
For each iteration, for each node in the order given by --cd_node_order, place
the node greedily on the best grid location.
If --cd_use_stdcell_placer is True, place hard macros greedily first,
then run the stdcell placer to place all stdcells.
When --cd_epochs=1, this algorithm is equivalent to greedy algorithm.
Example usage:
python circuit_training/environment/coordinate_descent_placer_main.py
--netlist_file "/path/to/netlist.pb.txt"
--init_placement "/path/to/initial_placement.plc"
"""
import functools
from absl import app
from absl import flags
from circuit_training.environment import coordinate_descent_placer
from circuit_training.environment import environment
from circuit_training.environment import placement_util
import numpy as np
flags.DEFINE_string('netlist_file', None, 'Path to netlist file.')
flags.DEFINE_string('init_placement', None, 'Path to initial placement file.')
flags.DEFINE_string('cd_output_dir', '/tmp/cd', 'CD output dir.')
flags.DEFINE_string('cd_placement_filename', 'cd', 'CD placement filename.')
FLAGS = flags.FLAGS
def main(_):
np.random.seed(FLAGS.seed)
plc = placement_util.create_placement_cost(FLAGS.netlist_file,
FLAGS.init_placement)
if not FLAGS.cd_use_init_location:
plc.unplace_all_nodes()
def cost_fn(plc):
return environment.cost_info_function(plc=plc, done=True)
cost_fn = functools.partial(
cost_fn, wirelength_weight=1.0, density_weight=0.1, congestion_weight=0.1)
placer = coordinate_descent_placer.CoordinateDescentPlacer(plc, cost_fn)
placer.place()
placer.save_placement(FLAGS.cd_output_dir,
f'{FLAGS.cd_placement_filename}.plc')
print(f'Final CD placement can be found at {FLAGS.cd_output_dir}')
if __name__ == '__main__':
app.run(main)
|
tests/cli/test_command_copy.py | EasyPost/biggraphite | 125 | 11152944 |
#!/usr/bin/env python
# Copyright 2018 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from biggraphite import metric as bg_metric
from biggraphite.cli import command_copy
from biggraphite.cli.command_list import list_metrics
from tests import test_utils as bg_test_utils
class TestCommandCopy(bg_test_utils.TestCaseWithFakeAccessor):
_POINTS_START = 3600 * 24 * 10
_POINTS_END = _POINTS_START + 3 * 3600
_RETENTION = bg_metric.Retention.from_string("20*15s:1440*60s:48*3600s")
_RETENTION_BIS = bg_metric.Retention.from_string("20*10s:14400*60s:500*3600s")
_POINTS = bg_test_utils._make_easily_queryable_points(
start=_POINTS_START, end=_POINTS_END, period=_RETENTION[1].precision
)
_METRIC_1_NAME = "test.origin.metric_1.toto"
_METRIC_1 = bg_test_utils.make_metric_with_defaults(_METRIC_1_NAME, retention=_RETENTION)
_METRIC_2_NAME = "test.origin.metric_2.tata"
_METRIC_2 = bg_test_utils.make_metric_with_defaults(_METRIC_2_NAME, retention=_RETENTION)
_METRIC_3_NAME = "test.origin.metric_3.tata"
_METRIC_3 = bg_test_utils.make_metric_with_defaults(_METRIC_3_NAME, retention=_RETENTION_BIS)
def setUp(self):
"""Set up a subdirectory of metrics to copy."""
super(TestCommandCopy, self).setUp()
self.accessor.connect()
self.accessor.create_metric(self._METRIC_1)
self.accessor.create_metric(self._METRIC_2)
self.accessor.insert_points(self._METRIC_1, self._POINTS)
self.accessor.flush()
def test_copy_metric(self):
"""Test copy of a single metric with aggregated points."""
cmd_copy = command_copy.CommandCopy()
        # Check that _METRIC_2 is empty
for i in range(3):
pts = self.accessor.fetch_points(
self._METRIC_2,
self._POINTS_START,
self._POINTS_END,
stage=self._METRIC_2.retention[i],
)
self.assertEqual(list(pts), [])
# Copy points from _METRIC_1 to _METRIC_2
cmd_copy._copy_metric(
self.accessor,
self._METRIC_1,
self._METRIC_2,
self._POINTS_START,
self._POINTS_END,
)
self.accessor.flush()
# Check that both metrics have same points
for i in range(3):
pts = self.accessor.fetch_points(
self._METRIC_1,
self._POINTS_START,
self._POINTS_END,
stage=self._METRIC_1.retention[i],
aggregated=False,
)
pts_copy = self.accessor.fetch_points(
self._METRIC_2,
self._POINTS_START,
self._POINTS_END,
stage=self._METRIC_2.retention[i],
aggregated=False,
)
self.assertEqual(list(pts), list(pts_copy))
def test_copy_metric_with_retention(self):
"""Test copy of a metric with aggregated points and retention override.
        A given dst_stage should have the same points as the src_stage
        with the same precision, or no points at all.
"""
cmd_copy = command_copy.CommandCopy()
cmd_copy._copy_metric(
self.accessor,
self._METRIC_1,
self._METRIC_3,
self._POINTS_START,
self._POINTS_END,
)
self.accessor.flush()
for i in range(3):
pts = self.accessor.fetch_points(
self._METRIC_1,
self._POINTS_START,
self._POINTS_END,
stage=self._METRIC_1.retention[i],
aggregated=False,
)
pts_copy = self.accessor.fetch_points(
self._METRIC_3,
self._POINTS_START,
self._POINTS_END,
stage=self._METRIC_3.retention[i],
aggregated=False,
)
if i == 0:
self.assertNotEqual(list(pts), list(pts_copy))
else:
self.assertEqual(list(pts), list(pts_copy))
def test_get_metric_tuples_with_metric(self):
"""Test retrieve of a single couple of metrics."""
cmd_copy = command_copy.CommandCopy()
# Test with metric names arguments
expected_metric_tuples = [(self._METRIC_1, self._METRIC_2)]
metric_tuples = cmd_copy._get_metric_tuples(
accessor=self.accessor,
src=self._METRIC_1_NAME,
dst=self._METRIC_2_NAME,
src_retention="",
dst_retention="",
recursive=False,
dry_run=False,
)
self.assertEqual(list(metric_tuples), expected_metric_tuples)
def test_get_metric_tuples_with_directory(self):
"""Test retrieve of a single couple of metrics."""
cmd_copy = command_copy.CommandCopy()
# Test with subdirectory names arguments
self.assertEqual(len(list(list_metrics(self.accessor, "*.**"))), 2)
metric_tuples = cmd_copy._get_metric_tuples(
accessor=self.accessor,
src="test",
dst="copy",
src_retention="",
dst_retention="",
recursive=True,
dry_run=False,
)
self.assertEqual(len(list(metric_tuples)), 2)
self.assertEqual(len(list(list_metrics(self.accessor, "*.**"))), 4)
def test_get_metric_tuples_with_retention(self):
"""Test retrieve of a single couples of metrics overrinding retentions."""
cmd_copy = command_copy.CommandCopy()
metric_tuples = cmd_copy._get_metric_tuples(
accessor=self.accessor,
src=self._METRIC_1_NAME,
dst=self._METRIC_2_NAME,
src_retention="18*42s",
dst_retention="50*300s",
recursive=False,
dry_run=False,
)
retention_str = [m.metadata.retention.as_string for m in list(metric_tuples)[0]]
self.assertEqual(len(retention_str), 2)
self.assertIn("18*42s", retention_str)
self.assertIn("50*300s", retention_str)
if __name__ == "__main__":
unittest.main()
|
sklearn/preprocessing/_function_transformer.py | matiasrvazquez/scikit-learn | 50,961 | 11152984 |
import warnings
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..utils.metaestimators import available_if
from ..utils.validation import (
_allclose_dense_sparse,
_check_feature_names_in,
check_array,
)
def _identity(X):
"""The identity function."""
return X
class FunctionTransformer(TransformerMixin, BaseEstimator):
"""Constructs a transformer from an arbitrary callable.
A FunctionTransformer forwards its X (and optionally y) arguments to a
user-defined function or function object and returns the result of this
function. This is useful for stateless transformations such as taking the
log of frequencies, doing custom scaling, etc.
Note: If a lambda is used as the function, then the resulting
transformer will not be pickleable.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <function_transformer>`.
Parameters
----------
func : callable, default=None
The callable to use for the transformation. This will be passed
the same arguments as transform, with args and kwargs forwarded.
If func is None, then func will be the identity function.
inverse_func : callable, default=None
The callable to use for the inverse transformation. This will be
passed the same arguments as inverse transform, with args and
kwargs forwarded. If inverse_func is None, then inverse_func
will be the identity function.
validate : bool, default=False
Indicate that the input X array should be checked before calling
``func``. The possibilities are:
- If False, there is no input validation.
- If True, then X will be converted to a 2-dimensional NumPy array or
sparse matrix. If the conversion is not possible an exception is
raised.
.. versionchanged:: 0.22
The default of ``validate`` changed from True to False.
accept_sparse : bool, default=False
Indicate that func accepts a sparse matrix as input. If validate is
False, this has no effect. Otherwise, if accept_sparse is false,
sparse matrix inputs will cause an exception to be raised.
check_inverse : bool, default=True
        Whether to check that ``func`` followed by ``inverse_func`` leads to
the original inputs. It can be used for a sanity check, raising a
warning when the condition is not fulfilled.
.. versionadded:: 0.20
feature_names_out : callable, 'one-to-one' or None, default=None
Determines the list of feature names that will be returned by the
`get_feature_names_out` method. If it is 'one-to-one', then the output
feature names will be equal to the input feature names. If it is a
callable, then it must take two positional arguments: this
`FunctionTransformer` (`self`) and an array-like of input feature names
(`input_features`). It must return an array-like of output feature
names. The `get_feature_names_out` method is only defined if
`feature_names_out` is not None.
See ``get_feature_names_out`` for more details.
.. versionadded:: 1.1
kw_args : dict, default=None
Dictionary of additional keyword arguments to pass to func.
.. versionadded:: 0.18
inv_kw_args : dict, default=None
Dictionary of additional keyword arguments to pass to inverse_func.
.. versionadded:: 0.18
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`. Defined only when
`validate=True`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `validate=True`
and `X` has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
MaxAbsScaler : Scale each feature by its maximum absolute value.
StandardScaler : Standardize features by removing the mean and
scaling to unit variance.
LabelBinarizer : Binarize labels in a one-vs-all fashion.
MultiLabelBinarizer : Transform between iterable of iterables
and a multilabel format.
Examples
--------
>>> import numpy as np
>>> from sklearn.preprocessing import FunctionTransformer
>>> transformer = FunctionTransformer(np.log1p)
>>> X = np.array([[0, 1], [2, 3]])
>>> transformer.transform(X)
array([[0. , 0.6931...],
[1.0986..., 1.3862...]])
"""
def __init__(
self,
func=None,
inverse_func=None,
*,
validate=False,
accept_sparse=False,
check_inverse=True,
feature_names_out=None,
kw_args=None,
inv_kw_args=None,
):
self.func = func
self.inverse_func = inverse_func
self.validate = validate
self.accept_sparse = accept_sparse
self.check_inverse = check_inverse
self.feature_names_out = feature_names_out
self.kw_args = kw_args
self.inv_kw_args = inv_kw_args
def _check_input(self, X, *, reset):
if self.validate:
return self._validate_data(X, accept_sparse=self.accept_sparse, reset=reset)
return X
def _check_inverse_transform(self, X):
"""Check that func and inverse_func are the inverse."""
idx_selected = slice(None, None, max(1, X.shape[0] // 100))
X_round_trip = self.inverse_transform(self.transform(X[idx_selected]))
if not _allclose_dense_sparse(X[idx_selected], X_round_trip):
warnings.warn(
"The provided functions are not strictly"
" inverse of each other. If you are sure you"
" want to proceed regardless, set"
" 'check_inverse=False'.",
UserWarning,
)
def fit(self, X, y=None):
"""Fit transformer by checking X.
If ``validate`` is ``True``, ``X`` will be checked.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input array.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
FunctionTransformer class instance.
"""
X = self._check_input(X, reset=True)
if self.check_inverse and not (self.func is None or self.inverse_func is None):
self._check_inverse_transform(X)
return self
def transform(self, X):
"""Transform X using the forward function.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input array.
Returns
-------
X_out : array-like, shape (n_samples, n_features)
Transformed input.
"""
X = self._check_input(X, reset=False)
return self._transform(X, func=self.func, kw_args=self.kw_args)
def inverse_transform(self, X):
"""Transform X using the inverse function.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input array.
Returns
-------
X_out : array-like, shape (n_samples, n_features)
Transformed input.
"""
if self.validate:
X = check_array(X, accept_sparse=self.accept_sparse)
return self._transform(X, func=self.inverse_func, kw_args=self.inv_kw_args)
@available_if(lambda self: self.feature_names_out is not None)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
This method is only defined if `feature_names_out` is not None.
Parameters
----------
input_features : array-like of str or None, default=None
Input feature names.
- If `input_features` is None, then `feature_names_in_` is
used as the input feature names. If `feature_names_in_` is not
defined, then names are generated:
              `[x0, x1, ..., x(n_features_in_ - 1)]`.
- If `input_features` is array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
- If `feature_names_out` is 'one-to-one', the input feature names
are returned (see `input_features` above). This requires
`feature_names_in_` and/or `n_features_in_` to be defined, which
is done automatically if `validate=True`. Alternatively, you can
set them in `func`.
- If `feature_names_out` is a callable, then it is called with two
arguments, `self` and `input_features`, and its return value is
returned by this method.
"""
if hasattr(self, "n_features_in_") or input_features is not None:
input_features = _check_feature_names_in(self, input_features)
if self.feature_names_out == "one-to-one":
if input_features is None:
raise ValueError(
"When 'feature_names_out' is 'one-to-one', either "
"'input_features' must be passed, or 'feature_names_in_' "
"and/or 'n_features_in_' must be defined. If you set "
"'validate' to 'True', then they will be defined "
"automatically when 'fit' is called. Alternatively, you "
"can set them in 'func'."
)
names_out = input_features
elif callable(self.feature_names_out):
names_out = self.feature_names_out(self, input_features)
else:
raise ValueError(
f"feature_names_out={self.feature_names_out!r} is invalid. "
'It must either be "one-to-one" or a callable with two '
"arguments: the function transformer and an array-like of "
"input feature names. The callable must return an array-like "
"of output feature names."
)
return np.asarray(names_out, dtype=object)
def _transform(self, X, func=None, kw_args=None):
if func is None:
func = _identity
return func(X, **(kw_args if kw_args else {}))
def __sklearn_is_fitted__(self):
"""Return True since FunctionTransfomer is stateless."""
return True
def _more_tags(self):
return {"no_validation": not self.validate, "stateless": True}
|
FireModules/Shenanigans/osx_chicken_loop_youtube_video.py | FullMidnight/ThirdDumpsterFire | 862 | 11152994 |
#!/usr/bin/python
#
# Filename: osx_chicken_loop_youtube_video.py
#
# Version: 1.0.0
#
# Author: <NAME> (TryCatchHCF)
#
# Summary:
#
# Part of the DumpsterFire Toolset. See documentation at https://github.com/TryCatchHCF/DumpsterFire
#
#
# Description:
#
# Opens a 10-hour chicken sounds loop on Youtube in the default browser
#
import os, sys
from FireModules.fire_module_base_class import *
class osx_chicken_loop_youtube_video( FireModule ):
def __init__(self):
self.commentsStr = "Shenanigans/osx_chicken_loop_youtube_video"
self.textToSayStr = ""
def __init__(self, moofStr):
self.moofStr = moofStr
self.commentsStr = "Shenanigans/osx_chicken_loop_youtube_video"
self.textToSayStr = ""
return;
def Description( self ):
self.Description = "Opens default browser and starts YouTube 10-hour loop of chicken sounds. After turning system volume up to maximum."
return self.Description
def Configure( self ):
self.configStr = "(None)"
return
def GetParameters( self ):
return( self.configStr )
def SetParameters( self, parametersStr ):
# Nothing to set, does not take parameters, so just return
return
def ActivateLogging( self, logFlag ):
print self.commentsStr + ": Setting Logging flag!"
print logFlag
return
def Ignite( self ):
print self.commentsStr + ": Setting system audio to max volume"
os.system( "osascript -e 'set volume output volume 100'")
print self.commentsStr + ": Opening 10-hour loop of chicken sounds on Youtube"
self.commandStr = "open https://www.youtube.com/watch?v=E9BQAAT10Mwi"
os.system( self.commandStr )
return
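if __name__ == '__main__':
    # Rough standalone smoke test (assumes macOS with osascript available);
    # normally the DumpsterFire toolset instantiates and ignites modules itself.
    fire = osx_chicken_loop_youtube_video( 'manual test' )
    print fire.Description()
    fire.Ignite()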
|
pycoin/block.py | gruve-p/pycoin | 1,210 | 11153033 |
import io
from .encoding.hash import double_sha256
from .encoding.hexbytes import b2h, b2h_rev
from .merkle import merkle
from .satoshi.satoshi_struct import parse_struct, stream_struct
class BadMerkleRootError(Exception):
pass
def difficulty_max_mask_for_bits(bits):
prefix = bits >> 24
mask = (bits & 0x7ffff) << (8 * (prefix - 3))
return mask
class Block(object):
"""A Block is an element of the Bitcoin chain."""
@classmethod
def make_subclass(class_, symbol, tx):
return type(
"%s_%s" % (symbol, class_.__name__),
(class_,),
dict(Tx=tx),
)
@classmethod
def parse(class_, f, include_transactions=True, include_offsets=None, check_merkle_hash=True):
"""
Parse the Block from the file-like object
"""
block = class_.parse_as_header(f)
if include_transactions:
count = parse_struct("I", f)[0]
txs = block._parse_transactions(f, count, include_offsets=include_offsets)
block.set_txs(txs, check_merkle_hash=check_merkle_hash)
return block
@classmethod
def parse_as_header(class_, f):
"""
Parse the Block header from the file-like object
"""
(version, previous_block_hash, merkle_root, timestamp,
difficulty, nonce) = parse_struct("L##LLL", f)
return class_(version, previous_block_hash, merkle_root, timestamp, difficulty, nonce)
@classmethod
def from_bin(class_, bytes):
f = io.BytesIO(bytes)
return class_.parse(f)
def __init__(self, version, previous_block_hash, merkle_root, timestamp, difficulty, nonce):
self.version = version
self.previous_block_hash = previous_block_hash
self.merkle_root = merkle_root
self.timestamp = timestamp
self.difficulty = difficulty
self.nonce = nonce
self.txs = []
def set_nonce(self, nonce):
self.nonce = nonce
if hasattr(self, "__hash"):
del self.__hash
def _calculate_hash(self):
s = io.BytesIO()
self.stream_header(s)
return double_sha256(s.getvalue())
def hash(self):
"""Calculate the hash for the block header. Note that this has the bytes
in the opposite order from how the header is usually displayed (so the
long string of 00 bytes is at the end, not the beginning)."""
if not hasattr(self, "__hash"):
self.__hash = self._calculate_hash()
return self.__hash
@classmethod
def _parse_transactions(class_, f, count, include_offsets=None):
txs = []
for i in range(count):
if include_offsets:
offset_in_block = f.tell()
tx = class_.Tx.parse(f)
txs.append(tx)
if include_offsets:
tx.offset_in_block = offset_in_block
return txs
def set_txs(self, txs, check_merkle_hash=True):
self.txs = txs
if not txs:
return
for tx in txs:
tx.block = self
if check_merkle_hash:
self.check_merkle_hash()
def as_blockheader(self):
return Block(self.version, self.previous_block_hash, self.merkle_root,
self.timestamp, self.difficulty, self.nonce)
def stream_header(self, f):
"""Stream the block header in the standard way to the file-like object f."""
stream_struct("L##LLL", f, self.version, self.previous_block_hash,
self.merkle_root, self.timestamp, self.difficulty, self.nonce)
def _stream_transactions(self, f):
if self.txs:
stream_struct("I", f, len(self.txs))
for tx in self.txs:
tx.stream(f)
def stream(self, f):
"""Stream the block header in the standard way to the file-like object f.
The Block subclass also includes the transactions."""
self.stream_header(f)
self._stream_transactions(f)
def as_bin(self):
"""Return the block (or header) as binary."""
f = io.BytesIO()
self.stream(f)
return f.getvalue()
def as_hex(self):
"""Return the block (or header) as hex."""
return b2h(self.as_bin())
def id(self):
"""Returns the hash of the block displayed with the bytes in the order
they are usually displayed in."""
return b2h_rev(self.hash())
def previous_block_id(self):
"""Returns the hash of the previous block, with the bytes in the order
they are usually displayed in."""
return b2h_rev(self.previous_block_hash)
def check_merkle_hash(self):
"""Raise a BadMerkleRootError if the Merkle hash of the
transactions does not match the Merkle hash included in the block."""
calculated_hash = merkle([tx.hash() for tx in self.txs], double_sha256)
if calculated_hash != self.merkle_root:
raise BadMerkleRootError(
"calculated %s but block contains %s" % (b2h(calculated_hash), b2h(self.merkle_root)))
def __str__(self):
c = '%s%s' % (self.__class__.__name__, '' if self.txs else 'Header')
return "%s [%s] (previous %s)" % (c, self.id(), self.previous_block_id())
def __repr__(self):
return self.__str__()
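if __name__ == "__main__":
    # Round-trip sketch with placeholder header fields: all-zero hashes and an
    # arbitrary timestamp/difficulty/nonce chosen purely for illustration.
    header = Block(1, b"\0" * 32, b"\0" * 32, 1231006505, 0x1d00ffff, 2083236893)
    reparsed = Block.parse_as_header(io.BytesIO(header.as_bin()))
    print(reparsed.id(), reparsed.previous_block_id())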
|
recipes/Python/502289_Persistent_Dictionary_Text/recipe-502289.py | tdiprima/code | 2,023 | 11153035 |
"""
A persistent dict object that uses a text file for storage.
Values saved in the text file update (and so override) the arguments passed to the constructor, e.g.:
PersistentTextDict(<path>, dict={'a':10}, foo='baz', bla=10)
This behavior allows setting defaults in code that can be overridden
by the text file on disk.
NOTE: only str, int, float, list, dict etc. are supported.
"""
import UserDict
class PersistentTextDict(UserDict.UserDict):
"""
>>> d = PersistentTextDict('test.dict')
>>> d['username'] = 'jsmith'
>>> del d
>>> d1 = PersistentTextDict('test.dict')
>>> d1['username']
'jsmith'
>>> d1['a'] = 10000
>>> d1['a']
10000
>>> d2 = PersistentTextDict('test.dict')
>>> d2['a']
10000
>>> del d2['a']
>>> del d2
>>> d3 = PersistentTextDict('test.dict')
>>> d3.has_key('a')
False
>>> d3.update({'a':9999})
>>> d3['a']
9999
"""
def __init__(self, path, dict=None, **kwargs):
UserDict.UserDict.__init__(self)
self.path = path
try:
self.isLoad = True
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
self.update(eval(open(path, 'r').read()))
self.isLoad = False
except IOError:
self.isLoad = False
def sync(self):
import pprint
open(self.path, 'w').write(pprint.pformat(self.data) + '\n')
def __setitem__(self, key, item):
self.data[key] = item
self.sync()
def __delitem__(self, key):
del self.data[key]
self.sync()
def update(self, dict=None, **kwargs):
UserDict.UserDict.update(self, dict=dict, **kwargs)
if not self.isLoad:
self.sync()
if __name__ == "__main__":
import doctest
doctest.testmod()
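if __name__ == "__main__":
    # Sketch of the defaults-override behaviour described in the module
    # docstring; 'example.dict' is an arbitrary scratch file name.
    defaults = PersistentTextDict('example.dict', host='localhost', port=8080)
    print(defaults['port'])  # 8080 unless example.dict already overrides it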
|
2019/07/25/Deploy a Flask App to Heroku With a Postgres Database [2019]/flask_qa/wsgi.py | kenjitagawa/youtube_video_code | 492 | 11153039 | from flask_qa import create_app
app = create_app() |
src/variables.py | parthbhope/NC_Concolic_Testing | 102 | 11153105 |
SIG_NORMAL = 0 # normal exit signal
SIG_COV = 2 # coverage guided exit signal
SIG_ADV = 3 # adversarial exit signal
|
jupyterlab_code_formatter/tests/test_handlers.py | suevii/jupyterlab_code_formatter | 528 | 11153112 | import json
import typing as t
import pkg_resources
import pytest
import requests
from jsonschema import validate
from jupyterlab_code_formatter.formatters import SERVER_FORMATTERS
from jupyterlab_code_formatter.handlers import setup_handlers
from notebook.tests.launchnotebook import NotebookTestBase
def _generate_list_formaters_entry_json_schema(
formatter_name: str,
) -> t.Dict[str, t.Any]:
return {
"type": "object",
"properties": {
formatter_name: {
"type": "object",
"properties": {
"enabled": {"type": "boolean"},
"label": {"type": "string"},
},
}
},
}
EXPECTED_VERSION_SCHEMA = {
"type": "object",
"properties": {
"version": {
"type": "string",
}
},
}
EXPECTED_LIST_FORMATTERS_SCHEMA = {
"type": "object",
"properties": {
"formatters": {
formatter_name: _generate_list_formaters_entry_json_schema(formatter_name)
for formatter_name in SERVER_FORMATTERS
}
},
}
EXPECTED_FROMAT_SCHEMA = {
"type": "object",
"properties": {
"code": {
"type": "array",
"items": {
"type": "object",
"properties": {"code": {"type": "string"}, "error": {"type": "string"}},
},
}
},
}
SIMPLE_VALID_PYTHON_CODE = "x= 22; e =1"
class TestHandlers(NotebookTestBase):
def setUp(self) -> None:
setup_handlers(self.notebook.web_app)
def _create_headers(
self, plugin_version: t.Optional[str] = None
) -> t.Dict[str, str]:
return {
"Plugin-Version": plugin_version
if plugin_version is not None
else pkg_resources.get_distribution("jupyterlab_code_formatter").version
}
def _format_code_request(
self,
formatter: str,
code: t.List[str],
options: t.Dict[str, t.Any],
plugin_version: t.Optional[str] = None,
) -> requests.Response:
return self.request(
verb="POST",
path="/jupyterlab_code_formatter/format",
data=json.dumps(
{
"code": code,
"options": options,
"notebook": True,
"formatter": formatter,
}
),
headers=self._create_headers(plugin_version),
)
@staticmethod
def _check_http_200_and_schema(response):
assert response.status_code == 200
json_result = response.json()
validate(instance=json_result, schema=EXPECTED_FROMAT_SCHEMA)
return json_result
def test_list_formatters(self):
"""Check if the formatters list route works."""
response = self.request(
verb="GET",
path="/jupyterlab_code_formatter/formatters",
headers=self._create_headers(),
)
validate(instance=response.json(), schema=EXPECTED_LIST_FORMATTERS_SCHEMA)
def test_404_on_unknown(self):
"""Check that it 404 correctly if formatter name is bad."""
response = self._format_code_request(
formatter="UNKNOWN", code=[SIMPLE_VALID_PYTHON_CODE], options={}
)
assert response.status_code == 404
def test_can_apply_python_formatter(self):
"""Check that it can apply black with simple config."""
response = self._format_code_request(
formatter="black",
code=[SIMPLE_VALID_PYTHON_CODE],
options={"line_length": 88},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == "x = 22\ne = 1"
def test_can_use_black_config(self):
"""Check that it can apply black with advanced config."""
given = "some_string='abc'"
expected = "some_string = 'abc'"
response = self._format_code_request(
formatter="black",
options={"line_length": 123, "string_normalization": False},
code=[given],
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_return_error_if_any(self):
"""Check that it returns the error if any."""
bad_python = "this_is_bad = 'hihi"
response = self._format_code_request(
formatter="black",
options={"line_length": 123, "string_normalization": False},
code=[bad_python],
)
json_result = self._check_http_200_and_schema(response)
assert (
json_result["code"][0]["error"] == "Cannot parse: 1:13: this_is_bad = 'hihi"
)
def test_can_handle_magic(self):
"""Check that it's fine to run formatters for code with magic."""
given = '%%timeit\nsome_string = "abc"'
expected = '%%timeit\nsome_string = "abc"'
for formatter in ["black", "yapf", "isort"]:
response = self._format_code_request(
formatter=formatter,
code=[given],
options={},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_can_handle_shell_cmd(self):
"""Check that it's fine to run formatters for code with shell cmd."""
given = '%%timeit\nsome_string = "abc"\n!pwd'
expected = '%%timeit\nsome_string = "abc"\n!pwd'
for formatter in ["black", "yapf", "isort"]:
response = self._format_code_request(
formatter=formatter,
code=[given],
options={},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_can_handle_incompatible_magic_language(self):
"""Check that it will ignore incompatible magic language cellblock."""
given = "%%html\n<h1>Hi</h1>"
expected = "%%html\n<h1>Hi</h1>"
for formatter in ["black", "yapf", "isort"]:
response = self._format_code_request(
formatter=formatter,
code=[given],
options={},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_can_handle_incompatible_magic_language_single(self):
"""Check that it will ignore incompatible magic language cellblock with single %."""
given = "%html <h1>Hi</h1>"
expected = "%html <h1>Hi</h1>"
for formatter in ["black", "yapf", "isort"]:
response = self._format_code_request(
formatter=formatter,
code=[given],
options={},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_can_ipython_help_signle(self) -> None:
"""Check that it will ignore single question mark interactive help lines on the fly."""
given = " bruh?\nprint('test')\n#test?"
expected = ' bruh?\nprint("test")\n# test?'
response = self._format_code_request(
formatter="black",
code=[given],
options={},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_can_ipython_help_double(self) -> None:
"""Check that it will ignore double question mark interactive help lines on the fly."""
given = " bruh??\nprint('test')\n#test?"
expected = ' bruh??\nprint("test")\n# test?'
response = self._format_code_request(
formatter="black",
code=[given],
options={},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_will_ignore_question_mark(self) -> None:
"""Check that it will ignore single question mark in comments."""
given = """def f():
# bruh what?
# again bruh? really
# a ? b
print('hi')
x = '?'"""
expected = """def f():
# bruh what?
# again bruh? really
# a ? b
print("hi")
x = "?\""""
response = self._format_code_request(
formatter="black",
code=[given],
options={},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_will_ignore_question_mark2(self) -> None:
"""Check that it will ignore single question mark in comments."""
given = """def f():
# bruh what??
# again bruh?? really
# a ? b ? c
print('hi')"""
expected = """def f():
# bruh what??
# again bruh?? really
# a ? b ? c
print("hi")"""
response = self._format_code_request(
formatter="black",
code=[given],
options={},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_will_ignore_question_weird(self) -> None:
given = """wat
wat??"""
expected = """wat
wat??"""
response = self._format_code_request(
formatter="black",
code=[given],
options={},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_can_use_styler(self):
given = "a = 3; 2"
expected = "a <- 3\n2"
response = self._format_code_request(
formatter="styler",
code=[given],
options={"scope": "tokens"},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_can_use_styler_2(self):
given = """data_frame(
small = 2 ,
medium = 4,#comment without space
large =6
)"""
expected = """data_frame(
small = 2,
medium = 4, # comment without space
large = 6
)"""
response = self._format_code_request(
code=[given],
options={"strict": False},
formatter="styler",
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_can_use_styler_3(self):
given = "1++1/2*2^2"
expected = "1 + +1/2*2^2"
response = self._format_code_request(
formatter="styler",
options={
"math_token_spacing": {
"one": ["'+'", "'-'"],
"zero": ["'/'", "'*'", "'^'"],
}
},
code=[given],
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_can_use_styler_4(self):
given = """a <- function() {
### not to be indented
# indent normally
33
}"""
expected = """a <- function() {
### not to be indented
# indent normally
33
}"""
response = self._format_code_request(
code=[given],
formatter="styler",
options=dict(
reindention=dict(regex_pattern="^###", indention=0, comments_only=True)
),
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_can_use_styler_5(self):
given = """call(
# SHOULD BE ONE SPACE BEFORE
1,2)
"""
expected = """call(
# SHOULD BE ONE SPACE BEFORE
1, 2
)"""
response = self._format_code_request(
code=[given],
formatter="styler",
options=dict(indent_by=4, start_comments_with_one_space=True),
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_can_use_styler_6(self):
given = "1+1-3"
expected = "1 + 1 - 3"
response = self._format_code_request(
code=[given],
formatter="styler",
options={
"math_token_spacing": "tidyverse_math_token_spacing",
"reindention": "tidyverse_reindention",
},
)
json_result = self._check_http_200_and_schema(response)
assert json_result["code"][0]["code"] == expected
def test_422_on_mismatch_version_1(self):
response = self.request(
verb="GET",
path="/jupyterlab_code_formatter/formatters",
headers=self._create_headers("0.0.0"),
)
assert response.status_code == 422
def test_200_on_version_without_header(self):
response = self.request(
verb="GET",
path="/jupyterlab_code_formatter/version",
)
assert response.status_code == 200
validate(instance=response.json(), schema=EXPECTED_VERSION_SCHEMA)
def test_200_on_version_with_wrong_header(self):
response = self.request(
verb="GET",
path="/jupyterlab_code_formatter/version",
headers=self._create_headers("0.0.0"),
)
assert response.status_code == 200
validate(instance=response.json(), schema=EXPECTED_VERSION_SCHEMA)
def test_200_on_version_with_correct_header(self):
response = self.request(
verb="GET",
path="/jupyterlab_code_formatter/version",
headers=self._create_headers(),
)
assert response.status_code == 200
validate(instance=response.json(), schema=EXPECTED_VERSION_SCHEMA)
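if __name__ == "__main__":
    # Sketch of the same format request outside the test harness, assuming a
    # token-less Jupyter server at BASE_URL with the extension installed
    # (both the URL and the missing auth token are assumptions here).
    BASE_URL = "http://localhost:8888"
    body = {
        "code": [SIMPLE_VALID_PYTHON_CODE],
        "options": {"line_length": 88},
        "notebook": True,
        "formatter": "black",
    }
    response = requests.post(
        BASE_URL + "/jupyterlab_code_formatter/format", data=json.dumps(body)
    )
    print(response.status_code, response.text)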
|
fiducial_slam/scripts/move_origin.py | teosnare/fiducials | 232 | 11153119 |
#!/usr/bin/python
"""
Move origin of fiducial co-ordinate system
"""
import numpy, sys, os
from fiducial_slam.map import Map
if __name__ == "__main__":
argc = len(sys.argv)
if argc != 4 and argc != 5:
print "Usage: %s x y z [file]" % sys.argv[0]
sys.exit(1)
offset = numpy.array([float(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3])])
if argc == 5:
filename = sys.argv[4]
else:
filename = "~/.ros/slam/map.txt"
filename = os.path.expanduser(filename)
map = Map(filename)
fids = map.keys()
for fid in fids:
f = map[fid]
f.position += offset
map.save()
|
jarviscli/plugins/upside-down.py | snehkhajanchi/Jarvis | 2,605 | 11153122 |
from plugin import plugin
from colorama import Fore
@plugin('upside down')
def upside_down(jarvis, s):
user_input = jarvis.input("Enter string to be converted to upside-down (only english letters will be converted): ")
result = convert_input(jarvis, user_input)
jarvis.say(result, Fore.GREEN)
def convert_input(jarvis, u_input):
upside_str = 'zʎxʍʌnʇsɹbdouɯןʞſıɥbɟǝpɔqɐ'
normal_str = 'abcdefghijklmnopqrstuvwxyz'
upside_str = upside_str[::-1]
converter_dict = {a: b for a, b in zip(normal_str, upside_str)}
result = ''
    for letter in u_input:
        if letter in converter_dict:
            result += converter_dict[letter]
        else:
            # keep characters that have no upside-down mapping (spaces, digits, ...)
            result += letter
return result[::-1]
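if __name__ == "__main__":
    # Standalone check of the mapping; convert_input ignores its jarvis
    # argument, so None is passed here.
    print(convert_input(None, "hello world"))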
|
RL-Quadcopter/quad_controller_rl/src/quad_controller_rl/util.py | edwardpan/cn-deep-learning | 474 | 11153125 |
"""Utility functions."""
import pandas as pd
import rospy
from datetime import datetime
def get_param(name):
"""Return parameter value specified in ROS launch file or via command line, e.g. agent:=DDPG."""
return rospy.get_param(name)
def get_timestamp(t=None, format='%Y-%m-%d_%H-%M-%S'):
"""Return timestamp as a string; default: current time, format: YYYY-DD-MM_hh-mm-ss."""
if t is None:
t = datetime.now()
return t.strftime(format)
def plot_stats(csv_filename, columns=['total_reward'], **kwargs):
"""Plot specified columns from CSV file."""
df_stats = pd.read_csv(csv_filename)
df_stats[columns].plot(**kwargs)
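if __name__ == "__main__":
    # Quick check of the timestamp helper; get_param and plot_stats need a
    # running ROS node / an existing stats CSV, so they are not exercised here.
    print(get_timestamp())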
|
EventFilter/SiStripRawToDigi/python/test/SiStripDigiValidator_cfi.py | ckamtsikis/cmssw | 852 | 11153154 | import FWCore.ParameterSet.Config as cms
DigiValidator = cms.EDAnalyzer(
"SiStripDigiValidator",
TagCollection1 = cms.untracked.InputTag("DigiSource"),
TagCollection2 = cms.untracked.InputTag("siStripDigis","ZeroSuppressed"),
RawCollection1 = cms.untracked.bool(False),
RawCollection2 = cms.untracked.bool(False),
)
|
graphgym/models/layer_pyg.py | SCAuFish/GraphGym | 940 | 11153160 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric as pyg
from graphgym.config import cfg
from graphgym.models.act import act_dict
from graphgym.contrib.layer.generalconv import (GeneralConvLayer,
GeneralEdgeConvLayer)
from graphgym.contrib.layer import *
import graphgym.register as register
## General classes
class GeneralLayer(nn.Module):
'''General wrapper for layers'''
def __init__(self, name, dim_in, dim_out, has_act=True, has_bn=True,
has_l2norm=False, **kwargs):
super(GeneralLayer, self).__init__()
self.has_l2norm = has_l2norm
has_bn = has_bn and cfg.gnn.batchnorm
self.layer = layer_dict[name](dim_in, dim_out,
bias=not has_bn, **kwargs)
layer_wrapper = []
if has_bn:
layer_wrapper.append(nn.BatchNorm1d(
dim_out, eps=cfg.bn.eps, momentum=cfg.bn.mom))
if cfg.gnn.dropout > 0:
layer_wrapper.append(nn.Dropout(
p=cfg.gnn.dropout, inplace=cfg.mem.inplace))
if has_act:
layer_wrapper.append(act_dict[cfg.gnn.act])
self.post_layer = nn.Sequential(*layer_wrapper)
def forward(self, batch):
batch = self.layer(batch)
if isinstance(batch, torch.Tensor):
batch = self.post_layer(batch)
if self.has_l2norm:
batch = F.normalize(batch, p=2, dim=1)
else:
batch.x = self.post_layer(batch.x)
if self.has_l2norm:
batch.x = F.normalize(batch.x, p=2, dim=1)
return batch
class GeneralMultiLayer(nn.Module):
'''General wrapper for stack of layers'''
def __init__(self, name, num_layers, dim_in, dim_out, dim_inner=None,
final_act=True, **kwargs):
super(GeneralMultiLayer, self).__init__()
dim_inner = dim_in if dim_inner is None else dim_inner
for i in range(num_layers):
d_in = dim_in if i == 0 else dim_inner
d_out = dim_out if i == num_layers - 1 else dim_inner
has_act = final_act if i == num_layers - 1 else True
layer = GeneralLayer(name, d_in, d_out, has_act, **kwargs)
self.add_module('Layer_{}'.format(i), layer)
def forward(self, batch):
for layer in self.children():
batch = layer(batch)
return batch
## Core basic layers
# Input: batch; Output: batch
class Linear(nn.Module):
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(Linear, self).__init__()
self.model = nn.Linear(dim_in, dim_out, bias=bias)
def forward(self, batch):
if isinstance(batch, torch.Tensor):
batch = self.model(batch)
else:
batch.x = self.model(batch.x)
return batch
class BatchNorm1dNode(nn.Module):
'''General wrapper for layers'''
def __init__(self, dim_in):
super(BatchNorm1dNode, self).__init__()
self.bn = nn.BatchNorm1d(dim_in, eps=cfg.bn.eps, momentum=cfg.bn.mom)
def forward(self, batch):
batch.x = self.bn(batch.x)
return batch
class BatchNorm1dEdge(nn.Module):
'''General wrapper for layers'''
def __init__(self, dim_in):
super(BatchNorm1dEdge, self).__init__()
self.bn = nn.BatchNorm1d(dim_in, eps=cfg.bn.eps, momentum=cfg.bn.mom)
def forward(self, batch):
batch.edge_attr = self.bn(batch.edge_attr)
return batch
class MLP(nn.Module):
def __init__(self, dim_in, dim_out, bias=True, dim_inner=None,
num_layers=2, **kwargs):
'''
Note: MLP works for 0 layers
'''
super(MLP, self).__init__()
dim_inner = dim_in if dim_inner is None else dim_inner
layers = []
if num_layers > 1:
layers.append(
GeneralMultiLayer('linear', num_layers - 1, dim_in, dim_inner,
dim_inner, final_act=True))
layers.append(Linear(dim_inner, dim_out, bias))
else:
layers.append(Linear(dim_in, dim_out, bias))
self.model = nn.Sequential(*layers)
def forward(self, batch):
if isinstance(batch, torch.Tensor):
batch = self.model(batch)
else:
batch.x = self.model(batch.x)
return batch
class GCNConv(nn.Module):
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GCNConv, self).__init__()
self.model = pyg.nn.GCNConv(dim_in, dim_out, bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index)
return batch
class SAGEConv(nn.Module):
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(SAGEConv, self).__init__()
self.model = pyg.nn.SAGEConv(dim_in, dim_out, bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index)
return batch
class GATConv(nn.Module):
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GATConv, self).__init__()
self.model = pyg.nn.GATConv(dim_in, dim_out, bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index)
return batch
class GINConv(nn.Module):
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GINConv, self).__init__()
gin_nn = nn.Sequential(nn.Linear(dim_in, dim_out), nn.ReLU(),
nn.Linear(dim_out, dim_out))
self.model = pyg.nn.GINConv(gin_nn)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index)
return batch
class SplineConv(nn.Module):
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(SplineConv, self).__init__()
self.model = pyg.nn.SplineConv(dim_in, dim_out,
dim=1, kernel_size=2, bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index,
batch.edge_attr)
return batch
class GeneralConv(nn.Module):
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GeneralConv, self).__init__()
self.model = GeneralConvLayer(dim_in, dim_out, bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index)
return batch
class GeneralEdgeConv(nn.Module):
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GeneralEdgeConv, self).__init__()
self.model = GeneralEdgeConvLayer(dim_in, dim_out, bias=bias)
def forward(self, batch):
batch.x = self.model(batch.x, batch.edge_index,
edge_feature=batch.edge_attr)
return batch
class GeneralSampleEdgeConv(nn.Module):
def __init__(self, dim_in, dim_out, bias=False, **kwargs):
super(GeneralSampleEdgeConv, self).__init__()
self.model = GeneralEdgeConvLayer(dim_in, dim_out, bias=bias)
def forward(self, batch):
edge_mask = torch.rand(batch.edge_index.shape[1]) < cfg.gnn.keep_edge
edge_index = batch.edge_index[:, edge_mask]
edge_feature = batch.edge_attr[edge_mask, :]
batch.x = self.model(batch.x, edge_index,
edge_feature=edge_feature)
return batch
layer_dict = {
'linear': Linear,
'mlp': MLP,
'gcnconv': GCNConv,
'sageconv': SAGEConv,
'gatconv': GATConv,
'splineconv': SplineConv,
'ginconv': GINConv,
'generalconv': GeneralConv,
'generaledgeconv': GeneralEdgeConv,
'generalsampleedgeconv': GeneralSampleEdgeConv,
}
# register additional convs
layer_dict = {**register.layer_dict, **layer_dict}
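if __name__ == "__main__":
    # Rough smoke test of the wrappers above on a tiny toy graph; assumes the
    # default GraphGym cfg (populated when graphgym.config is imported).
    from torch_geometric.data import Data
    toy = Data(x=torch.randn(4, 8),
               edge_index=torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]]))
    layer = GeneralLayer('gcnconv', 8, 16)
    print(layer(toy).x.shape)  # expected: torch.Size([4, 16])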
|
nova/tests/functional/regressions/test_bug_1938326.py | zjzh/nova | 1,874 | 11153167 | # Copyright 2021, Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
class TestMigrateFromDownHost(integrated_helpers._IntegratedTestBase):
"""Regression test for bug #1938326
Assert the behaviour of n-api when requests are made to migrate an instance
    from a disabled, forced-down, down, or disabled-and-down compute
    host.
    Bug #1938326 specifically covers the case where a request is made and
accepted to migrate an instance from a disabled and down compute host.
"""
microversion = 'latest'
ADMIN_API = True
def _setup_compute_service(self):
# We want the service to be marked down in a reasonable time while
# ensuring we don't accidentally mark services as down prematurely
self.flags(report_interval=1)
self.flags(service_down_time=6)
# Use two compute services to make it easier to assert the call from
# the dest to the src, we could also test this for same host resize.
self._start_compute('src')
self._start_compute('dest')
def test_migrate_from_disabled_host(self):
"""Assert that migration requests for disabled hosts are allowed
"""
# Launch an instance on src
server = self._create_server(host='src', networks='none')
# Mark the compute service as disabled
source_compute_id = self.api.get_services(
host='src', binary='nova-compute')[0]['id']
self.api.put_service(source_compute_id, {"status": "disabled"})
self._wait_for_service_parameter(
'src', 'nova-compute',
{
'status': 'disabled',
'state': 'up'
}
)
# Assert that we can migrate and confirm from a disabled but up compute
self._migrate_server(server)
self._confirm_resize(server)
def test_migrate_from_forced_down_host(self):
"""Assert that migration requests for forced down hosts are rejected
"""
# Launch an instance on src
server = self._create_server(host='src', networks='none')
# Force down the compute
source_compute_id = self.api.get_services(
host='src', binary='nova-compute')[0]['id']
self.api.put_service(source_compute_id, {'forced_down': 'true'})
# NOTE(gibi): extra retries are needed as the default 10 retries with
# 0.5 second sleep is close to the 6 seconds down timeout
self._wait_for_service_parameter(
'src', 'nova-compute',
{
'forced_down': True,
'state': 'down',
'status': 'enabled'
},
max_retries=20,
)
# Assert that we cannot migrate from a forced down compute
ex = self.assertRaises(
client.OpenStackApiException, self._migrate_server, server)
self.assertEqual(409, ex.response.status_code)
def test_migrate_from_down_host(self):
"""Assert that migration requests from down hosts are rejected
"""
# Launch an instance on src
server = self._create_server(host='src', networks='none')
# Stop the compute service and wait until it's down
self.computes['src'].stop()
# NOTE(gibi): extra retries are needed as the default 10 retries with
# 0.5 second sleep is close to the 6 seconds down timeout
self._wait_for_service_parameter(
'src', 'nova-compute',
{
'state': 'down',
'status': 'enabled'
},
max_retries=20,
)
# Assert that requests to migrate from down computes are rejected
ex = self.assertRaises(
client.OpenStackApiException, self.api.post_server_action,
server['id'], {'migrate': None})
self.assertEqual(409, ex.response.status_code)
def test_migrate_from_disabled_down_host(self):
"""Assert that migration requests for disabled down hosts are rejected
"""
# Launch an instance on src
server = self._create_server(host='src', networks='none')
# Mark the compute service as disabled
source_compute_id = self.api.get_services(
host='src', binary='nova-compute')[0]['id']
self.api.put_service(source_compute_id, {"status": "disabled"})
self._wait_for_service_parameter(
'src', 'nova-compute', {'status': 'disabled'})
# Stop the compute service and wait until it's down
self.computes['src'].stop()
# NOTE(gibi): extra retries are needed as the default 10 retries with
# 0.5 second sleep is close to the 6 seconds down timeout
self._wait_for_service_parameter(
'src', 'nova-compute', {'state': 'down'}, max_retries=20)
ex = self.assertRaises(
client.OpenStackApiException, self.api.post_server_action,
server['id'], {'migrate': None})
self.assertEqual(409, ex.response.status_code)
|
Devices/GatewayConnectedDevices/BtUSB_2_BtUART_Example/TestServer.py | HydAu/AzureConnectTheDots | 235 | 11153200 | '''
Copyright (c) Microsoft Open Technologies, Inc. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
This is a tiny TCP server for testing Python scripts that send sensor data to the gateway.
Instead of setting up the whole gateway, one can use this tiny server just to check whether the device works and transmits data to the network.
'''
import sys
import socket
HOST = '127.0.0.1'
PORT = 5000
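def example_client(payload='{"measurename": "Temperature", "value": 21.5}'):
    # Hypothetical helper sketching how a sensor script under test could push
    # one reading to the server below; the JSON fields are only an example.
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((HOST, PORT))
    c.send(payload.encode('utf-8'))
    c.close()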
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((HOST, PORT))
except socket.error as msg:
print("Bind failed. Error Code : " + str(msg[0]) + " Message " + msg[1])
sys.exit()
print ("Socket bind complete")
s.listen(10)
conn, addr = s.accept()
while True:
data = conn.recv(256)
if len(data) > 0:
strData = data.decode('utf-8')
        print(strData)
else:
break
s.close()
|
backpack/extensions/secondorder/diag_hessian/conv3d.py | jabader97/backpack | 395 | 11153216 |
"""Module extensions for diagonal Hessian properties of ``torch.nn.Conv3d``."""
from backpack.core.derivatives.conv3d import Conv3DDerivatives
from backpack.extensions.secondorder.diag_hessian.convnd import (
BatchDiagHConvND,
DiagHConvND,
)
class DiagHConv3d(DiagHConvND):
"""Module extension for the Hessian diagonal of ``torch.nn.Conv3d``."""
def __init__(self):
"""Store parameter names and derivatives object."""
super().__init__(derivatives=Conv3DDerivatives(), params=["bias", "weight"])
class BatchDiagHConv3d(BatchDiagHConvND):
"""Module extension for the per-sample Hessian diagonal of ``torch.nn.Conv3d``."""
def __init__(self):
"""Store parameter names and derivatives object."""
super().__init__(derivatives=Conv3DDerivatives(), params=["bias", "weight"])
|
tests/__init__.py | adamserafini/pyxl4 | 366 | 11153230 | import pyxl.codec.register
|
third_party/chromite/cli/cros/cros_buildresult_unittest.py | zipated/src | 2,151 | 11153248 | # -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests the `cros buildresult` command."""
from __future__ import print_function
import json
from chromite.cli import command_unittest
from chromite.cli.cros import cros_buildresult
from chromite.lib import cros_test_lib
FAKE_BUILD_STATUS = {
'id': 1234,
'buildbucket_id': 'buildbucket_value',
'status': 'pass',
'artifacts_url': 'fake_artifacts_url',
'toolchain_url': 'fake_toolchain_url',
'stages': [
{'name': 'stage_a', 'status': 'pass'},
{'name': 'stage_b', 'status': 'pass'},
{'name': 'stage_c', 'status': 'pass'},
],
}
class MockBuildresultCommand(command_unittest.MockCommand):
"""Mock out the `cros buildresult` command."""
TARGET = 'chromite.cli.cros.cros_buildresult.BuildResultCommand'
TARGET_CLASS = cros_buildresult.BuildResultCommand
COMMAND = 'buildresult'
class BuildresultTest(cros_test_lib.MockTestCase):
"""Base class for buildresult command tests."""
def setUp(self):
self.cmd_mock = None
def SetupCommandMock(self, cmd_args):
"""Sets up the `cros buildresult` command mock."""
self.cmd_mock = MockBuildresultCommand(cmd_args)
self.StartPatcher(self.cmd_mock)
return self.cmd_mock.inst.options
class BuildresultReportTest(BuildresultTest):
"""Test the report generation functions."""
def setUp(self):
self.maxDiff = None
def testReport(self):
result = cros_buildresult.Report([FAKE_BUILD_STATUS])
expected = '''cidb_id: 1234
buildbucket_id: buildbucket_value
status: pass
artifacts_url: fake_artifacts_url
toolchain_url: fake_toolchain_url
stages:
stage_a: pass
stage_b: pass
stage_c: pass
'''
self.assertEqual(expected, result)
def testReportJson(self):
result = cros_buildresult.ReportJson([FAKE_BUILD_STATUS])
expected = {
'buildbucket_value': {
'cidb_id': 1234,
'buildbucket_id': 'buildbucket_value',
'status': 'pass',
'artifacts_url': 'fake_artifacts_url',
'toolchain_url': 'fake_toolchain_url',
'stages': {
'stage_a': 'pass',
'stage_b': 'pass',
'stage_c': 'pass',
},
},
}
self.assertEqual(expected, json.loads(result))
|
torch_backend.py | lengstrom/cifar10-fast | 493 | 11153269 | import numpy as np
import torch
from torch import nn
from core import *
from collections import namedtuple
from itertools import count
torch.backends.cudnn.benchmark = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cpu = torch.device("cpu")
@cat.register(torch.Tensor)
def _(*xs):
return torch.cat(xs)
@to_numpy.register(torch.Tensor)
def _(x):
return x.detach().cpu().numpy()
@pad.register(torch.Tensor)
def _(x, border):
return nn.ReflectionPad2d(border)(x)
@transpose.register(torch.Tensor)
def _(x, source, target):
return x.permute([source.index(d) for d in target])
def to(*args, **kwargs):
return lambda x: x.to(*args, **kwargs)
@flip_lr.register(torch.Tensor)
def _(x):
return torch.flip(x, [-1])
#####################
## dataset
#####################
from functools import lru_cache as cache
@cache(None)
def cifar10(root='./data'):
try:
import torchvision
download = lambda train: torchvision.datasets.CIFAR10(root=root, train=train, download=True)
return {k: {'data': v.data, 'targets': v.targets} for k,v in [('train', download(train=True)), ('valid', download(train=False))]}
except ImportError:
from tensorflow.keras import datasets
(train_images, train_labels), (valid_images, valid_labels) = datasets.cifar10.load_data()
return {
'train': {'data': train_images, 'targets': train_labels.squeeze()},
'valid': {'data': valid_images, 'targets': valid_labels.squeeze()}
}
cifar10_mean, cifar10_std = [
(125.31, 122.95, 113.87), # equals np.mean(cifar10()['train']['data'], axis=(0,1,2))
(62.99, 62.09, 66.70), # equals np.std(cifar10()['train']['data'], axis=(0,1,2))
]
cifar10_classes= 'airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck'.split(', ')
#####################
## data loading
#####################
class DataLoader():
def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False):
self.dataset = dataset
self.batch_size = batch_size
self.set_random_choices = set_random_choices
self.dataloader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True, shuffle=shuffle, drop_last=drop_last
)
def __iter__(self):
if self.set_random_choices:
self.dataset.set_random_choices()
return ({'input': x.to(device).half(), 'target': y.to(device).long()} for (x,y) in self.dataloader)
def __len__(self):
return len(self.dataloader)
#GPU dataloading
chunks = lambda data, splits: (data[start:end] for (start, end) in zip(splits, splits[1:]))
even_splits = lambda N, num_chunks: np.cumsum([0] + [(N//num_chunks)+1]*(N % num_chunks) + [N//num_chunks]*(num_chunks - (N % num_chunks)))
def shuffled(xs, inplace=False):
xs = xs if inplace else copy.copy(xs)
np.random.shuffle(xs)
return xs
def transformed(data, targets, transform, max_options=None, unshuffle=False):
i = torch.randperm(len(data), device=device)
data = data[i]
options = shuffled(transform.options(data.shape), inplace=True)[:max_options]
data = torch.cat([transform(x, **choice) for choice, x in zip(options, chunks(data, even_splits(len(data), len(options))))])
return (data[torch.argsort(i)], targets) if unshuffle else (data, targets[i])
class GPUBatches():
def __init__(self, batch_size, transforms=(), dataset=None, shuffle=True, drop_last=False, max_options=None):
self.dataset, self.transforms, self.shuffle, self.max_options = dataset, transforms, shuffle, max_options
N = len(dataset['data'])
self.splits = list(range(0, N+1, batch_size))
if not drop_last and self.splits[-1] != N:
self.splits.append(N)
def __iter__(self):
data, targets = self.dataset['data'], self.dataset['targets']
for transform in self.transforms:
data, targets = transformed(data, targets, transform, max_options=self.max_options, unshuffle=not self.shuffle)
if self.shuffle:
i = torch.randperm(len(data), device=device)
data, targets = data[i], targets[i]
return ({'input': x.clone(), 'target': y} for (x, y) in zip(chunks(data, self.splits), chunks(targets, self.splits)))
def __len__(self):
return len(self.splits) - 1
#####################
## Layers
#####################
#Network
class Network(nn.Module):
def __init__(self, net):
super().__init__()
self.graph = build_graph(net)
for path, (val, _) in self.graph.items():
setattr(self, path.replace('/', '_'), val)
def nodes(self):
return (node for node, _ in self.graph.values())
def forward(self, inputs):
outputs = dict(inputs)
for k, (node, ins) in self.graph.items():
#only compute nodes that are not supplied as inputs.
if k not in outputs:
outputs[k] = node(*[outputs[x] for x in ins])
return outputs
def half(self):
for node in self.nodes():
if isinstance(node, nn.Module) and not isinstance(node, nn.BatchNorm2d):
node.half()
return self
class Identity(namedtuple('Identity', [])):
def __call__(self, x): return x
class Add(namedtuple('Add', [])):
def __call__(self, x, y): return x + y
class AddWeighted(namedtuple('AddWeighted', ['wx', 'wy'])):
def __call__(self, x, y): return self.wx*x + self.wy*y
class Mul(nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def __call__(self, x):
return x*self.weight
class Flatten(nn.Module):
def forward(self, x): return x.view(x.size(0), x.size(1))
class Concat(nn.Module):
def forward(self, *xs): return torch.cat(xs, 1)
class BatchNorm(nn.BatchNorm2d):
def __init__(self, num_features, eps=1e-05, momentum=0.1, weight_freeze=False, bias_freeze=False, weight_init=1.0, bias_init=0.0):
super().__init__(num_features, eps=eps, momentum=momentum)
if weight_init is not None: self.weight.data.fill_(weight_init)
if bias_init is not None: self.bias.data.fill_(bias_init)
self.weight.requires_grad = not weight_freeze
self.bias.requires_grad = not bias_freeze
class GhostBatchNorm(BatchNorm):
def __init__(self, num_features, num_splits, **kw):
super().__init__(num_features, **kw)
self.num_splits = num_splits
self.register_buffer('running_mean', torch.zeros(num_features*self.num_splits))
self.register_buffer('running_var', torch.ones(num_features*self.num_splits))
def train(self, mode=True):
if (self.training is True) and (mode is False): #lazily collate stats when we are going to use them
self.running_mean = torch.mean(self.running_mean.view(self.num_splits, self.num_features), dim=0).repeat(self.num_splits)
self.running_var = torch.mean(self.running_var.view(self.num_splits, self.num_features), dim=0).repeat(self.num_splits)
return super().train(mode)
def forward(self, input):
N, C, H, W = input.shape
if self.training or not self.track_running_stats:
return nn.functional.batch_norm(
input.view(-1, C*self.num_splits, H, W), self.running_mean, self.running_var,
self.weight.repeat(self.num_splits), self.bias.repeat(self.num_splits),
True, self.momentum, self.eps).view(N, C, H, W)
else:
return nn.functional.batch_norm(
input, self.running_mean[:self.num_features], self.running_var[:self.num_features],
self.weight, self.bias, False, self.momentum, self.eps)
# Losses
class CrossEntropyLoss(namedtuple('CrossEntropyLoss', [])):
def __call__(self, log_probs, target):
return torch.nn.functional.nll_loss(log_probs, target, reduction='none')
class KLLoss(namedtuple('KLLoss', [])):
def __call__(self, log_probs):
return -log_probs.mean(dim=1)
class Correct(namedtuple('Correct', [])):
def __call__(self, classifier, target):
return classifier.max(dim = 1)[1] == target
class LogSoftmax(namedtuple('LogSoftmax', ['dim'])):
def __call__(self, x):
return torch.nn.functional.log_softmax(x, self.dim, _stacklevel=5)
x_ent_loss = Network({
'loss': (nn.CrossEntropyLoss(reduction='none'), ['logits', 'target']),
'acc': (Correct(), ['logits', 'target'])
})
label_smoothing_loss = lambda alpha: Network({
'logprobs': (LogSoftmax(dim=1), ['logits']),
'KL': (KLLoss(), ['logprobs']),
'xent': (CrossEntropyLoss(), ['logprobs', 'target']),
'loss': (AddWeighted(wx=1-alpha, wy=alpha), ['xent', 'KL']),
'acc': (Correct(), ['logits', 'target']),
})
trainable_params = lambda model: {k:p for k,p in model.named_parameters() if p.requires_grad}
#####################
## Optimisers
#####################
from functools import partial
def nesterov_update(w, dw, v, lr, weight_decay, momentum):
dw.add_(weight_decay, w).mul_(-lr)
v.mul_(momentum).add_(dw)
w.add_(dw.add_(momentum, v))
norm = lambda x: torch.norm(x.reshape(x.size(0),-1).float(), dim=1)[:,None,None,None]
def LARS_update(w, dw, v, lr, weight_decay, momentum):
nesterov_update(w, dw, v, lr*(norm(w)/(norm(dw)+1e-2)).to(w.dtype), weight_decay, momentum)
def zeros_like(weights):
return [torch.zeros_like(w) for w in weights]
def optimiser(weights, param_schedule, update, state_init):
weights = list(weights)
return {'update': update, 'param_schedule': param_schedule, 'step_number': 0, 'weights': weights, 'opt_state': state_init(weights)}
def opt_step(update, param_schedule, step_number, weights, opt_state):
step_number += 1
param_values = {k: f(step_number) for k, f in param_schedule.items()}
for w, v in zip(weights, opt_state):
if w.requires_grad:
update(w.data, w.grad.data, v, **param_values)
return {'update': update, 'param_schedule': param_schedule, 'step_number': step_number, 'weights': weights, 'opt_state': opt_state}
LARS = partial(optimiser, update=LARS_update, state_init=zeros_like)
SGD = partial(optimiser, update=nesterov_update, state_init=zeros_like)
#####################
## training
#####################
from itertools import chain
def reduce(batches, state, steps):
#state: is a dictionary
#steps: are functions that take (batch, state)
#and return a dictionary of updates to the state (or None)
for batch in chain(batches, [None]):
#we send an extra batch=None at the end for steps that
#need to do some tidying-up (e.g. log_activations)
for step in steps:
updates = step(batch, state)
if updates:
for k,v in updates.items():
state[k] = v
return state
#define keys in the state dict as constants
MODEL = 'model'
LOSS = 'loss'
VALID_MODEL = 'valid_model'
OUTPUT = 'output'
OPTS = 'optimisers'
ACT_LOG = 'activation_log'
WEIGHT_LOG = 'weight_log'
#step definitions
def forward(training_mode):
def step(batch, state):
if not batch: return
model = state[MODEL] if training_mode or (VALID_MODEL not in state) else state[VALID_MODEL]
if model.training != training_mode: #without the guard it's slow!
model.train(training_mode)
return {OUTPUT: state[LOSS](model(batch))}
return step
def forward_tta(tta_transforms):
def step(batch, state):
if not batch: return
model = state[MODEL] if (VALID_MODEL not in state) else state[VALID_MODEL]
if model.training:
model.train(False)
logits = torch.mean(torch.stack([model({'input': transform(batch['input'].clone())})['logits'].detach() for transform in tta_transforms], dim=0), dim=0)
return {OUTPUT: state[LOSS](dict(batch, logits=logits))}
return step
def backward(dtype=None):
def step(batch, state):
state[MODEL].zero_grad()
if not batch: return
loss = state[OUTPUT][LOSS]
if dtype is not None:
loss = loss.to(dtype)
loss.sum().backward()
return step
def opt_steps(batch, state):
if not batch: return
return {OPTS: [opt_step(**opt) for opt in state[OPTS]]}
def log_activations(node_names=('loss', 'acc')):
def step(batch, state):
if '_tmp_logs_' not in state:
state['_tmp_logs_'] = []
if batch:
state['_tmp_logs_'].extend((k, state[OUTPUT][k].detach()) for k in node_names)
else:
res = {k: to_numpy(torch.cat(xs)).astype(np.float) for k, xs in group_by_key(state['_tmp_logs_']).items()}
del state['_tmp_logs_']
return {ACT_LOG: res}
return step
epoch_stats = lambda state: {k: np.mean(v) for k, v in state[ACT_LOG].items()}
def update_ema(momentum, update_freq=1):
n = iter(count())
rho = momentum**update_freq
def step(batch, state):
if not batch: return
if (next(n) % update_freq) != 0: return
for v, ema_v in zip(state[MODEL].state_dict().values(), state[VALID_MODEL].state_dict().values()):
if not v.dtype.is_floating_point: continue #skip things like num_batches_tracked.
ema_v *= rho
ema_v += (1-rho)*v
return step
default_train_steps = (forward(training_mode=True), log_activations(('loss', 'acc')), backward(), opt_steps)
default_valid_steps = (forward(training_mode=False), log_activations(('loss', 'acc')))
def train_epoch(state, timer, train_batches, valid_batches, train_steps=default_train_steps, valid_steps=default_valid_steps,
on_epoch_end=(lambda state: state)):
train_summary, train_time = epoch_stats(on_epoch_end(reduce(train_batches, state, train_steps))), timer()
valid_summary, valid_time = epoch_stats(reduce(valid_batches, state, valid_steps)), timer(include_in_total=False) #DAWNBench rules
return {
'train': union({'time': train_time}, train_summary),
'valid': union({'time': valid_time}, valid_summary),
'total time': timer.total_time
}
#on_epoch_end
def log_weights(state, weights):
state[WEIGHT_LOG] = state.get(WEIGHT_LOG, [])
state[WEIGHT_LOG].append({k: to_numpy(v.data) for k,v in weights.items()})
return state
def fine_tune_bn_stats(state, batches, model_key=VALID_MODEL):
reduce(batches, {MODEL: state[model_key]}, [forward(True)])
return state
#misc
def warmup_cudnn(model, loss, batch):
#run forward and backward pass of the model
#to allow benchmarking of cudnn kernels
reduce([batch], {MODEL: model, LOSS: loss}, [forward(True), backward()])
torch.cuda.synchronize()
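#example (illustrative, not from the original file): call once with a representative
#batch before timing training, e.g. warmup_cudnn(model, loss, next(iter(train_batches)))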
#####################
## input whitening
#####################
def cov(X):
X = X/np.sqrt(X.size(0) - 1)
return X.t() @ X
def patches(data, patch_size=(3, 3), dtype=torch.float32):
h, w = patch_size
c = data.size(1)
return data.unfold(2,h,1).unfold(3,w,1).transpose(1,3).reshape(-1, c, h, w).to(dtype)
def eigens(patches):
n,c,h,w = patches.shape
Σ = cov(patches.reshape(n, c*h*w))
    Λ, V = torch.linalg.eigh(Σ)  #eigenvalues ascending; same result as the deprecated torch.symeig(Σ, eigenvectors=True)
return Λ.flip(0), V.t().reshape(c*h*w, c, h, w).flip(0)
def whitening_filter(Λ, V, eps=1e-2):
filt = nn.Conv2d(3, 27, kernel_size=(3,3), padding=(1,1), bias=False)
filt.weight.data = (V/torch.sqrt(Λ+eps)[:,None,None,None])
filt.weight.requires_grad = False
return filt
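#usage sketch (added for illustration; `sample_images` is an assumed NCHW float tensor,
#e.g. a few thousand already-normalised 3x32x32 training images):
#Λ, V = eigens(patches(sample_images))
#whiten = whitening_filter(Λ, V)   #frozen 3->27 conv with 3x3 kernels
#whitened = whiten(sample_images)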
|
plugins/tasks/ansible/src/main/resources/com/walmartlabs/concord/plugins/ansible/callback/concord_out_vars.py | 700software/concord | 158 | 11153289 | <filename>plugins/tasks/ansible/src/main/resources/com/walmartlabs/concord/plugins/ansible/callback/concord_out_vars.py<gh_stars>100-1000
import os
import json
from ansible.plugins.callback import CallbackBase
from concord_ansible_stats import ConcordAnsibleStats
try:
from __main__ import cli
except ImportError:
cli = None
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_NAME = 'concord_out_vars'
CALLBACK_NEEDS_WHITELIST = False
def __init__(self):
super(CallbackModule, self).__init__()
self.out_vars = None
if "CONCORD_OUT_VARS" in os.environ:
self.out_vars = [v.strip() for v in os.environ["CONCORD_OUT_VARS"].split(",")]
self.out_vars_file_name = None
if "CONCORD_OUT_VARS_FILE" in os.environ:
self.out_vars_file_name = os.environ["CONCORD_OUT_VARS_FILE"]
print("Saving out variables:", self.out_vars)
def playbook_on_stats(self, stats):
if not self.out_vars:
return
result = dict()
all_vars = self.var_manager._nonpersistent_fact_cache
for fact in self.out_vars:
fact_by_host = dict()
for host, vars in all_vars.items():
if fact in vars:
fact_by_host[host] = vars[fact]
result[fact] = fact_by_host
if '_stats' in self.out_vars:
result['_stats'] = ConcordAnsibleStats.build_stats_data(stats)
        with open(self.out_vars_file_name, "w") as target_file:
            target_file.write(json.dumps(result, indent=2))
print("Variables saved to:", self.out_vars_file_name)
def v2_playbook_on_play_start(self, play):
self.var_manager = play.get_variable_manager()
|
glslc/test/working_directory.py | hbirchtree/shaderc | 2,151 | 11153305 | # Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import expect
from environment import File, Directory
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader
MINIMAL_SHADER = '#version 140\nvoid main() {}'
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirNoArg(expect.ErrorMessage):
"""Tests -working-directory. Behavior cribbed from Clang."""
glslc_args = ['-working-directory']
expected_error = [
"glslc: error: argument to '-working-directory' is missing "
'(expected 1 value)\n',
'glslc: error: no input files\n']
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirEqNoArg(expect.ErrorMessage):
"""Tests -working-directory=<empty>. Behavior cribbed from Clang."""
glslc_args = ['-working-directory=']
expected_error = ['glslc: error: no input files\n']
EMPTY_SHADER_IN_SUBDIR = Directory(
'subdir', [File('shader.vert', MINIMAL_SHADER)])
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirEqNoArgCompileFile(expect.ValidNamedObjectFile):
"""Tests -working-directory=<empty> when compiling input file."""
environment = Directory('.', [EMPTY_SHADER_IN_SUBDIR])
glslc_args = ['-c', '-working-directory=', 'subdir/shader.vert']
# Output file should be generated into subdir/.
expected_object_filenames = ('subdir/shader.vert.spv',)
# @inside_glslc_testsuite('WorkDir')
class TestMultipleWorkDir(expect.ValidNamedObjectFile):
"""Tests that if there are multiple -working-directory=<dir> specified,
only the last one takes effect."""
environment = Directory('.', [EMPTY_SHADER_IN_SUBDIR])
glslc_args = ['-c', '-working-directory=i-dont-exist',
'-working-directory', 'i-think/me-neither',
'-working-directory=subdir', 'shader.vert']
# Output file should be generated into subdir/.
expected_object_filenames = ('subdir/shader.vert.spv',)
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirPosition(expect.ValidNamedObjectFile):
"""Tests that -working-directory=<dir> affects all files before and after
it on the command line."""
environment = Directory('subdir', [
File('shader.vert', MINIMAL_SHADER),
File('cool.frag', MINIMAL_SHADER),
File('bla.vert', MINIMAL_SHADER)
])
glslc_args = ['-c', 'shader.vert', 'bla.vert',
'-working-directory=subdir', 'cool.frag']
# Output file should be generated into subdir/.
expected_object_filenames = (
'subdir/shader.vert.spv', 'subdir/cool.frag.spv', 'subdir/bla.vert.spv')
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirDeepDir(expect.ValidNamedObjectFile):
"""Tests that -working-directory=<dir> works with directory hierarchies."""
environment = Directory('subdir', [
Directory('subsubdir', [
File('one.vert', MINIMAL_SHADER),
File('two.frag', MINIMAL_SHADER)
]),
File('zero.vert', MINIMAL_SHADER)
])
glslc_args = ['-c', 'zero.vert', 'subsubdir/one.vert',
'subsubdir/two.frag', '-working-directory=subdir']
# Output file should be generated into subdir/.
expected_object_filenames = (
'subdir/zero.vert.spv', 'subdir/subsubdir/one.vert.spv',
'subdir/subsubdir/two.frag.spv')
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirCompileFile(expect.ValidNamedObjectFile):
"""Tests -working-directory=<dir> when compiling input file."""
environment = Directory('.', [EMPTY_SHADER_IN_SUBDIR])
glslc_args = ['-c', '-working-directory=subdir', 'shader.vert']
# Output file should be generated into subdir/.
expected_object_filenames = ('subdir/shader.vert.spv',)
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirCompileFileOutput(expect.ValidNamedObjectFile):
"""Tests -working-directory=<dir> when compiling input file and specifying
output filename."""
environment = Directory('.', [
Directory('subdir', [
Directory('bin', []),
File('shader.vert', MINIMAL_SHADER)
])
])
glslc_args = ['-c', '-o', 'bin/spv', '-working-directory=subdir',
'shader.vert']
# Output file should be generated into subdir/bin/.
expected_object_filenames = ('subdir/bin/spv',)
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirArgNoEq(expect.ValidNamedObjectFile):
"""Tests -working-directory <dir>."""
environment = Directory('.', [EMPTY_SHADER_IN_SUBDIR])
glslc_args = ['-working-directory', 'subdir', 'shader.vert']
expected_object_filenames = ('a.spv',)
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirEqInArg(expect.ValidNamedObjectFile):
"""Tests -working-directory=<dir-with-equal-sign-inside>."""
environment = Directory('.', [
Directory('=subdir', [File('shader.vert', MINIMAL_SHADER)]),
])
glslc_args = ['-working-directory==subdir', 'shader.vert']
expected_object_filenames = ('a.spv',)
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirCompileFileAbsolutePath(expect.ValidObjectFile):
"""Tests -working-directory=<dir> when compiling input file with absolute
path."""
shader = FileShader(MINIMAL_SHADER, '.vert')
glslc_args = ['-c', '-working-directory=subdir', shader]
# The -working-directory flag should not affect the placement of the link file.
# The following tests ensure that.
class WorkDirDoesntAffectLinkedFile(expect.ValidNamedObjectFile):
"""A base class for tests asserting that -working-directory has no impact
on the location of the output link file.
"""
environment = Directory('.', [
Directory('subdir', [
File('shader.vert', MINIMAL_SHADER),
# Try to fake glslc into putting the linked file here, though it
# shouldn't (because -working-directory doesn't impact -o).
Directory('bin', [])]),
File('shader.vert', "fake file, doesn't compile."),
Directory('bin', [])])
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirLinkFileDefaultLocation(WorkDirDoesntAffectLinkedFile):
"""Tests that -working-directory doesn't impact the default link-file
location.
"""
glslc_args = ['-working-directory=subdir', 'shader.vert']
expected_object_filenames = ('a.spv',)
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirLinkFileExplicit(WorkDirDoesntAffectLinkedFile):
"""Tests that -working-directory doesn't impact the named link-file
location.
"""
glslc_args = ['-o', 'b.spv', '-working-directory=subdir', 'shader.vert']
expected_object_filenames = ('b.spv',)
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirLinkFileInSubdir(WorkDirDoesntAffectLinkedFile):
"""Tests that -working-directory doesn't impact the link-file sent into an
existing subdirectory.
"""
glslc_args = ['-o', 'bin/spv', '-working-directory=subdir', 'shader.vert']
expected_object_filenames = ('bin/spv',)
# @inside_glslc_testsuite('WorkDir')
class TestWorkDirLinkFileInvalidPath(expect.ErrorMessage):
"""Tests that -working-directory doesn't impact the error generated for an
invalid -o path.
"""
environment = Directory('.', [
Directory('subdir', [
File('shader.vert', MINIMAL_SHADER),
Directory('missing', [])]), # Present here, but missing in parent.
File('shader.vert', "fake file, doesn't compile.")])
glslc_args = [
'-o', 'missing/spv', '-working-directory=subdir', 'shader.vert']
expected_error = ['glslc: error: cannot open output file: ',
"'missing/spv': No such file or directory\n"]
|
utime/callbacks/__init__.py | learning310/U-Time | 138 | 11153318 | from .callbacks import Validation, MaxTrainingTime, MemoryConsumption, CarbonUsageTracking
|
examples/basic/lights.py | hadivafaii/vedo | 836 | 11153345 | """Set custom lights to a 3D scene"""
from vedo import *
man = Mesh(dataurl+'man.vtk').c('white').lighting('glossy')
p1 = Point([1,0,1], c='y')
p2 = Point([0,0,2], c='r')
p3 = Point([-1,-0.5,-1], c='b')
p4 = Point([0,1,0], c='k')
# Add light sources at the given positions
l1 = Light(p1, c='y') # p1 can simply be [1,0,1]
l2 = Light(p2, c='r')
l3 = Light(p3, c='b')
l4 = Light(p4, c='w', intensity=0.5)
show(man, l1, l2, l3, l4, p1, p2, p3, p4, __doc__, axes=1, viewup='z').close()
#####################################################
##### Equivalent code using a Plotter instance: #####
#####################################################
# plt = Plotter(axes=1)
# plt += [man, p1, p2, p3, p4, l1, l2, l3, l4]
# plt.show(viewup='z')
#####################################################
|
config/scripts/topprocess/top.py | hardyoyo/Mutate | 1,488 | 11153346 | <reponame>hardyoyo/Mutate
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import json
from pprint import pprint
import sys
import os
import time
def printcmd(c):
output({
'name': c[0],
'command':'kill -9 '+c[1],
'subtext':'PID: '+c[1]+', CPU: '+c[2]+'%, RAM: '+c[3]+'%, USER:'+c[4]
})
def nofoundprint():
output({'name': 'Nothing found', 'subtext': ''})
def errorprint():
output({'name': 'Something went wrong', 'subtext': 'config error'})
def shortprint():
output({'name': 'Usage', 'subtext': '[c] ordered by %CPU, [m] ordered by %MEM'})
output({'name': 'top c', 'subtext': 'sort by %CPU'})
output({'name': 'top m', 'subtext': 'sort by %MEM'})
defaultoutput = {
'name' : '',
'command' : '',
'icon' : '',
'subtext' : ''
}
def output(o):
temp = defaultoutput.copy()
temp.update(o)
print "["+temp['name']+"]"
print "command="+temp['command']
print "icon="+temp['icon']
print "subtext="+temp['subtext']
if len(sys.argv) == 0:
errorprint()
sys.exit(-1)
if len(sys.argv) == 1:
shortprint()
sys.exit(-1)
if len(sys.argv) != 2:
shortprint()
sys.exit(-1)
lines = []
cmds = []
if sys.argv[1] == 'c':
f = os.popen('ps axo comm,pid,pcpu,pmem,user k -pcpu |head')
elif sys.argv[1] == 'm':
f = os.popen('ps axo comm,pid,pcpu,pmem,user k -pmem |head')
else:
shortprint()
sys.exit(0)
res = f.read()
res = res.split('\n')
if len(res)==0:
errorprint()
sys.exit(-1)
for i in range(1, len(res)):
cmds.append(res[i].split())
del cmds[-1]
for i in range(0, len(cmds)):
printcmd(cmds[i])
|
pinax_theme_bootstrap/models.py | Harshalszz/pinax-theme-bootstrap | 113 | 11153355 | <filename>pinax_theme_bootstrap/models.py<gh_stars>100-1000
# This file is intentionally left blank to allow Django to load the app.
|
security/resteasy-crypto/src/test/resources/smime_encrypted_signed.py | gytis/Resteasy | 841 | 11153372 | from M2Crypto import BIO, SMIME, X509
s = SMIME.SMIME()
# Load private key and cert.
s.load_key('mycert-private.pem', 'mycert.pem')
# Load the signed/encrypted data.
p7, data = SMIME.smime_load_pkcs7('target/python_encrypted_signed.txt')
# After the above step, 'data' == None.
# Decrypt p7. 'out' now contains a PKCS #7 signed blob.
out = s.decrypt(p7)
# Load the signer's cert.
x509 = X509.load_cert('mycert.pem')
sk = X509.X509_Stack()
sk.push(x509)
s.set_x509_stack(sk)
# Load the signer's CA cert. In this case, because the signer's
# cert is self-signed, it is the signer's cert itself.
st = X509.X509_Store()
st.load_info('mycert.pem')
s.set_x509_store(st)
# Recall 'out' contains a PKCS #7 blob.
# Transform 'out'; verify the resulting PKCS #7 blob.
p7_bio = BIO.MemoryBuffer(out)
p7, data = SMIME.smime_load_pkcs7_bio(p7_bio)
v = s.verify(p7, data)
print v |
backbone/activation.py | PINTO0309/micronet | 221 | 11153380 | import torch
import torch.nn as nn
import torch.nn.functional as F
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
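# Worked examples (added for illustration):
# _make_divisible(37, 8) -> 40 (rounded to the nearest multiple of 8)
# _make_divisible(10, 8) -> 16 (8 would drop more than 10% below 10, so round up instead)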
########################################################################
# sigmoid and tanh
########################################################################
# h_sigmoid: x in [-3, 3] maps to y in [0, h_max]
class h_sigmoid(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max / 6
def forward(self, x):
return self.relu(x + 3) * self.h_max
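# e.g. (illustrative) h_sigmoid()(torch.tensor([-3., 0., 3.])) -> tensor([0.0, 0.5, 1.0]) with h_max=1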
# h_tanh x: [-3, 3], y: [-h_max, h_max]
class h_tanh(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_tanh, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max
def forward(self, x):
return self.relu(x + 3)*self.h_max / 3 - self.h_max
########################################################################
# wrap functions
########################################################################
def get_act_layer(inp, oup, mode='SE1', act_relu=True, act_max=2, act_bias=True, init_a=[1.0, 0.0], reduction=4, init_b=[0.0, 0.0], g=None, act='relu', expansion=True):
layer = None
if mode == 'SE1':
layer = nn.Sequential(
SELayer(inp, oup, reduction=reduction),
nn.ReLU6(inplace=True) if act_relu else nn.Sequential()
)
elif mode == 'SE0':
layer = nn.Sequential(
SELayer(inp, oup, reduction=reduction),
)
elif mode == 'NA':
layer = nn.ReLU6(inplace=True) if act_relu else nn.Sequential()
elif mode == 'LeakyReLU':
layer = nn.LeakyReLU(inplace=True) if act_relu else nn.Sequential()
elif mode == 'RReLU':
layer = nn.RReLU(inplace=True) if act_relu else nn.Sequential()
elif mode == 'PReLU':
layer = nn.PReLU() if act_relu else nn.Sequential()
elif mode == 'DYShiftMax':
layer = DYShiftMax(inp, oup, act_max=act_max, act_relu=act_relu, init_a=init_a, reduction=reduction, init_b=init_b, g=g, expansion=expansion)
return layer
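# e.g. (illustrative) get_act_layer(64, 64, mode='DYShiftMax', reduction=4, g=(1, 2), act_max=2.0)
# returns a DYShiftMax activation, while mode='NA' falls back to a plain ReLU6.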
########################################################################
# dynamic activation layers (SE, DYShiftMax, etc)
########################################################################
class SELayer(nn.Module):
def __init__(self, inp, oup, reduction=4):
super(SELayer, self).__init__()
self.oup = oup
self.avg_pool = nn.AdaptiveAvgPool2d(1)
# determine squeeze
squeeze = get_squeeze_channels(inp, reduction)
print('reduction: {}, squeeze: {}/{}'.format(reduction, inp, squeeze))
self.fc = nn.Sequential(
nn.Linear(inp, squeeze),
nn.ReLU(inplace=True),
nn.Linear(squeeze, oup),
h_sigmoid()
)
def forward(self, x):
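        # x may be a plain tensor or an [x_in, x_out] pair: channel statistics are
        # pooled from x_in while the resulting gate is applied to x_out.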
if isinstance(x, list):
x_in = x[0]
x_out = x[1]
else:
x_in = x
x_out = x
b, c, _, _ = x_in.size()
y = self.avg_pool(x_in).view(b, c)
y = self.fc(y).view(b, self.oup, 1, 1)
return x_out * y
class DYShiftMax(nn.Module):
def __init__(self, inp, oup, reduction=4, act_max=1.0, act_relu=True, init_a=[0.0, 0.0], init_b=[0.0, 0.0], relu_before_pool=False, g=None, expansion=False):
super(DYShiftMax, self).__init__()
self.oup = oup
self.act_max = act_max * 2
self.act_relu = act_relu
self.avg_pool = nn.Sequential(
nn.ReLU(inplace=True) if relu_before_pool == True else nn.Sequential(),
nn.AdaptiveAvgPool2d(1)
)
self.exp = 4 if act_relu else 2
self.init_a = init_a
self.init_b = init_b
# determine squeeze
squeeze = _make_divisible(inp // reduction, 4)
if squeeze < 4:
squeeze = 4
print('reduction: {}, squeeze: {}/{}'.format(reduction, inp, squeeze))
print('init-a: {}, init-b: {}'.format(init_a, init_b))
self.fc = nn.Sequential(
nn.Linear(inp, squeeze),
nn.ReLU(inplace=True),
nn.Linear(squeeze, oup*self.exp),
h_sigmoid()
)
        if g is None:
            self.g = 1
        else:
            self.g = g[1]
if self.g !=1 and expansion:
self.g = inp // self.g
print('group shuffle: {}, divide group: {}'.format(self.g, expansion))
self.gc = inp//self.g
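        # Build a fixed "shift" permutation: view the channels as (g, gc), rotate the groups
        # by one and the channels within each group by one, so that x2 = x[:, index] pairs
        # every channel with a shifted counterpart (a channel-shuffle with no learned parameters).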
index=torch.Tensor(range(inp)).view(1,inp,1,1)
index=index.view(1,self.g,self.gc,1,1)
indexgs = torch.split(index, [1, self.g-1], dim=1)
indexgs = torch.cat((indexgs[1], indexgs[0]), dim=1)
indexs = torch.split(indexgs, [1, self.gc-1], dim=2)
indexs = torch.cat((indexs[1], indexs[0]), dim=2)
self.index = indexs.view(inp).type(torch.LongTensor)
self.expansion = expansion
def forward(self, x):
x_in = x
x_out = x
b, c, _, _ = x_in.size()
y = self.avg_pool(x_in).view(b, c)
y = self.fc(y).view(b, self.oup*self.exp, 1, 1)
y = (y-0.5) * self.act_max
n2, c2, h2, w2 = x_out.size()
x2 = x_out[:,self.index,:,:]
if self.exp == 4:
a1, b1, a2, b2 = torch.split(y, self.oup, dim=1)
a1 = a1 + self.init_a[0]
a2 = a2 + self.init_a[1]
b1 = b1 + self.init_b[0]
b2 = b2 + self.init_b[1]
z1 = x_out * a1 + x2 * b1
z2 = x_out * a2 + x2 * b2
out = torch.max(z1, z2)
elif self.exp == 2:
a1, b1 = torch.split(y, self.oup, dim=1)
a1 = a1 + self.init_a[0]
b1 = b1 + self.init_b[0]
out = x_out * a1 + x2 * b1
return out
def get_squeeze_channels(inp, reduction):
if reduction == 4:
squeeze = inp // reduction
else:
squeeze = _make_divisible(inp // reduction, 4)
return squeeze
|
python/clx/tests/test_asset_classification.py | shaneding/clx | 143 | 11153394 | <reponame>shaneding/clx<gh_stars>100-1000
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cudf
import clx
from clx.analytics.asset_classification import AssetClassification
import torch
from os import path
import random
import pandas as pd
column1 = [random.randint(1, 24) for _ in range(9000)]
column2 = [random.randint(1, 4) for _ in range(9000)]
column3 = [random.randint(1, 9) for _ in range(9000)]
column4 = [random.randint(1, 26) for _ in range(9000)]
column5 = [random.randint(1, 3) for _ in range(9000)]
column6 = [random.randint(1, 9) for _ in range(9000)]
column7 = [random.randint(1, 37) for _ in range(9000)]
column8 = [random.randint(1, 8) for _ in range(9000)]
column9 = [random.randint(1, 4) for _ in range(9000)]
column10 = [random.randint(1, 11) for _ in range(9000)]
label = [random.randint(0, 6) for _ in range(9000)]
train_pd = pd.DataFrame(list(zip(column1, column2, column3, column4, column5, column6, column7, column8, column9, column10, label)), columns=["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "label"])
train_gdf = cudf.from_pandas(train_pd)
batch_size = 6
epochs = 15
@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_train_model_mixed_cat_cont(tmpdir, train_gdf):
train_gdf = train_gdf.copy()
cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8"]
cont_cols = ["9", "10"]
train_gdf[cont_cols] = normalize_conts(train_gdf[cont_cols])
ac = AssetClassification()
ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
if torch.cuda.is_available():
assert isinstance(ac._model, clx.analytics.model.tabular_model.TabularModel)
@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_train_model_all_cat(tmpdir, train_gdf):
train_gdf = train_gdf.copy()
cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
cont_cols = []
ac = AssetClassification()
ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
if torch.cuda.is_available():
assert isinstance(ac._model, clx.analytics.model.tabular_model.TabularModel)
@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_train_model_all_cont(tmpdir, train_gdf):
train_gdf = train_gdf.copy()
cont_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
cat_cols = []
train_gdf[cont_cols] = normalize_conts(train_gdf[cont_cols])
ac = AssetClassification()
ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
if torch.cuda.is_available():
assert isinstance(ac._model, clx.analytics.model.tabular_model.TabularModel)
@pytest.mark.parametrize("train_gdf", [train_gdf])
def test_predict(tmpdir, train_gdf):
if torch.cuda.is_available():
cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
cont_cols = []
ac = AssetClassification()
ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
# predict
test_gdf = train_gdf.head()
test_gdf.drop("label", axis=1)
preds = ac.predict(test_gdf, cat_cols, cont_cols)
assert isinstance(preds, cudf.core.series.Series)
assert len(preds) == len(test_gdf)
assert preds.dtype == int
def test_save_model(tmpdir):
if torch.cuda.is_available():
cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
cont_cols = []
ac = AssetClassification()
ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
# save model
ac.save_model(str(tmpdir.join("clx_ac.mdl")))
assert path.exists(str(tmpdir.join("clx_ac.mdl")))
def test_load_model(tmpdir):
if torch.cuda.is_available():
cat_cols = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
cont_cols = []
ac = AssetClassification()
ac.train_model(train_gdf, cat_cols, cont_cols, "label", batch_size, epochs)
# save model
ac.save_model(str(tmpdir.join("clx_ac.mdl")))
assert path.exists(str(tmpdir.join("clx_ac.mdl")))
# load model
ac2 = AssetClassification()
ac2.load_model(str(tmpdir.join("clx_ac.mdl")))
assert isinstance(ac2._model, clx.analytics.model.tabular_model.TabularModel)
def normalize_conts(gdf):
means, stds = (gdf.mean(0), gdf.std(ddof=0))
gdf = (gdf - means) / stds
return gdf
|
test/specs/openapi/test_negative.py | vyachin/schemathesis | 659 | 11153399 | from copy import deepcopy
from test.utils import assert_requests_call
import pytest
from hypothesis import HealthCheck, given, settings
from hypothesis import strategies as st
from hypothesis_jsonschema import from_schema
from hypothesis_jsonschema._canonicalise import FALSEY, canonicalish
from jsonschema import Draft4Validator
import schemathesis
from schemathesis import DataGenerationMethod
from schemathesis.specs.openapi._hypothesis import STRING_FORMATS, is_valid_header
from schemathesis.specs.openapi.constants import LOCATION_TO_CONTAINER
from schemathesis.specs.openapi.negative import mutated, negative_schema
from schemathesis.specs.openapi.negative.mutations import (
MutationContext,
MutationResult,
change_items,
change_properties,
change_type,
negate_constraints,
prevent_unsatisfiable_schema,
remove_required_property,
)
from schemathesis.specs.openapi.utils import is_header_location
OBJECT_SCHEMA = {
"type": "object",
"properties": {
"foo": {"type": "string"},
"bar": {"type": "integer"},
"baf": {"type": ["integer"]},
"baz": {"type": ["array", "object"]},
"bad": {},
},
"required": [
"foo",
"bar",
"baf",
"baz",
],
}
ARRAY_SCHEMA = {"type": "array", "items": OBJECT_SCHEMA}
EMPTY_OBJECT_SCHEMA = {
"type": "object",
}
INTEGER_SCHEMA = {
"type": "integer",
}
def validate_schema(schema):
Draft4Validator.check_schema(schema)
@pytest.mark.parametrize(
"location, schema",
[(location, OBJECT_SCHEMA) for location in sorted(LOCATION_TO_CONTAINER)]
+ [
# These schemas are only possible for "body"
("body", EMPTY_OBJECT_SCHEMA),
("body", ARRAY_SCHEMA),
("body", INTEGER_SCHEMA),
],
)
@given(data=st.data())
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
def test_top_level_strategy(data, location, schema):
if location != "body" and schema.get("type") == "object":
# It always comes this way from Schemathesis
schema["additionalProperties"] = False
validate_schema(schema)
validator = Draft4Validator(schema)
schema = deepcopy(schema)
instance = data.draw(
negative_schema(
schema,
operation_name="GET /users/",
location=location,
media_type="application/json",
custom_formats=STRING_FORMATS,
)
)
assert not validator.is_valid(instance)
if is_header_location(location):
assert is_valid_header(instance)
@pytest.mark.parametrize(
"mutation, schema, validate",
(
# No constraints besides `type`
(negate_constraints, {"type": "integer"}, True),
# Missing type (i.e all types are possible)
(change_type, {}, True),
# All types explicitly
(change_type, {"type": ["string", "integer", "number", "object", "array", "boolean", "null"]}, True),
# No properties to remove
(remove_required_property, {}, True),
# Non-"object" type
(remove_required_property, {"type": "array"}, True),
# No properties at all
(change_properties, {}, True),
# No properties that can be mutated
(change_properties, {"properties": {"foo": {}}}, True),
# No items
(change_items, {"type": "array"}, True),
# `items` accept everything
(change_items, {"type": "array", "items": {}}, True),
(change_items, {"type": "array", "items": True}, False),
# `items` is equivalent to accept-everything schema
(change_items, {"type": "array", "items": {"uniqueItems": False}}, True),
# The first element could be anything
(change_items, {"type": "array", "items": [{}]}, True),
),
)
@given(data=st.data())
@settings(deadline=None)
def test_failing_mutations(data, mutation, schema, validate):
if validate:
validate_schema(schema)
original_schema = deepcopy(schema)
# When mutation can't be applied
# Then it returns "failure"
assert mutation(MutationContext(schema, "body", "application/json"), data.draw, schema) == MutationResult.FAILURE
# And doesn't mutate the input schema
assert schema == original_schema
@given(data=st.data())
@settings(deadline=None)
def test_change_type_urlencoded(data):
# When `application/x-www-form-urlencoded` media type is passed to `change_type`
schema = {"type": "object"}
original_schema = deepcopy(schema)
context = MutationContext(schema, "body", "application/x-www-form-urlencoded")
# Then it should not be mutated
assert change_type(context, data.draw, schema) == MutationResult.FAILURE
# And doesn't mutate the input schema
assert schema == original_schema
@pytest.mark.parametrize(
"mutation, schema",
(
(negate_constraints, {"type": "integer", "minimum": 42}),
(negate_constraints, {"minimum": 42}),
(change_type, {"type": "object"}),
(change_type, {"type": ["object", "array"]}),
(change_type, {"type": ["string", "integer", "number", "object", "array", "boolean"]}),
(remove_required_property, {"properties": {"foo": {}}, "required": ["foo"]}),
(remove_required_property, {"properties": {"foo": {}, "bar": {}}, "required": ["foo"]}),
(remove_required_property, {"required": ["foo"]}),
(change_items, {"type": "array", "items": {"type": "string"}}),
(change_items, {"type": "array", "items": {"type": "string"}, "minItems": 1}),
(change_items, {"type": "array", "items": {"type": "string"}, "minItems": 1, "maxItems": 1}),
(change_items, {"type": "array", "items": [{"type": "string"}]}),
(change_items, {"type": "array", "items": [{"type": "string"}], "minItems": 1}),
(change_items, {"type": "array", "items": [{"type": "string"}], "minItems": 1, "maxItems": 1}),
(change_properties, {"properties": {"foo": {"type": "integer"}}, "type": "object", "required": ["foo"]}),
(change_properties, {"properties": {"foo": {"type": "integer"}}, "type": ["object"]}),
(change_properties, {"properties": {"foo": {"type": "integer"}}, "type": "object"}),
(change_properties, {"properties": {"foo": {"type": "integer"}}}),
(
change_properties,
{
"properties": {"foo": {"type": "string", "minLength": 5}, "bar": {"type": "string", "minLength": 5}},
"type": "object",
"required": ["foo", "bar"],
"additionalProperties": False,
},
),
),
)
@given(data=st.data())
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
def test_successful_mutations(data, mutation, schema):
validate_schema(schema)
validator = Draft4Validator(schema)
schema = deepcopy(schema)
# When mutation can be applied
# Then it returns "success"
assert mutation(MutationContext(schema, "body", "application/json"), data.draw, schema) == MutationResult.SUCCESS
# And the mutated schema is a valid JSON Schema
validate_schema(schema)
# And instances valid for this schema are not valid for the original one
new_instance = data.draw(from_schema(schema))
assert not validator.is_valid(new_instance)
@pytest.mark.parametrize(
"schema",
(
{
"type": "object",
"properties": {
"foo": {"type": "string"},
},
"required": [
"foo",
],
"additionalProperties": False,
},
{
"type": "object",
"properties": {
"foo": {"type": "string", "minLength": 5},
},
"required": [
"foo",
],
"additionalProperties": False,
},
),
)
@given(data=st.data())
@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
def test_path_parameters_are_string(data, schema):
validator = Draft4Validator(schema)
new_schema = deepcopy(schema)
# When path parameters are mutated
new_schema = data.draw(mutated(new_schema, "path", None))
assert new_schema["type"] == "object"
# Then mutated schema is a valid JSON Schema
validate_schema(new_schema)
# And parameters remain primitive types
new_instance = data.draw(from_schema(new_schema))
assert not isinstance(new_instance["foo"], (list, dict))
# And there should be no additional parameters
assert len(new_instance) == 1
# And instances valid for this schema are not valid for the original one
assert not validator.is_valid(new_instance)
@pytest.mark.parametrize("key", ("components", "description"))
@given(data=st.data())
@settings(deadline=None)
def test_custom_fields_are_intact(data, key):
# When the schema contains some non-JSON Schema keywords (e.g. components from Open API)
schema = {
"type": "object",
"properties": {"X-Foo": {"type": "string", "maxLength": 5}},
"additionalProperties": False,
key: {},
}
# Then they should not be negated
new_schema = data.draw(mutated(schema, "body", "application/json"))
assert key in new_schema
@pytest.mark.parametrize(
"left, right, expected",
(
(MutationResult.SUCCESS, MutationResult.SUCCESS, MutationResult.SUCCESS),
(MutationResult.FAILURE, MutationResult.SUCCESS, MutationResult.SUCCESS),
(MutationResult.SUCCESS, MutationResult.FAILURE, MutationResult.SUCCESS),
(MutationResult.FAILURE, MutationResult.FAILURE, MutationResult.FAILURE),
),
)
def test_mutation_result_success(left, right, expected):
assert left | right == expected
left |= right
assert left == expected
@pytest.mark.parametrize(
"schema",
(
{"minimum": 5, "exclusiveMinimum": True},
{"maximum": 5, "exclusiveMaximum": True},
{"maximum": 5, "exclusiveMaximum": True, "minimum": 1, "exclusiveMinimum": True},
),
)
@given(data=st.data())
@settings(deadline=None)
def test_negate_constraints_keep_dependencies(data, schema):
# When `negate_constraints` is used
schema = deepcopy(schema)
negate_constraints(MutationContext(schema, "body", "application/json"), data.draw, schema)
# Then it should always produce valid schemas
validate_schema(schema)
# E.g. `exclusiveMaximum` / `exclusiveMinimum` only work when `maximum` / `minimum` are present in the same schema
@given(data=st.data())
@settings(deadline=None)
def test_no_unsatisfiable_schemas(data):
schema = {"type": "object", "required": ["foo"]}
mutated_schema = data.draw(mutated(schema, location="body", media_type="application/json"))
assert canonicalish(mutated_schema) != FALSEY
@pytest.mark.parametrize(
"schema, new_type",
(
({"type": "object", "required": ["a"]}, "string"),
({"required": ["a"], "not": {"maxLength": 5}}, "string"),
),
)
def test_prevent_unsatisfiable_schema(schema, new_type):
prevent_unsatisfiable_schema(schema, new_type)
assert canonicalish(schema) != FALSEY
ARRAY_PARAMETER = {"type": "array", "minItems": 1, "items": {"type": "string", "format": "ipv4"}}
OBJECT_PARAMETER = {
"type": "object",
"minProperties": 1,
"properties": {"foo": {"type": "string", "format": "ipv4"}, "bar": {"type": "string", "format": "ipv4"}},
"additionalProperties": False,
}
@pytest.mark.parametrize("explode", (True, False))
@pytest.mark.parametrize(
"location, schema, style",
[("query", ARRAY_PARAMETER, style) for style in ("pipeDelimited", "spaceDelimited")]
+ [("query", OBJECT_PARAMETER, "deepObject")]
+ [
("path", parameter, style)
for parameter in [OBJECT_PARAMETER, ARRAY_PARAMETER]
for style in ("simple", "label", "matrix")
],
)
@pytest.mark.hypothesis_nested
def test_non_default_styles(empty_open_api_3_schema, location, schema, style, explode):
# See GH-1208
# When the schema contains a parameter with a not-default "style"
empty_open_api_3_schema["paths"]["/bug"] = {
"get": {
"parameters": [
{"name": "key", "in": location, "required": True, "style": style, "explode": explode, "schema": schema},
],
"responses": {"200": {"description": "OK"}},
}
}
schema = schemathesis.from_dict(empty_open_api_3_schema)
@given(case=schema["/bug"]["get"].as_strategy(data_generation_method=DataGenerationMethod.negative))
@settings(deadline=None, max_examples=10, suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much])
def test(case):
assert_requests_call(case)
test()
|