blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3–616) | content_id (string, len 40) | detected_licenses (sequence, len 0–112) | license_type (string, 2 classes) | repo_name (string, len 5–115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, len 3 to 10.2M) | authors (sequence, len 1) | author_id (string, len 1–132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6a6fa620c02a9969300f1da35177f8acf5abe1d9 | 585bc7a21664e7a371950c4811723aae92256c92 | /test.py | f3c46723e224226df8b89453fcbcfb8851c88fe4 | [] | no_license | JacquesLucke/ml_test | bfb421ba6c423bfda545dac7aeabbcc81d71abd8 | 3c743b0b60dcf492a64bea2ed16c7edc4e6a6809 | refs/heads/master | 2023-06-03T20:24:46.733564 | 2021-06-19T17:08:10 | 2021-06-19T17:08:10 | 378,464,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | import numpy as np
weights = np.array(
[
[[1, 2], [3, 4]],
[[-1, -2], [-3, -4]],
[[0, 3], [-1, 2]],
]
)
data_in = np.array(
[
[1, 2],
[3, 4],
]
)
print(weights.shape)
print(weights)
print(data_in.shape)
print(data_in)
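# axes=2 contracts the last two axes of weights (3, 2, 2) with data_in (2, 2):
# result[k] = sum_ij weights[k, i, j] * data_in[i, j], so the output has shape (3,).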
print(np.tensordot(weights, data_in, 2))
expected_result = np.array(
[
1 + 4 + 9 + 16, # 30
-1 - 4 - 9 - 16, # -30
0 + 6 - 3 + 8, # 11
]
)
factor = np.array([10, 100, 1])
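# axes=1 contracts factor (3,) with the first axis of weights (3, 2, 2):
# result = sum_k factor[k] * weights[k], so the output has shape (2, 2).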
print(np.tensordot(factor, weights, 1))
# factor = np.broadcast_to(factor, (2, 2, 3))
# print(factor)
# print(factor * weights)
| [
"[email protected]"
] | |
8de2486482a883800948f7d6d08d5ce1676ba874 | 6466eef5477db250879a74935b3b776dc878ff3b | /ideas/views.py | e959cd77ba2866f690fca657fdcb6afc1b18108b | [] | no_license | BakdauletBolatE/django_ideas | 8edb61a569f436865283e82edba3377a150665a8 | ef0258f3aae0c090d38a5098d175bceaddcf67af | refs/heads/master | 2023-03-12T00:02:04.969353 | 2021-03-02T19:41:00 | 2021-03-02T19:41:00 | 324,287,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | from django.shortcuts import render, redirect
from Content.models import Ideas,ICategory
def home(request):
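    """Home page: render up to three ideas and the first four idea categories."""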
lastideas = Ideas.objects.all()[:3]
categories = ICategory.objects.all()[:4]
data = {
'categories':categories,
'lastideas':lastideas
}
return render(request,'home/home.html',data)
| [
"[email protected]"
] | |
715d48294f51deec24a2be0e2e499cb445e21a45 | 8e52c27f1b2823db67db4438b2b7e22c18254eca | /pytorch/pytorchcv/models/sepreresnet.py | 1b32e85718d516056c6f4de7cde5a45e85de714d | [
"MIT"
] | permissive | earhian/imgclsmob | 5582f5f2d4062b620eecc28d5c4c9245fea47291 | c87c0942420876941868c016211073dec4392e4d | refs/heads/master | 2020-04-12T02:13:55.258601 | 2018-12-17T20:38:19 | 2018-12-17T20:38:19 | 162,242,486 | 1 | 0 | MIT | 2018-12-18T06:40:42 | 2018-12-18T06:40:41 | null | UTF-8 | Python | false | false | 13,510 | py | """
SE-PreResNet, implemented in PyTorch.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEPreResNet', 'sepreresnet18', 'sepreresnet34', 'sepreresnet50', 'sepreresnet50b', 'sepreresnet101',
'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200', 'sepreresnet200b']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, SEBlock
from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation
class SEPreResUnit(nn.Module):
"""
SE-PreResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer of the block.
"""
def __init__(self,
in_channels,
out_channels,
stride,
bottleneck,
conv1_stride):
super(SEPreResUnit, self).__init__()
self.resize_identity = (in_channels != out_channels) or (stride != 1)
if bottleneck:
self.body = PreResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
conv1_stride=conv1_stride)
else:
self.body = PreResBlock(
in_channels=in_channels,
out_channels=out_channels,
stride=stride)
self.se = SEBlock(channels=out_channels)
if self.resize_identity:
self.identity_conv = conv1x1(
in_channels=in_channels,
out_channels=out_channels,
stride=stride)
def forward(self, x):
identity = x
x, x_pre_activ = self.body(x)
x = self.se(x)
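        # The SE block recalibrates channel responses before the residual add;
        # when resizing is needed, the shortcut branches off the pre-activation
        # tensor, as in the pre-activation ResNet design.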
if self.resize_identity:
identity = self.identity_conv(x_pre_activ)
x = x + identity
return x
class SEPreResNet(nn.Module):
"""
SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
conv1_stride,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(SEPreResNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", PreResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 1 if (i == 0) or (j != 0) else 2
stage.add_module("unit{}".format(j + 1), SEPreResUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bottleneck=bottleneck,
conv1_stride=conv1_stride))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_sepreresnet(blocks,
conv1_stride=True,
model_name=None,
pretrained=False,
root=os.path.join('~', '.torch', 'models'),
**kwargs):
"""
Create SE-PreResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
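    # Expand per-stage widths into one entry per unit, e.g. layers [3, 4, 6, 3]
    # with widths [256, 512, 1024, 2048] -> [[256]*3, [512]*4, [1024]*6, [2048]*3].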
net = SEPreResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def sepreresnet18(**kwargs):
"""
SE-PreResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=18, model_name="sepreresnet18", **kwargs)
def sepreresnet34(**kwargs):
"""
SE-PreResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=34, model_name="sepreresnet34", **kwargs)
def sepreresnet50(**kwargs):
"""
SE-PreResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=50, model_name="sepreresnet50", **kwargs)
def sepreresnet50b(**kwargs):
"""
SE-PreResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=50, conv1_stride=False, model_name="sepreresnet50b", **kwargs)
def sepreresnet101(**kwargs):
"""
SE-PreResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=101, model_name="sepreresnet101", **kwargs)
def sepreresnet101b(**kwargs):
"""
SE-PreResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=101, conv1_stride=False, model_name="sepreresnet101b", **kwargs)
def sepreresnet152(**kwargs):
"""
SE-PreResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=152, model_name="sepreresnet152", **kwargs)
def sepreresnet152b(**kwargs):
"""
SE-PreResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=152, conv1_stride=False, model_name="sepreresnet152b", **kwargs)
def sepreresnet200(**kwargs):
"""
SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an
experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs)
def sepreresnet200b(**kwargs):
"""
SE-PreResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=200, conv1_stride=False, model_name="sepreresnet200b", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
from torch.autograd import Variable
pretrained = False
models = [
sepreresnet18,
sepreresnet34,
sepreresnet50,
sepreresnet50b,
sepreresnet101,
sepreresnet101b,
sepreresnet152,
sepreresnet152b,
sepreresnet200,
sepreresnet200b,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sepreresnet18 or weight_count == 11776928)
assert (model != sepreresnet34 or weight_count == 21957204)
assert (model != sepreresnet50 or weight_count == 28080472)
assert (model != sepreresnet50b or weight_count == 28080472)
assert (model != sepreresnet101 or weight_count == 49319320)
assert (model != sepreresnet101b or weight_count == 49319320)
assert (model != sepreresnet152 or weight_count == 66814296)
assert (model != sepreresnet152b or weight_count == 66814296)
assert (model != sepreresnet200 or weight_count == 71828312)
assert (model != sepreresnet200b or weight_count == 71828312)
x = Variable(torch.randn(1, 3, 224, 224))
y = net(x)
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
| [
"[email protected]"
] | |
824db027a189587196ece3814888aad0001898d6 | d95a672d614ea547a79be582fc9e9e97a09f7d9d | /pack/ex3_18.py | 2e610648cd9fb047a28f7e2d76bec256eec5c645 | [] | no_license | vt0311/python | 26992c096512df8d0304f6d8b452a663645a8b61 | 51fa4c240b9f69a81f68d75e3f6ffdd9dada8848 | refs/heads/master | 2021-09-06T12:01:40.433969 | 2018-02-06T09:28:32 | 2018-02-06T09:28:32 | 107,950,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | print('Method 1: import package.module')
import pack.mymod1
print(dir(pack.mymod1))  # check the members defined in mymod1
print(pack.mymod1.__file__)  # path and file name
print(pack.mymod1.__name__)  # module name
print('calling functions in mymod1')
list1 = [1, 3]
list2 = [1, 2]
pack.mymod1.ListHap(list1, list2)
print('global variable from another module : ', pack.mymod1.tot)
print('Method 2: from package import module')
from pack import mymod1
mymod1.Kbs()  # called as module.function
print('Method 3: from package.module import function')
from pack.mymod1 import Mbc
Mbc()  # called by the bare function name
print('reading a module whose package lives under a different path')
import pack_other.mymod2
print('called as package.module.function()')
re_hap = pack_other.mymod2.Hap(5, 3)
print('sum :', re_hap)
print('difference :', pack_other.mymod2.Cha(5, 3))
# PYTHONPATH: copy mymod3.py into the C:\Anaconda3\Lib folder beforehand.
print('practice: reading a module from a folder on PYTHONPATH')
import mymod3
print('product 1 :', mymod3.Gop(5, 3))
from mymod3 import *
print('product 2 :', Gop(10, 5))
print('\n\nreading a module from a completely unrelated folder')
print('Method 1: add that folder to PYTHONPATH.')
print('Method 2')
import sys
sys.path.append(r'c:/work')
# 'c:/work' is appended at run time, so this works fine
# (ignore the IDE's red underline below)
import mymod4
print('division :', mymod4.Nanugi(5, 3))
| [
"[email protected]"
] | |
82c582ff24e69ccb0acace9ec4ffa7596294a51b | 8a4c32783b2c8e13eca88d193c767bd25e63306c | /algo/gnn/gat.py | 6d087b9d8c3dafdbfde8dff45067056d7cf2b909 | [] | no_license | fs302/GraphWorld | 6765e4ba36d3af2ed5f820e52514096b3aeb10d7 | 9864eaca21f41117adf758f74379efa87692f5f8 | refs/heads/master | 2022-07-31T17:05:32.535380 | 2022-07-08T08:35:15 | 2022-07-08T08:35:15 | 196,156,937 | 5 | 5 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | import torch
from torch.nn import Linear
import torch.nn.functional as F
from torch_geometric.nn import GATConv
class GAT(torch.nn.Module):
def __init__(self, input_channels, hidden_channels, out_channels, heads, dropout_ratio=0.):
super().__init__()
torch.manual_seed(1234567)
self.dropout_ratio = dropout_ratio
self.conv1 = GATConv(input_channels, hidden_channels, heads)
self.conv2 = GATConv(hidden_channels * heads, out_channels)
def forward(self, x, edge_index):
x = F.dropout(x, p=self.dropout_ratio, training=self.training)
x = self.conv1(x, edge_index)
x = F.elu(x)
x = F.dropout(x, p=self.dropout_ratio, training=self.training)
x = self.conv2(x, edge_index)
        return x
| [
"[email protected]"
] | |
417dfcc3c5f7259d1d81b83fb9ee10f6e487a810 | 801f367bd19b8f2ab08669fd0a85aad7ace961ac | /rl-fmri/tmp_sort_by_covariance.py | b8f808ec8e32693436251db5c839ebcdb7657592 | [
"MIT"
] | permissive | Wendong-Huo/thesis-bodies | d91b694a6b1b6a911476573ed1ed27eb27fb000d | dceb8a36efd2cefc611f6749a52b56b9d3572f7a | refs/heads/main | 2023-04-17T18:32:38.541537 | 2021-03-12T19:53:23 | 2021-03-12T19:53:23 | 623,471,326 | 1 | 0 | null | 2023-04-04T12:45:48 | 2023-04-04T12:45:47 | null | UTF-8 | Python | false | false | 635 | py | import numpy as np
N = 5
t_org = np.arange(N)
pi = np.random.permutation(N)
x_org = np.array([np.random.randn(100)*k for k in range(N)])
S_org = np.cov(x_org)
print("Covariance of sorted time steps", S_org, sep="\n")
t_obs = t_org[pi]
x_obs = x_org[pi]
S_obs = np.cov(x_obs)
print("Covariance of unsorted time steps", S_obs, sep="\n")
#%% Using indexing S[p][:,p]
p = np.argsort(t_obs)
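# Indexing both rows and columns by p = argsort(t_obs) relabels the variables
# back into time order; covariance is equivariant under such relabeling.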
print("Reconstruction equals original:", S_obs[p][:,p] == S_org, sep="\n")
#%% Alternative using Permutation matrix
P = np.eye(N)[p]
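# Row-permutation matrix built from the identity: P @ x_obs restores time
# order, hence Cov(P @ x_obs) = P @ S_obs @ P.T.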
print("Permutation matrix", P, sep="\n")
print("Reconstruction equals original:", P@[email protected] == S_org, sep="\n") | [
"[email protected]"
] | |
c5fa64d082c79e76c983683b96b872b39b9f7cbd | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_w_M_to_W_focus_Zok_div/ch036/wiColorJ/Add2Loss/Sob_k05_s001_EroM_Mae_s001/pyr_Tcrop255_p20_j15/pyr_5s/L3/step09_5side_L3.py | 5fb954a0762d3b33eee11d933ad13a32e79db8b1 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,445 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
from tkinter import S
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_w_M_to_Wx_Wy_Wz_combine import I_w_M_to_W
from step08_b_use_G_generate_0_util import Tight_crop, Color_jit
from step09_c_train_step import Train_step_I_w_M_to_W
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
color_jit = Color_jit(do_ratio=0.6)
use_what_gen_op = I_w_M_to_W( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 0) )
use_what_train_step = Train_step_I_w_M_to_W( separate_out=True, focus=True, tight_crop=Tight_crop(pad_size=20, resize=(255, 255), jit_scale= 15), color_jit=color_jit )
use_hid_ch = 36
import time
start_time = time.time()
###############################################################################################################################################################################################
##################################
### 5side1
##################################
# "1" 3 6 10 15 21 28 36 45 55
# side1 OK 1
pyramid_1side_1__2side_1__3side_1_4side_1_5s1 = [5, 0, 0, 0, 0, 0, 5]
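# (assumption) each 7-entry list appears to give the conv-block count for each
# level of the pyramid network, symmetric about the middle (bottleneck) entry.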
# 1 "3" 6 10 15 21 28 36 45 55
# side2 OK 4
pyramid_1side_2__2side_1__3side_1_4side_1_5s1 = [5, 1, 0, 0, 0, 1, 5]
pyramid_1side_2__2side_2__3side_1_4side_1_5s1 = [5, 2, 0, 0, 0, 2, 5]
pyramid_1side_2__2side_2__3side_2_4side_1_5s1 = [5, 3, 0, 0, 0, 3, 5]
pyramid_1side_2__2side_2__3side_2_4side_2_5s1 = [5, 4, 0, 0, 0, 4, 5]
# 1 3 "6" 10 15 21 28 36 45 55
# side3 OK 10
pyramid_1side_3__2side_1__3side_1_4side_1_5s1 = [5, 1, 1, 0, 1, 1, 5]
pyramid_1side_3__2side_2__3side_1_4side_1_5s1 = [5, 2, 1, 0, 1, 2, 5]
pyramid_1side_3__2side_2__3side_2_4side_1_5s1 = [5, 3, 1, 0, 1, 3, 5]
pyramid_1side_3__2side_3__3side_1_4side_1_5s1 = [5, 2, 2, 0, 2, 2, 5]
pyramid_1side_3__2side_3__3side_2_4side_1_5s1 = [5, 3, 2, 0, 2, 3, 5]
pyramid_1side_3__2side_3__3side_3_4side_1_5s1 = [5, 3, 3, 0, 3, 3, 5]
pyramid_1side_3__2side_2__3side_2_4side_2_5s1 = [5, 4, 1, 0, 1, 4, 5]
pyramid_1side_3__2side_3__3side_2_4side_2_5s1 = [5, 4, 2, 0, 2, 4, 5]
pyramid_1side_3__2side_3__3side_3_4side_2_5s1 = [5, 4, 3, 0, 3, 4, 5]
pyramid_1side_3__2side_3__3side_3_4side_3_5s1 = [5, 4, 4, 0, 4, 4, 5]
# 1 3 6 "10" 15 21 28 36 45 55
# side4 OK 20
pyramid_1side_4__2side_1__3side_1_4side_1_5s1 = [5, 1, 1, 1, 1, 1, 5]
pyramid_1side_4__2side_2__3side_1_4side_1_5s1 = [5, 2, 1, 1, 1, 2, 5]
pyramid_1side_4__2side_2__3side_2_4side_1_5s1 = [5, 3, 1, 1, 1, 3, 5]
pyramid_1side_4__2side_3__3side_1_4side_1_5s1 = [5, 2, 2, 1, 2, 2, 5]
pyramid_1side_4__2side_3__3side_2_4side_1_5s1 = [5, 3, 2, 1, 2, 3, 5]
pyramid_1side_4__2side_3__3side_3_4side_1_5s1 = [5, 3, 3, 1, 3, 3, 5]
pyramid_1side_4__2side_4__3side_1_4side_1_5s1 = [5, 2, 2, 2, 2, 2, 5]
pyramid_1side_4__2side_4__3side_2_4side_1_5s1 = [5, 3, 2, 2, 2, 3, 5]
pyramid_1side_4__2side_4__3side_3_4side_1_5s1 = [5, 3, 3, 2, 3, 3, 5]
pyramid_1side_4__2side_4__3side_4_4side_1_5s1 = [5, 3, 3, 3, 3, 3, 5]
pyramid_1side_4__2side_2__3side_2_4side_2_5s1 = [5, 4, 1, 1, 1, 4, 5]
pyramid_1side_4__2side_3__3side_2_4side_2_5s1 = [5, 4, 2, 1, 2, 4, 5]
pyramid_1side_4__2side_3__3side_3_4side_2_5s1 = [5, 4, 3, 1, 3, 4, 5]
pyramid_1side_4__2side_4__3side_2_4side_2_5s1 = [5, 4, 2, 2, 2, 4, 5]
pyramid_1side_4__2side_4__3side_3_4side_2_5s1 = [5, 4, 3, 2, 3, 4, 5]
pyramid_1side_4__2side_4__3side_4_4side_2_5s1 = [5, 4, 3, 3, 3, 4, 5]
pyramid_1side_4__2side_3__3side_3_4side_3_5s1 = [5, 4, 4, 1, 4, 4, 5]
pyramid_1side_4__2side_4__3side_3_4side_3_5s1 = [5, 4, 4, 2, 4, 4, 5]
pyramid_1side_4__2side_4__3side_4_4side_3_5s1 = [5, 4, 4, 3, 4, 4, 5]
pyramid_1side_4__2side_4__3side_4_4side_4_5s1 = [5, 4, 4, 4, 4, 4, 5]
##################################
### 5side2
##################################
# "1" 3 6 10 15 21 28 36 45 55
# side3 OK 1
pyramid_1side_2__2side_2__3side_2_4side_2_5s2 = [5, 5, 0, 0, 0, 5, 5]
# 1 "3" 6 10 15 21 28 36 45 55
# side3 OK 4
pyramid_1side_3__2side_2__3side_2_4side_2_5s2 = [5, 5, 1, 0, 1, 5, 5]
pyramid_1side_3__2side_3__3side_2_4side_2_5s2 = [5, 5, 2, 0, 2, 5, 5]
pyramid_1side_3__2side_3__3side_3_4side_2_5s2 = [5, 5, 3, 0, 3, 5, 5]
pyramid_1side_3__2side_3__3side_3_4side_3_5s2 = [5, 5, 4, 0, 4, 5, 5]
# 1 3 "6" 10 15 21 28 36 45 55
# side3 OK 10
pyramid_1side_4__2side_2__3side_2_4side_2_5s2 = [5, 5, 1, 1, 1, 5, 5]
pyramid_1side_4__2side_3__3side_2_4side_2_5s2 = [5, 5, 2, 1, 2, 5, 5]
pyramid_1side_4__2side_3__3side_3_4side_2_5s2 = [5, 5, 3, 1, 3, 5, 5]
pyramid_1side_4__2side_4__3side_2_4side_2_5s2 = [5, 5, 2, 2, 2, 5, 5]
pyramid_1side_4__2side_4__3side_3_4side_2_5s2 = [5, 5, 3, 2, 3, 5, 5]
pyramid_1side_4__2side_4__3side_4_4side_2_5s2 = [5, 5, 3, 3, 3, 5, 5]
pyramid_1side_4__2side_3__3side_3_4side_3_5s2 = [5, 5, 4, 1, 4, 5, 5]
pyramid_1side_4__2side_4__3side_3_4side_3_5s2 = [5, 5, 4, 2, 4, 5, 5]
pyramid_1side_4__2side_4__3side_4_4side_3_5s2 = [5, 5, 4, 3, 4, 5, 5]
pyramid_1side_4__2side_4__3side_4_4side_4_5s2 = [5, 5, 4, 4, 4, 5, 5]
##################################
### 5side3
##################################
# "1" 3 6 10 15 21 28 36 45 55
# side3 OK 1
pyramid_1side_3__2side_3__3side_3_4side_3_5s3 = [5, 5, 5, 0, 5, 5, 5]
# 1 "3" 6 10 15 21 28 36 45 55
# side3 OK 4
pyramid_1side_4__2side_3__3side_3_4side_3_5s3 = [5, 5, 5, 1, 5, 5, 5]
pyramid_1side_4__2side_4__3side_3_4side_3_5s3 = [5, 5, 5, 2, 5, 5, 5]
pyramid_1side_4__2side_4__3side_4_4side_3_5s3 = [5, 5, 5, 3, 5, 5, 5]
pyramid_1side_4__2side_4__3side_4_4side_4_5s3 = [5, 5, 5, 4, 5, 5, 5]
##################################
### 5side4
##################################
# "1" 3 6 10 15 21 28 36 45 55
# side3 OK 1
pyramid_1side_4__2side_4__3side_4_4side_4_5s4 = [5, 5, 5, 5, 5, 5, 5]
###############################################################################################################################################################################################
###############################################################################################################################################################################################
###############################################################################################################################################################################################
##################################
### 1side1
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_1__2side_1__3side_1_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##################################
### 1side2
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_1__3side_1_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_1_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2_4side_2_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2__3side_2_4side_2_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##################################
### 1side3
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_1__3side_1_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_1_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_2_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_2_4side_2_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2__3side_2_4side_2_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_1_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_2_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_2_4side_2_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_2_4side_2_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_2_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_2_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_3_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_3_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3__3side_3_4side_3_5s3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
##################################
### 1side4
##################################
# "1" 3 6 10 15 21 28 36 45 55
# 2side1 OK 1
ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_1__3side_1_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
# 1 "3" 6 10 15 21 28 36 45 55
# 2side2 OK 4
ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2__3side_1_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2__3side_2_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2__3side_2_4side_2_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2__3side_2_4side_2_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
# 1 3 "6" 10 15 21 28 36 45 55
# 2side3 OK 10
ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_1_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_2_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_2_4side_2_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_2_4side_2_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_2_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_2_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_3_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_3_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3__3side_3_4side_3_5s3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
# 1 3 6 "10" 15 21 28 36 45 55
# 2side4 OK 20
ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_1_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_2_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_2_4side_2_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_2_4side_2_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_2_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_2_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_3_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_3_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_3_4side_3_5s3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_1_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_2_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_2_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_3_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_3_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_3_5s3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s1, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s2, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s3, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch=use_hid_ch, depth_level=3, out_ch=1, d_amount=3, bottle_divide=True, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4__3side_4_4side_4_5s4, ch_upper_bound= 2 ** 14).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
###############################################################################################################################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 512, 512, 1))
use_model = ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1
use_model = use_model.build()
result = use_model.generator(data)
print(result.shape)
from kong_util.tf_model_util import Show_model_weights
Show_model_weights(use_model.generator)
use_model.generator.summary()
print(use_model.model_describe)
| [
"[email protected]"
] | |
23ffd822cb9394060d8ed04bdd0e6a6f5ea26806 | 0df7f40b27cffe0b4e009041c35fc1e78e33f82d | /django_api/users/admin.py | c6a9e761a57586af5b9cd2a6a296661ba21db9e3 | [
"MIT"
] | permissive | ridwanray/tay-twitter-microservices | 5be11f166bd0e2dba298da1577549264315d0120 | d5bdb6b6d4fd8333efbb4c79752f8c5efaccb1f0 | refs/heads/master | 2023-01-09T13:22:46.917407 | 2020-11-20T01:16:06 | 2020-11-20T01:16:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | """Admin App Customization is done here"""
from django.contrib import admin
from .models import User
from django.contrib.auth.admin import UserAdmin
class CustomUserAdmin(UserAdmin):
"""Custom Admin Manager for Custom USer Model"""
fieldsets = UserAdmin.fieldsets + (
(None, {'fields': ("follower", "following")}),
)
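# Register User with the custom admin so the extra follower/following fields
# show up in the admin form.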
admin.site.register(User, CustomUserAdmin)
| [
"[email protected]"
] | |
2e610648cd9fb047a28f7e2d76bec256eec5c645 | 55647258df0565f19179ffb97ac217708d84ba4a | /social/serializers/comments.py | 4ac4833bd46f2e9cefdd1058d793c2201b05038b | [] | no_license | beatonma/snommoc | 25de0e81af0d9940bdc3aa6420cb5764d50c6d11 | 0a9d37dcad112c5dd98609c1566e74176ae3d89d | refs/heads/main | 2022-03-11T07:53:33.038649 | 2022-03-05T17:03:56 | 2022-03-05T17:03:56 | 188,595,195 | 0 | 0 | null | 2022-02-18T17:54:30 | 2019-05-25T17:35:58 | Python | UTF-8 | Python | false | false | 1,696 | py | import bleach
from rest_framework import serializers
from social.models.comments import Comment
from social.models.mixins import get_target_kwargs
from social.models.token import UserToken
from social.views import contract
class CommentSerializer(serializers.ModelSerializer):
username = serializers.CharField(source="user.username")
class Meta:
model = Comment
fields = [
contract.USER_NAME,
contract.COMMENT_TEXT,
"created_on",
"modified_on",
]
class PostCommentSerializer(serializers.ModelSerializer):
def __init__(self, target, *args, **kwargs):
super().__init__(*args, **kwargs)
self.target = target
token = serializers.CharField()
def validate(self, data):
original_text = data[contract.COMMENT_TEXT]
stripped_text = bleach.clean(
original_text, tags=[], attributes={}, styles=[], strip=True
)
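        # With no allowed tags or attributes, bleach strips all markup; if the
        # cleaned text differs from the original, the comment contained HTML
        # and is flagged for review.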
if original_text != stripped_text:
data[contract.FLAGGED] = True
data[contract.COMMENT_TEXT] = stripped_text
return data
class Meta:
model = Comment
fields = [
contract.USER_TOKEN,
contract.COMMENT_TEXT,
]
def update(self, instance, validated_data):
pass
def create(self, validated_data):
comment, _ = Comment.objects.get_or_create(
user=UserToken.objects.get(token=validated_data.get(contract.USER_TOKEN)),
**get_target_kwargs(self.target),
text=validated_data.get(contract.COMMENT_TEXT),
flagged=validated_data.get(contract.FLAGGED, False),
)
return comment
| [
"[email protected]"
] | |
a3f8bde72496e9b464ce228a2d862429620305ee | e9a737a6a9101d201e1ddf4292b31da9c6ed5919 | /ytree/frontends/rockstar/io.py | d16beae61c4767f34351067299f63bb27c35f7ae | [
"BSD-3-Clause"
] | permissive | brittonsmith/ytree | fd1305fc3f35c33741d5441e2c8b6a09cce2bb54 | 0c6a331f38c9758cca663ffd6e740183d359f7aa | refs/heads/main | 2023-05-25T06:02:19.020508 | 2021-04-19T14:35:18 | 2021-04-19T14:35:18 | 203,828,654 | 0 | 0 | NOASSERTION | 2019-08-22T16:01:34 | 2019-08-22T16:01:34 | null | UTF-8 | Python | false | false | 2,804 | py | """
RockstarArbor io classes and member functions
"""
#-----------------------------------------------------------------------------
# Copyright (c) ytree development team. All rights reserved.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
from ytree.data_structures.io import \
CatalogDataFile
from ytree.utilities.io import \
f_text_block
class RockstarDataFile(CatalogDataFile):
def __init__(self, filename, arbor):
self.offsets = None
super(RockstarDataFile, self).__init__(filename, arbor)
def open(self):
self.fh = open(self.filename, "r")
def _parse_header(self):
self.open()
f = self.fh
f.seek(0, 2)
self.file_size = f.tell()
f.seek(0)
while True:
line = f.readline()
            if not line:  # readline() returns '' at EOF, never None
self._hoffset = f.tell()
break
elif not line.startswith("#"):
self._hoffset = f.tell() - len(line)
break
elif line.startswith("#a = "):
self.scale_factor = float(line.split(" = ")[1])
self.close()
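    # For reference (an assumption about Rockstar's ASCII catalog format, not
    # stated in this file): the header is a block of '#'-prefixed lines, one
    # of which carries the scale factor, e.g. "#a = 0.99973".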
def _read_data_default(self, rfields, dtypes):
if not rfields:
return {}
fi = self.arbor.field_info
field_data = \
self._create_field_arrays(rfields, dtypes)
offsets = []
self.open()
f = self.fh
f.seek(self._hoffset)
file_size = self.file_size - self._hoffset
for line, offset in f_text_block(f, file_size=file_size):
offsets.append(offset)
sline = line.split()
for field in rfields:
field_data[field].append(sline[fi[field]["column"]])
self.close()
for field in rfields:
field_data[field] = \
np.array(field_data[field], dtype=dtypes[field])
if self.offsets is None:
self.offsets = np.array(offsets)
return field_data
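    # The byte offsets recorded during this full pass are cached on the data
    # file so that _read_data_select below can seek straight to single halos.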
def _read_data_select(self, rfields, tree_nodes, dtypes):
if not rfields:
return {}
fi = self.arbor.field_info
nt = len(tree_nodes)
field_data = \
self._create_field_arrays(rfields, dtypes, size=nt)
self.open()
f = self.fh
for i in range(nt):
f.seek(self.offsets[tree_nodes[i]._fi])
line = f.readline()
sline = line.split()
for field in rfields:
dtype = dtypes[field]
field_data[field][i] = dtype(sline[fi[field]["column"]])
self.close()
return field_data
| [
"[email protected]"
] | |
799a2ad2a3aed25738677f3c563458a4cd38017d | 641fa8341d8c436ad24945bcbf8e7d7d1dd7dbb2 | /content/DEPS | e891ebc72b9fd7dbdb01980defa7890b1268ebd9 | [
"BSD-3-Clause"
] | permissive | massnetwork/mass-browser | 7de0dfc541cbac00ffa7308541394bac1e945b76 | 67526da9358734698c067b7775be491423884339 | refs/heads/master | 2022-12-07T09:01:31.027715 | 2017-01-19T14:29:18 | 2017-01-19T14:29:18 | 73,799,690 | 4 | 4 | BSD-3-Clause | 2022-11-26T11:53:23 | 2016-11-15T09:49:29 | null | UTF-8 | Python | false | false | 3,816 | # Do NOT add chrome to the list below. We shouldn't be including files
# from src/chrome in src/content.
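# Rule syntax, as enforced by the checkdeps tool: a leading "+" allows
# includes from a path, a leading "-" disallows them, and more specific
# rules override more general ones.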
include_rules = [
# The subdirectories in content/ will manually allow their own include
# directories in content/ so we disallow all of them.
"-content",
"+content/app/resources/grit/content_resources.h",
"+content/common",
"+content/grit",
"+content/public/common",
"+content/public/test",
"+content/test",
"+blink/public/resources/grit",
"+cc",
"-cc/blink",
# If you want to use any of these files, move them to src/base first.
"-cc/base/scoped_ptr_algorithm.h",
"-cc/base/scoped_ptr_deque.h",
"-cc/base/scoped_ptr_vector.h",
"-components",
# Content can depend on components that are:
# 1) related to the implementation of the web platform
# 2) shared code between third_party/WebKit and content
# It should not depend on chrome features or implementation details, i.e. the
# original components/ directories which was code split out from chrome/ to be
# shared with iOS. This includes, but isn't limited to, browser features such
# as autofill or extensions, and chrome implementation details such as
# settings, packaging details, installation or crash reporting.
"+crypto",
"+grit/blink_resources.h",
"+grit/content_strings.h",
"+dbus",
"+gpu",
"+media",
"+mojo/common",
"+mojo/edk/embedder",
"+mojo/edk/js",
"+mojo/edk/test",
"+mojo/message_pump",
"+mojo/public",
"+net",
"+ppapi",
"+printing",
"+sandbox",
"+skia",
# In general, content/ should not rely on google_apis, since URLs
# and access tokens should usually be provided by the
# embedder.
#
# There are a couple of specific parts of content that are excepted
# from this rule, e.g. content/browser/speech/DEPS. These are cases of
# implementations that are strongly tied to Google servers, i.e. we
# don't expect alternate implementations to be provided by the
# embedder.
"-google_apis",
# Don't allow inclusion of these other libs we shouldn't be calling directly.
"-v8",
"-tools",
# Allow inclusion of third-party code:
"+third_party/angle",
"+third_party/boringssl/src/include",
"+third_party/flac",
"+third_party/libjingle",
"+third_party/mozilla",
"+third_party/ocmock",
"+third_party/re2",
"+third_party/skia",
"+third_party/sqlite",
"+third_party/khronos",
"+third_party/webrtc",
"+third_party/webrtc_overrides",
"+third_party/zlib/google",
"+third_party/WebKit/public",
"+ui/accelerated_widget_mac",
"+ui/accessibility",
"+ui/android",
# Aura is analogous to Win32 or a Gtk, so it is allowed.
"+ui/aura",
"+ui/base",
"+ui/compositor",
"+ui/display",
"+ui/events",
"+ui/gfx",
"+ui/gl",
"+ui/native_theme",
"+ui/ozone/public",
"+ui/resources/grit/ui_resources.h",
"+ui/resources/grit/webui_resources.h",
"+ui/resources/grit/webui_resources_map.h",
"+ui/shell_dialogs",
"+ui/snapshot",
"+ui/strings/grit/ui_strings.h",
"+ui/surface",
"+ui/touch_selection",
"+ui/wm",
# Content knows about grd files, but the specifics of how to get a resource
# given its id is left to the embedder.
"-ui/base/l10n",
"-ui/base/resource",
# These files aren't related to grd, so they're fine.
"+ui/base/l10n/l10n_util_android.h",
"+ui/base/l10n/l10n_util_win.h",
# Content shouldn't depend on views. While we technically don't need this
# line, since the top level DEPS doesn't allow it, we add it to make this
# explicit.
"-ui/views",
"+storage/browser",
"+storage/common",
# For generated JNI includes.
"+jni",
]
# content -> content/shell dependency is not allowed, except for browser tests.
specific_include_rules = {
".*_browsertest[a-z_]*\.(cc|h)": [
"+content/shell/browser",
"+content/shell/common",
],
}
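# Keys in specific_include_rules are regular expressions matched against file
# names, so the two extra allowances above apply only to browser-test sources.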
| [
"[email protected]"
] | ||
2ff06a22caf04d6abf9ee0dadb6a814e357ba72f | 48832d27da16256ee62c364add45f21b968ee669 | /res/scripts/client/gui/scaleform/daapi/view/lobby/clans/search/clansearchinfo.py | effd5375670fa8314a02b23ce4895ec79def0848 | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 7,782 | py | # 2016.08.04 19:50:21 Central Europe (summer time)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/clans/search/ClanSearchInfo.py
import weakref
import BigWorld
from adisp import process
from gui import SystemMessages
from gui.clans import formatters as clans_fmts
from gui.clans.clan_controller import g_clanCtrl
from gui.clans.contexts import CreateApplicationCtx
from gui.clans.clan_helpers import ClanListener
from gui.clans.items import formatField
from gui.clans.settings import CLIENT_CLAN_RESTRICTIONS, MAX_CLAN_MEMBERS_COUNT
from gui.Scaleform.daapi.view.lobby.profile.ProfileUtils import HeaderItemsTypes, ProfileUtils
from gui.Scaleform.daapi.view.meta.ClanSearchInfoMeta import ClanSearchInfoMeta
from gui.Scaleform.locale.CLANS import CLANS
from gui.shared.formatters import text_styles
from gui.shared.utils.functions import makeTooltip
from gui.shared.view_helpers import ClanEmblemsHelper
from helpers.i18n import makeString as _ms
from gui.shared import event_dispatcher as shared_events
def _packItemData(text, description, tooltip, icon):
return {'type': HeaderItemsTypes.COMMON,
'text': text,
'description': _ms(description),
'iconPath': ProfileUtils.getIconPath(icon),
'tooltip': tooltip,
'enabled': True}
class ClanSearchInfo(ClanSearchInfoMeta, ClanListener, ClanEmblemsHelper):
def __init__(self):
super(ClanSearchInfo, self).__init__()
self.__dataProvider = None
self.__selectedClan = None
return
def bindDataProvider(self, dataProvider):
self.__dataProvider = weakref.proxy(dataProvider)
def openClanProfile(self):
shared_events.showClanProfileWindow(self.__selectedClan.getClanDbID(), self.__selectedClan.getClanAbbrev())
def onAccountClanProfileChanged(self, profile):
self._updateSetaledState()
@process
def sendRequest(self):
self.as_setWaitingVisibleS(True)
context = CreateApplicationCtx([self.__selectedClan.getClanDbID()])
result = yield g_clanCtrl.sendRequest(context, allowDelay=True)
if result.isSuccess():
SystemMessages.pushMessage(clans_fmts.getAppSentSysMsg(self.__selectedClan.getClanName(), self.__selectedClan.getClanAbbrev()))
self._updateSetaledState()
self.as_setWaitingVisibleS(False)
def requestData(self, clanId):
self.__selectedClan = self.__dataProvider.getClanInfo(clanId)
self._updateDetailedInfo()
self._updateClanEmblem()
self._updateSetaledState()
def onClanEmblem128x128Received(self, clanDbID, emblem):
if clanDbID == self.__selectedClan.getClanDbID():
self.as_setEmblemS(self.getMemoryTexturePath(emblem))
def _populate(self):
super(ClanSearchInfo, self)._populate()
self.__initControls()
def _updateClanEmblem(self):
self.requestClanEmblem128x128(self.__selectedClan.getClanDbID())
def _updateDetailedInfo(self):
clanID = self.__selectedClan.getClanDbID()
clanName = formatField(self.__selectedClan.getClanFullName)
creationDate = formatField(getter=self.__selectedClan.getCreationDate, formatter=BigWorld.wg_getShortDateFormat)
rating = formatField(getter=self.__selectedClan.getPersonalRating, formatter=BigWorld.wg_getIntegralFormat)
battlesCount = formatField(getter=self.__selectedClan.getBattlesCount, formatter=BigWorld.wg_getIntegralFormat)
wins = formatField(getter=self.__selectedClan.getBattleXpAvg, formatter=lambda value: BigWorld.wg_getNiceNumberFormat(value) + '%')
avgExp = formatField(getter=self.__selectedClan.getBattlesPerformanceAvg, formatter=BigWorld.wg_getIntegralFormat)
stats = [_packItemData(battlesCount, CLANS.SEARCH_INFO_STATS_BATTLES, CLANS.SEARCH_INFO_STATS_BATTLES_TOOLTIP, 'avgBattlesCount40x32.png'), _packItemData(wins, CLANS.SEARCH_INFO_STATS_WINS, CLANS.SEARCH_INFO_STATS_WINS_TOOLTIP, 'avgWins40x32.png'), _packItemData(avgExp, CLANS.SEARCH_INFO_STATS_AVGEXP, CLANS.SEARCH_INFO_STATS_AVGEXP_TOOLTIP, 'avgExp40x32.png')]
self.as_setDataS({'clanId': clanID,
'clanName': clanName,
'creationDate': text_styles.main(_ms(CLANS.SEARCH_INFO_CREATIONDATE, date=creationDate)),
'rating': text_styles.promoTitle(rating),
'stats': stats})
def _updateSetaledState(self):
requestSentVisible = False
sendRequestBtnVisible = True
sendRequestBtnEnabled = True
sendRequestTooltip = None
reason = g_clanCtrl.getLimits().canSendApplication(_ClanAdapter(self.__selectedClan)).reason
if reason == CLIENT_CLAN_RESTRICTIONS.NO_RESTRICTIONS:
pass
elif reason == CLIENT_CLAN_RESTRICTIONS.OWN_CLAN:
sendRequestBtnVisible = False
elif reason == CLIENT_CLAN_RESTRICTIONS.ALREADY_IN_CLAN:
sendRequestBtnVisible = False
elif reason == CLIENT_CLAN_RESTRICTIONS.CLAN_IS_FULL:
sendRequestBtnEnabled = False
sendRequestTooltip = makeTooltip(CLANS.SEARCH_INFO_BANNED_TOOLTIP_HEADER, text_styles.error(_ms(CLANS.SEARCH_INFO_BANNED_TOOLTIP_BODY)))
elif reason == CLIENT_CLAN_RESTRICTIONS.CLAN_INVITE_ALREADY_RECEIVED:
sendRequestBtnEnabled = False
sendRequestTooltip = CLANS.SEARCH_INFO_INVITEALREADYACHIEVED_TOOLTIP
elif reason == CLIENT_CLAN_RESTRICTIONS.CLAN_APPLICATION_ALREADY_SENT:
sendRequestBtnEnabled = False
sendRequestTooltip = CLANS.SEARCH_INFO_REQUESTALREADYSENT_TOOLTIP
elif reason == CLIENT_CLAN_RESTRICTIONS.SENT_INVITES_LIMIT_REACHED:
sendRequestBtnEnabled = False
sendRequestTooltip = CLANS.SEARCH_INFO_REQUESTSLIMITEXCEEDED_TOOLTIP
elif reason == CLIENT_CLAN_RESTRICTIONS.CLAN_CONSCRIPTION_CLOSED:
sendRequestBtnEnabled = False
sendRequestTooltip = CLANS.SEARCH_INFO_REQUESTSARENOTACCEPTED_TOOLTIP
elif reason == CLIENT_CLAN_RESTRICTIONS.FORBIDDEN_ACCOUNT_TYPE:
sendRequestBtnEnabled = False
sendRequestTooltip = makeTooltip(CLANS.SEARCH_INFO_FORBIDDENACCOUNTTYPE_TOOLTIP_HEADER, text_styles.error(_ms(CLANS.SEARCH_INFO_FORBIDDENACCOUNTTYPE_TOOLTIP_BODY)))
else:
sendRequestBtnVisible = False
self.as_setStateDataS({'requestSentVisible': requestSentVisible,
'sendRequestBtnVisible': sendRequestBtnVisible,
'sendRequestBtnEnabled': sendRequestBtnEnabled,
'sendRequestTooltip': sendRequestTooltip,
'alertIconVisible': sendRequestBtnVisible and not sendRequestBtnEnabled})
return
def __initControls(self):
self.as_setInitDataS({'ratingDescription': text_styles.stats(_ms(CLANS.SEARCH_INFO_RATINGDESCRIPTION)),
'ratingTooltip': CLANS.SEARCH_INFO_RATINGDESCRIPTION_TOOLTIP,
'requestSent': text_styles.success(_ms(CLANS.SEARCH_INFO_REQUESTSENT)),
'clanProfileBtnLabel': _ms(CLANS.SEARCH_INFO_CLANPROFILEBTN),
'sendRequestBtnLabel': _ms(CLANS.SEARCH_INFO_SENDREQUESTBTN)})
self.as_setWaitingVisibleS(False)
class _ClanAdapter(object):
def __init__(self, clanInfo):
super(_ClanAdapter, self).__init__()
self.__clanInfo = clanInfo
def getDbID(self):
return self.__clanInfo.getClanDbID()
def canAcceptsJoinRequests(self):
return self.__clanInfo.canAcceptsJoinRequests()
def hasFreePlaces(self):
return MAX_CLAN_MEMBERS_COUNT - self.__clanInfo.getMembersCount() > 0
# okay decompiling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\clans\search\clansearchinfo.pyc
# decompiled 1 file: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:50:21 Central Europe (summer time)
| [
"[email protected]"
] | |
359f4e23b3ef1e9e4e2aec0b35aeccbd462a008b | 99c6e64c0bf533e702576c516c3092bf5e641637 | /server.py | c4ec64b92fe2f6b3466ab170489f3924ceec64fd | [
"MIT"
] | permissive | rjc-development/remote-desktop-control | 67ff0f1ae3d7c507f269d982540bacfa666dd322 | c138d6665a25053b4001c4e0c12ff851e401dc3f | refs/heads/master | 2022-12-22T02:26:46.556215 | 2020-09-25T20:01:02 | 2020-09-25T20:01:02 | 298,668,093 | 3 | 1 | null | 2020-09-25T19:59:46 | 2020-09-25T19:59:45 | null | UTF-8 | Python | false | false | 1,761 | py | from starlette.applications import Starlette
from starlette.websockets import WebSocketDisconnect
import json
import logging
import uvicorn
logging.basicConfig(level=logging.DEBUG)  # attach a handler so debug/info records are shown
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
app = Starlette()
websockets = {
'web': {},
'desktop': {},
}
async def receive_json(websocket):
message = await websocket.receive_text()
return json.loads(message)
@app.websocket_route('/ws')
async def websocket_endpoint(websocket):
await websocket.accept()
# "Authentication" message
message = await receive_json(websocket)
client_mode = message['client_mode']
client_id = message['client_id']
websockets[client_mode][client_id] = websocket
# Get mirror mode to broadcast messages to the client on the other side
mirror_mode = 'web' if client_mode == 'desktop' else 'desktop'
client_string = f'{client_id}[{client_mode}]'
logger.info(f'Client connected: {client_string}')
    while True:
try:
# Wait for a message from the client
message = await receive_json(websocket)
logger.debug(f'Message received from {client_string}: {message}')
try:
# Broadcast it to the mirror client
await websockets[mirror_mode][client_id].send_text(
json.dumps(message)
)
except KeyError:
logger.debug(
f'Client {client_id}[{mirror_mode}] not connected'
)
except WebSocketDisconnect:
break
del websockets[client_mode][client_id]
await websocket.close()
logger.info(f'Client disconnected: {client_string}')
if __name__ == '__main__':
uvicorn.run(app, host='0.0.0.0', port=8000)
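# A minimal matching client sketch (illustrative only; it assumes the
# third-party `websockets` package, which this server does not use):
#
#   import asyncio, json, websockets
#
#   async def main():
#       async with websockets.connect('ws://localhost:8000/ws') as ws:
#           # the "authentication" handshake expected by websocket_endpoint
#           await ws.send(json.dumps({'client_mode': 'web', 'client_id': 'demo'}))
#           # every further JSON message is relayed to the 'desktop' client
#           # registered under the same client_id
#           await ws.send(json.dumps({'event': 'ping'}))
#
#   asyncio.run(main())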
| [
"[email protected]"
] | |
294636258c2d0c16eda8eebc1460b1c2b8febfb3 | 8cf3a19eb3d0f69e5c0237fc504977330e95aac2 | /workflow/scripts/manticore-plotvcf.py | 6fc8c870e989714519620809b58213b502db8b43 | [
"MIT"
] | permissive | NBISweden/manticore-smk | 0c46ab5da5cdf7a40806bfef5ea05558efea8c5e | fd0b4ccd4239dc91dac423d0ea13478d36702561 | refs/heads/main | 2023-08-13T05:44:36.125066 | 2021-10-19T19:12:55 | 2021-10-19T19:12:55 | 308,556,800 | 1 | 2 | MIT | 2021-05-13T07:15:20 | 2020-10-30T07:33:11 | Python | UTF-8 | Python | false | false | 3,059 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import argparse
import random
import numpy as np
import allel
import zarr
import numcodecs
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("white")
sns.set_style("ticks")
populations = ["CHS", "YRI"]
pop_colours = {"CHS": "#FF0000", "YRI": "#008000"}
sample_population = ["CHS", "YRI"]
def do_pca(x, n, ncomp=10):
vidx = np.random.choice(x.shape[0], n, replace=False)
vidx.sort()
y = x.take(vidx, axis=0)
coords, model = allel.pca(y, n_components=ncomp, scaler="patterson")
return coords, model
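# Note: the subsample above draws n random variant rows, and allel.pca with
# the Patterson scaler returns coords of shape (n_samples, n_components).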
# Taken from http://alimanfoo.github.io/2015/09/28/fast-pca.html
def plot_pca_coords(coords, model, pc1, pc2, ax, sample_population):
sns.despine(ax=ax, offset=5)
x = coords[:, pc1]
y = coords[:, pc2]
for pop in populations:
flt = sample_population == pop
ax.plot(
x[flt],
y[flt],
marker="o",
linestyle=" ",
color=pop_colours[pop],
label=pop,
markersize=6,
mec="k",
mew=0.5,
)
ax.set_xlabel(
"PC%s (%.1f%%)" % (pc1 + 1, model.explained_variance_ratio_[pc1] * 100)
)
ax.set_ylabel(
"PC%s (%.1f%%)" % (pc2 + 1, model.explained_variance_ratio_[pc2] * 100)
)
def fig_pca(coords, model, title, sample_population):
# plot coords for PCs 1 vs 2, 3 vs 4
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(1, 2, 1)
plot_pca_coords(coords, model, 0, 1, ax, sample_population)
# ax = fig.add_subplot(1, 2, 2)
# plot_pca_coords(coords, model, 2, 3, ax, sample_population)
ax.legend(bbox_to_anchor=(1, 1), loc="upper left")
fig.suptitle(title, y=1.02)
fig.tight_layout()
return fig
if __name__ == "__main__":
if snakemake.params.options != "":
options = snakemake.params.options.split(" ")
sys.argv.extend(options)
parser = argparse.ArgumentParser(description="manticore-plotvcf option parser")
parser.add_argument(
"--subsample-size", metavar="n", type=int, help="subsample size", default=10000
)
args = parser.parse_args()
vcf_path = str(snakemake.input.vcf)
output = snakemake.output[0]
outdir = os.path.dirname(output)
plottype = snakemake.wildcards.type
dev = snakemake.wildcards.ext
## Convert to zarr
zarr_path = os.path.join(outdir, os.path.basename(vcf_path) + ".zarr")
allel.vcf_to_zarr(
vcf_path,
zarr_path,
log=sys.stdout,
fields="*",
alt_number=8,
compressor=numcodecs.Blosc(cname="zstd", clevel=1, shuffle=False),
)
callset = zarr.open_group(zarr_path, mode="r")
g = allel.GenotypeChunkedArray(callset["calldata/GT"])
n = min(len(g), args.subsample_size)
gn = g.to_n_alt()
coords, model = do_pca(gn, n, ncomp=2)
fig = fig_pca(coords, model, "PCA of first four components", sample_population)
fig.savefig(output)
shutil.rmtree(zarr_path)
| [
"[email protected]"
] | |
b1ad2582245f045c6dcbc4be07e3e4d8e9749c61 | aac5982c8dcf26221419086fb90c399b9f4324ef | /DFTB/SlaterKoster/slako_tables/n_cl.py | dd88b37045a4b15c7246ef3a8903f2924b9fa709 | [] | no_license | by-student-2017/DFTBaby-0.1.0-31Jul2019 | 99184d3fa2976d4e02f7f1bddee97e56526d9365 | 92cb73f1a6472f88588986561349d7f2ad1b1c15 | refs/heads/master | 2022-12-12T00:12:50.449505 | 2020-09-01T21:05:59 | 2020-09-01T21:05:59 | 290,116,049 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96,481 | py | # This file has been generated automatically by ./generate_slakotables.py
# from /home/humeniuka/DFTB-0.1.0/DFTB/SlaterKoster/confined_pseudo_atoms/n.pyc and /home/humeniuka/DFTB-0.1.0/DFTB/SlaterKoster/confined_pseudo_atoms/cl.pyc.
from numpy import array
Z1 = 7
Z2 = 17
# overlaps S[(l1,l2,i)] and hamilton matrix elements H[(l1,l2,i)]
# l1 and l2 are the angular quantum numbers of valence orbitals
# on atom1 and atom2 respectively.
# i enumerates the Slater-Koster integrals:
index2symbol = \
{0: 'ss\\sigma', 2: 'sp\\sigma', 4: 'ps\\sigma', 5: 'pp\\pi', 6: 'pp\\sigma'}
# grid for distance d between atomic centers
d = \
array([ 0. , 0.04016064, 0.08032129, 0.12048193,
0.16064257, 0.20080321, 0.24096386, 0.2811245 ,
0.32128514, 0.36144578, 0.40160643, 0.44176707,
0.48192771, 0.52208835, 0.562249 , 0.60240964,
0.64257028, 0.68273092, 0.72289157, 0.76305221,
0.80321285, 0.84337349, 0.88353414, 0.92369478,
0.96385542, 1.00401606, 1.04417671, 1.08433735,
1.12449799, 1.16465863, 1.20481928, 1.24497992,
1.28514056, 1.3253012 , 1.36546185, 1.40562249,
1.44578313, 1.48594378, 1.52610442, 1.56626506,
1.6064257 , 1.64658635, 1.68674699, 1.72690763,
1.76706827, 1.80722892, 1.84738956, 1.8875502 ,
1.92771084, 1.96787149, 2.00803213, 2.04819277,
2.08835341, 2.12851406, 2.1686747 , 2.20883534,
2.24899598, 2.28915663, 2.32931727, 2.36947791,
2.40963855, 2.4497992 , 2.48995984, 2.53012048,
2.57028112, 2.61044177, 2.65060241, 2.69076305,
2.73092369, 2.77108434, 2.81124498, 2.85140562,
2.89156627, 2.93172691, 2.97188755, 3.01204819,
3.05220884, 3.09236948, 3.13253012, 3.17269076,
3.21285141, 3.25301205, 3.29317269, 3.33333333,
3.37349398, 3.41365462, 3.45381526, 3.4939759 ,
3.53413655, 3.57429719, 3.61445783, 3.65461847,
3.69477912, 3.73493976, 3.7751004 , 3.81526104,
3.85542169, 3.89558233, 3.93574297, 3.97590361,
4.01606426, 4.0562249 , 4.09638554, 4.13654618,
4.17670683, 4.21686747, 4.25702811, 4.29718876,
4.3373494 , 4.37751004, 4.41767068, 4.45783133,
4.49799197, 4.53815261, 4.57831325, 4.6184739 ,
4.65863454, 4.69879518, 4.73895582, 4.77911647,
4.81927711, 4.85943775, 4.89959839, 4.93975904,
4.97991968, 5.02008032, 5.06024096, 5.10040161,
5.14056225, 5.18072289, 5.22088353, 5.26104418,
5.30120482, 5.34136546, 5.3815261 , 5.42168675,
5.46184739, 5.50200803, 5.54216867, 5.58232932,
5.62248996, 5.6626506 , 5.70281124, 5.74297189,
5.78313253, 5.82329317, 5.86345382, 5.90361446,
5.9437751 , 5.98393574, 6.02409639, 6.06425703,
6.10441767, 6.14457831, 6.18473896, 6.2248996 ,
6.26506024, 6.30522088, 6.34538153, 6.38554217,
6.42570281, 6.46586345, 6.5060241 , 6.54618474,
6.58634538, 6.62650602, 6.66666667, 6.70682731,
6.74698795, 6.78714859, 6.82730924, 6.86746988,
6.90763052, 6.94779116, 6.98795181, 7.02811245,
7.06827309, 7.10843373, 7.14859438, 7.18875502,
7.22891566, 7.26907631, 7.30923695, 7.34939759,
7.38955823, 7.42971888, 7.46987952, 7.51004016,
7.5502008 , 7.59036145, 7.63052209, 7.67068273,
7.71084337, 7.75100402, 7.79116466, 7.8313253 ,
7.87148594, 7.91164659, 7.95180723, 7.99196787,
8.03212851, 8.07228916, 8.1124498 , 8.15261044,
8.19277108, 8.23293173, 8.27309237, 8.31325301,
8.35341365, 8.3935743 , 8.43373494, 8.47389558,
8.51405622, 8.55421687, 8.59437751, 8.63453815,
8.6746988 , 8.71485944, 8.75502008, 8.79518072,
8.83534137, 8.87550201, 8.91566265, 8.95582329,
8.99598394, 9.03614458, 9.07630522, 9.11646586,
9.15662651, 9.19678715, 9.23694779, 9.27710843,
9.31726908, 9.35742972, 9.39759036, 9.437751 ,
9.47791165, 9.51807229, 9.55823293, 9.59839357,
9.63855422, 9.67871486, 9.7188755 , 9.75903614,
9.79919679, 9.83935743, 9.87951807, 9.91967871,
9.95983936, 10. ])
# overlaps
S = \
{(0, 0, 0): array([ 8.86228024e-01, 8.86227283e-01, 8.85893149e-01,
8.84812784e-01, 8.82570589e-01, 8.78925330e-01,
8.73795193e-01, 8.67236136e-01, 8.59381426e-01,
8.50438154e-01, 8.40632702e-01, 8.30188777e-01,
8.19322952e-01, 8.08216818e-01, 7.97033140e-01,
7.85897795e-01, 7.74901835e-01, 7.64121837e-01,
7.53596604e-01, 7.43342644e-01, 7.33367819e-01,
7.23656177e-01, 7.14193460e-01, 7.04938664e-01,
6.95860753e-01, 6.86920572e-01, 6.78070725e-01,
6.69274735e-01, 6.60491848e-01, 6.51688836e-01,
6.42826923e-01, 6.33878465e-01, 6.24820147e-01,
6.15622641e-01, 6.06280453e-01, 5.96773057e-01,
5.87089362e-01, 5.77228713e-01, 5.67189344e-01,
5.56971689e-01, 5.46577766e-01, 5.36019691e-01,
5.25307359e-01, 5.14448313e-01, 5.03457612e-01,
4.92350710e-01, 4.81145448e-01, 4.69853433e-01,
4.58497598e-01, 4.47094370e-01, 4.35661535e-01,
4.24215709e-01, 4.12776421e-01, 4.01359399e-01,
3.89983763e-01, 3.78664950e-01, 3.67422773e-01,
3.56268618e-01, 3.45218143e-01, 3.34284595e-01,
3.23482084e-01, 3.12823161e-01, 3.02317955e-01,
2.91977221e-01, 2.81811641e-01, 2.71827145e-01,
2.62034286e-01, 2.52437729e-01, 2.43045817e-01,
2.33862571e-01, 2.24891662e-01, 2.16137904e-01,
2.07604374e-01, 1.99292725e-01, 1.91206224e-01,
1.83343314e-01, 1.75706006e-01, 1.68295006e-01,
1.61107663e-01, 1.54144437e-01, 1.47402650e-01,
1.40882207e-01, 1.34578779e-01, 1.28490457e-01,
1.22614183e-01, 1.16947673e-01, 1.11486245e-01,
1.06226569e-01, 1.01164539e-01, 9.62963408e-02,
9.16173069e-02, 8.71236642e-02, 8.28104289e-02,
7.86730611e-02, 7.47069904e-02, 7.09075771e-02,
6.72699555e-02, 6.37892237e-02, 6.04610867e-02,
5.72804396e-02, 5.42426338e-02, 5.13423642e-02,
4.85756845e-02, 4.59375932e-02, 4.34233955e-02,
4.10289471e-02, 3.87495892e-02, 3.65810286e-02,
3.45189265e-02, 3.25591104e-02, 3.06975500e-02,
2.89302075e-02, 2.72531382e-02, 2.56625589e-02,
2.41547313e-02, 2.27262085e-02, 2.13733403e-02,
2.00927557e-02, 1.88812185e-02, 1.77355947e-02,
1.66527610e-02, 1.56298048e-02, 1.46639553e-02,
1.37522328e-02, 1.28921147e-02, 1.20810490e-02,
1.13165845e-02, 1.05963786e-02, 9.91816277e-03,
9.27976204e-03, 8.67914245e-03, 8.11429793e-03,
7.58338197e-03, 7.08445125e-03, 6.61585703e-03,
6.17593637e-03, 5.76312552e-03, 5.37588891e-03,
5.01280848e-03, 4.67251026e-03, 4.35370711e-03,
4.05517025e-03, 3.77571766e-03, 3.51423537e-03,
3.26967759e-03, 3.04102965e-03, 2.82734236e-03,
2.62772519e-03, 2.44133093e-03, 2.26733718e-03,
2.10498817e-03, 1.95356675e-03, 1.81239182e-03,
1.68081986e-03, 1.55825630e-03, 1.44411220e-03,
1.33785804e-03, 1.23898564e-03, 1.14701554e-03,
1.06149956e-03, 9.82015732e-04, 9.08168311e-04,
8.39581479e-04, 7.75903775e-04, 7.16807298e-04,
6.61982545e-04, 6.11138121e-04, 5.64003914e-04,
5.20325696e-04, 4.79865143e-04, 4.42399308e-04,
4.07717572e-04, 3.75626083e-04, 3.45942132e-04,
3.18494900e-04, 2.93124976e-04, 2.69683786e-04,
2.48032712e-04, 2.28042049e-04, 2.09591512e-04,
1.92570534e-04, 1.76869767e-04, 1.62394027e-04,
1.49053174e-04, 1.36761355e-04, 1.25440554e-04,
1.15018004e-04, 1.05425894e-04, 9.66011912e-05,
8.84853349e-05, 8.10240507e-05, 7.41667703e-05,
6.78670590e-05, 6.20815501e-05, 5.67700786e-05,
5.18956281e-05, 4.74237357e-05, 4.33227669e-05,
3.95630745e-05, 3.61178526e-05, 3.29613094e-05,
3.00705480e-05, 2.74241255e-05, 2.50022296e-05,
2.27865540e-05, 2.07602644e-05, 1.89078506e-05,
1.72149624e-05, 1.56683958e-05, 1.42560034e-05,
1.29665974e-05, 1.17898647e-05, 1.07163267e-05,
9.73729740e-06, 8.84475001e-06, 8.03132239e-06,
7.29026526e-06, 6.61537390e-06, 6.00095711e-06,
5.44178958e-06, 4.93307507e-06, 4.47041490e-06,
4.04979292e-06, 3.66752598e-06, 3.32023335e-06,
3.00482115e-06, 2.71846457e-06, 2.45857253e-06,
2.22278374e-06, 2.00893236e-06, 1.81505057e-06,
1.63933971e-06, 1.48013763e-06, 1.33594809e-06,
1.20540220e-06, 1.08724843e-06, 9.80348397e-07,
8.83662647e-07, 7.96246599e-07, 7.17237612e-07,
6.45851394e-07, 5.81375978e-07, 5.23161300e-07,
4.70617316e-07, 4.23209963e-07, 3.80449321e-07,
3.41894769e-07, 3.07144515e-07, 2.75833584e-07,
2.47631337e-07]),
(0, 1, 2): array([ -5.90435203e-18, 3.56363620e-02, 7.03263253e-02,
1.03329886e-01, 1.34174441e-01, 1.62637975e-01,
1.88699616e-01, 2.12480651e-01, 2.34186879e-01,
2.54078105e-01, 2.72425463e-01, 2.89493826e-01,
3.05527842e-01, 3.20739184e-01, 3.35306235e-01,
3.49374140e-01, 3.63048933e-01, 3.76409318e-01,
3.89505196e-01, 4.02356487e-01, 4.14970458e-01,
4.27332784e-01, 4.39420312e-01, 4.51193269e-01,
4.62610627e-01, 4.73627666e-01, 4.84191732e-01,
4.94254426e-01, 5.03768530e-01, 5.12690829e-01,
5.20975696e-01, 5.28587772e-01, 5.35497190e-01,
5.41668618e-01, 5.47090665e-01, 5.51740523e-01,
5.55603149e-01, 5.58678425e-01, 5.60963516e-01,
5.62461101e-01, 5.63175331e-01, 5.63123810e-01,
5.62321124e-01, 5.60782117e-01, 5.58529529e-01,
5.55588817e-01, 5.51987483e-01, 5.47748964e-01,
5.42908938e-01, 5.37496873e-01, 5.31544877e-01,
5.25083419e-01, 5.18148106e-01, 5.10770472e-01,
5.02985980e-01, 4.94826962e-01, 4.86330878e-01,
4.77525456e-01, 4.68444325e-01, 4.59117241e-01,
4.49576695e-01, 4.39852892e-01, 4.29972300e-01,
4.19963213e-01, 4.09853663e-01, 3.99665308e-01,
3.89426023e-01, 3.79155580e-01, 3.68879013e-01,
3.58614813e-01, 3.48381400e-01, 3.38198444e-01,
3.28082743e-01, 3.18049361e-01, 3.08115057e-01,
2.98289576e-01, 2.88587555e-01, 2.79021707e-01,
2.69599085e-01, 2.60331446e-01, 2.51225355e-01,
2.42291193e-01, 2.33532357e-01, 2.24955732e-01,
2.16566224e-01, 2.08369555e-01, 2.00367377e-01,
1.92563085e-01, 1.84958621e-01, 1.77556005e-01,
1.70355598e-01, 1.63358867e-01, 1.56565019e-01,
1.49973667e-01, 1.43584064e-01, 1.37395016e-01,
1.31404651e-01, 1.25610738e-01, 1.20011822e-01,
1.14604749e-01, 1.09386840e-01, 1.04353954e-01,
9.95041607e-02, 9.48333444e-02, 9.03376850e-02,
8.60140926e-02, 8.18583007e-02, 7.78664634e-02,
7.40344095e-02, 7.03580551e-02, 6.68333586e-02,
6.34560370e-02, 6.02217931e-02, 5.71264102e-02,
5.41655724e-02, 5.13353472e-02, 4.86312168e-02,
4.60490203e-02, 4.35847017e-02, 4.12342463e-02,
3.89934968e-02, 3.68585721e-02, 3.48257419e-02,
3.28906760e-02, 3.10498813e-02, 2.92996592e-02,
2.76364050e-02, 2.60566163e-02, 2.45568407e-02,
2.31337080e-02, 2.17840553e-02, 2.05046778e-02,
1.92926267e-02, 1.81446349e-02, 1.70580138e-02,
1.60299582e-02, 1.50578061e-02, 1.41388833e-02,
1.32707130e-02, 1.24508504e-02, 1.16769870e-02,
1.09468901e-02, 1.02583774e-02, 9.60937191e-03,
8.99790638e-03, 8.42203916e-03, 7.87993789e-03,
7.36986890e-03, 6.89016249e-03, 6.43917069e-03,
6.01537167e-03, 5.61730483e-03, 5.24356659e-03,
4.89281702e-03, 4.56381087e-03, 4.25528932e-03,
3.96612001e-03, 3.69520122e-03, 3.44148289e-03,
3.20397513e-03, 2.98173672e-03, 2.77387703e-03,
2.57954073e-03, 2.39792105e-03, 2.22825896e-03,
2.06983087e-03, 1.92194892e-03, 1.78397079e-03,
1.65528614e-03, 1.53531778e-03, 1.42352103e-03,
1.31937631e-03, 1.22240209e-03, 1.13214040e-03,
1.04815931e-03, 9.70052458e-04, 8.97437971e-04,
8.29956701e-04, 7.67269751e-04, 7.09060439e-04,
6.55035815e-04, 6.04902103e-04, 5.58403167e-04,
5.15294150e-04, 4.75339611e-04, 4.38324294e-04,
4.04045877e-04, 3.72314341e-04, 3.42951719e-04,
3.15791437e-04, 2.90677967e-04, 2.67465302e-04,
2.46018462e-04, 2.26210429e-04, 2.07922705e-04,
1.91045336e-04, 1.75475199e-04, 1.61117002e-04,
1.47880764e-04, 1.35684647e-04, 1.24449226e-04,
1.14103611e-04, 1.04580938e-04, 9.58190045e-05,
8.77599507e-05, 8.03502143e-05, 7.35401246e-05,
6.72834196e-05, 6.15372527e-05, 5.62619511e-05,
5.14207416e-05, 4.69795068e-05, 4.29066897e-05,
3.91731896e-05, 3.57519462e-05, 3.26179826e-05,
2.97482599e-05, 2.71214662e-05, 2.47179346e-05,
2.25194978e-05, 2.05093784e-05, 1.86720970e-05,
1.69934350e-05, 1.54602756e-05, 1.40605077e-05,
1.27829827e-05, 1.16174609e-05, 1.05544961e-05,
9.58542855e-06, 8.70227112e-06, 7.89772371e-06,
7.16507657e-06, 6.49809495e-06, 5.89113338e-06,
5.33899798e-06, 4.83691421e-06, 4.38051467e-06,
3.96578987e-06, 3.58907584e-06, 3.24700858e-06,
2.93651272e-06, 2.65478131e-06, 2.39923794e-06,
2.16753143e-06, 1.95752193e-06, 1.76723689e-06,
1.59489209e-06, 1.43885243e-06, 1.29762482e-06,
1.16984957e-06]),
(1, 0, 4): array([ -4.10046655e-18, 2.57069625e-03, 5.06649415e-03,
7.36563195e-03, 9.29752196e-03, 1.06561345e-02,
1.12305392e-02, 1.08173148e-02, 9.25027115e-03,
6.39195968e-03, 2.15289802e-03, -3.51287551e-03,
-1.06137786e-02, -1.91162132e-02, -2.89560823e-02,
-4.00448162e-02, -5.22674442e-02, -6.54963934e-02,
-7.95941869e-02, -9.44082749e-02, -1.09793257e-01,
-1.25597209e-01, -1.41675680e-01, -1.57882932e-01,
-1.74084599e-01, -1.90156993e-01, -2.05978492e-01,
-2.21442317e-01, -2.36452012e-01, -2.50923363e-01,
-2.64776350e-01, -2.77948161e-01, -2.90386258e-01,
-3.02037191e-01, -3.12875784e-01, -3.22868508e-01,
-3.31992123e-01, -3.40241445e-01, -3.47609810e-01,
-3.54097912e-01, -3.59708867e-01, -3.64460926e-01,
-3.68369260e-01, -3.71450443e-01, -3.73729101e-01,
-3.75233178e-01, -3.75992301e-01, -3.76032878e-01,
-3.75392982e-01, -3.74104615e-01, -3.72202322e-01,
-3.69718839e-01, -3.66691971e-01, -3.63155392e-01,
-3.59146226e-01, -3.54698706e-01, -3.49851527e-01,
-3.44633906e-01, -3.39080683e-01, -3.33222447e-01,
-3.27092693e-01, -3.20722350e-01, -3.14138332e-01,
-3.07369517e-01, -3.00444240e-01, -2.93384666e-01,
-2.86218702e-01, -2.78966413e-01, -2.71652842e-01,
-2.64296563e-01, -2.56916236e-01, -2.49531489e-01,
-2.42159094e-01, -2.34814280e-01, -2.27513676e-01,
-2.20267350e-01, -2.13089864e-01, -2.05994023e-01,
-1.98987173e-01, -1.92081104e-01, -1.85282699e-01,
-1.78602388e-01, -1.72044046e-01, -1.65614763e-01,
-1.59319843e-01, -1.53165230e-01, -1.47153157e-01,
-1.41287378e-01, -1.35570356e-01, -1.30004554e-01,
-1.24590936e-01, -1.19331425e-01, -1.14225890e-01,
-1.09274573e-01, -1.04477343e-01, -9.98336429e-02,
-9.53423069e-02, -9.10018357e-02, -8.68113998e-02,
-8.27686573e-02, -7.88716486e-02, -7.51171064e-02,
-7.15037504e-02, -6.80283258e-02, -6.46878529e-02,
-6.14799740e-02, -5.84012935e-02, -5.54487763e-02,
-5.26190976e-02, -4.99090151e-02, -4.73153100e-02,
-4.48345474e-02, -4.24632954e-02, -4.01981705e-02,
-3.80357138e-02, -3.59727992e-02, -3.40057952e-02,
-3.21313660e-02, -3.03462832e-02, -2.86473425e-02,
-2.70312218e-02, -2.54948373e-02, -2.40352408e-02,
-2.26489531e-02, -2.13332347e-02, -2.00851569e-02,
-1.89018812e-02, -1.77806469e-02, -1.67187476e-02,
-1.57135360e-02, -1.47625511e-02, -1.38632917e-02,
-1.30134763e-02, -1.22105427e-02, -1.14524323e-02,
-1.07369802e-02, -1.00621421e-02, -9.42586207e-03,
-8.82625034e-03, -8.26144272e-03, -7.72968670e-03,
-7.22929280e-03, -6.75861069e-03, -6.31607670e-03,
-5.90021685e-03, -5.50957650e-03, -5.14278944e-03,
-4.79856890e-03, -4.47567826e-03, -4.17289830e-03,
-3.88910955e-03, -3.62324102e-03, -3.37426611e-03,
-3.14120771e-03, -2.92316495e-03, -2.71922095e-03,
-2.52856112e-03, -2.35039272e-03, -2.18396257e-03,
-2.02856383e-03, -1.88352663e-03, -1.74821893e-03,
-1.62203516e-03, -1.50440633e-03, -1.39479931e-03,
-1.29270717e-03, -1.19764888e-03, -1.10917837e-03,
-1.02687224e-03, -9.50331429e-04, -8.79180430e-04,
-8.13062112e-04, -7.51646920e-04, -6.94622572e-04,
-6.41694924e-04, -5.92588392e-04, -5.47044936e-04,
-5.04822558e-04, -4.65693452e-04, -4.29445761e-04,
-3.95884681e-04, -3.64812822e-04, -3.36061188e-04,
-3.09468110e-04, -2.84877753e-04, -2.62148838e-04,
-2.41148841e-04, -2.21753605e-04, -2.03847146e-04,
-1.87321215e-04, -1.72075075e-04, -1.58014321e-04,
-1.45052163e-04, -1.33106973e-04, -1.22102846e-04,
-1.11969636e-04, -1.02641631e-04, -9.40584125e-05,
-8.61629094e-05, -7.89036248e-05, -7.22302341e-05,
-6.60983802e-05, -6.04661998e-05, -5.52948197e-05,
-5.05482012e-05, -4.61930715e-05, -4.21986648e-05,
-3.85363767e-05, -3.51797923e-05, -3.21045229e-05,
-2.92880163e-05, -2.67093925e-05, -2.43493940e-05,
-2.21903243e-05, -2.02157410e-05, -1.84105144e-05,
-1.67607292e-05, -1.52535419e-05, -1.38771286e-05,
-1.26205914e-05, -1.14738890e-05, -1.04277761e-05,
-9.47379061e-06, -8.60414115e-06, -7.81164542e-06,
-7.08970737e-06, -6.43228699e-06, -5.83381951e-06,
-5.28922123e-06, -4.79380696e-06, -4.34331289e-06,
-3.93382613e-06, -3.56170754e-06, -3.22368071e-06,
-2.91673643e-06, -2.63811176e-06, -2.38528645e-06,
-2.15595021e-06, -1.94799825e-06, -1.75950000e-06,
-1.58869507e-06, -1.43398103e-06, -1.29388847e-06,
-1.16707972e-06, -1.05234158e-06, -9.48554600e-07,
-8.54711209e-07, -7.69888504e-07, -6.93245486e-07,
-6.24017920e-07]),
(1, 1, 5): array([ 8.01412658e-01, 8.01375276e-01, 8.01240918e-01,
8.00986269e-01, 8.00539724e-01, 7.99846721e-01,
7.98830402e-01, 7.97430452e-01, 7.95563402e-01,
7.93178174e-01, 7.90221069e-01, 7.86646409e-01,
7.82424286e-01, 7.77525331e-01, 7.71939951e-01,
7.65660107e-01, 7.58683333e-01, 7.51029382e-01,
7.42709113e-01, 7.33740781e-01, 7.24157042e-01,
7.13982666e-01, 7.03261582e-01, 6.92020762e-01,
6.80304422e-01, 6.68152887e-01, 6.55599389e-01,
6.42692405e-01, 6.29470287e-01, 6.15977980e-01,
6.02250493e-01, 5.88329367e-01, 5.74253475e-01,
5.60054892e-01, 5.45778440e-01, 5.31450317e-01,
5.17103829e-01, 5.02769675e-01, 4.88477703e-01,
4.74253143e-01, 4.60120444e-01, 4.46104037e-01,
4.32226837e-01, 4.18505237e-01, 4.04958666e-01,
3.91602749e-01, 3.78455804e-01, 3.65525378e-01,
3.52827556e-01, 3.40372377e-01, 3.28167487e-01,
3.16221828e-01, 3.04542267e-01, 2.93132340e-01,
2.81998869e-01, 2.71143331e-01, 2.60571576e-01,
2.50282579e-01, 2.40276987e-01, 2.30556372e-01,
2.21119525e-01, 2.11965022e-01, 2.03092022e-01,
1.94497728e-01, 1.86179746e-01, 1.78133465e-01,
1.70356853e-01, 1.62844961e-01, 1.55594241e-01,
1.48600243e-01, 1.41857048e-01, 1.35359987e-01,
1.29104728e-01, 1.23084702e-01, 1.17295868e-01,
1.11730959e-01, 1.06385298e-01, 1.01253126e-01,
9.63280208e-02, 9.16046852e-02, 8.70768621e-02,
8.27394963e-02, 7.85858636e-02, 7.46107819e-02,
7.08081184e-02, 6.71728859e-02, 6.36986240e-02,
6.03803634e-02, 5.72122789e-02, 5.41893672e-02,
5.13060429e-02, 4.85576521e-02, 4.59387326e-02,
4.34443337e-02, 4.10696404e-02, 3.88100645e-02,
3.66609712e-02, 3.46177454e-02, 3.26763259e-02,
3.08321675e-02, 2.90813652e-02, 2.74196765e-02,
2.58435816e-02, 2.43491389e-02, 2.29325495e-02,
2.15906191e-02, 2.03197638e-02, 1.91168066e-02,
1.79785698e-02, 1.69020037e-02, 1.58842190e-02,
1.49224461e-02, 1.40138633e-02, 1.31559659e-02,
1.23462160e-02, 1.15822884e-02, 1.08617421e-02,
1.01824390e-02, 9.54227224e-03, 8.93925517e-03,
8.37141799e-03, 7.83694709e-03, 7.33407978e-03,
6.86110104e-03, 6.41639870e-03, 5.99847545e-03,
5.60582997e-03, 5.23712295e-03, 4.89097520e-03,
4.56616329e-03, 4.26148075e-03, 3.97577004e-03,
3.70795893e-03, 3.45701531e-03, 3.22197047e-03,
3.00189309e-03, 2.79591586e-03, 2.60319686e-03,
2.42294815e-03, 2.25441995e-03, 2.09691663e-03,
1.94976679e-03, 1.81233913e-03, 1.68403730e-03,
1.56430471e-03, 1.45259885e-03, 1.34842135e-03,
1.25130356e-03, 1.16080499e-03, 1.07648832e-03,
9.97964631e-04, 9.24864017e-04, 8.56836715e-04,
7.93552411e-04, 7.34700302e-04, 6.79988636e-04,
6.29144622e-04, 5.81912146e-04, 5.38048846e-04,
4.97328497e-04, 4.59538815e-04, 4.24483318e-04,
3.91973787e-04, 3.61834713e-04, 3.33904474e-04,
3.08029235e-04, 2.84067272e-04, 2.61882788e-04,
2.41352153e-04, 2.22358556e-04, 2.04794600e-04,
1.88555018e-04, 1.73548015e-04, 1.59682410e-04,
1.46876966e-04, 1.35054290e-04, 1.24142703e-04,
1.14075586e-04, 1.04790951e-04, 9.62306539e-05,
8.83410196e-05, 8.10716197e-05, 7.43762909e-05,
6.82119403e-05, 6.25380127e-05, 5.73173231e-05,
5.25153577e-05, 4.81000386e-05, 4.40416286e-05,
4.03125350e-05, 3.68872243e-05, 3.37419803e-05,
3.08548202e-05, 2.82055453e-05, 2.57753695e-05,
2.35469028e-05, 2.15041153e-05, 1.96322074e-05,
1.79173377e-05, 1.63470171e-05, 1.49094026e-05,
1.35938292e-05, 1.23902559e-05, 1.12896644e-05,
1.02834010e-05, 9.36376224e-06, 8.52359684e-06,
7.75627775e-06, 7.05572163e-06, 6.41634702e-06,
5.83300906e-06, 5.30097966e-06, 4.81590218e-06,
4.37378053e-06, 3.97094484e-06, 3.60403008e-06,
3.26994831e-06, 2.96586092e-06, 2.68917275e-06,
2.43750108e-06, 2.20865532e-06, 2.00063984e-06,
1.81162123e-06, 1.63992224e-06, 1.48400936e-06,
1.34247790e-06, 1.21404585e-06, 1.09754015e-06,
9.91886675e-07, 8.96110852e-07, 8.09315242e-07,
7.30689464e-07, 6.59482003e-07, 5.95018763e-07,
5.36678473e-07, 4.83898070e-07, 4.36164765e-07,
3.93011552e-07, 3.54010019e-07, 3.18773438e-07,
2.86949200e-07, 2.58216747e-07, 2.32284094e-07,
2.08887398e-07, 1.87785349e-07, 1.68758520e-07,
1.51609046e-07, 1.36157957e-07, 1.22240431e-07,
1.09708882e-07]),
(1, 1, 6): array([ 8.01457198e-01, 8.01335221e-01, 8.00906682e-01,
7.99995049e-01, 7.98348642e-01, 7.95677212e-01,
7.91687506e-01, 7.86105124e-01, 7.78695546e-01,
7.69269680e-01, 7.57692080e-01, 7.43880852e-01,
7.27802265e-01, 7.09471465e-01, 6.88945048e-01,
6.66312938e-01, 6.41697305e-01, 6.15246116e-01,
5.87120906e-01, 5.57505424e-01, 5.26584909e-01,
4.94554644e-01, 4.61611161e-01, 4.27949628e-01,
3.93764300e-01, 3.59236429e-01, 3.24547555e-01,
2.89868362e-01, 2.55356625e-01, 2.21160152e-01,
1.87420179e-01, 1.54260128e-01, 1.21792166e-01,
9.01260256e-02, 5.93434708e-02, 2.95296450e-02,
7.58396811e-04, -2.69190892e-02, -5.34509679e-02,
-7.87977355e-02, -1.02924051e-01, -1.25814550e-01,
-1.47451598e-01, -1.67824401e-01, -1.86931487e-01,
-2.04779477e-01, -2.21377601e-01, -2.36736810e-01,
-2.50881483e-01, -2.63832534e-01, -2.75616033e-01,
-2.86257619e-01, -2.95792156e-01, -3.04251393e-01,
-3.11672645e-01, -3.18092663e-01, -3.23554655e-01,
-3.28092287e-01, -3.31748197e-01, -3.34559804e-01,
-3.36571648e-01, -3.37825588e-01, -3.38358544e-01,
-3.38212657e-01, -3.37429886e-01, -3.36045258e-01,
-3.34102262e-01, -3.31634613e-01, -3.28684026e-01,
-3.25283370e-01, -3.21467226e-01, -3.17271683e-01,
-3.12729107e-01, -3.07870845e-01, -3.02730067e-01,
-2.97331199e-01, -2.91705387e-01, -2.85881947e-01,
-2.79881664e-01, -2.73732677e-01, -2.67455686e-01,
-2.61077154e-01, -2.54613316e-01, -2.48085533e-01,
-2.41512621e-01, -2.34914228e-01, -2.28304227e-01,
-2.21698709e-01, -2.15111790e-01, -2.08557378e-01,
-2.02046932e-01, -1.95593257e-01, -1.89205713e-01,
-1.82894125e-01, -1.76667487e-01, -1.70533845e-01,
-1.64500052e-01, -1.58572168e-01, -1.52757226e-01,
-1.47059420e-01, -1.41483306e-01, -1.36030852e-01,
-1.30707436e-01, -1.25514609e-01, -1.20454177e-01,
-1.15528801e-01, -1.10739013e-01, -1.06085743e-01,
-1.01569123e-01, -9.71892030e-02, -9.29458136e-02,
-8.88381166e-02, -8.48650680e-02, -8.10254208e-02,
-7.73175634e-02, -7.37403055e-02, -7.02912328e-02,
-6.69682483e-02, -6.37692650e-02, -6.06920205e-02,
-5.77337929e-02, -5.48921348e-02, -5.21646867e-02,
-4.95477414e-02, -4.70389767e-02, -4.46354591e-02,
-4.23342732e-02, -4.01324724e-02, -3.80270680e-02,
-3.60150088e-02, -3.40935002e-02, -3.22594957e-02,
-3.05102832e-02, -2.88423561e-02, -2.72532093e-02,
-2.57399567e-02, -2.42998514e-02, -2.29299918e-02,
-2.16277350e-02, -2.03903778e-02, -1.92153631e-02,
-1.81001754e-02, -1.70422861e-02, -1.60392718e-02,
-1.50888288e-02, -1.41886105e-02, -1.33363942e-02,
-1.25300691e-02, -1.17675688e-02, -1.10467962e-02,
-1.03658268e-02, -9.72278987e-03, -9.11586198e-03,
-8.54328318e-03, -8.00342971e-03, -7.49458101e-03,
-7.01522684e-03, -6.56386431e-03, -6.13904657e-03,
-5.73940360e-03, -5.36362082e-03, -5.01044447e-03,
-4.67865405e-03, -4.36709187e-03, -4.07466314e-03,
-3.80031327e-03, -3.54302859e-03, -3.30186354e-03,
-3.07590905e-03, -2.86429912e-03, -2.66621038e-03,
-2.48084980e-03, -2.30748104e-03, -2.14539848e-03,
-1.99393010e-03, -1.85244050e-03, -1.72032918e-03,
-1.59702766e-03, -1.48199520e-03, -1.37472502e-03,
-1.27474871e-03, -1.18157952e-03, -1.09480410e-03,
-1.01402173e-03, -9.38840508e-04, -8.68903841e-04,
-8.03874117e-04, -7.43432074e-04, -6.87276874e-04,
-6.35125311e-04, -5.86711663e-04, -5.41784622e-04,
-5.00111684e-04, -4.61472062e-04, -4.25658785e-04,
-3.92479218e-04, -3.61751287e-04, -3.33306490e-04,
-3.06984067e-04, -2.82638365e-04, -2.60125033e-04,
-2.39316612e-04, -2.20091674e-04, -2.02336575e-04,
-1.85945178e-04, -1.70818777e-04, -1.56865456e-04,
-1.43999090e-04, -1.32139581e-04, -1.21212446e-04,
-1.11148324e-04, -1.01882524e-04, -9.33549685e-05,
-8.55100812e-05, -7.82958428e-05, -7.16640610e-05,
-6.55701181e-05, -5.99725461e-05, -5.48329158e-05,
-5.01155676e-05, -4.57874228e-05, -4.18178220e-05,
-3.81785259e-05, -3.48433627e-05, -3.17880462e-05,
-2.89901290e-05, -2.64289259e-05, -2.40852562e-05,
-2.19414879e-05, -1.99812679e-05, -1.81896235e-05,
-1.65527304e-05, -1.50576435e-05, -1.36926440e-05,
-1.24469011e-05, -1.13104101e-05, -1.02739903e-05,
-9.32917533e-06, -8.46820604e-06, -7.68392335e-06,
-6.96976052e-06, -6.31970564e-06, -5.72821337e-06,
-5.19020638e-06, -4.70105356e-06, -4.25645682e-06,
-3.85252588e-06, -3.48567728e-06, -3.15262702e-06,
-2.85037418e-06])}
# hamiltonian matrix elements
H = \
{(0, 0, 0): array([ -7.54272275e-01, -9.21621305e-01, -1.20163052e+00,
-1.46942042e+00, -1.66789672e+00, -1.78530445e+00,
-1.82999543e+00, -1.81746023e+00, -1.76409081e+00,
-1.68468399e+00, -1.59114688e+00, -1.49254436e+00,
-1.39535044e+00, -1.30387923e+00, -1.22074614e+00,
-1.14734188e+00, -1.08407929e+00, -1.03075854e+00,
-9.86774672e-01, -9.51204700e-01, -9.23059702e-01,
-9.01270283e-01, -8.84783657e-01, -8.72591482e-01,
-8.63770074e-01, -8.57500997e-01, -8.53038589e-01,
-8.49743121e-01, -8.47083636e-01, -8.44616150e-01,
-8.41963829e-01, -8.38846869e-01, -8.35052423e-01,
-8.30391443e-01, -8.24783098e-01, -8.18142442e-01,
-8.10423530e-01, -8.01644862e-01, -7.91818512e-01,
-7.80984802e-01, -7.69188341e-01, -7.56508662e-01,
-7.43013415e-01, -7.28777723e-01, -7.13884044e-01,
-6.98418789e-01, -6.82463232e-01, -6.66091145e-01,
-6.49388436e-01, -6.32426158e-01, -6.15273727e-01,
-5.97992626e-01, -5.80648507e-01, -5.63295323e-01,
-5.45987365e-01, -5.28771167e-01, -5.11695156e-01,
-4.94791623e-01, -4.78097785e-01, -4.61641126e-01,
-4.45452207e-01, -4.29554896e-01, -4.13966117e-01,
-3.98704339e-01, -3.83785100e-01, -3.69216635e-01,
-3.55011833e-01, -3.41174564e-01, -3.27713143e-01,
-3.14628548e-01, -3.01922109e-01, -2.89595316e-01,
-2.77646922e-01, -2.66074332e-01, -2.54876020e-01,
-2.44044827e-01, -2.33577789e-01, -2.23470732e-01,
-2.13714783e-01, -2.04305399e-01, -1.95234306e-01,
-1.86496543e-01, -1.78082063e-01, -1.69984135e-01,
-1.62194816e-01, -1.54707245e-01, -1.47511909e-01,
-1.40601243e-01, -1.33966842e-01, -1.27600962e-01,
-1.21495057e-01, -1.15641823e-01, -1.10032693e-01,
-1.04659903e-01, -9.95157722e-02, -9.45927330e-02,
-8.98831332e-02, -8.53793636e-02, -8.10747981e-02,
-7.69618697e-02, -7.30337865e-02, -6.92829800e-02,
-6.57037421e-02, -6.22891256e-02, -5.90326036e-02,
-5.59285323e-02, -5.29705844e-02, -5.01530558e-02,
-4.74701955e-02, -4.49165250e-02, -4.24867934e-02,
-4.01758121e-02, -3.79784908e-02, -3.58900405e-02,
-3.39057106e-02, -3.20211820e-02, -3.02318393e-02,
-2.85334957e-02, -2.69221314e-02, -2.53938815e-02,
-2.39448744e-02, -2.25715444e-02, -2.12704971e-02,
-2.00380568e-02, -1.88711464e-02, -1.77666663e-02,
-1.67216087e-02, -1.57331462e-02, -1.47984907e-02,
-1.39150095e-02, -1.30802307e-02, -1.22916996e-02,
-1.15471827e-02, -1.08442809e-02, -1.01810023e-02,
-9.55530879e-03, -8.96529406e-03, -8.40906216e-03,
-7.88487224e-03, -7.39102678e-03, -6.92594301e-03,
-6.48809806e-03, -6.07602001e-03, -5.68831937e-03,
-5.32369512e-03, -4.98086353e-03, -4.65862999e-03,
-4.35587328e-03, -4.07152185e-03, -3.80451172e-03,
-3.55387973e-03, -3.31870532e-03, -3.09810773e-03,
-2.89124946e-03, -2.69735893e-03, -2.51565445e-03,
-2.34544151e-03, -2.18604583e-03, -2.03682601e-03,
-1.89718119e-03, -1.76654227e-03, -1.64437405e-03,
-1.53016101e-03, -1.42341929e-03, -1.32369720e-03,
-1.23056321e-03, -1.14360899e-03, -1.06245363e-03,
-9.86737349e-04, -9.16119763e-04, -8.50281347e-04,
-7.88914190e-04, -7.31738294e-04, -6.78484078e-04,
-6.28899583e-04, -5.82747205e-04, -5.39804371e-04,
-4.99861967e-04, -4.62722633e-04, -4.28202232e-04,
-3.96131664e-04, -3.66337340e-04, -3.38672028e-04,
-3.12994079e-04, -2.89165690e-04, -2.67062296e-04,
-2.46566656e-04, -2.27568510e-04, -2.09964517e-04,
-1.93657887e-04, -1.78558299e-04, -1.64580705e-04,
-1.51646782e-04, -1.39682603e-04, -1.28619124e-04,
-1.18392356e-04, -1.08942102e-04, -1.00212921e-04,
-9.21519842e-05, -8.47118696e-05, -7.78454419e-05,
-7.15115117e-05, -6.56707961e-05, -6.02868404e-05,
-5.53254624e-05, -5.07551907e-05, -4.65467723e-05,
-4.26728449e-05, -3.91080573e-05, -3.58289245e-05,
-3.28136278e-05, -3.00418759e-05, -2.74948713e-05,
-2.51552879e-05, -2.30069367e-05, -2.10348680e-05,
-1.92252791e-05, -1.75653686e-05, -1.60433072e-05,
-1.46481403e-05, -1.33697232e-05, -1.21986813e-05,
-1.11264006e-05, -1.01449141e-05, -9.24684129e-06,
-8.42537267e-06, -7.67425144e-06, -6.98768138e-06,
-6.36034779e-06, -5.78732991e-06, -5.26413176e-06,
-4.78661431e-06, -4.35088936e-06, -3.95346118e-06,
-3.59109799e-06, -3.26081929e-06, -2.95989612e-06,
-2.68581415e-06, -2.43627165e-06, -2.20914853e-06,
-2.00250307e-06, -1.81456099e-06, -1.64368586e-06,
-1.48838310e-06, -1.34729167e-06, -1.21914624e-06,
-1.10280714e-06, -9.97224826e-07, -9.01436571e-07,
-8.14565346e-07]),
(0, 1, 2): array([ 2.99979515e-17, -5.05610794e-01, -9.16078680e-01,
-1.19982511e+00, -1.36361928e+00, -1.42870693e+00,
-1.41979951e+00, -1.35997757e+00, -1.26853956e+00,
-1.16072700e+00, -1.04776061e+00, -9.37607362e-01,
-8.35508960e-01, -7.44659075e-01, -6.66683119e-01,
-6.02122398e-01, -5.50702807e-01, -5.11628358e-01,
-4.83816407e-01, -4.65950687e-01, -4.56703584e-01,
-4.54725503e-01, -4.58716057e-01, -4.67476800e-01,
-4.79889988e-01, -4.94988468e-01, -5.11889618e-01,
-5.29828423e-01, -5.48171088e-01, -5.66377985e-01,
-5.83993133e-01, -6.00668473e-01, -6.16130143e-01,
-6.30144909e-01, -6.42586838e-01, -6.53340733e-01,
-6.62335628e-01, -6.69573511e-01, -6.75052659e-01,
-6.78809215e-01, -6.80886528e-01, -6.81371803e-01,
-6.80338164e-01, -6.77872340e-01, -6.74066979e-01,
-6.69025005e-01, -6.62841640e-01, -6.55612071e-01,
-6.47440931e-01, -6.38420085e-01, -6.28640205e-01,
-6.18183392e-01, -6.07139380e-01, -5.95584105e-01,
-5.83594386e-01, -5.71239883e-01, -5.58591904e-01,
-5.45703246e-01, -5.32633754e-01, -5.19429796e-01,
-5.06144656e-01, -4.92822127e-01, -4.79496101e-01,
-4.66204760e-01, -4.52981535e-01, -4.39850848e-01,
-4.26843066e-01, -4.13976241e-01, -4.01274964e-01,
-3.88752963e-01, -3.76425369e-01, -3.64306856e-01,
-3.52407874e-01, -3.40737495e-01, -3.29305501e-01,
-3.18113717e-01, -3.07169676e-01, -2.96479338e-01,
-2.86040863e-01, -2.75859069e-01, -2.65932707e-01,
-2.56265543e-01, -2.46853048e-01, -2.37695609e-01,
-2.28791662e-01, -2.20140885e-01, -2.11738600e-01,
-2.03582815e-01, -1.95670045e-01, -1.87997495e-01,
-1.80560849e-01, -1.73357485e-01, -1.66382456e-01,
-1.59631875e-01, -1.53101668e-01, -1.46787627e-01,
-1.40685140e-01, -1.34789367e-01, -1.29096883e-01,
-1.23602352e-01, -1.18301441e-01, -1.13188132e-01,
-1.08259502e-01, -1.03510087e-01, -9.89349038e-02,
-9.45302705e-02, -9.02909899e-02, -8.62127251e-02,
-8.22907755e-02, -7.85206943e-02, -7.48981895e-02,
-7.14188183e-02, -6.80780984e-02, -6.48718391e-02,
-6.17956986e-02, -5.88459443e-02, -5.60180275e-02,
-5.33079904e-02, -5.07120097e-02, -4.82263468e-02,
-4.58470380e-02, -4.35705376e-02, -4.13934652e-02,
-3.93116278e-02, -3.73219207e-02, -3.54209916e-02,
-3.36055171e-02, -3.18723711e-02, -3.02183610e-02,
-2.86404504e-02, -2.71358121e-02, -2.57015012e-02,
-2.43349184e-02, -2.30329397e-02, -2.17932318e-02,
-2.06132243e-02, -1.94905190e-02, -1.84226213e-02,
-1.74072771e-02, -1.64422212e-02, -1.55253469e-02,
-1.46545888e-02, -1.38279014e-02, -1.30433408e-02,
-1.22990845e-02, -1.15932668e-02, -1.09241528e-02,
-1.02901082e-02, -9.68954536e-03, -9.12082580e-03,
-8.58248856e-03, -8.07311476e-03, -7.59132359e-03,
-7.13578409e-03, -6.70527726e-03, -6.29850336e-03,
-5.91433571e-03, -5.55165159e-03, -5.20936785e-03,
-4.88646354e-03, -4.58195976e-03, -4.29492791e-03,
-4.02445489e-03, -3.76967682e-03, -3.52978255e-03,
-3.30398438e-03, -3.09152732e-03, -2.89170359e-03,
-2.70383656e-03, -2.52727819e-03, -2.36141453e-03,
-2.20564229e-03, -2.05941447e-03, -1.92219455e-03,
-1.79347673e-03, -1.67277877e-03, -1.55964492e-03,
-1.45364267e-03, -1.35435900e-03, -1.26140568e-03,
-1.17442567e-03, -1.09303672e-03, -1.01692274e-03,
-9.45773889e-04, -8.79281966e-04, -8.17168971e-04,
-7.59170362e-04, -7.05034645e-04, -6.54523785e-04,
-6.07412696e-04, -5.63489510e-04, -5.22552469e-04,
-4.84414882e-04, -4.48898416e-04, -4.15834993e-04,
-3.85067783e-04, -3.56447608e-04, -3.29836334e-04,
-3.05100449e-04, -2.82120225e-04, -2.60773681e-04,
-2.40954950e-04, -2.22561634e-04, -2.05497953e-04,
-1.89673201e-04, -1.75003342e-04, -1.61409632e-04,
-1.48817710e-04, -1.37158153e-04, -1.26366161e-04,
-1.16381038e-04, -1.07145864e-04, -9.86075018e-05,
-9.07166434e-05, -8.34267894e-05, -7.66946897e-05,
-7.04801295e-05, -6.47455122e-05, -5.94558430e-05,
-5.45784647e-05, -5.00828991e-05, -4.59407727e-05,
-4.21258457e-05, -3.86136652e-05, -3.53814025e-05,
-3.24078436e-05, -2.96733661e-05, -2.71596520e-05,
-2.48497964e-05, -2.27280287e-05, -2.07798534e-05,
-1.89918345e-05, -1.73512293e-05, -1.58465354e-05,
-1.44670392e-05, -1.32027900e-05, -1.20446192e-05,
-1.09840200e-05, -1.00131557e-05, -9.12475774e-06,
-8.31212623e-06, -7.56910175e-06, -6.88996313e-06,
-6.26945314e-06, -5.70275625e-06, -5.18536106e-06,
-4.71318398e-06, -4.28243912e-06, -3.88963086e-06,
-3.53155593e-06]),
(1, 0, 4): array([ 3.88879812e-19, -2.58226172e-02, -7.52559897e-02,
-1.50407248e-01, -2.43125833e-01, -3.42798763e-01,
-4.39953686e-01, -5.27216404e-01, -5.99715041e-01,
-6.54623591e-01, -6.90888231e-01, -7.08731162e-01,
-7.09246932e-01, -6.94149322e-01, -6.65457293e-01,
-6.25283751e-01, -5.75744581e-01, -5.18893675e-01,
-4.56567467e-01, -3.90515153e-01, -3.22216758e-01,
-2.53005261e-01, -1.84026674e-01, -1.16215235e-01,
-5.03848947e-02, 1.28660509e-02, 7.30311586e-02,
1.29730920e-01, 1.82714888e-01, 2.31813716e-01,
2.76927910e-01, 3.18045096e-01, 3.55204023e-01,
3.88464627e-01, 4.17965263e-01, 4.43837643e-01,
4.66233996e-01, 4.85357221e-01, 5.01386103e-01,
5.14520499e-01, 5.24949920e-01, 5.32887453e-01,
5.38517956e-01, 5.42028829e-01, 5.43602492e-01,
5.43423319e-01, 5.41656177e-01, 5.38460253e-01,
5.33990787e-01, 5.28386696e-01, 5.21779266e-01,
5.14285809e-01, 5.06025758e-01, 4.97101730e-01,
4.87611967e-01, 4.77645333e-01, 4.67287387e-01,
4.56606531e-01, 4.45673411e-01, 4.34544259e-01,
4.23279292e-01, 4.11928353e-01, 4.00531315e-01,
3.89130087e-01, 3.77760628e-01, 3.66451344e-01,
3.55232857e-01, 3.44125989e-01, 3.33154875e-01,
3.22334873e-01, 3.11681912e-01, 3.01210382e-01,
2.90930879e-01, 2.80853024e-01, 2.70985637e-01,
2.61332434e-01, 2.51899978e-01, 2.42693648e-01,
2.33713298e-01, 2.24962786e-01, 2.16441898e-01,
2.08153232e-01, 2.00094045e-01, 1.92264435e-01,
1.84663104e-01, 1.77289312e-01, 1.70139556e-01,
1.63211820e-01, 1.56503039e-01, 1.50010446e-01,
1.43730304e-01, 1.37659730e-01, 1.31794426e-01,
1.26130633e-01, 1.20664418e-01, 1.15391717e-01,
1.10308175e-01, 1.05409252e-01, 1.00691245e-01,
9.61492678e-02, 9.17790334e-02, 8.75752271e-02,
8.35343695e-02, 7.96514865e-02, 7.59218716e-02,
7.23416213e-02, 6.89059319e-02, 6.56105475e-02,
6.24510072e-02, 5.94230211e-02, 5.65224306e-02,
5.37450188e-02, 5.10865617e-02, 4.85430653e-02,
4.61104659e-02, 4.37851094e-02, 4.15628774e-02,
3.94400654e-02, 3.74130733e-02, 3.54784035e-02,
3.36324558e-02, 3.18719264e-02, 3.01936465e-02,
2.85940032e-02, 2.70700707e-02, 2.56188243e-02,
2.42372828e-02, 2.29226371e-02, 2.16720583e-02,
2.04828634e-02, 1.93525230e-02, 1.82784717e-02,
1.72583910e-02, 1.62896750e-02, 1.53702307e-02,
1.44978507e-02, 1.36704658e-02, 1.28859720e-02,
1.21424430e-02, 1.14379707e-02, 1.07707734e-02,
1.01391169e-02, 9.54130345e-03, 8.97572090e-03,
8.44085481e-03, 7.93518198e-03, 7.45728243e-03,
7.00581789e-03, 6.57949744e-03, 6.17701165e-03,
5.79718233e-03, 5.43887235e-03, 5.10098278e-03,
4.78245978e-03, 4.48233347e-03, 4.19959480e-03,
3.93335645e-03, 3.68274454e-03, 3.44692047e-03,
3.22509512e-03, 3.01651543e-03, 2.82046927e-03,
2.63626220e-03, 2.46323884e-03, 2.30078509e-03,
2.14830838e-03, 2.00524348e-03, 1.87106123e-03,
1.74525825e-03, 1.62735456e-03, 1.51689705e-03,
1.41344354e-03, 1.31659290e-03, 1.22595414e-03,
1.14116002e-03, 1.06186178e-03, 9.87730924e-04,
9.18456875e-04, 8.53744305e-04, 7.93316433e-04,
7.36919057e-04, 6.84283467e-04, 6.35186057e-04,
5.89409037e-04, 5.46737170e-04, 5.06976494e-04,
4.69943123e-04, 4.35462930e-04, 4.03371677e-04,
3.73514604e-04, 3.45746524e-04, 3.19929687e-04,
2.95936982e-04, 2.73647429e-04, 2.52947398e-04,
2.33731177e-04, 2.15898563e-04, 1.99357045e-04,
1.84017524e-04, 1.69800318e-04, 1.56624537e-04,
1.44420185e-04, 1.33119761e-04, 1.22660329e-04,
1.12982459e-04, 1.04031270e-04, 9.57554880e-05,
8.81068280e-05, 8.10403508e-05, 7.45142350e-05,
6.84894198e-05, 6.29293738e-05, 5.78000959e-05,
5.30701346e-05, 4.87098975e-05, 4.46919437e-05,
4.09908314e-05, 3.75828410e-05, 3.44459535e-05,
3.15596749e-05, 2.89049304e-05, 2.64640076e-05,
2.42205778e-05, 2.21594623e-05, 2.02665228e-05,
1.85286553e-05, 1.69337759e-05, 1.54706255e-05,
1.41288484e-05, 1.28987998e-05, 1.17716470e-05,
1.07392212e-05, 9.79377996e-06, 8.92836605e-06,
8.13651349e-06, 7.41222797e-06, 6.75000208e-06,
6.14473611e-06, 5.59174544e-06, 5.08669288e-06,
4.62559115e-06, 4.20478457e-06, 3.82088290e-06,
3.47077915e-06, 3.15163649e-06, 2.86079760e-06,
2.59586791e-06, 2.35463010e-06, 2.13504101e-06,
1.93523325e-06]),
(1, 1, 5): array([ -1.82157381e-01, -1.92035465e-01, -2.19299667e-01,
-2.60585667e-01, -3.11829589e-01, -3.69339662e-01,
-4.29658327e-01, -4.90189672e-01, -5.48711207e-01,
-6.03760486e-01, -6.54198244e-01, -6.99330135e-01,
-7.38789854e-01, -7.72420134e-01, -8.00241140e-01,
-8.22457878e-01, -8.39312844e-01, -8.51129676e-01,
-8.58303593e-01, -8.61190875e-01, -8.60216823e-01,
-8.55777010e-01, -8.48261159e-01, -8.38033139e-01,
-8.25451518e-01, -8.10855125e-01, -7.94543395e-01,
-7.76798422e-01, -7.57883770e-01, -7.38038658e-01,
-7.17466722e-01, -6.96364069e-01, -6.74906844e-01,
-6.53226186e-01, -6.31476293e-01, -6.09761929e-01,
-5.88175784e-01, -5.66815154e-01, -5.45750188e-01,
-5.25041520e-01, -5.04735873e-01, -4.84883276e-01,
-4.65517398e-01, -4.46659500e-01, -4.28331740e-01,
-4.10549206e-01, -3.93324818e-01, -3.76656202e-01,
-3.60552009e-01, -3.45009093e-01, -3.30022675e-01,
-3.15585919e-01, -3.01690884e-01, -2.88326458e-01,
-2.75482911e-01, -2.63146384e-01, -2.51306841e-01,
-2.39947571e-01, -2.29054596e-01, -2.18613677e-01,
-2.08609811e-01, -1.99028248e-01, -1.89854807e-01,
-1.81074004e-01, -1.72672143e-01, -1.64633341e-01,
-1.56944852e-01, -1.49592278e-01, -1.42562968e-01,
-1.35843798e-01, -1.29421430e-01, -1.23283919e-01,
-1.17420154e-01, -1.11817876e-01, -1.06467196e-01,
-1.01356223e-01, -9.64755561e-02, -9.18155395e-02,
-8.73662118e-02, -8.31189339e-02, -7.90648510e-02,
-7.51961484e-02, -7.15041576e-02, -6.79817353e-02,
-6.46212835e-02, -6.14162167e-02, -5.83592182e-02,
-5.54442461e-02, -5.26648200e-02, -5.00153040e-02,
-4.74898157e-02, -4.50832601e-02, -4.27901025e-02,
-4.06053997e-02, -3.85243672e-02, -3.65425011e-02,
-3.46554178e-02, -3.28587645e-02, -3.11487633e-02,
-2.95212924e-02, -2.79728380e-02, -2.64996240e-02,
-2.50985623e-02, -2.37662454e-02, -2.24993682e-02,
-2.12952482e-02, -2.01508068e-02, -1.90634231e-02,
-1.80304386e-02, -1.70493476e-02, -1.61177299e-02,
-1.52333687e-02, -1.43939420e-02, -1.35974361e-02,
-1.28417927e-02, -1.21251393e-02, -1.14455028e-02,
-1.08011831e-02, -1.01904850e-02, -9.61183200e-03,
-9.06363652e-03, -8.54446143e-03, -8.05288813e-03,
-7.58755120e-03, -7.14714884e-03, -6.73049595e-03,
-6.33636089e-03, -5.96369658e-03, -5.61135713e-03,
-5.27837301e-03, -4.96375737e-03, -4.66655714e-03,
-4.38590019e-03, -4.12093618e-03, -3.87086543e-03,
-3.63491083e-03, -3.41235052e-03, -3.20247883e-03,
-3.00462568e-03, -2.81815422e-03, -2.64247357e-03,
-2.47700228e-03, -2.32119574e-03, -2.17453063e-03,
-2.03652478e-03, -1.90669122e-03, -1.78458739e-03,
-1.66979353e-03, -1.56191438e-03, -1.46054259e-03,
-1.36532240e-03, -1.27591236e-03, -1.19198714e-03,
-1.11323389e-03, -1.03935535e-03, -9.70072690e-04,
-9.05120898e-04, -8.44251496e-04, -7.87224099e-04,
-7.33813752e-04, -6.83806770e-04, -6.37006004e-04,
-5.93216760e-04, -5.52257555e-04, -5.13961304e-04,
-4.78164422e-04, -4.44718268e-04, -4.13474479e-04,
-3.84300392e-04, -3.57067701e-04, -3.31659287e-04,
-3.07954680e-04, -2.85852678e-04, -2.65247540e-04,
-2.46046861e-04, -2.28160074e-04, -2.11503130e-04,
-1.95997079e-04, -1.81567832e-04, -1.68144700e-04,
-1.55662176e-04, -1.44057616e-04, -1.33273695e-04,
-1.23256152e-04, -1.13953126e-04, -1.05316582e-04,
-9.73018176e-05, -8.98666663e-05, -8.29716245e-05,
-7.65797212e-05, -7.06564094e-05, -6.51691613e-05,
-6.00875105e-05, -5.53833487e-05, -5.10301839e-05,
-4.70031605e-05, -4.32791866e-05, -3.98367973e-05,
-3.66555199e-05, -3.37169526e-05, -3.10032039e-05,
-2.84982324e-05, -2.61865570e-05, -2.40544141e-05,
-2.20880763e-05, -2.02754863e-05, -1.86052787e-05,
-1.70667590e-05, -1.56500392e-05, -1.43459864e-05,
-1.31460701e-05, -1.20423784e-05, -1.10275377e-05,
-1.00947247e-05, -9.23760867e-06, -8.45033823e-06,
-7.72748649e-06, -7.06401167e-06, -6.45526874e-06,
-5.89694695e-06, -5.38503173e-06, -4.91585105e-06,
-4.48598997e-06, -4.09229188e-06, -3.73184643e-06,
-3.40196110e-06, -3.10015784e-06, -2.82414555e-06,
-2.57180262e-06, -2.34119620e-06, -2.13051875e-06,
-1.93813376e-06, -1.76249536e-06, -1.60221726e-06,
-1.45599975e-06, -1.32266058e-06, -1.20111303e-06,
-1.09035787e-06, -9.89465217e-07, -8.97593026e-07,
-8.13965444e-07, -7.37870582e-07, -6.68653447e-07,
-6.05719278e-07, -5.48516427e-07, -4.96538955e-07,
-4.49327932e-07, -4.06465787e-07, -3.67560871e-07,
-3.32261804e-07]),
(1, 1, 6): array([ -1.82167505e-01, -2.10454249e-01, -2.88338882e-01,
-4.00611209e-01, -5.31108065e-01, -6.65880544e-01,
-7.94245365e-01, -9.08727126e-01, -1.00465750e+00,
-1.07963870e+00, -1.13294921e+00, -1.16512707e+00,
-1.17745913e+00, -1.17178104e+00, -1.15017721e+00,
-1.11483465e+00, -1.06792678e+00, -1.01160308e+00,
-9.47805555e-01, -8.78392902e-01, -8.04986210e-01,
-7.29061658e-01, -6.51920167e-01, -5.74643813e-01,
-4.98214590e-01, -4.23379418e-01, -3.50796572e-01,
-2.80992405e-01, -2.14349602e-01, -1.51167027e-01,
-9.16618251e-02, -3.59518121e-02, 1.59008603e-02,
6.38784218e-02, 1.08035078e-01, 1.48436067e-01,
1.85177569e-01, 2.18410839e-01, 2.48271018e-01,
2.74923772e-01, 2.98529152e-01, 3.19278248e-01,
3.37335586e-01, 3.52882890e-01, 3.66091721e-01,
3.77141162e-01, 3.86188174e-01, 3.93396791e-01,
3.98921260e-01, 4.02904724e-01, 4.05483690e-01,
4.06781009e-01, 4.06926703e-01, 4.06031595e-01,
4.04204057e-01, 4.01544443e-01, 3.98149670e-01,
3.94098333e-01, 3.89473798e-01, 3.84341299e-01,
3.78775343e-01, 3.72836859e-01, 3.66573884e-01,
3.60040734e-01, 3.53283243e-01, 3.46339349e-01,
3.39250070e-01, 3.32043776e-01, 3.24754565e-01,
3.17404378e-01, 3.10017749e-01, 3.02616749e-01,
2.95218495e-01, 2.87839920e-01, 2.80496471e-01,
2.73197163e-01, 2.65955194e-01, 2.58782910e-01,
2.51684109e-01, 2.44669301e-01, 2.37742973e-01,
2.30914356e-01, 2.24184506e-01, 2.17559121e-01,
2.11042163e-01, 2.04638385e-01, 1.98348498e-01,
1.92175558e-01, 1.86121172e-01, 1.80187426e-01,
1.74374991e-01, 1.68685913e-01, 1.63119992e-01,
1.57678001e-01, 1.52360345e-01, 1.47167162e-01,
1.42098126e-01, 1.37152528e-01, 1.32331050e-01,
1.27632290e-01, 1.23055729e-01, 1.18598919e-01,
1.14262653e-01, 1.10044896e-01, 1.05943982e-01,
1.01959545e-01, 9.80894464e-02, 9.43323971e-02,
9.06865671e-02, 8.71503079e-02, 8.37220284e-02,
8.03998544e-02, 7.71817609e-02, 7.40659375e-02,
7.10502851e-02, 6.81333298e-02, 6.53125085e-02,
6.25858466e-02, 5.99514394e-02, 5.74073767e-02,
5.49513644e-02, 5.25815272e-02, 5.02961025e-02,
4.80921809e-02, 4.59681425e-02, 4.39219559e-02,
4.19515253e-02, 4.00549222e-02, 3.82300293e-02,
3.64748457e-02, 3.47875605e-02, 3.31661110e-02,
3.16088223e-02, 3.01131989e-02, 2.86777609e-02,
2.73006112e-02, 2.59800178e-02, 2.47140250e-02,
2.35009428e-02, 2.23389883e-02, 2.12265340e-02,
2.01619388e-02, 1.91435181e-02, 1.81696682e-02,
1.72388997e-02, 1.63495892e-02, 1.55002482e-02,
1.46894793e-02, 1.39158973e-02, 1.31779757e-02,
1.24744083e-02, 1.18039003e-02, 1.11651631e-02,
1.05569348e-02, 9.97808531e-03, 9.42730155e-03,
8.90350614e-03, 8.40558299e-03, 7.93243708e-03,
7.48303430e-03, 7.05637060e-03, 6.65148691e-03,
6.26741302e-03, 5.90322552e-03, 5.55805691e-03,
5.23104665e-03, 4.92135819e-03, 4.62820750e-03,
4.35083529e-03, 4.08850458e-03, 3.84051192e-03,
3.60614681e-03, 3.38477521e-03, 3.17575864e-03,
2.97849138e-03, 2.79238947e-03, 2.61689719e-03,
2.45148202e-03, 2.29562885e-03, 2.14885140e-03,
2.01070554e-03, 1.88068198e-03, 1.75838243e-03,
1.64340631e-03, 1.53534171e-03, 1.43382221e-03,
1.33849492e-03, 1.24902069e-03, 1.16507539e-03,
1.08634978e-03, 1.01255072e-03, 9.43395578e-04,
8.78622945e-04, 8.17979544e-04, 7.61224782e-04,
7.08133304e-04, 6.58488076e-04, 6.12088007e-04,
5.68734982e-04, 5.28253525e-04, 4.90457443e-04,
4.55189058e-04, 4.22292856e-04, 3.91622482e-04,
3.63037872e-04, 3.36408913e-04, 3.11612838e-04,
2.88532554e-04, 2.67058115e-04, 2.47086308e-04,
2.28519761e-04, 2.11266439e-04, 1.95239907e-04,
1.80359653e-04, 1.66548963e-04, 1.53736126e-04,
1.41854127e-04, 1.30839871e-04, 1.20634290e-04,
1.11181894e-04, 1.02430548e-04, 9.43314163e-05,
8.68391723e-05, 7.99112989e-05, 7.35078101e-05,
6.75913327e-05, 6.21271544e-05, 5.70826191e-05,
5.24274875e-05, 4.81333269e-05, 4.41739469e-05,
4.05249340e-05, 3.71628309e-05, 3.40665211e-05,
3.12161983e-05, 2.85933421e-05, 2.61808207e-05,
2.39626364e-05, 2.19239932e-05, 2.00510768e-05,
1.83310972e-05, 1.67522509e-05, 1.53034960e-05,
1.39746462e-05, 1.27563454e-05, 1.16397385e-05,
1.06168148e-05, 9.68009476e-06, 8.82263354e-06,
8.03804343e-06])}
# dipoles
Dipole = \
{(0, 0, 3): array([ -5.74408734e-18, -1.01986255e-02, -2.04693651e-02,
-3.08451520e-02, -4.13129302e-02, -5.18292287e-02,
-6.23359990e-02, -7.27751939e-02, -8.30949656e-02,
-9.32591111e-02, -1.03245585e-01, -1.13045490e-01,
-1.22663185e-01, -1.32109473e-01, -1.41403584e-01,
-1.50566312e-01, -1.59617551e-01, -1.68580059e-01,
-1.77469507e-01, -1.86296840e-01, -1.95071467e-01,
-2.03794021e-01, -2.12465153e-01, -2.21072309e-01,
-2.29605152e-01, -2.38046804e-01, -2.46373357e-01,
-2.54563263e-01, -2.62589624e-01, -2.70426904e-01,
-2.78043494e-01, -2.85411762e-01, -2.92504399e-01,
-2.99288628e-01, -3.05745163e-01, -3.11843886e-01,
-3.17559956e-01, -3.22875089e-01, -3.27770401e-01,
-3.32228437e-01, -3.36233149e-01, -3.39777021e-01,
-3.42851677e-01, -3.45447656e-01, -3.47562868e-01,
-3.49196885e-01, -3.50352807e-01, -3.51029638e-01,
-3.51238224e-01, -3.50985130e-01, -3.50279751e-01,
-3.49132047e-01, -3.47556449e-01, -3.45565184e-01,
-3.43175613e-01, -3.40402876e-01, -3.37268393e-01,
-3.33785851e-01, -3.29974990e-01, -3.25854230e-01,
-3.21444314e-01, -3.16765408e-01, -3.11835882e-01,
-3.06675964e-01, -3.01306813e-01, -2.95744520e-01,
-2.90011901e-01, -2.84124931e-01, -2.78105141e-01,
-2.71968934e-01, -2.65732742e-01, -2.59415038e-01,
-2.53032354e-01, -2.46599516e-01, -2.40134023e-01,
-2.33646512e-01, -2.27153053e-01, -2.20667981e-01,
-2.14200540e-01, -2.07764787e-01, -2.01369873e-01,
-1.95029012e-01, -1.88748531e-01, -1.82538497e-01,
-1.76406971e-01, -1.70363120e-01, -1.64411861e-01,
-1.58560131e-01, -1.52813272e-01, -1.47176851e-01,
-1.41654623e-01, -1.36251624e-01, -1.30970373e-01,
-1.25813849e-01, -1.20784612e-01, -1.15884736e-01,
-1.11115505e-01, -1.06477746e-01, -1.01973086e-01,
-9.76012124e-02, -9.33623131e-02, -8.92548744e-02,
-8.52797380e-02, -8.14352431e-02, -7.77199675e-02,
-7.41332655e-02, -7.06730577e-02, -6.73376724e-02,
-6.41249849e-02, -6.10328840e-02, -5.80592231e-02,
-5.52015107e-02, -5.24571329e-02, -4.98235143e-02,
-4.72978706e-02, -4.48777802e-02, -4.25600636e-02,
-4.03418889e-02, -3.82204708e-02, -3.61929916e-02,
-3.42563994e-02, -3.24078937e-02, -3.06447770e-02,
-2.89636035e-02, -2.73618145e-02, -2.58365642e-02,
-2.43850476e-02, -2.30045284e-02, -2.16922624e-02,
-2.04455543e-02, -1.92618929e-02, -1.81386655e-02,
-1.70735109e-02, -1.60636506e-02, -1.51069335e-02,
-1.42010256e-02, -1.33437209e-02, -1.25327553e-02,
-1.17660565e-02, -1.10415527e-02, -1.03572948e-02,
-9.71138653e-03, -9.10195638e-03, -8.52722431e-03,
-7.98550846e-03, -7.47512476e-03, -6.99449077e-03,
-6.54211179e-03, -6.11654177e-03, -5.71633321e-03,
-5.34016409e-03, -4.98676111e-03, -4.65489552e-03,
-4.34339158e-03, -4.05116601e-03, -3.77709763e-03,
-3.52019709e-03, -3.27949291e-03, -3.05405728e-03,
-2.84301715e-03, -2.64554155e-03, -2.46084388e-03,
-2.28816499e-03, -2.12678944e-03, -1.97604556e-03,
-1.83529100e-03, -1.70391366e-03, -1.58134465e-03,
-1.46704202e-03, -1.36049275e-03, -1.26121274e-03,
-1.16873819e-03, -1.08264203e-03, -1.00251662e-03,
-9.27977243e-04, -8.58662037e-04, -7.94231248e-04,
-7.34365219e-04, -6.78761887e-04, -6.27139663e-04,
-5.79239561e-04, -5.34795421e-04, -4.93581445e-04,
-4.55380300e-04, -4.19980883e-04, -3.87192103e-04,
-3.56834063e-04, -3.28737658e-04, -3.02744435e-04,
-2.78706065e-04, -2.56484160e-04, -2.35948571e-04,
-2.16979506e-04, -1.99463872e-04, -1.83296216e-04,
-1.68378900e-04, -1.54620143e-04, -1.41935448e-04,
-1.30244601e-04, -1.19475340e-04, -1.09556376e-04,
-1.00425136e-04, -9.20222637e-05, -8.42925317e-05,
-7.71845564e-05, -7.06508455e-05, -6.46473758e-05,
-5.91330908e-05, -5.40699974e-05, -4.94229429e-05,
-4.51593428e-05, -4.12489523e-05, -3.76638158e-05,
-3.43782003e-05, -3.13681238e-05, -2.86114770e-05,
-2.60878856e-05, -2.37785002e-05, -2.16659324e-05,
-1.97341192e-05, -1.79682263e-05, -1.63545669e-05,
-1.48805931e-05, -1.35347267e-05, -1.23062686e-05,
-1.11853733e-05, -1.01630099e-05, -9.23083803e-06,
-8.38122653e-06, -7.60712554e-06, -6.90211097e-06,
-6.26027628e-06, -5.67610730e-06, -5.14464001e-06,
-4.66130434e-06, -4.22189512e-06, -3.82257057e-06,
-3.45980146e-06, -3.13036718e-06, -2.83130756e-06,
-2.55991881e-06, -2.31373597e-06, -2.09049356e-06,
-1.88812610e-06, -1.70475790e-06, -1.53865294e-06,
-1.38824813e-06, -1.25210848e-06, -1.12892371e-06,
-1.01750152e-06]),
(0, 1, 1): array([ 7.32060052e-01, 7.31703994e-01, 7.30640672e-01,
7.28904618e-01, 7.26519962e-01, 7.23533447e-01,
7.19985230e-01, 7.15923178e-01, 7.11381899e-01,
7.06400122e-01, 7.01012615e-01, 6.95246635e-01,
6.89126351e-01, 6.82669817e-01, 6.75900228e-01,
6.68825063e-01, 6.61455144e-01, 6.53810997e-01,
6.45894091e-01, 6.37713734e-01, 6.29282339e-01,
6.20602147e-01, 6.11690743e-01, 6.02550776e-01,
5.93195747e-01, 5.83636208e-01, 5.73876273e-01,
5.63935929e-01, 5.53824162e-01, 5.43557786e-01,
5.33146189e-01, 5.22605893e-01, 5.11950753e-01,
5.01194345e-01, 4.90358522e-01, 4.79452159e-01,
4.68493912e-01, 4.57497552e-01, 4.46480096e-01,
4.35455676e-01, 4.24439742e-01, 4.13446696e-01,
4.02492220e-01, 3.91588085e-01, 3.80748316e-01,
3.69984747e-01, 3.59313309e-01, 3.48740343e-01,
3.38280302e-01, 3.27943734e-01, 3.17738106e-01,
3.07674418e-01, 2.97761083e-01, 2.88003485e-01,
2.78411265e-01, 2.68988538e-01, 2.59744506e-01,
2.50681616e-01, 2.41803960e-01, 2.33117033e-01,
2.24623558e-01, 2.16325790e-01, 2.08227107e-01,
2.00328762e-01, 1.92632060e-01, 1.85136693e-01,
1.77844471e-01, 1.70754372e-01, 1.63866512e-01,
1.57180203e-01, 1.50693195e-01, 1.44404137e-01,
1.38312288e-01, 1.32414211e-01, 1.26709079e-01,
1.21192556e-01, 1.15862876e-01, 1.10717097e-01,
1.05751322e-01, 1.00962727e-01, 9.63474362e-02,
9.19026714e-02, 8.76236961e-02, 8.35074368e-02,
7.95495428e-02, 7.57468510e-02, 7.20943851e-02,
6.85888549e-02, 6.52257242e-02, 6.20013907e-02,
5.89114476e-02, 5.59524504e-02, 5.31198786e-02,
5.04097732e-02, 4.78181842e-02, 4.53413437e-02,
4.29753574e-02, 4.07162133e-02, 3.85605297e-02,
3.65042162e-02, 3.45439132e-02, 3.26757304e-02,
3.08966123e-02, 2.92029097e-02, 2.75910455e-02,
2.60581683e-02, 2.46008274e-02, 2.32160631e-02,
2.19008259e-02, 2.06521773e-02, 1.94673106e-02,
1.83435399e-02, 1.72780434e-02, 1.62683640e-02,
1.53119447e-02, 1.44064617e-02, 1.35493979e-02,
1.27385858e-02, 1.19718570e-02, 1.12471648e-02,
1.05624431e-02, 9.91580149e-03, 9.30537749e-03,
8.72934729e-03, 8.18598563e-03, 7.67369685e-03,
7.19084531e-03, 6.73599570e-03, 6.30762524e-03,
5.90440351e-03, 5.52499777e-03, 5.16811960e-03,
4.83257785e-03, 4.51721651e-03, 4.22094746e-03,
3.94271614e-03, 3.68154003e-03, 3.43645740e-03,
3.20656397e-03, 2.99099599e-03, 2.78895156e-03,
2.59964946e-03, 2.42235368e-03, 2.25636506e-03,
2.10103106e-03, 1.95570798e-03, 1.81980532e-03,
1.69276597e-03, 1.57406465e-03, 1.46317244e-03,
1.35962193e-03, 1.26296635e-03, 1.17278140e-03,
1.08866412e-03, 1.01023387e-03, 9.37132570e-04,
8.69023995e-04, 8.05591737e-04, 7.46534560e-04,
6.91570681e-04, 6.40434635e-04, 5.92880600e-04,
5.48671062e-04, 5.07584242e-04, 4.69415756e-04,
4.33969671e-04, 4.01065644e-04, 3.70529262e-04,
3.42202151e-04, 3.15933794e-04, 2.91585849e-04,
2.69021038e-04, 2.48120719e-04, 2.28765400e-04,
2.10849108e-04, 1.94270158e-04, 1.78934225e-04,
1.64753395e-04, 1.51645640e-04, 1.39533677e-04,
1.28346052e-04, 1.18015327e-04, 1.08479744e-04,
9.96814358e-05, 9.15657042e-05, 8.40823065e-05,
7.71845158e-05, 7.08287583e-05, 6.49745292e-05,
5.95841448e-05, 5.46226470e-05, 5.00574595e-05,
4.58583048e-05, 4.19973236e-05, 3.84485179e-05,
3.51877501e-05, 3.21927243e-05, 2.94428117e-05,
2.69186342e-05, 2.46027187e-05, 2.24783961e-05,
2.05306679e-05, 1.87453360e-05, 1.71096680e-05,
1.56113464e-05, 1.42394326e-05, 1.29837367e-05,
1.18347847e-05, 1.07838631e-05, 9.82296349e-06,
8.94468290e-06, 8.14220355e-06, 7.40922670e-06,
6.73996329e-06, 6.12908471e-06, 5.57169545e-06,
5.06329290e-06, 4.59972803e-06, 4.17720095e-06,
3.79221462e-06, 3.44154543e-06, 3.12225308e-06,
2.83162939e-06, 2.56719152e-06, 2.32666514e-06,
2.10796236e-06, 1.90917421e-06, 1.72855011e-06,
1.56448344e-06, 1.41551488e-06, 1.28029648e-06,
1.15761022e-06, 1.04632226e-06, 9.45415526e-07,
8.53949523e-07, 7.71070977e-07, 6.96001337e-07,
6.28030190e-07, 5.66504021e-07, 5.10832395e-07,
4.60475839e-07, 4.14943014e-07, 3.73785469e-07,
3.36597525e-07, 3.03007171e-07, 2.72675515e-07,
2.45296616e-07, 2.20593383e-07, 1.98309763e-07,
1.78216374e-07]),
(0, 1, 4): array([ 7.32100738e-01, 7.31034418e-01, 7.27879456e-01,
7.22748118e-01, 7.15799246e-01, 7.07210779e-01,
6.97159926e-01, 6.85809535e-01, 6.73301560e-01,
6.59752742e-01, 6.45256475e-01, 6.29884726e-01,
6.13691446e-01, 5.96717423e-01, 5.78993407e-01,
5.60543753e-01, 5.41391223e-01, 5.21557110e-01,
5.01065035e-01, 4.79943949e-01, 4.58224787e-01,
4.35945442e-01, 4.13147445e-01, 3.89880936e-01,
3.66198474e-01, 3.42157215e-01, 3.17821080e-01,
2.93255014e-01, 2.68527009e-01, 2.43705630e-01,
2.18864092e-01, 1.94072263e-01, 1.69399459e-01,
1.44919808e-01, 1.20694603e-01, 9.67932815e-02,
7.32810612e-02, 5.02137107e-02, 2.76481907e-02,
5.63750274e-03, -1.57671166e-02, -3.65260177e-02,
-5.65985248e-02, -7.59458434e-02, -9.45374161e-02,
-1.12347238e-01, -1.29353344e-01, -1.45532759e-01,
-1.60876045e-01, -1.75371045e-01, -1.89010634e-01,
-2.01789294e-01, -2.13709149e-01, -2.24771382e-01,
-2.34984043e-01, -2.44355178e-01, -2.52901037e-01,
-2.60630376e-01, -2.67561061e-01, -2.73709460e-01,
-2.79097851e-01, -2.83748535e-01, -2.87681722e-01,
-2.90922456e-01, -2.93497755e-01, -2.95429033e-01,
-2.96747573e-01, -2.97476417e-01, -2.97647406e-01,
-2.97285570e-01, -2.96417635e-01, -2.95073519e-01,
-2.93280885e-01, -2.91066270e-01, -2.88460100e-01,
-2.85483770e-01, -2.82166972e-01, -2.78537783e-01,
-2.74616543e-01, -2.70431662e-01, -2.66004326e-01,
-2.61362476e-01, -2.56523531e-01, -2.51511016e-01,
-2.46345829e-01, -2.41050587e-01, -2.35641513e-01,
-2.30137983e-01, -2.24557160e-01, -2.18916513e-01,
-2.13230741e-01, -2.07516597e-01, -2.01786797e-01,
-1.96054838e-01, -1.90333452e-01, -1.84634480e-01,
-1.78968413e-01, -1.73344882e-01, -1.67774842e-01,
-1.62265807e-01, -1.56825965e-01, -1.51460332e-01,
-1.46178118e-01, -1.40983849e-01, -1.35882343e-01,
-1.30879638e-01, -1.25979004e-01, -1.21184270e-01,
-1.16498235e-01, -1.11923557e-01, -1.07462619e-01,
-1.03116943e-01, -9.88876637e-02, -9.47757238e-02,
-9.07814891e-02, -8.69058413e-02, -8.31479709e-02,
-7.95075026e-02, -7.59839829e-02, -7.25766791e-02,
-6.92841805e-02, -6.61053868e-02, -6.30392312e-02,
-6.00828396e-02, -5.72350431e-02, -5.44938473e-02,
-5.18571740e-02, -4.93228805e-02, -4.68886489e-02,
-4.45520574e-02, -4.23109317e-02, -4.01626958e-02,
-3.81051652e-02, -3.61350331e-02, -3.42502617e-02,
-3.24482401e-02, -3.07264938e-02, -2.90822683e-02,
-2.75131066e-02, -2.60164130e-02, -2.45897417e-02,
-2.32306446e-02, -2.19366046e-02, -2.07051999e-02,
-1.95341270e-02, -1.84209670e-02, -1.73634349e-02,
-1.63593566e-02, -1.54065780e-02, -1.45028489e-02,
-1.36461329e-02, -1.28344289e-02, -1.20657643e-02,
-1.13382198e-02, -1.06500353e-02, -9.99927943e-03,
-9.38428951e-03, -8.80338299e-03, -8.25492750e-03,
-7.73737352e-03, -7.24922492e-03, -6.78904874e-03,
-6.35543381e-03, -5.94703671e-03, -5.56258493e-03,
-5.20084191e-03, -4.86061139e-03, -4.54077633e-03,
-4.24025643e-03, -3.95801665e-03, -3.69306907e-03,
-3.44445202e-03, -3.21127655e-03, -2.99268200e-03,
-2.78784626e-03, -2.59598821e-03, -2.41636709e-03,
-2.24827865e-03, -2.09104948e-03, -1.94404687e-03,
-1.80668666e-03, -1.67834715e-03, -1.55850976e-03,
-1.44666695e-03, -1.34231641e-03, -1.24500285e-03,
-1.15429311e-03, -1.06977569e-03, -9.91061335e-04,
-9.17782171e-04, -8.49591962e-04, -7.86161648e-04,
-7.27186579e-04, -6.72375899e-04, -6.21456010e-04,
-5.74171756e-04, -5.30280808e-04, -4.89558668e-04,
-4.51789641e-04, -4.16778950e-04, -3.84330821e-04,
-3.54273662e-04, -3.26442833e-04, -3.00683852e-04,
-2.76851806e-04, -2.54811749e-04, -2.34437637e-04,
-2.15610900e-04, -1.98220983e-04, -1.82164826e-04,
-1.67346157e-04, -1.53674910e-04, -1.41067215e-04,
-1.29445338e-04, -1.18736232e-04, -1.08872074e-04,
-9.97899239e-05, -9.14311155e-05, -8.37411468e-05,
-7.66693036e-05, -7.01684095e-05, -6.41946272e-05,
-5.87075052e-05, -5.36694528e-05, -4.90454845e-05,
-4.48031838e-05, -4.09126214e-05, -3.73459605e-05,
-3.40775676e-05, -3.10835969e-05, -2.83421858e-05,
-2.58331012e-05, -2.35373066e-05, -2.14375752e-05,
-1.95179403e-05, -1.77636133e-05, -1.61609995e-05,
-1.46975292e-05, -1.33616617e-05, -1.21427211e-05,
-1.10308956e-05, -1.00171867e-05, -9.09327134e-06,
-8.25151551e-06, -7.48494604e-06, -6.78706723e-06,
-6.15199776e-06, -5.57430554e-06, -5.04900064e-06,
-4.57151600e-06]),
(1, 0, 6): array([ 7.00504319e-01, 7.00380474e-01, 7.00003107e-01,
6.99380297e-01, 6.98494516e-01, 6.97343484e-01,
6.95911388e-01, 6.94189510e-01, 6.92153509e-01,
6.89788624e-01, 6.87078609e-01, 6.84004291e-01,
6.80549938e-01, 6.76697817e-01, 6.72441334e-01,
6.67764295e-01, 6.62657476e-01, 6.57126517e-01,
6.51162601e-01, 6.44767444e-01, 6.37949793e-01,
6.30711095e-01, 6.23070464e-01, 6.15033453e-01,
6.06619002e-01, 5.97844026e-01, 5.88720299e-01,
5.79275677e-01, 5.69528205e-01, 5.59503886e-01,
5.49221638e-01, 5.38707238e-01, 5.27984421e-01,
5.17075397e-01, 5.06011359e-01, 4.94809946e-01,
4.83497612e-01, 4.72096511e-01, 4.60631007e-01,
4.49122161e-01, 4.37591752e-01, 4.26060477e-01,
4.14549622e-01, 4.03075827e-01, 3.91657939e-01,
3.80312066e-01, 3.69057746e-01, 3.57904821e-01,
3.46870778e-01, 3.35968665e-01, 3.25208568e-01,
3.14603146e-01, 3.04162565e-01, 2.93893804e-01,
2.83807510e-01, 2.73908892e-01, 2.64207700e-01,
2.54707018e-01, 2.45411366e-01, 2.36326366e-01,
2.27454805e-01, 2.18798983e-01, 2.10362015e-01,
2.02144916e-01, 1.94148735e-01, 1.86372706e-01,
1.78818155e-01, 1.71483548e-01, 1.64368461e-01,
1.57471603e-01, 1.50790108e-01, 1.44322050e-01,
1.38065925e-01, 1.32017722e-01, 1.26175885e-01,
1.20535498e-01, 1.15094095e-01, 1.09848054e-01,
1.04792901e-01, 9.99251748e-02, 9.52403891e-02,
9.07351362e-02, 8.64041774e-02, 8.22438154e-02,
7.82492022e-02, 7.44166149e-02, 7.07406607e-02,
6.72175021e-02, 6.38422116e-02, 6.06107121e-02,
5.75182105e-02, 5.45608282e-02, 5.17337269e-02,
4.90325796e-02, 4.64531111e-02, 4.39912316e-02,
4.16427471e-02, 3.94033972e-02, 3.72695001e-02,
3.52367636e-02, 3.33015686e-02, 3.14598507e-02,
2.97083102e-02, 2.80431356e-02, 2.64606123e-02,
2.49576809e-02, 2.35307838e-02, 2.21768054e-02,
2.08925795e-02, 1.96750542e-02, 1.85213223e-02,
1.74285901e-02, 1.63939701e-02, 1.54149118e-02,
1.44887967e-02, 1.36132284e-02, 1.27856553e-02,
1.20038507e-02, 1.12656030e-02, 1.05688219e-02,
9.91141632e-03, 9.29146149e-03, 8.70707299e-03,
8.15641061e-03, 7.63773633e-03, 7.14943509e-03,
6.68987225e-03, 6.25759465e-03, 5.85108872e-03,
5.46901532e-03, 5.11004525e-03, 4.77289789e-03,
4.45638536e-03, 4.15935973e-03, 3.88074042e-03,
3.61948459e-03, 3.37461917e-03, 3.14519616e-03,
2.93032544e-03, 2.72915788e-03, 2.54090425e-03,
2.36479983e-03, 2.20012418e-03, 2.04619468e-03,
1.90237331e-03, 1.76803563e-03, 1.64260720e-03,
1.52554674e-03, 1.41634443e-03, 1.31449200e-03,
1.21953727e-03, 1.13104924e-03, 1.04861948e-03,
9.71861340e-04, 9.00410261e-04, 8.33923556e-04,
7.72080102e-04, 7.14577955e-04, 6.61130733e-04,
6.11470732e-04, 5.65346284e-04, 5.22524206e-04,
4.82780537e-04, 4.45906230e-04, 4.11708523e-04,
3.80003564e-04, 3.50621844e-04, 3.23400556e-04,
2.98191401e-04, 2.74854018e-04, 2.53259252e-04,
2.33280193e-04, 2.14806197e-04, 1.97727091e-04,
1.81944682e-04, 1.67365271e-04, 1.53901992e-04,
1.41474025e-04, 1.30006091e-04, 1.19427493e-04,
1.09672908e-04, 1.00680897e-04, 9.23951766e-05,
8.47631492e-05, 7.77352689e-05, 7.12660643e-05,
6.53133005e-05, 5.98376879e-05, 5.48027744e-05,
5.01747100e-05, 4.59221475e-05, 4.20159474e-05,
3.84290800e-05, 3.51366950e-05, 3.21156623e-05,
2.93445559e-05, 2.68036137e-05, 2.44745796e-05,
2.23403671e-05, 2.03855531e-05, 1.85955011e-05,
1.69570256e-05, 1.54577039e-05, 1.40863752e-05,
1.28323217e-05, 1.16859999e-05, 1.06385459e-05,
9.68174159e-06, 8.80803995e-06, 8.01051397e-06,
7.28277574e-06, 6.61895261e-06, 6.01363113e-06,
5.46184415e-06, 4.95902844e-06, 4.50099850e-06,
4.08391224e-06, 3.70423666e-06, 3.35874089e-06,
3.04445769e-06, 2.75865827e-06, 2.49885617e-06,
2.26276647e-06, 2.04829831e-06, 1.85353968e-06,
1.67673876e-06, 1.51629644e-06, 1.37074921e-06,
1.23875675e-06, 1.11910278e-06, 1.01066708e-06,
9.12437987e-07, 8.23477127e-07, 7.42942749e-07,
6.70058925e-07, 6.04122372e-07, 5.44492534e-07,
4.90585989e-07, 4.41867461e-07, 3.97853740e-07,
3.58104208e-07, 3.22218275e-07, 2.89831033e-07,
2.60612740e-07, 2.34261790e-07, 2.10503920e-07,
1.89091814e-07, 1.69801778e-07, 1.52427781e-07,
1.36785349e-07]),
(1, 0, 13): array([ 7.00543251e-01, 7.00066888e-01, 6.98640915e-01,
6.96271854e-01, 6.92979892e-01, 6.88803487e-01,
6.83801705e-01, 6.78054648e-01, 6.71658863e-01,
6.64725848e-01, 6.57375084e-01, 6.49730227e-01,
6.41915518e-01, 6.34050110e-01, 6.26246346e-01,
6.18607832e-01, 6.11225210e-01, 6.04177616e-01,
5.97531605e-01, 5.91337185e-01, 5.85634958e-01,
5.80450384e-01, 5.75798602e-01, 5.71680845e-01,
5.68090353e-01, 5.65012287e-01, 5.62420654e-01,
5.60284861e-01, 5.58568907e-01, 5.57233216e-01,
5.56231049e-01, 5.55517117e-01, 5.55044808e-01,
5.54759895e-01, 5.54620464e-01, 5.54574033e-01,
5.54570453e-01, 5.54568677e-01, 5.54523919e-01,
5.54394417e-01, 5.54138451e-01, 5.53725159e-01,
5.53120897e-01, 5.52292692e-01, 5.51214786e-01,
5.49864939e-01, 5.48223435e-01, 5.46268994e-01,
5.43992574e-01, 5.41381309e-01, 5.38426783e-01,
5.35120691e-01, 5.31462177e-01, 5.27448341e-01,
5.23082421e-01, 5.18366937e-01, 5.13311992e-01,
5.07919563e-01, 5.02200399e-01, 4.96162837e-01,
4.89821095e-01, 4.83188799e-01, 4.76277120e-01,
4.69101812e-01, 4.61680321e-01, 4.54024339e-01,
4.46155113e-01, 4.38085581e-01, 4.29837294e-01,
4.21424963e-01, 4.12865022e-01, 4.04177011e-01,
3.95378028e-01, 3.86484473e-01, 3.77516209e-01,
3.68484731e-01, 3.59409408e-01, 3.50308228e-01,
3.41191844e-01, 3.32078752e-01, 3.22980670e-01,
3.13915836e-01, 3.04892702e-01, 2.95925569e-01,
2.87026452e-01, 2.78209068e-01, 2.69481350e-01,
2.60854234e-01, 2.52336823e-01, 2.43938569e-01,
2.35666558e-01, 2.27529842e-01, 2.19533976e-01,
2.11685318e-01, 2.03989685e-01, 1.96452185e-01,
1.89076830e-01, 1.81867000e-01, 1.74827394e-01,
1.67959739e-01, 1.61266451e-01, 1.54747239e-01,
1.48405716e-01, 1.42241409e-01, 1.36254249e-01,
1.30445331e-01, 1.24813440e-01, 1.19357948e-01,
1.14077439e-01, 1.08970491e-01, 1.04035550e-01,
9.92704023e-02, 9.46726342e-02, 9.02397399e-02,
8.59688383e-02, 8.18576061e-02, 7.79023573e-02,
7.40998703e-02, 7.04469723e-02, 6.69403474e-02,
6.35761940e-02, 6.03510784e-02, 5.72617043e-02,
5.43032781e-02, 5.14726166e-02, 4.87658896e-02,
4.61793043e-02, 4.37090818e-02, 4.13514034e-02,
3.91024246e-02, 3.69586139e-02, 3.49161720e-02,
3.29717153e-02, 3.11209603e-02, 2.93608056e-02,
2.76877242e-02, 2.60983739e-02, 2.45892429e-02,
2.31571398e-02, 2.17988176e-02, 2.05112151e-02,
1.92913308e-02, 1.81361587e-02, 1.70428280e-02,
1.60086193e-02, 1.50307677e-02, 1.41066661e-02,
1.32338461e-02, 1.24098984e-02, 1.16323796e-02,
1.08990591e-02, 1.02077746e-02, 9.55642299e-03,
8.94297929e-03, 8.36558476e-03, 7.82226533e-03,
7.31130066e-03, 6.83098016e-03, 6.37966078e-03,
5.95579188e-03, 5.55788954e-03, 5.18454207e-03,
4.83437692e-03, 4.50609588e-03, 4.19847569e-03,
3.91033963e-03, 3.64055855e-03, 3.38808395e-03,
3.15190923e-03, 2.93107756e-03, 2.72468145e-03,
2.53184725e-03, 2.35176822e-03, 2.18367176e-03,
2.02682351e-03, 1.88053089e-03, 1.74414116e-03,
1.61703760e-03, 1.49863447e-03, 1.38838474e-03,
1.28578551e-03, 1.19030830e-03, 1.10151190e-03,
1.01896790e-03, 9.42255683e-04, 8.70995552e-04,
8.04828357e-04, 7.43414988e-04, 6.86436414e-04,
6.33592854e-04, 5.84603696e-04, 5.39203733e-04,
4.97148754e-04, 4.58206765e-04, 4.22160756e-04,
3.88809394e-04, 3.57962409e-04, 3.29444464e-04,
3.03087953e-04, 2.78742255e-04, 2.56256645e-04,
2.35499789e-04, 2.16346257e-04, 1.98678827e-04,
1.82388183e-04, 1.67372922e-04, 1.53538785e-04,
1.40797499e-04, 1.29067136e-04, 1.18271668e-04,
1.08340386e-04, 9.92074147e-05, 9.08116929e-05,
8.30968903e-05, 7.60102595e-05, 6.95030429e-05,
6.35301921e-05, 5.80498894e-05, 5.30234394e-05,
4.84149801e-05, 4.41912860e-05, 4.03215972e-05,
3.67776612e-05, 3.35333198e-05, 3.05643110e-05,
2.78482417e-05, 2.53645236e-05, 2.30940679e-05,
2.10193767e-05, 1.91242149e-05, 1.73937736e-05,
1.58143887e-05, 1.43732290e-05, 1.30587467e-05,
1.18602753e-05, 1.07679624e-05, 9.77278638e-06,
8.86643066e-06, 8.04129078e-06, 7.29034973e-06,
6.60718242e-06, 5.98591815e-06, 5.42114039e-06,
4.90789849e-06, 4.44168995e-06, 4.01832470e-06,
3.63402897e-06, 3.28532412e-06, 2.96902529e-06,
2.68222651e-06]),
(1, 1, 7): array([ -7.55267449e-18, 2.08011789e-02, 4.15719999e-02,
6.22807353e-02, 8.28909232e-02, 1.03363416e-01,
1.23653735e-01, 1.43715430e-01, 1.63495610e-01,
1.82942648e-01, 2.02002485e-01, 2.20620625e-01,
2.38744764e-01, 2.56322305e-01, 2.73305612e-01,
2.89648530e-01, 3.05307554e-01, 3.20247857e-01,
3.34434384e-01, 3.47836760e-01, 3.60432866e-01,
3.72200657e-01, 3.83129204e-01, 3.93204009e-01,
4.02421308e-01, 4.10779510e-01, 4.18276782e-01,
4.24922331e-01, 4.30723898e-01, 4.35696063e-01,
4.39851174e-01, 4.43208293e-01, 4.45788260e-01,
4.47610112e-01, 4.48704351e-01, 4.49091663e-01,
4.48799741e-01, 4.47857021e-01, 4.46293329e-01,
4.44137177e-01, 4.41418198e-01, 4.38167740e-01,
4.34417630e-01, 4.30195603e-01, 4.25533190e-01,
4.20459800e-01, 4.15008371e-01, 4.09202844e-01,
4.03075855e-01, 3.96655102e-01, 3.89966529e-01,
3.83037714e-01, 3.75894698e-01, 3.68560258e-01,
3.61060885e-01, 3.53417466e-01, 3.45655843e-01,
3.37794217e-01, 3.29852542e-01, 3.21851749e-01,
3.13809412e-01, 3.05742597e-01, 2.97668882e-01,
2.89603141e-01, 2.81560456e-01, 2.73552673e-01,
2.65594633e-01, 2.57696943e-01, 2.49871664e-01,
2.42129223e-01, 2.34477507e-01, 2.26925680e-01,
2.19482907e-01, 2.12154672e-01, 2.04949612e-01,
1.97871111e-01, 1.90925873e-01, 1.84118488e-01,
1.77451984e-01, 1.70930621e-01, 1.64556629e-01,
1.58333656e-01, 1.52262142e-01, 1.46344526e-01,
1.40581172e-01, 1.34973898e-01, 1.29521513e-01,
1.24224905e-01, 1.19083003e-01, 1.14095722e-01,
1.09261487e-01, 1.04579960e-01, 1.00048913e-01,
9.56666021e-02, 9.14311380e-02, 8.73406553e-02,
8.33928111e-02, 7.95849102e-02, 7.59149793e-02,
7.23797280e-02, 6.89767862e-02, 6.57027713e-02,
6.25554356e-02, 5.95313705e-02, 5.66271377e-02,
5.38402702e-02, 5.11672078e-02, 4.86049781e-02,
4.61503489e-02, 4.38001092e-02, 4.15511634e-02,
3.94004492e-02, 3.73445651e-02, 3.53806076e-02,
3.35053965e-02, 3.17160462e-02, 3.00092163e-02,
2.83820904e-02, 2.68317542e-02, 2.53554248e-02,
2.39502014e-02, 2.26134144e-02, 2.13423788e-02,
2.01343931e-02, 1.89868833e-02, 1.78974418e-02,
1.68635379e-02, 1.58829433e-02, 1.49532267e-02,
1.40722591e-02, 1.32378659e-02, 1.24479134e-02,
1.17004093e-02, 1.09933920e-02, 1.03249950e-02,
9.69338599e-03, 9.09683980e-03, 8.53364107e-03,
8.00216496e-03, 7.50083724e-03, 7.02818512e-03,
6.58275657e-03, 6.16316814e-03, 5.76809359e-03,
5.39628196e-03, 5.04648619e-03, 4.71755189e-03,
4.40838038e-03, 4.11792606e-03, 3.84512393e-03,
3.58902675e-03, 3.34872013e-03, 3.12332770e-03,
2.91201111e-03, 2.71397134e-03, 2.52844991e-03,
2.35472981e-03, 2.19212995e-03, 2.03999738e-03,
1.89771638e-03, 1.76470243e-03, 1.64041006e-03,
1.52430890e-03, 1.41589957e-03, 1.31471841e-03,
1.22031839e-03, 1.13228437e-03, 1.05021309e-03,
9.73735007e-04, 9.02497046e-04, 8.36172063e-04,
7.74434425e-04, 7.16999552e-04, 6.63580246e-04,
6.13919390e-04, 5.67769226e-04, 5.24898280e-04,
4.85089568e-04, 4.48139406e-04, 4.13854905e-04,
3.82056543e-04, 3.52574067e-04, 3.25250486e-04,
2.99937917e-04, 2.76495965e-04, 2.54794858e-04,
2.34713203e-04, 2.16137228e-04, 1.98960631e-04,
1.83083985e-04, 1.68414556e-04, 1.54865529e-04,
1.42355805e-04, 1.30810327e-04, 1.20158731e-04,
1.10335402e-04, 1.01279407e-04, 9.29340767e-05,
8.52460058e-05, 7.81666869e-05, 7.16497654e-05,
6.56532127e-05, 6.01371735e-05, 5.50656122e-05,
5.04036968e-05, 4.61202088e-05, 4.21859753e-05,
3.85737781e-05, 3.52584622e-05, 3.22167947e-05,
2.94272207e-05, 2.68698022e-05, 2.45260469e-05,
2.23788821e-05, 2.04125314e-05, 1.86124348e-05,
1.69651487e-05, 1.54582363e-05, 1.40802581e-05,
1.28206470e-05, 1.16696278e-05, 1.06182409e-05,
9.65820623e-06, 8.78190305e-06, 7.98232302e-06,
7.25300889e-06, 6.58803166e-06, 5.98193623e-06,
5.42969749e-06, 4.92673052e-06, 4.46878941e-06,
4.05201757e-06, 3.67282042e-06, 3.32795397e-06,
3.01441252e-06, 2.72945680e-06, 2.47057837e-06,
2.23548052e-06, 2.02204543e-06, 1.82835058e-06,
1.65263369e-06, 1.49328434e-06, 1.34882803e-06,
1.21792506e-06, 1.09934409e-06, 9.91959868e-07,
8.94752016e-07, 8.06792879e-07, 7.27224428e-07,
6.55273894e-07]),
(1, 1, 8): array([ -1.26224844e-18, -1.13825651e-02, -2.27847042e-02,
-3.42236363e-02, -4.57098354e-02, -5.72483735e-02,
-6.88355219e-02, -8.04618067e-02, -9.21070890e-02,
-1.03748256e-01, -1.15355378e-01, -1.26893854e-01,
-1.38327181e-01, -1.49614613e-01, -1.60716854e-01,
-1.71592500e-01, -1.82199808e-01, -1.92503123e-01,
-2.02463772e-01, -2.12045764e-01, -2.21219376e-01,
-2.29953396e-01, -2.38226413e-01, -2.46011956e-01,
-2.53293797e-01, -2.60056719e-01, -2.66284832e-01,
-2.71973049e-01, -2.77114174e-01, -2.81708007e-01,
-2.85751832e-01, -2.89249955e-01, -2.92208172e-01,
-2.94631309e-01, -2.96535287e-01, -2.97926855e-01,
-2.98820252e-01, -2.99230450e-01, -2.99174652e-01,
-2.98668951e-01, -2.97731109e-01, -2.96381078e-01,
-2.94639686e-01, -2.92524284e-01, -2.90056420e-01,
-2.87256013e-01, -2.84146931e-01, -2.80744656e-01,
-2.77073649e-01, -2.73153994e-01, -2.69004330e-01,
-2.64645548e-01, -2.60097184e-01, -2.55376050e-01,
-2.50502927e-01, -2.45493506e-01, -2.40368583e-01,
-2.35141807e-01, -2.29828794e-01, -2.24446480e-01,
-2.19008719e-01, -2.13529144e-01, -2.08022098e-01,
-2.02499545e-01, -1.96973830e-01, -1.91454366e-01,
-1.85953651e-01, -1.80480262e-01, -1.75044335e-01,
-1.69654583e-01, -1.64317407e-01, -1.59040548e-01,
-1.53831969e-01, -1.48696062e-01, -1.43640518e-01,
-1.38667923e-01, -1.33784272e-01, -1.28993588e-01,
-1.24298442e-01, -1.19702717e-01, -1.15208390e-01,
-1.10818922e-01, -1.06534678e-01, -1.02358080e-01,
-9.82895889e-02, -9.43311343e-02, -9.04817666e-02,
-8.67426299e-02, -8.31130032e-02, -7.95931810e-02,
-7.61820416e-02, -7.28797321e-02, -6.96845567e-02,
-6.65953671e-02, -6.36108777e-02, -6.07298718e-02,
-5.79506925e-02, -5.52713672e-02, -5.26906408e-02,
-5.02059982e-02, -4.78158452e-02, -4.55176032e-02,
-4.33098382e-02, -4.11899670e-02, -3.91553984e-02,
-3.72045091e-02, -3.53345980e-02, -3.35435481e-02,
-3.18289900e-02, -3.01885818e-02, -2.86200852e-02,
-2.71212984e-02, -2.56896793e-02, -2.43231736e-02,
-2.30194480e-02, -2.17764507e-02, -2.05916706e-02,
-1.94631051e-02, -1.83886524e-02, -1.73663168e-02,
-1.63939817e-02, -1.54697421e-02, -1.45916667e-02,
-1.37577927e-02, -1.29662669e-02, -1.22153868e-02,
-1.15033125e-02, -1.08284871e-02, -1.01891357e-02,
-9.58376763e-03, -9.01082877e-03, -8.46878846e-03,
-7.95624045e-03, -7.47179021e-03, -7.01412314e-03,
-6.58193798e-03, -6.17402595e-03, -5.78916895e-03,
-5.42622236e-03, -5.08407733e-03, -4.76170760e-03,
-4.45809149e-03, -4.17225979e-03, -3.90328528e-03,
-3.65029947e-03, -3.41242277e-03, -3.18885444e-03,
-2.97883344e-03, -2.78163776e-03, -2.59651300e-03,
-2.42280839e-03, -2.25989299e-03, -2.10716148e-03,
-1.96403382e-03, -1.82995825e-03, -1.70441108e-03,
-1.58689914e-03, -1.47695467e-03, -1.37412783e-03,
-1.27799571e-03, -1.18815743e-03, -1.10424111e-03,
-1.02588201e-03, -9.52737305e-04, -8.84491781e-04,
-8.20839189e-04, -7.61497442e-04, -7.06189543e-04,
-6.54665063e-04, -6.06683511e-04, -5.62024004e-04,
-5.20461481e-04, -4.81806010e-04, -4.45859790e-04,
-4.12449768e-04, -3.81407508e-04, -3.52576246e-04,
-3.25809173e-04, -3.00968596e-04, -2.77923691e-04,
-2.56553237e-04, -2.36741722e-04, -2.18383448e-04,
-2.01378753e-04, -1.85632322e-04, -1.71056740e-04,
-1.57570192e-04, -1.45095993e-04, -1.33562508e-04,
-1.22902727e-04, -1.13054224e-04, -1.03958497e-04,
-9.55608809e-05, -8.78109686e-05, -8.06614166e-05,
-7.40680534e-05, -6.79899345e-05, -6.23890103e-05,
-5.72292098e-05, -5.24781487e-05, -4.81044720e-05,
-4.40801071e-05, -4.03781557e-05, -3.69746234e-05,
-3.38458531e-05, -3.09710065e-05, -2.83305287e-05,
-2.59061213e-05, -2.36808992e-05, -2.16392908e-05,
-1.97668316e-05, -1.80501458e-05, -1.64768151e-05,
-1.50353851e-05, -1.37152676e-05, -1.25067004e-05,
-1.14006680e-05, -1.03888247e-05, -9.46350332e-06,
-8.61761551e-06, -7.84459597e-06, -7.13844198e-06,
-6.49359755e-06, -5.90495072e-06, -5.36780131e-06,
-4.87781551e-06, -4.43101900e-06, -4.02375105e-06,
-3.65263834e-06, -3.31461025e-06, -3.00681323e-06,
-2.72666782e-06, -2.47175085e-06, -2.23989228e-06,
-2.02907192e-06, -1.83745270e-06, -1.66335274e-06,
-1.50523185e-06, -1.36166479e-06, -1.23136234e-06,
-1.11314174e-06, -1.00592193e-06, -9.08712154e-07,
-8.20614605e-07, -7.40801502e-07, -6.68515530e-07,
-6.03072297e-07, -5.43850305e-07, -4.90270628e-07,
-4.41814923e-07]),
(1, 1, 14): array([ -2.01907896e-17, -1.97025554e-03, -4.04801820e-03,
-6.32522865e-03, -8.86686757e-03, -1.17044762e-02,
-1.48346731e-02, -1.82206831e-02, -2.17970160e-02,
-2.54743582e-02, -2.91461551e-02, -3.26945068e-02,
-3.59951685e-02, -3.89235115e-02, -4.13583841e-02,
-4.31850218e-02, -4.42991037e-02, -4.46087850e-02,
-4.40340477e-02, -4.25122000e-02, -3.99926401e-02,
-3.64413296e-02, -3.18387352e-02, -2.61794234e-02,
-1.94722352e-02, -1.17353411e-02, -3.00159782e-03,
6.68683821e-03, 1.72803587e-02, 2.87215113e-02,
4.09435181e-02, 5.38780371e-02, 6.74521699e-02,
8.15844950e-02, 9.62015879e-02, 1.11219625e-01,
1.26554534e-01, 1.42130941e-01, 1.57867603e-01,
1.73686804e-01, 1.89509479e-01, 2.05268542e-01,
2.20892228e-01, 2.36311821e-01, 2.51464646e-01,
2.66293806e-01, 2.80743942e-01, 2.94761273e-01,
3.08304343e-01, 3.21329345e-01, 3.33798292e-01,
3.45674129e-01, 3.56930458e-01, 3.67539626e-01,
3.77481823e-01, 3.86739059e-01, 3.95302735e-01,
4.03156382e-01, 4.10296040e-01, 4.16714569e-01,
4.22415976e-01, 4.27404050e-01, 4.31679421e-01,
4.35252003e-01, 4.38134415e-01, 4.40333469e-01,
4.41869564e-01, 4.42753256e-01, 4.43008450e-01,
4.42649957e-01, 4.41697517e-01, 4.40175483e-01,
4.38105632e-01, 4.35510680e-01, 4.32418416e-01,
4.28846038e-01, 4.24822805e-01, 4.20377585e-01,
4.15528140e-01, 4.10305450e-01, 4.04730764e-01,
3.98836284e-01, 3.92639466e-01, 3.86167532e-01,
3.79445157e-01, 3.72499822e-01, 3.65350669e-01,
3.58021773e-01, 3.50535019e-01, 3.42913042e-01,
3.35175193e-01, 3.27344199e-01, 3.19437470e-01,
3.11474111e-01, 3.03472426e-01, 2.95449684e-01,
2.87421562e-01, 2.79402791e-01, 2.71410542e-01,
2.63456991e-01, 2.55555433e-01, 2.47714356e-01,
2.39949459e-01, 2.32269039e-01, 2.24682134e-01,
2.17199816e-01, 2.09828906e-01, 2.02577235e-01,
1.95451052e-01, 1.88456547e-01, 1.81599497e-01,
1.74884283e-01, 1.68314853e-01, 1.61894696e-01,
1.55626460e-01, 1.49513731e-01, 1.43557249e-01,
1.37758372e-01, 1.32118436e-01, 1.26638249e-01,
1.21317427e-01, 1.16156119e-01, 1.11154576e-01,
1.06309610e-01, 1.01621260e-01, 9.70879172e-02,
9.27078602e-02, 8.84791158e-02, 8.43994444e-02,
8.04662557e-02, 7.66774564e-02, 7.30301185e-02,
6.95220119e-02, 6.61486188e-02, 6.29078905e-02,
5.97965831e-02, 5.68116494e-02, 5.39494373e-02,
5.12068150e-02, 4.85803131e-02, 4.60666824e-02,
4.36626201e-02, 4.13646110e-02, 3.91692670e-02,
3.70733746e-02, 3.50734326e-02, 3.31661356e-02,
3.13483449e-02, 2.96168997e-02, 2.79684007e-02,
2.63998060e-02, 2.49080954e-02, 2.34902515e-02,
2.21433120e-02, 2.08646032e-02, 1.96510215e-02,
1.84999880e-02, 1.74088286e-02, 1.63749279e-02,
1.53957996e-02, 1.44690260e-02, 1.35922766e-02,
1.27632284e-02, 1.19796627e-02, 1.12394710e-02,
1.05405866e-02, 9.88098910e-03, 9.25880197e-03,
8.67218912e-03, 8.11938212e-03, 7.59868224e-03,
7.10842366e-03, 6.64706513e-03, 6.21310760e-03,
5.80510478e-03, 5.42167895e-03, 5.06151807e-03,
4.72336829e-03, 4.40602234e-03, 4.10834507e-03,
3.82929227e-03, 3.56770863e-03, 3.32266115e-03,
3.09322369e-03, 2.87846372e-03, 2.67754227e-03,
2.48965607e-03, 2.31403772e-03, 2.14995753e-03,
1.99672279e-03, 1.85367911e-03, 1.72020045e-03,
1.59570787e-03, 1.47964381e-03, 1.37148189e-03,
1.27073041e-03, 1.17691901e-03, 1.08961198e-03,
1.00838653e-03, 9.32863102e-04, 8.62651718e-04,
7.97415205e-04, 7.36826908e-04, 6.80578639e-04,
6.28380538e-04, 5.79961606e-04, 5.35067912e-04,
4.93459332e-04, 4.54911232e-04, 4.19213507e-04,
3.86169126e-04, 3.55592923e-04, 3.27311924e-04,
3.01165469e-04, 2.77001661e-04, 2.54679057e-04,
2.34066032e-04, 2.15039405e-04, 1.97484335e-04,
1.81293557e-04, 1.66366914e-04, 1.52610964e-04,
1.39939332e-04, 1.28271428e-04, 1.17531912e-04,
1.07650739e-04, 9.85630757e-05, 9.02083395e-05,
8.25306382e-05, 7.54777066e-05, 6.90015913e-05,
6.30577321e-05, 5.76038885e-05, 5.26018826e-05,
4.80161903e-05, 4.38137604e-05, 3.99641353e-05,
3.64390392e-05, 3.32124559e-05, 3.02602078e-05,
2.75600142e-05, 2.50913919e-05, 2.28353083e-05,
2.07742592e-05, 1.88922330e-05, 1.71742138e-05,
1.56066093e-05, 1.41768048e-05, 1.28731769e-05,
1.16850558e-05])}
| [
"[email protected]"
] | |
eb2ca5d1b00bedc68345d9103057a0bd38c3425f | a79da24bda658f588fd8e71c7e63f01931c1a694 | /bigapple/venv/lib/python3.7/site-packages/plotly/graph_objs/bar/_stream.py | 8f528873536d267aa02021460ab7bf8155d2a086 | [] | no_license | replicantdeca/bigapple-insys | 60519b486f13e1a3eb18b5ba637e45deaf8e1d8e | 5e7328fb94362fbb04a71c2e297bffd83443eebc | refs/heads/master | 2020-03-27T12:57:31.894182 | 2019-12-01T11:25:13 | 2019-12-01T11:25:13 | 146,580,916 | 0 | 1 | null | 2018-08-29T10:00:28 | 2018-08-29T10:00:27 | null | UTF-8 | Python | false | false | 3,812 | py | from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Stream(BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self['maxpoints']
@maxpoints.setter
def maxpoints(self, val):
self['maxpoints'] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://plot.ly/settings for more details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self['token']
@token.setter
def token(self, val):
self['token'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'bar'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.bar.Stream
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
Returns
-------
Stream
"""
super(Stream, self).__init__('stream')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.bar.Stream
constructor must be a dict or
an instance of plotly.graph_objs.bar.Stream"""
)
# Import validators
# -----------------
from plotly.validators.bar import (stream as v_stream)
# Initialize validators
# ---------------------
self._validators['maxpoints'] = v_stream.MaxpointsValidator()
self._validators['token'] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('maxpoints', None)
self.maxpoints = maxpoints if maxpoints is not None else _v
_v = arg.pop('token', None)
self.token = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
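# A minimal usage sketch, assuming plotly is importable; 'my-stream-token'
# is a placeholder, not a real token (real ones come from
# https://plot.ly/settings):
if __name__ == '__main__':
    stream = Stream(maxpoints=50, token='my-stream-token')
    # to_plotly_json() comes from the plotly base types and returns the
    # populated property dict, e.g. {'maxpoints': 50, 'token': 'my-stream-token'}
    print(stream.to_plotly_json())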
| [
"[email protected]"
] | |
92108883e03e44c0b266ef7a49d9dea628e31e41 | 61b475c33745dbe11d88ea288cbdee279f89c610 | /src/izi/apps/dashboard/widgets.py | 8fe3723a9b44c00ad92a5dec957228dfddf69ec0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | izi-ecommerce/izi-core | a092ea285d0dbd83d17427de3157a9f1e77d6c51 | 21176be2d41f0cf54ca954f294209c585f643dba | refs/heads/master | 2020-03-30T08:37:39.045514 | 2018-10-08T02:58:46 | 2018-10-08T02:58:46 | 151,029,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,969 | py | import copy
import re
from django.forms import Widget
from django.urls import reverse
class RelatedFieldWidgetWrapper(Widget):
"""
    Wraps a given widget to add the "add related object" icon (plus the
    related change/delete URLs exposed via the template context) for the
    IZI dashboard.
"""
template_name = 'izi/dashboard/widgets/related_widget_wrapper.html'
IS_POPUP_VALUE = '1'
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'
def __init__(self, widget, rel):
self.needs_multipart_form = widget.needs_multipart_form
self.attrs = widget.attrs
self.choices = widget.choices
self.widget = widget
self.rel = rel
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.widget = copy.deepcopy(self.widget, memo)
obj.attrs = self.widget.attrs
memo[id(self)] = obj
return obj
@property
def is_hidden(self):
return self.widget.is_hidden
@property
def media(self):
return self.widget.media
def get_related_url(self, info, action, *args):
app_label = info[0]
model_object_name = info[1]
# Convert the model's object name into lowercase, with dashes between
# the camel-cased words
model_object_name = '-'.join(re.sub('([a-z])([A-Z])', r'\1 \2', model_object_name).lower().split())
# Does not specify current app
return reverse("dashboard:%s-%s-%s" % (app_label, model_object_name, action), args=args)
def get_context(self, name, value, attrs):
rel_opts = self.rel.model._meta
info = (rel_opts.app_label, rel_opts.object_name)
self.widget.choices = self.choices
url_params = '&'.join("%s=%s" % param for param in [
(RelatedFieldWidgetWrapper.TO_FIELD_VAR, self.rel.get_related_field().name),
(RelatedFieldWidgetWrapper.IS_POPUP_VAR, RelatedFieldWidgetWrapper.IS_POPUP_VALUE),
])
context = {
'rendered_widget': self.widget.render(name, value, attrs),
'name': name,
'url_params': url_params,
'model': rel_opts.verbose_name,
}
change_related_template_url = self.get_related_url(info, 'update', '__fk__')
context.update(
change_related_template_url=change_related_template_url,
)
add_related_url = self.get_related_url(info, 'create')
context.update(
add_related_url=add_related_url,
)
delete_related_template_url = self.get_related_url(info, 'delete', '__fk__')
context.update(
delete_related_template_url=delete_related_template_url,
)
return context
def value_from_datadict(self, data, files, name):
return self.widget.value_from_datadict(data, files, name)
def value_omitted_from_data(self, data, files, name):
return self.widget.value_omitted_from_data(data, files, name)
def id_for_label(self, id_):
return self.widget.id_for_label(id_)
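# A sketch of intended use, assuming a configured Django project; `myapp`
# and `Book.author` are hypothetical names, and `remote_field` is the
# relation object Django exposes on a ForeignKey:
#
#     from django import forms
#     from myapp.models import Book
#
#     rel = Book._meta.get_field('author').remote_field
#     widget = RelatedFieldWidgetWrapper(forms.Select(), rel)
#     html = widget.render('author', value=None, attrs={'id': 'id_author'})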
| [
"[email protected]"
] | |
dfb4aafe897b5263ebd18b74bb4a504d4d203e7f | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/backend/go/util_rules/embedcfg.py | d2ed6cc938823c9793be977e5ede6db43a726bc2 | [
"Apache-2.0"
] | permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 4,141 | py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
from dataclasses import dataclass
from typing import Any, Iterable, Mapping
from pants.util.frozendict import FrozenDict
from pants.util.strutil import strip_prefix
@dataclass(frozen=True)
class EmbedConfig:
patterns: FrozenDict[str, tuple[str, ...]]
files: FrozenDict[str, str]
def __init__(self, patterns: Mapping[str, Iterable[str]], files: Mapping[str, str]) -> None:
"""Configuration passed to the Go compiler to configure file embedding.
The compiler relies entirely on the caller to map embed patterns to actual filesystem
paths. All embed patterns
contained in the package must be mapped. Consult
`FirstPartyPkgAnalysis.embed_patterns` for the embed patterns obtained from analysis.
:param patterns: Maps each pattern provided via a //go:embed directive to a list of file
paths relative to the package directory for files to embed for that pattern. When the
embedded variable is an `embed.FS`, those relative file paths define the virtual
directory hierarchy exposed by the embed.FS filesystem abstraction. The relative file
paths are resolved to actual filesystem paths for their content by consulting the
`files` dictionary.
:param files: Maps each virtual, relative file path used as a value in the `patterns`
dictionary to the actual filesystem path with that file's content.
"""
object.__setattr__(self, "patterns", FrozenDict({k: tuple(v) for k, v in patterns.items()}))
object.__setattr__(self, "files", FrozenDict(files))
@classmethod
def from_json_dict(
cls, d: dict[str, Any], prefix_to_strip: str | None = None
) -> EmbedConfig | None:
patterns = d.get("Patterns", {})
files = d.get("Files", {})
if prefix_to_strip:
files = {key: strip_prefix(value, prefix_to_strip) for key, value in files.items()}
result = cls(
patterns=FrozenDict({key: tuple(value) for key, value in patterns.items()}),
files=FrozenDict(files),
)
return result if result else None
def to_embedcfg(self) -> bytes:
data = {
"Patterns": dict(self.patterns),
"Files": dict(self.files),
}
return json.dumps(data).encode("utf-8")
def __bool__(self) -> bool:
return bool(self.patterns) or bool(self.files)
def merge(self, other: EmbedConfig) -> EmbedConfig:
"""Merge two EmbedConfig's into one.
Overlapping keys must have the same values.
"""
overlapping_patterns_keys = set(self.patterns.keys()) & set(other.patterns.keys())
for key in overlapping_patterns_keys:
if self.patterns[key] != other.patterns[key]:
raise AssertionError(
"Unable to merge conflicting golang file embed configurations. This should not have occurred. "
"Please open an issue at https://github.com/pantsbuild/pants/issues/new/choose "
"with the following information: "
f"Patterns Key: {key}; Left: {self.patterns[key]}; Right: {other.patterns[key]} "
)
overlapping_files_keys = set(self.files.keys()) & set(other.files.keys())
for key in overlapping_files_keys:
if self.files[key] != other.files[key]:
raise AssertionError(
"Unable to merge conflicting golang file embed configurations. This should not have occurred. "
"Please open an issue at https://github.com/pantsbuild/pants/issues/new/choose "
"with the following information: "
f"Files Key: {key}; Left: {self.patterns[key]}; Right: {other.patterns[key]} "
)
return EmbedConfig(
patterns={**self.patterns, **other.patterns},
files={**self.files, **other.files},
)
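# A small runnable sketch of merging two configs (assumes the pants source
# tree is importable); the pattern and path strings are made up:
if __name__ == "__main__":
    a = EmbedConfig(patterns={"*.txt": ["hello.txt"]}, files={"hello.txt": "pkg/hello.txt"})
    b = EmbedConfig(patterns={"data/*": ["data/cfg.json"]}, files={"data/cfg.json": "pkg/data/cfg.json"})
    # Disjoint keys merge cleanly; overlapping keys with different values
    # would raise AssertionError per the checks above.
    print(a.merge(b).to_embedcfg().decode("utf-8"))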
| [
"[email protected]"
] | |
411e8160fa633fee9650fda54c0078f8af778b18 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5011/404005011.py | fca6c0597cac9c112b926b948f6c0cd9e7cdd1ee | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 3,993 | py | from bots.botsconfig import *
from records005011 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'SR',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'ZC1', MIN: 0, MAX: 1},
{ID: 'BX', MIN: 0, MAX: 1},
{ID: 'BNX', MIN: 0, MAX: 1},
{ID: 'M3', MIN: 1, MAX: 1},
{ID: 'N9', MIN: 1, MAX: 30},
{ID: 'CM', MIN: 0, MAX: 2},
{ID: 'M1', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'N7', MIN: 1, MAX: 500, LEVEL: [
{ID: 'EM', MIN: 0, MAX: 1},
{ID: 'VC', MIN: 0, MAX: 36, LEVEL: [
{ID: 'N1', MIN: 0, MAX: 2, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'H3', MIN: 0, MAX: 1},
]},
]},
{ID: 'M7', MIN: 0, MAX: 5},
{ID: 'N5', MIN: 0, MAX: 1},
{ID: 'IC', MIN: 0, MAX: 1},
{ID: 'IM', MIN: 0, MAX: 1},
{ID: 'M12', MIN: 0, MAX: 2},
{ID: 'E1', MIN: 0, MAX: 2, LEVEL: [
{ID: 'E4', MIN: 0, MAX: 1},
{ID: 'E5', MIN: 0, MAX: 13},
{ID: 'PI', MIN: 0, MAX: 1},
]},
{ID: 'GA', MIN: 0, MAX: 15},
{ID: 'REF', MIN: 0, MAX: 99, LEVEL: [
{ID: 'N10', MIN: 0, MAX: 15},
{ID: 'N1', MIN: 0, MAX: 5, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
]},
]},
]},
{ID: 'NA', MIN: 0, MAX: 10},
{ID: 'F9', MIN: 1, MAX: 1},
{ID: 'D9', MIN: 1, MAX: 1},
{ID: 'N1', MIN: 1, MAX: 15, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 2},
{ID: 'PER', MIN: 0, MAX: 2},
{ID: 'BL', MIN: 0, MAX: 12},
]},
{ID: 'S1', MIN: 0, MAX: 12, LEVEL: [
{ID: 'S2', MIN: 0, MAX: 2},
{ID: 'S9', MIN: 0, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 1},
]},
{ID: 'R2', MIN: 1, MAX: 13},
{ID: 'R9', MIN: 0, MAX: 1},
{ID: 'E1', MIN: 0, MAX: 2, LEVEL: [
{ID: 'E4', MIN: 0, MAX: 1},
{ID: 'E5', MIN: 0, MAX: 13},
{ID: 'PI', MIN: 0, MAX: 1},
]},
{ID: 'H3', MIN: 0, MAX: 20},
{ID: 'PS', MIN: 0, MAX: 5},
{ID: 'LX', MIN: 1, MAX: 25, LEVEL: [
{ID: 'L5', MIN: 1, MAX: 15},
{ID: 'L0', MIN: 0, MAX: 25, LEVEL: [
{ID: 'MEA', MIN: 0, MAX: 3},
{ID: 'L1', MIN: 0, MAX: 10},
{ID: 'PI', MIN: 0, MAX: 30},
]},
{ID: 'X1', MIN: 0, MAX: 6},
]},
{ID: 'T1', MIN: 0, MAX: 64, LEVEL: [
{ID: 'T2', MIN: 0, MAX: 30},
{ID: 'T3', MIN: 0, MAX: 12},
{ID: 'T6', MIN: 0, MAX: 1},
{ID: 'T8', MIN: 0, MAX: 99},
]},
{ID: 'L3', MIN: 0, MAX: 1},
{ID: 'LS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'LH1', MIN: 1, MAX: 1000, LEVEL: [
{ID: 'LH2', MIN: 0, MAX: 4},
{ID: 'LH3', MIN: 0, MAX: 10},
{ID: 'LFH', MIN: 0, MAX: 20},
{ID: 'LEP', MIN: 0, MAX: 3},
{ID: 'LH4', MIN: 0, MAX: 4},
{ID: 'LHT', MIN: 0, MAX: 3},
{ID: 'LHR', MIN: 0, MAX: 5},
{ID: 'PER', MIN: 0, MAX: 5},
{ID: 'N1', MIN: 0, MAX: 10, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 2},
]},
]},
{ID: 'LE', MIN: 1, MAX: 1},
]},
{ID: 'PER', MIN: 0, MAX: 5},
{ID: 'LH2', MIN: 0, MAX: 6},
{ID: 'LHR', MIN: 0, MAX: 1},
{ID: 'LH6', MIN: 0, MAX: 5},
{ID: 'XH', MIN: 0, MAX: 1},
{ID: 'X7', MIN: 0, MAX: 10},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
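# Reading the tree above: each dict describes one X12 segment, e.g.
# {ID: 'N7', MIN: 1, MAX: 500, LEVEL: [...]} means segment N7 must occur at
# least once, may repeat up to 500 times, and the segments listed under
# LEVEL nest beneath it (my gloss of the bots.botsconfig conventions).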
| [
"[email protected]"
] | |
992ad12a575aa28017fbf8115beb2a0579bad5bb | 5dd47abf7061201d9378e73e51f08fbb314ba2fd | /envdsys/envcontacts/migrations/0065_auto_20210223_0054.py | 007d402317fba9799fbecce80a479a8fe0db8323 | [
"Unlicense"
] | permissive | NOAA-PMEL/envDataSystem | 4d264ae5209015e4faee648f37608d68a4461d0a | 4db4a3569d2329658799a3eef06ce36dd5c0597d | refs/heads/master | 2023-02-23T22:33:14.334737 | 2021-07-22T01:09:16 | 2021-07-22T01:09:16 | 191,809,007 | 1 | 0 | Unlicense | 2023-02-08T00:45:54 | 2019-06-13T17:50:03 | Python | UTF-8 | Python | false | false | 1,175 | py | # Generated by Django 3.1.7 on 2021-02-23 00:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('envcontacts', '0064_auto_20210223_0034'),
]
operations = [
migrations.AlterField(
model_name='person',
name='email1_type',
field=models.CharField(choices=[('W', 'Work'), ('H', 'Home'), ('O', 'Other')], default='W', max_length=1),
),
migrations.AlterField(
model_name='person',
name='email2_type',
field=models.CharField(choices=[('W', 'Work'), ('H', 'Home'), ('O', 'Other')], default='W', max_length=1),
),
migrations.AlterField(
model_name='person',
name='phone1_type',
field=models.CharField(choices=[('O', 'Other'), ('W', 'Work'), ('M', 'Mobile'), ('H', 'Home')], default='M', max_length=1),
),
migrations.AlterField(
model_name='person',
name='phone2_type',
field=models.CharField(choices=[('O', 'Other'), ('W', 'Work'), ('M', 'Mobile'), ('H', 'Home')], default='M', max_length=1),
),
]
| [
"[email protected]"
] | |
69c8613e89b0b70ecdbf7d9a9cc3558b46d87771 | 7f7bf9a5827d1441f18f568fc75ed5bf0159ca6c | /Декоратор/2/2функцию-декораторv41.py | f7a19566515c70ee88069f9c42bb8055b942a228 | [] | no_license | KorsakovPV/yandex_contest | 08bcff4eaf38d46a8348ac3abbb5f496857fe8e4 | f67917ef710f5b138142b11ec4e6e4678b23e408 | refs/heads/master | 2023-01-06T13:04:07.955570 | 2020-10-24T20:22:41 | 2020-10-24T20:22:41 | 290,097,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | def cache3(func):
cache = {'res': func(), 'counter': 0}
def save_three_times():
if cache['counter'] == 3:
cache['counter'] = 0
cache['res'] = func()
return cache['res']
cache['counter'] += 1
return cache['res']
return save_three_times
@cache3
def heavy():
    print('Heavy computation')
return 1
print(heavy())
# Heavy computation
# 1
print(heavy())
# 1
print(heavy())
# 1
# The cache has gone stale again and must be recomputed
print(heavy())
# Heavy computation
print(heavy())
print(heavy())
print(heavy())
print(heavy())
print(heavy())
print(heavy())
print(heavy())
print(heavy())
print(heavy())
print(heavy())
print(heavy())
print(heavy())
print(heavy())
| [
"[email protected]"
] | |
f5d7ee6e684505ed22b422d7ed9221e0d6c707d5 | 0eefc9995ce927964969cbae247e28fd98f4998e | /src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/image/psx_tim.py | 12103cca74f785a478567753e17eccc22387f220 | [
"MIT"
] | permissive | SamuelWAnderson45/hobbits | 993bde59d2fd96b1824e4f85ba1913eba12c8f3f | 412f4ca50aa6aa2d26a1d05913f21f2ab0198eba | refs/heads/master | 2022-12-14T09:36:46.663303 | 2020-08-30T17:05:53 | 2020-08-30T17:05:53 | 291,514,129 | 0 | 0 | null | 2020-08-30T17:02:44 | 2020-08-30T17:02:43 | null | UTF-8 | Python | false | false | 3,662 | py | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class PsxTim(KaitaiStruct):
class BppType(Enum):
bpp_4 = 0
bpp_8 = 1
bpp_16 = 2
bpp_24 = 3
SEQ_FIELDS = ["magic", "flags", "clut", "img"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['magic']['start'] = self._io.pos()
self.magic = self._io.read_bytes(4)
self._debug['magic']['end'] = self._io.pos()
if not self.magic == b"\x10\x00\x00\x00":
raise kaitaistruct.ValidationNotEqualError(b"\x10\x00\x00\x00", self.magic, self._io, u"/seq/0")
self._debug['flags']['start'] = self._io.pos()
self.flags = self._io.read_u4le()
self._debug['flags']['end'] = self._io.pos()
if self.has_clut:
self._debug['clut']['start'] = self._io.pos()
self.clut = self._root.Bitmap(self._io, self, self._root)
self.clut._read()
self._debug['clut']['end'] = self._io.pos()
self._debug['img']['start'] = self._io.pos()
self.img = self._root.Bitmap(self._io, self, self._root)
self.img._read()
self._debug['img']['end'] = self._io.pos()
class Bitmap(KaitaiStruct):
SEQ_FIELDS = ["len", "origin_x", "origin_y", "width", "height", "body"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len']['start'] = self._io.pos()
self.len = self._io.read_u4le()
self._debug['len']['end'] = self._io.pos()
self._debug['origin_x']['start'] = self._io.pos()
self.origin_x = self._io.read_u2le()
self._debug['origin_x']['end'] = self._io.pos()
self._debug['origin_y']['start'] = self._io.pos()
self.origin_y = self._io.read_u2le()
self._debug['origin_y']['end'] = self._io.pos()
self._debug['width']['start'] = self._io.pos()
self.width = self._io.read_u2le()
self._debug['width']['end'] = self._io.pos()
self._debug['height']['start'] = self._io.pos()
self.height = self._io.read_u2le()
self._debug['height']['end'] = self._io.pos()
self._debug['body']['start'] = self._io.pos()
self.body = self._io.read_bytes((self.len - 12))
self._debug['body']['end'] = self._io.pos()
@property
def has_clut(self):
if hasattr(self, '_m_has_clut'):
return self._m_has_clut if hasattr(self, '_m_has_clut') else None
self._m_has_clut = (self.flags & 8) != 0
return self._m_has_clut if hasattr(self, '_m_has_clut') else None
@property
def bpp(self):
if hasattr(self, '_m_bpp'):
return self._m_bpp if hasattr(self, '_m_bpp') else None
self._m_bpp = (self.flags & 3)
return self._m_bpp if hasattr(self, '_m_bpp') else None
| [
"[email protected]"
] | |
cf7003a96795ac941e196feb0ba340851ebef983 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-cse/huaweicloudsdkcse/v1/model/engine_reference.py | f492745795a7c0b25ba3bb1a8314bd1147a93b69 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 11,145 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class EngineReference:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'vpc': 'str',
'az_list': 'list[str]',
'network_id': 'str',
'subnet_cidr': 'str',
'subnet_cidr_v6': 'str',
'subnet_gateway': 'str',
'public_ip_id': 'str',
'service_limit': 'int',
'instance_limit': 'int',
'inputs': 'dict(str, str)'
}
attribute_map = {
'vpc': 'vpc',
'az_list': 'az_list',
'network_id': 'network_id',
'subnet_cidr': 'subnet_cidr',
'subnet_cidr_v6': 'subnet_cidr_v6',
'subnet_gateway': 'subnet_gateway',
'public_ip_id': 'public_ip_id',
'service_limit': 'service_limit',
'instance_limit': 'instance_limit',
'inputs': 'inputs'
}
def __init__(self, vpc=None, az_list=None, network_id=None, subnet_cidr=None, subnet_cidr_v6=None, subnet_gateway=None, public_ip_id=None, service_limit=None, instance_limit=None, inputs=None):
"""EngineReference
The model defined in huaweicloud sdk
:param vpc: vpc名称
:type vpc: str
:param az_list: 微服务引擎专享版部署的可用区列表
:type az_list: list[str]
:param network_id: 微服务引擎专享版子网网络ID
:type network_id: str
:param subnet_cidr: 微服务引擎专享版ipv4子网划分
:type subnet_cidr: str
:param subnet_cidr_v6: 微服务引擎专享版ipv6子网划分
:type subnet_cidr_v6: str
:param subnet_gateway: 微服务引擎专享版子网网关
:type subnet_gateway: str
:param public_ip_id: 微服务引擎专享版公网地址ID
:type public_ip_id: str
:param service_limit: 微服务引擎专享版可支持的微服务总数
:type service_limit: int
:param instance_limit: 微服务引擎专享版可支持的实例总数
:type instance_limit: int
:param inputs: 微服务引擎专享版附加参数
:type inputs: dict(str, str)
"""
self._vpc = None
self._az_list = None
self._network_id = None
self._subnet_cidr = None
self._subnet_cidr_v6 = None
self._subnet_gateway = None
self._public_ip_id = None
self._service_limit = None
self._instance_limit = None
self._inputs = None
self.discriminator = None
if vpc is not None:
self.vpc = vpc
if az_list is not None:
self.az_list = az_list
if network_id is not None:
self.network_id = network_id
if subnet_cidr is not None:
self.subnet_cidr = subnet_cidr
if subnet_cidr_v6 is not None:
self.subnet_cidr_v6 = subnet_cidr_v6
if subnet_gateway is not None:
self.subnet_gateway = subnet_gateway
if public_ip_id is not None:
self.public_ip_id = public_ip_id
if service_limit is not None:
self.service_limit = service_limit
if instance_limit is not None:
self.instance_limit = instance_limit
if inputs is not None:
self.inputs = inputs
@property
def vpc(self):
"""Gets the vpc of this EngineReference.
vpc名称
:return: The vpc of this EngineReference.
:rtype: str
"""
return self._vpc
@vpc.setter
def vpc(self, vpc):
"""Sets the vpc of this EngineReference.
vpc名称
:param vpc: The vpc of this EngineReference.
:type vpc: str
"""
self._vpc = vpc
@property
def az_list(self):
"""Gets the az_list of this EngineReference.
微服务引擎专享版部署的可用区列表
:return: The az_list of this EngineReference.
:rtype: list[str]
"""
return self._az_list
@az_list.setter
def az_list(self, az_list):
"""Sets the az_list of this EngineReference.
微服务引擎专享版部署的可用区列表
:param az_list: The az_list of this EngineReference.
:type az_list: list[str]
"""
self._az_list = az_list
@property
def network_id(self):
"""Gets the network_id of this EngineReference.
微服务引擎专享版子网网络ID
:return: The network_id of this EngineReference.
:rtype: str
"""
return self._network_id
@network_id.setter
def network_id(self, network_id):
"""Sets the network_id of this EngineReference.
微服务引擎专享版子网网络ID
:param network_id: The network_id of this EngineReference.
:type network_id: str
"""
self._network_id = network_id
@property
def subnet_cidr(self):
"""Gets the subnet_cidr of this EngineReference.
微服务引擎专享版ipv4子网划分
:return: The subnet_cidr of this EngineReference.
:rtype: str
"""
return self._subnet_cidr
@subnet_cidr.setter
def subnet_cidr(self, subnet_cidr):
"""Sets the subnet_cidr of this EngineReference.
微服务引擎专享版ipv4子网划分
:param subnet_cidr: The subnet_cidr of this EngineReference.
:type subnet_cidr: str
"""
self._subnet_cidr = subnet_cidr
@property
def subnet_cidr_v6(self):
"""Gets the subnet_cidr_v6 of this EngineReference.
微服务引擎专享版ipv6子网划分
:return: The subnet_cidr_v6 of this EngineReference.
:rtype: str
"""
return self._subnet_cidr_v6
@subnet_cidr_v6.setter
def subnet_cidr_v6(self, subnet_cidr_v6):
"""Sets the subnet_cidr_v6 of this EngineReference.
微服务引擎专享版ipv6子网划分
:param subnet_cidr_v6: The subnet_cidr_v6 of this EngineReference.
:type subnet_cidr_v6: str
"""
self._subnet_cidr_v6 = subnet_cidr_v6
@property
def subnet_gateway(self):
"""Gets the subnet_gateway of this EngineReference.
微服务引擎专享版子网网关
:return: The subnet_gateway of this EngineReference.
:rtype: str
"""
return self._subnet_gateway
@subnet_gateway.setter
def subnet_gateway(self, subnet_gateway):
"""Sets the subnet_gateway of this EngineReference.
微服务引擎专享版子网网关
:param subnet_gateway: The subnet_gateway of this EngineReference.
:type subnet_gateway: str
"""
self._subnet_gateway = subnet_gateway
@property
def public_ip_id(self):
"""Gets the public_ip_id of this EngineReference.
微服务引擎专享版公网地址ID
:return: The public_ip_id of this EngineReference.
:rtype: str
"""
return self._public_ip_id
@public_ip_id.setter
def public_ip_id(self, public_ip_id):
"""Sets the public_ip_id of this EngineReference.
微服务引擎专享版公网地址ID
:param public_ip_id: The public_ip_id of this EngineReference.
:type public_ip_id: str
"""
self._public_ip_id = public_ip_id
@property
def service_limit(self):
"""Gets the service_limit of this EngineReference.
微服务引擎专享版可支持的微服务总数
:return: The service_limit of this EngineReference.
:rtype: int
"""
return self._service_limit
@service_limit.setter
def service_limit(self, service_limit):
"""Sets the service_limit of this EngineReference.
微服务引擎专享版可支持的微服务总数
:param service_limit: The service_limit of this EngineReference.
:type service_limit: int
"""
self._service_limit = service_limit
@property
def instance_limit(self):
"""Gets the instance_limit of this EngineReference.
微服务引擎专享版可支持的实例总数
:return: The instance_limit of this EngineReference.
:rtype: int
"""
return self._instance_limit
@instance_limit.setter
def instance_limit(self, instance_limit):
"""Sets the instance_limit of this EngineReference.
微服务引擎专享版可支持的实例总数
:param instance_limit: The instance_limit of this EngineReference.
:type instance_limit: int
"""
self._instance_limit = instance_limit
@property
def inputs(self):
"""Gets the inputs of this EngineReference.
微服务引擎专享版附加参数
:return: The inputs of this EngineReference.
:rtype: dict(str, str)
"""
return self._inputs
@inputs.setter
def inputs(self, inputs):
"""Sets the inputs of this EngineReference.
微服务引擎专享版附加参数
:param inputs: The inputs of this EngineReference.
:type inputs: dict(str, str)
"""
self._inputs = inputs
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EngineReference):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
dc5f16a21eaf87f548dcafd572e064bea67e1b04 | 928508a85d16987a1382b7adcd7142dee7ac986f | /Practice_Python/6.String_lists.py | 0d9f652a42a79e534901e740e8c4129af9c11942 | [] | no_license | TheoRobin76/Data_Engineering22 | ed29798d0660d6ac73985f510ef7cea942ab77ad | 26299a61a8b4372d9ed2664d4ad623c9d12d23d0 | refs/heads/main | 2023-06-17T05:43:40.347544 | 2021-07-14T13:23:23 | 2021-07-14T13:23:23 | 378,177,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | word = input("Please enter a word and I will tell you if it is a palindrome: ")
if word == word[::-1]:
print(f"Congratulations, {word} is a palindrome")
else:
print(f"My Condolences, {word} is not a palindrome")
| [
"[email protected]"
] | |
ff131d5fea0ba01e244ca696d7d1c796629e8173 | cb5719d2e21f02be07368790ae5649b249c9a20e | /apps/tests/log_databus/test_collectorhandler.py | 60505d52d55ea82f8d6a149d6e5a34ee771aba5b | [
"MIT"
] | permissive | Toread-jxj/bk-log | fff57337cc39fdd6a85890bf84baff8aa23539be | 7d68cad3a2c78511141c9af2a016fcd603f62707 | refs/heads/master | 2023-07-15T01:22:47.200423 | 2021-08-19T03:08:25 | 2021-08-19T03:08:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,559 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import copy
from unittest.mock import patch
from django.test import TestCase
from apps.log_databus.constants import LogPluginInfo
from apps.log_databus.handlers.collector import CollectorHandler
BK_DATA_ID = 1
TABLE_ID = "2_log.test_table"
SUBSCRIPTION_ID = 2
TASK_ID = 3
NEW_TASK_ID = 4
LAST_TASK_ID = 5
PARAMS = {
"bk_biz_id": 706,
"collector_config_name": "采集项名称",
"collector_scenario_id": "row",
"category_id": "application",
"target_object_type": "HOST",
"target_node_type": "TOPO",
"target_nodes": [
{"bk_inst_id": 33, "bk_obj_id": "module"},
],
"data_encoding": "UTF-8",
"bk_data_name": "abc",
"description": "这是一个描述",
"params": {
"paths": ["/log/abc"],
"conditions": {
"type": "match",
"match_type": "include",
"match_content": "delete",
"separator": "|",
"separator_filters": [
{"fieldindex": 1, "word": "val1", "op": "=", "logic_op": "or"},
{"fieldindex": 2, "word": "val2", "op": "=", "logic_op": "or"},
],
},
"tail_files": True,
"ignore_older": 1,
"max_bytes": 1,
},
"storage_cluster_id": "default",
"storage_expires": 1,
}
PART_FAILED_INSTANCE_DATA = {
"instances": [
{
"status": "FAILED",
"host_statuses": [
{"status": "UNKNOWN", "version": "3.0.10", "name": "unifytlogc"},
{"status": "UNKNOWN", "version": "3.0.10", "name": "unifytlogc"},
],
"running_task": None,
"instance_id": "host|instance|host|127.0.0.1-0-0",
"create_time": "2019-09-19T20:32:19.957883",
"instance_info": {
"host": {
"bk_host_name": "rbtnode1",
"bk_supplier_account": "0",
"bk_cloud_id": [
{
"bk_obj_name": "",
"id": "0",
"bk_obj_id": "plat",
"bk_obj_icon": "",
"bk_inst_id": 0,
"bk_inst_name": "default area",
}
],
"bk_host_innerip": "127.0.0.1",
},
"service": {},
},
},
{
"status": "SUCCESS",
"host_statuses": [
{"status": "RUNNING", "version": "3.0.10", "name": "unifytlogc"},
{"status": "RUNNING", "version": "3.0.10", "name": "unifytlogc"},
],
"running_task": None,
"instance_id": "host|instance|host|127.0.0.1-0-0",
"create_time": "2019-09-19T20:32:19.957883",
"instance_info": {
"host": {
"bk_host_name": "rbtnode1",
"bk_supplier_account": "0",
"bk_cloud_id": [
{
"bk_obj_name": "",
"id": "0",
"bk_obj_id": "plat",
"bk_obj_icon": "",
"bk_inst_id": 0,
"bk_inst_name": "default area",
}
],
"bk_host_innerip": "127.0.0.1",
},
"service": {},
},
},
],
"subscription_id": SUBSCRIPTION_ID,
}
CONFIG_DATA = {
"data_id_config": {"option": {"encoding": "encoding data"}, "data_name": "data name"},
"result_table_config": "",
"subscription_config": [
{
"steps": [
{
"config": {"plugin_name": LogPluginInfo.NAME, "plugin_version": LogPluginInfo.VERSION},
"type": "PLUGIN",
"id": LogPluginInfo.NAME,
"params": {
"context": {
"dataid": BK_DATA_ID,
"local": [
{
"paths": ["testlogic_op"],
"delimiter": "|",
"filters": [
{"conditions": [{"index": 1, "key": "val1", "op": "="}]},
{"conditions": [{"index": 1, "key": "val1", "op": "="}]},
],
"encoding": "UTF-8",
}
],
}
},
}
]
}
],
}
class CCModuleTest(object):
"""
mock CCApi.search_module
"""
def bulk_request(self, params=None):
return []
class CCBizHostsTest(object):
"""
mock CCApi.list_biz_hosts
"""
def bulk_request(self, params=None):
return []
class CCSetTest(object):
"""
    mock CCApi.search_set
"""
def bulk_request(self, params=None):
return []
class CCBizHostsFilterTest(object):
"""
mock CCApi.list_biz_hosts
"""
def bulk_request(self, params=None):
return [
{
"bk_os_name": "",
"bk_host_id": 2000006651,
"bk_cloud_id": 0,
"bk_supplier_account": "tencent",
"bk_host_innerip": "127.0.0.2",
"bk_os_type": "1",
},
]
FILTER_ILLEGAL_IPS_BIZ_ID = 215
FILTER_ILLEGAL_IPS_IP_LIST = ["127.0.0.1"]
def subscription_statistic(params):
return [
{
"subscription_id": SUBSCRIPTION_ID,
"status": [
{"status": "SUCCESS", "count": 0},
{"status": "PENDING", "count": 0},
{"status": "FAILED", "count": 0},
{"status": "RUNNING", "count": 0},
],
"versions": [],
"instances": 0,
}
]
class TestCollectorHandler(TestCase):
@staticmethod
@patch("apps.api.TransferApi.create_data_id", lambda _: {"bk_data_id": BK_DATA_ID})
@patch("apps.api.NodeApi.create_subscription", lambda _: {"subscription_id": SUBSCRIPTION_ID})
@patch("apps.api.NodeApi.run_subscription_task", lambda _: {"task_id": TASK_ID})
def create(params=None):
"""
创建 CollectorHandler实例对象,并创建一个采集配置
"""
if params:
result = CollectorHandler().update_or_create(params=params)
else:
params = copy.deepcopy(PARAMS)
params["params"]["conditions"]["type"] = "separator"
result = CollectorHandler().update_or_create(params=params)
return params, result
@patch("apps.api.NodeApi.switch_subscription", lambda _: {})
@patch("apps.decorators.user_operation_record.delay", return_value=None)
@patch("apps.api.NodeApi.subscription_statistic", subscription_statistic)
def test_update_or_create(self, *args, **kwargs):
"""
测试'创建采集配置'函数 CollectorHandler.update_or_create
"""
params, result = self.create()
self.assertEqual(result["bk_data_id"], BK_DATA_ID)
self.assertEqual(result["collector_config_name"], params["collector_config_name"])
self.assertEqual(result["subscription_id"], SUBSCRIPTION_ID)
self.assertEqual(result["task_id_list"], [str(TASK_ID)])
@patch("apps.utils.thread.MultiExecuteFunc.append")
@patch("apps.utils.thread.MultiExecuteFunc.run")
@patch("apps.api.NodeApi.switch_subscription", lambda _: {})
@patch("apps.api.CCApi.search_biz_inst_topo", lambda _: [])
@patch("apps.api.CCApi.search_module", CCModuleTest())
@patch("apps.api.CCApi.search_set", CCSetTest())
@patch("apps.api.CCApi.list_biz_hosts", CCBizHostsTest())
@patch("apps.decorators.user_operation_record.delay", return_value=None)
def test_retrieve(self, mock_run, mock_append, *args, **kwargs):
"""
测试'获取采集配置'函数 CollectorHandler.retrieve
"""
_, result = self.create()
mock_append.return_value = ""
mock_run.return_value = CONFIG_DATA
collector_config_id = result["collector_config_id"]
collector = CollectorHandler(collector_config_id=collector_config_id)
res = collector.retrieve()
self.assertEqual(res.get("data_encoding"), "UTF-8")
self.assertIsNone(res.get("storage_cluster_id"))
self.assertIsNone(res.get("retention"))
self.assertEqual(res.get("collector_config_id"), collector_config_id)
self.assertEqual(res.get("collector_scenario_id"), "row")
@patch("apps.api.CCApi.list_biz_hosts", CCBizHostsFilterTest())
def test_filter_illegal_ips(self):
self.assertEqual(
CollectorHandler._filter_illegal_ips(
bk_biz_id=FILTER_ILLEGAL_IPS_BIZ_ID, ip_list=FILTER_ILLEGAL_IPS_IP_LIST
),
["127.0.0.1"],
)
| [
"[email protected]"
] | |
e516493b77004a907bb16ef524f73968b6760fc5 | 8cf633e92a0671c8201268620a0372f250c8aeb2 | /204.计数质数.py | dc347d29caae6593e34a2ccf06dfa29044656b36 | [
"Unlicense"
] | permissive | SprintGhost/LeetCode | 76da5c785009d474542e5f2cdac275675b8e60b8 | cdf1a86c83f2daedf674a871c4161da7e8fad17c | refs/heads/develop | 2021-06-06T04:04:28.883692 | 2021-01-01T14:09:26 | 2021-01-01T14:09:26 | 230,635,046 | 0 | 0 | Unlicense | 2020-12-11T14:55:36 | 2019-12-28T16:34:39 | Python | UTF-8 | Python | false | false | 607 | py | #
# @lc app=leetcode.cn id=204 lang=python3
#
# [204] 计数质数
#
# Accepted
# 20/20 cases passed (124 ms)
# Your runtime beats 85.76 % of python3 submissions
# Your memory usage beats 28.36 % of python3 submissions (36.8 MB)
# @lc code=start
class Solution:
def countPrimes(self, n: int) -> int:
if n < 2:
return 0
isPrime = [1] * n
isPrime[0] = isPrime[1] = 0
for i in range(2, int(n ** 0.5) + 1):
if isPrime[i]:
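                # Sieve of Eratosthenes step: strike every multiple of i
                # from i*i upward in a single slice assignment.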
isPrime[i * i:n:i] = [0] * ((n - 1 - i * i) // i + 1)
return sum(isPrime)
# @lc code=end
| [
"[email protected]"
] | |
5b3d7a86142ca1e96291cb8b7355a821f2d2b495 | 3e7cce8dc203bcdbd35fccfaf974a9af0613d838 | /profilemaker/views.py | 5bb04d58dafd44c0f3899b259bb47bc561694b2d | [] | no_license | MdReyajuddin/blog | 146ac2b0b6967261b943535c819b403817390cd5 | 7da318865cef3116d50bcf0eb20d800e57290b90 | refs/heads/master | 2022-12-15T15:44:30.969717 | 2020-01-25T08:07:37 | 2020-01-25T08:07:37 | 236,128,083 | 0 | 0 | null | 2022-11-22T05:16:03 | 2020-01-25T05:06:16 | Python | UTF-8 | Python | false | false | 892 | py | from django.shortcuts import render
from .forms import Profile_Form
from .models import User_Profile
# Create your views here.
IMAGE_FILE_TYPES = ['png', 'jpg', 'jpeg']
def create_profile(request):
form = Profile_Form()
if request.method == 'POST':
form = Profile_Form(request.POST, request.FILES)
if form.is_valid():
user_pr = form.save(commit=False)
user_pr.display_picture = request.FILES['display_picture']
file_type = user_pr.display_picture.url.split('.')[-1]
file_type = file_type.lower()
if file_type not in IMAGE_FILE_TYPES:
return render(request, 'profilemaker/error.html')
user_pr.save()
return render(request, 'profilemaker/details.html', {'user_pr': user_pr})
context = {"form": form,}
    return render(request, 'profilemaker/create.html', context)
| [
"[email protected]"
] | |
5aa57daf3cc68ef88f14f8f3ba7cba5f03636d50 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_8/34.py | 13a5dbdcd1c07973c0fd5e97e7a59e2773a1dde9 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,034 | py | """The grouper class has been reciped from:
http://code.activestate.com/recipes/387776/
"""
class Grouper(object):
"""This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using .join(), tested for connectedness
using .joined(), and all disjoint sets can be retreived using
.get().
The objects being joined must be hashable.
For example:
>>> g = grouper.Grouper()
>>> g.join('a', 'b')
>>> g.join('b', 'c')
>>> g.join('d', 'e')
>>> list(g.get())
[['a', 'b', 'c'], ['d', 'e']]
>>> g.joined('a', 'b')
True
>>> g.joined('a', 'c')
True
>>> g.joined('a', 'd')
False"""
def __init__(self, init=[]):
mapping = self._mapping = {}
for x in init:
mapping[x] = [x]
def join(self, a, *args):
"""Join given arguments into the same set.
Accepts one or more arguments."""
mapping = self._mapping
set_a = mapping.setdefault(a, [a])
for arg in args:
set_b = mapping.get(arg)
if set_b is None:
set_a.append(arg)
mapping[arg] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
def joined(self, a, b):
"""Returns True if a and b are members of the same set."""
mapping = self._mapping
try:
return mapping[a] is mapping[b]
except KeyError:
return False
def __iter__(self):
"""Returns an iterator returning each of the disjoint sets as a list."""
seen = set()
for elem, group in self._mapping.iteritems():
if elem not in seen:
yield group
seen.update(group)
fin = open('B-small.in', 'r')
fout = open('B-small.out', 'w')
primes = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211,223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587,593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,709,719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,827,829,839,853,857,859,863,877,881,883,887,907,911,919,929,937,941,947,953,967,971,977,983,991,997]
def common(x, y, l):
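    # True if x and y share a prime factor p with p >= l; a shared prime of
    # at least P is what links two numbers into the same group below.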
for p in primes:
if p >= l and x % p == 0 and y % p == 0:
return True
return False
T = int(fin.readline());
for i in range(T):
args = fin.readline().split(' ')
A = int(args[0])
B = int(args[1])
P = int(args[2])
g = Grouper(range(A,B+1))
for j in range(A,B+1):
for h in range(j+1, B+1):
if not g.joined(j, h) and common(j, h, P):
g.join(j, h)
fout.write('Case #' + str(i + 1) + ': ' + str(len(list(g))) + '\n')
| [
"[email protected]"
] | |
5e034aa33708e97b5aedbcbf1bba6cf51df4c4f9 | e06c7fd594c52425ab7fc5498c07ae14daf9578b | /api/admin.py | cafd7a142d565ea707d870d9d0d7f3375129165e | [] | no_license | rwheeler-7864/simplenight-api | bc35560eca1e1c25092a1bcdc4af1633367413b8 | 602646911a0155df5b70991d1445c10cee18cd33 | refs/heads/master | 2023-03-12T03:10:51.516499 | 2021-02-25T20:40:44 | 2021-02-25T20:40:44 | 342,370,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | from django import forms
from django.contrib import admin
from django.forms import TextInput
from api.models.models import (
Booking,
OrganizationFeatures,
PropertyInfo,
Venue,
VenueMedia,
VenueContact,
VenueDetail,
PaymentMethod,
# ProductMedia,
ProductHotelsMedia,
ProductsNightLifeMedia,
ProductGroup,
ProductsNightLife,
ProductHotel,
ProductsHotelRoomDetails,
ProductsHotelRoomPricing,
)
@admin.register(Booking)
class BookingAdmin(admin.ModelAdmin):
list_per_page = 100
@admin.register(OrganizationFeatures)
class OrganizationFeatureInline(admin.ModelAdmin):
class Form(forms.ModelForm):
class Meta:
model = OrganizationFeatures
fields = "__all__"
widgets = {
"value": TextInput(attrs={"size": 60}),
}
form = Form
list_display = ("organization_name", "name", "value")
list_filter = ("organization__name",)
widgets = {
"value": TextInput(attrs={"size": 20}),
}
@admin.register(PropertyInfo)
class PropertyInfoAdmin(admin.ModelAdmin):
list_display = ("provider", "provider_code", "type", "language_code", "description")
list_filter = ("provider_code", "language_code")
admin.site.register(Venue)
admin.site.register(VenueMedia)
admin.site.register(VenueContact)
admin.site.register(VenueDetail)
admin.site.register(PaymentMethod)
admin.site.register(ProductHotelsMedia)
admin.site.register(ProductsNightLifeMedia)
admin.site.register(ProductHotel)
admin.site.register(ProductGroup)
admin.site.register(ProductsNightLife)
admin.site.register(ProductsHotelRoomDetails)
admin.site.register(ProductsHotelRoomPricing)
| [
"[email protected]"
] | |
506003b3dae22a077719c309c33a12355ed13292 | 7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3 | /codeforces/cf251-275/cf260/a2.py | a83e5ffe122cda371ad06555dc20bfd8fbd03eb2 | [] | no_license | roiti46/Contest | c0c35478cd80f675965d10b1a371e44084f9b6ee | c4b850d76796c5388d2e0d2234f90dc8acfaadfa | refs/heads/master | 2021-01-17T13:23:30.551754 | 2017-12-10T13:06:42 | 2017-12-10T13:06:42 | 27,001,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | # -*- coding: utf-8 -*-
import sys,copy,math,heapq,itertools as it,fractions,re,bisect,collections as coll
n = int(raw_input())
for loop in xrange(n):
a, b = map(int, raw_input().split())
if a < b:
print "Happy Alex"
break
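# for/else: the else branch runs only when the loop completes without break,
# i.e. when no pair with a < b was found.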
else:
print "Poor Alex"
| [
"[email protected]"
] | |
b548144cbea5bfa89e439e9720a806f5b4e08fe8 | 544cfadc742536618168fc80a5bd81a35a5f2c99 | /tools/test/connectivity/acts_tests/tests/google/nr/sa5g/Sa5gSmsTest.py | 011062f94542ad88a258d6dd33ea3382f50f7a21 | [] | no_license | ZYHGOD-1/Aosp11 | 0400619993b559bf4380db2da0addfa9cccd698d | 78a61ca023cbf1a0cecfef8b97df2b274ac3a988 | refs/heads/main | 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,188 | py | #!/usr/bin/env python3.4
#
# Copyright 2021 - Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Script for 5G SA SMS scenarios
"""
import time
from acts.test_decorators import test_tracker_info
from acts_contrib.test_utils.tel.TelephonyBaseTest import TelephonyBaseTest
from acts_contrib.test_utils.tel.tel_test_utils import ensure_phones_idle
from acts_contrib.test_utils.tel.tel_5g_test_utils import disable_apm_mode_both_devices
from acts_contrib.test_utils.tel.tel_5g_test_utils import provision_device_for_5g
from acts_contrib.test_utils.tel.tel_5g_test_utils import provision_both_devices_for_volte
from acts_contrib.test_utils.tel.tel_5g_test_utils import verify_5g_attach_for_both_devices
from acts_contrib.test_utils.tel.tel_sms_utils import _sms_test_mo
from acts_contrib.test_utils.tel.tel_sms_utils import _long_sms_test_mo
class Sa5gSmsTest(TelephonyBaseTest):
def setup_class(self):
super().setup_class()
def setup_test(self):
TelephonyBaseTest.setup_test(self)
def teardown_test(self):
ensure_phones_idle(self.log, self.android_devices)
""" Tests Begin """
@test_tracker_info(uuid="8949d1c7-1719-4960-b79c-041b467fb5ef")
@TelephonyBaseTest.tel_test_wrap
def test_5g_sa_sms_mo_mt(self):
"""Test SMS between two phones in 5g SA
Provision devices in 5g SA
Send and Verify SMS from PhoneA to PhoneB
Verify both devices are still on 5g SA
Returns:
True if success.
False if failed.
"""
ads = self.android_devices
if not provision_device_for_5g(self.log, ads, sa_5g=True):
return False
if not _sms_test_mo(self.log, ads):
return False
if not verify_5g_attach_for_both_devices(self.log, ads, True):
return False
self.log.info("PASS - SMS test over 5G SA validated")
return True
@test_tracker_info(uuid="5c7a717b-1f98-44b7-95e7-0e83afb82a84")
@TelephonyBaseTest.tel_test_wrap
def test_5g_sa_sms_long_message_mo_mt(self):
"""Test SMS basic function between two phone.
Phones in sa 5G network.
Airplane mode is off.
Send SMS from PhoneA to PhoneB.
Verify received message on PhoneB is correct.
Returns:
True if success.
False if failed.
"""
ads = self.android_devices
if not disable_apm_mode_both_devices(self.log, ads):
return False
if not provision_device_for_5g(self.log, ads, sa_5g=True):
return False
return _long_sms_test_mo(self.log, ads)
""" Tests End """
| [
"[email protected]"
] | |
f3ff45256bbea619dbbf64fd88fabf51d085e602 | 0c15310c93117c916211b214bf5e87bdb05e768b | /hyperion/__init__.py | f1feff3c2c50c50e70b91d7fb71f52e9b778df21 | [
"BSD-2-Clause"
] | permissive | hyperion-rt/hyperion | 553efc0bd2c279409f65381d769546770b728236 | 743e085dd03ac8f92796773e55a69fd5b50700c1 | refs/heads/main | 2023-08-30T20:57:38.751726 | 2023-08-25T20:41:29 | 2023-08-25T20:41:29 | 1,442,240 | 41 | 19 | BSD-2-Clause | 2023-08-25T20:41:31 | 2011-03-05T04:43:44 | Python | UTF-8 | Python | false | false | 2,344 | py | from __future__ import print_function, division
from .version import __version__
# Set up the test function
_test_runner = None
def _get_test_runner():
from .testing.helper import TestRunner
return TestRunner(__path__[0])
def test(package=None, test_path=None, args=None, plugins=None,
verbose=False, pastebin=None, generate_reference=False,
bit_level_tests=False, coverage=False):
'''
Run Hyperion tests using py.test. A proper set of arguments is
constructed and passed to `pytest.main`.
Parameters
----------
package : str, optional
The name of a specific package to test, e.g. 'model' or
'densities'. If nothing is specified all default Hyperion tests
are run.
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
args : str, optional
Additional arguments to be passed to `pytest.main` in the `args`
keyword argument.
plugins : list, optional
Plugins to be passed to `pytest.main` in the `plugins` keyword
argument.
verbose : bool, optional
Convenience option to turn on verbose output from py.test. Passing
True is the same as specifying `-v` in `args`.
pastebin : {'failed','all',None}, optional
Convenience option for turning on py.test pastebin output. Set to
'failed' to upload info for failed tests, or 'all' to upload info
for all tests.
generate_reference : str
Generate reference results for bit-level tests
bit_level_tests : bool
Run bit-level tests. These are time-consuming tests that check the
exact validity of the output, but they are disabled by default.
coverage : bool, optional
Generate a test coverage report. The result will be placed in
the directory htmlcov.
See Also
--------
pytest.main : py.test function wrapped by `run_tests`.
'''
test_runner = _get_test_runner()
return test_runner.run_tests(
package=package, test_path=test_path, args=args,
plugins=plugins, verbose=verbose, pastebin=pastebin,
generate_reference=generate_reference,
bit_level_tests=bit_level_tests, coverage=coverage)
| [
"[email protected]"
] | |
ba79ccbb978bcafbf81db83fd7e2fa58372de286 | 1ec96ae64d42a735278c6ae718cc0205e846a79e | /qpwcnet/app/quantize/test_infer_tflite.py | 45c4358b9a7326e67f1def3e95d1066e81d036f2 | [] | no_license | fenguoo/qpwcnet | c3139c04787ecd8abf88d7aa7c106fd8715d92ab | 3fa803d4b55d46559eb96543ce0683fa4dd4d737 | refs/heads/master | 2023-03-08T20:37:50.137699 | 2021-02-25T18:40:24 | 2021-02-25T18:40:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | #!/usr/bin/env python3
import tensorflow as tf
import numpy as np
import faulthandler
import cv2
from qpwcnet.core.util import disable_gpu
from qpwcnet.data.augment import image_resize, image_augment
from qpwcnet.data.tfrecord import get_reader
from qpwcnet.core.vis import flow_to_image
from qpwcnet.vis.show import show
def main():
faulthandler.enable()
# NOTE(ycho): Mysteriously, tflite segfaults if `channels_first`.
tf.keras.backend.set_image_data_format('channels_last')
# my_devices = tf.config.experimental.list_physical_devices(device_type='CPU')
# tf.config.experimental.set_visible_devices(devices= my_devices, device_type='CPU')
# disable_gpu()
# Load the TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="/tmp/qpwcnet.tflite")
interpreter.allocate_tensors()
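    # allocate_tensors() must run before set_tensor()/invoke() so the
    # interpreter's input/output buffers actually exist.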
# Get input and output tensors.
input_details = interpreter.get_input_details()
print(input_details)
output_details = interpreter.get_output_details()
print(output_details)
# Test the model on random input data.
input_shape = input_details[0]['shape']
input_data = np.array(
np.random.random_sample(input_shape),
dtype=np.float32)
print(input_data.shape) # 1, 6, 256, 512
print('set_tensor')
interpreter.set_tensor(input_details[0]['index'], input_data)
print('invoke')
interpreter.invoke()
print('?')
# The function `get_tensor()` returns a copy of the tensor data.
# Use `tensor()` in order to get a pointer to the tensor.
output_data = interpreter.get_tensor(output_details[-1]['index'])
print(output_data.shape)
def preprocess(ims, flo):
# 0-255 -> 0.0-1.0
ims = tf.cast(ims,
tf.float32) * tf.constant(1.0 / 255.0,
dtype=tf.float32)
# resize, no augmentation.
ims, flo = image_resize(ims, flo, (256, 512))
# ims, flo = image_augment(ims, flo, (256, 512))
# 0.0-1.0 -> -0.5, 0.5
ims = ims - 0.5
# Convert to correct data format
data_format = tf.keras.backend.image_data_format()
if data_format == 'channels_first':
ims = einops.rearrange(ims, '... h w c -> ... c h w')
flo = einops.rearrange(flo, '... h w c -> ... c h w')
return ims, flo
if True:
# TODO(ycho): Cleanup dataset loading pattern for opt-flow datasets.
glob_pattern = '/media/ssd/datasets/sintel-processed/shards/sintel-*.tfrecord'
filenames = tf.data.Dataset.list_files(glob_pattern).shuffle(32)
# dataset = get_reader(filenames).shuffle(buffer_size=1024).repeat().batch(8)
# dataset = get_reader(filenames).batch(8).repeat()
dataset = get_reader(filenames).shuffle(
buffer_size=32).map(preprocess).batch(1)
for ims, flo in dataset:
interpreter.set_tensor(
input_details[0]['index'],
ims) # ims.numpy()?
interpreter.invoke()
flo_pred = output_data = interpreter.get_tensor(
output_details[-1]['index'])
flo_pred_rgb = flow_to_image(flo_pred)
show('flo_pred_rgb', flo_pred_rgb[0], True)
cv2.waitKey(0)
break
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
066630c5fc08c6a3f8d7aea2d8c7356f89559c49 | afc677459e46635ceffccf60d1daf50e62694557 | /ACME/utility/isskinny.py | b174ab247419e98a7beff22f70e10383e0180265 | [
"MIT"
] | permissive | mauriziokovacic/ACME | 056b06da4bf66d89087fcfcbe0fd0a2e255d09f3 | 2615b66dd4addfd5c03d9d91a24c7da414294308 | refs/heads/master | 2020-05-23T23:40:06.667416 | 2020-01-10T14:42:01 | 2020-01-10T14:42:01 | 186,997,977 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | from .row import *
from .col import *
from .ismatrix import *
def isskinny(*tensors):
"""
    Returns whether or not the input tensors are skinny matrices.
    A skinny matrix is a matrix where the number of rows is greater than the number of columns
Parameters
----------
*tensors : Tensor
a sequence of tensors
Returns
-------
bool
True if all the tensors are skinny, False otherwise
"""
return all([ismatrix(t) and (row(t) > col(t)) for t in tensors])
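# e.g. a 3x2 tensor is skinny (3 rows > 2 columns); a 2x3 tensor is not.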
| [
"[email protected]"
] | |
555dc967aabaab2c6844ce81e8e30bd1621e7673 | af368ad82efda90ca9de73c57f2822aa27a21044 | /rigl/rigl_tf2/networks.py | ff96229cb5555728ea5fe8612ba59a1157351158 | [
"Apache-2.0"
] | permissive | google-research/rigl | e24f05bfd872f31194a047cf1b3a0bfa12ab45aa | d39fc7d46505cb3196cb1edeb32ed0b6dd44c0f9 | refs/heads/master | 2023-08-25T04:54:29.014303 | 2023-01-13T13:40:32 | 2023-01-26T17:47:13 | 224,050,000 | 324 | 61 | Apache-2.0 | 2022-07-04T22:02:04 | 2019-11-25T22:03:16 | Python | UTF-8 | Python | false | false | 2,777 | py | # coding=utf-8
# Copyright 2022 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module has networks used in experiments.
"""
from typing import Optional, Tuple # Non-expensive-to-import types.
import gin
import tensorflow.compat.v2 as tf
@gin.configurable(allowlist=['hidden_sizes', 'use_batch_norm'])
def lenet5(input_shape,
num_classes,
activation,
kernel_regularizer,
use_batch_norm = False,
hidden_sizes = (6, 16, 120, 84)):
"""Lenet5 implementation."""
network = tf.keras.Sequential()
kwargs = {
'activation': activation,
'kernel_regularizer': kernel_regularizer,
}
def maybe_add_batchnorm():
if use_batch_norm:
network.add(tf.keras.layers.BatchNormalization())
network.add(tf.keras.layers.Conv2D(
hidden_sizes[0], 5, input_shape=input_shape, **kwargs))
network.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
maybe_add_batchnorm()
network.add(tf.keras.layers.Conv2D(hidden_sizes[1], 5, **kwargs))
network.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
maybe_add_batchnorm()
network.add(tf.keras.layers.Flatten())
network.add(tf.keras.layers.Dense(hidden_sizes[2], **kwargs))
maybe_add_batchnorm()
network.add(tf.keras.layers.Dense(hidden_sizes[3], **kwargs))
maybe_add_batchnorm()
kwargs['activation'] = None
network.add(tf.keras.layers.Dense(num_classes, **kwargs))
return network
@gin.configurable(allowlist=['hidden_sizes', 'use_batch_norm'])
def mlp(input_shape,
num_classes,
activation,
kernel_regularizer,
use_batch_norm = False,
hidden_sizes = (300, 100)):
"""Lenet5 implementation."""
network = tf.keras.Sequential()
kwargs = {
'activation': activation,
'kernel_regularizer': kernel_regularizer
}
def maybe_add_batchnorm():
if use_batch_norm:
network.add(tf.keras.layers.BatchNormalization())
network.add(tf.keras.layers.Flatten(input_shape=input_shape))
network.add(tf.keras.layers.Dense(hidden_sizes[0], **kwargs))
maybe_add_batchnorm()
network.add(tf.keras.layers.Dense(hidden_sizes[1], **kwargs))
maybe_add_batchnorm()
kwargs['activation'] = None
network.add(tf.keras.layers.Dense(num_classes, **kwargs))
return network
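# Minimal usage sketch (shapes and arguments are illustrative only):
#   model = mlp(input_shape=(28, 28, 1), num_classes=10,
#               activation='relu', kernel_regularizer=None)
#   model.summary()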
| [
"[email protected]"
] | |
0d6be2d20aad56dc93e13e89dae8a30e5c924f42 | e1eaed6dde62fc54eb317d28dbd18e0740e3e8f3 | /official/projects/yt8m/configs/__init__.py | 2785613f22bdd5886332e53a03d96f3d529b7fd9 | [
"Apache-2.0"
] | permissive | nlpming/models | cf5008d2e66d2b66b6d61423e214f2f9f9fbe472 | 3cbf0748529d787dd09fa3ed031e557f0ddfa268 | refs/heads/master | 2021-12-03T03:29:16.042489 | 2021-11-23T14:09:10 | 2021-11-23T14:09:10 | 206,007,973 | 0 | 0 | Apache-2.0 | 2019-09-03T06:47:46 | 2019-09-03T06:47:46 | null | UTF-8 | Python | false | false | 692 | py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configs package definition."""
from official.projects.yt8m.configs import yt8m
| [
"[email protected]"
] | |
40d07a435766b7b05fd6c3cfdb3b1ec4ba000f2b | 8fe42fb556547bc60bd34b8c5230c01d7ef2dc89 | /src/operaciones.py | c2b28d668244639975b2d9d79f52146085af3388 | [] | no_license | Davidlazarog/Shark-Project | 1c8b3225f41a32dbcabace88c776043aeac42066 | e6baf3c346aa324566ac687af885a7abcd66b5a2 | refs/heads/master | 2022-12-06T20:03:15.679049 | 2020-08-31T19:06:54 | 2020-08-31T19:06:54 | 291,052,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,788 | py | def fatality(x):
if x == "Y":
return True
elif x == "N":
return False
elif x == " N":
return False
elif x == "N ":
return False
elif x == "y":
return True
return 'Unknown'
def Type(x):
if x == "Unprovoked":
return 'Unprovoked'
elif x == "Provoked":
return 'Provoked'
elif x == "Sea Disaster":
return 'Unprovoked'
return 'Unknown'
import re
arms = ["(.*)?arms(.*)?", "(.*)?hand(.*)?" ]
leg = ["(.*)leg(.*)?", "(.*)?foot(.*)?" ]
fatal = ["(.*)?atal(.*)?", "(.*)?ody(.*)?" ]
ribs = ["(.*)?ribs(.*)?"]
noinjury = ["(.*)?o injur(.*)?"]
def menu(x):
Arms = "ARMS"
Leg = 'LEGS'
Fatal = 'FATAL'
Ribs = 'RIBS'
Noinjury = 'NO INJURY'
    if type(x) != str:
        return 'Unknown'
    else:
        x = x.lower()
        for a in arms:
if re.search (a,x):
x = Arms
return x
for l in leg:
if re.search (l,x):
x = Leg
return x
for i in fatal:
if re.search (i,x):
x = Fatal
return x
for r in ribs:
if re.search (r,x):
x = Ribs
return x
for n in noinjury:
if re.search (n,x):
x = Noinjury
return x
return 'Unknown'
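# Quick sanity checks (illustrative):
#   fatality("Y")           -> True
#   menu("left leg bitten") -> 'LEGS'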
pizza = ["(.*)?no injury(.*)?" ]
human = ["(.*)?fatal(.*)?", "(.*)?legs(.*)?", "(.*)?arms(.*)?", "(.*)?ribs(.*)?"]
def pizzavshuman (x):
Pizza = 'PIZZA'
Human = 'HUMAN'
x = x.lower()
for p in pizza:
if re.search (p,x):
x = Pizza
return x
for h in human:
if re.search (h,x):
x = Human
return x
    return 'UNKNOWN'
| [
"[email protected]"
] | |
d4d3db0d9c63d6caa39c78c51416d5cd71a05bf5 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-2/23fcae7c9e2969cf6c9ffb019dda90e2abce2337-<v2_runner_on_unreachable>-fix.py | a2bf157a0a16543a0a827efdc86b55d5841fa9c6 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py |
def v2_runner_on_unreachable(self, result):
self._preprocess_result(result)
msg = 'unreachable'
display_color = C.COLOR_UNREACHABLE
task_result = self._process_result_output(result, msg)
self._display.display((' ' + task_result), display_color, stderr=self.display_failed_stderr)
| [
"[email protected]"
] | |
11e3a016835e75036ea8902a59cb6d19e5fba1d5 | ad8a182386d9cbffc5949e06435aee9dc65e79cd | /src/test/py/bazel/testdata/runfiles_test/bar/bar.py | a88b10b28ba73e4b68fbd2bcab7611e28f473e07 | [
"Apache-2.0"
] | permissive | JefferyQ/bazel | 2284f1792cc10b96fa485bfc7e54e064d2733750 | e666d3b88efb59f2a8f10b18882c9f13801508d9 | refs/heads/master | 2020-03-06T19:08:35.713184 | 2018-03-27T16:18:53 | 2018-03-27T16:20:19 | 127,021,388 | 1 | 0 | Apache-2.0 | 2018-03-27T17:18:14 | 2018-03-27T17:18:14 | null | UTF-8 | Python | false | false | 846 | py | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock Python binary, only used in tests."""
from __future__ import print_function
from bazel_tools.tools.runfiles import runfiles
print('Hello Python Bar!')
r = runfiles.Create()
print('rloc=%s' % r.Rlocation('foo_ws/bar/bar-py-data.txt'))
| [
"[email protected]"
] | |
d6942f802ebb324f1d183331af8a70085d17d2a8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03260/s433501845.py | f916e360749b81eba126bb8d969b1f1e0e46b12a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | import sys
def main():
a, b = map(int, input().split())
for c in range(1, 4):
if (a * b * c) % 2 == 1:
print('Yes')
sys.exit(0)
print('No')
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
3209c668a67a07c6ad75cfad7348b0fa17bc9382 | eae3d77ac72c168cee7701462f1fc45d7d4dcd91 | /SWEA/5653_줄기세포배양.py | 352a068ba658b7efae977a5f073569e60fa98afe | [] | no_license | ByeongjunCho/Algorithm-TIL | ed2f018d50bd2483bd1175ff9bf7e91913c14766 | ad79125a1498915fe97c1d57ee6860b06c410958 | refs/heads/master | 2022-07-19T15:12:23.689319 | 2020-05-18T08:37:09 | 2020-05-18T08:37:09 | 256,399,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,765 | py | # 5653. [모의 SW 역량테스트] 줄기세포배양
for tc in range(1, 1+int(input())):
N, M, K = map(int, input().split()) # 행, 열, 시간
arr = [list(map(int, input().split())) for _ in range(N)]
# 좌표를 dict 에 입력
cell = {} # 살아있는 세포
deadcell = set() # 죽은세포
for i in range(N):
for j in range(M):
if arr[i][j]:
                cell.update({(i, j): [arr[i][j], -arr[i][j]]})  # [life value, activation counter]
def clone():
dy = [0, 0, -1, 1]
dx = [1, -1, 0, 0]
keys = set(cell.keys())
for k in keys:
val = cell[k]
            # if the cell is not activated yet, count down toward activation
if 0 > val[1]:
cell.update({k: [val[0], val[1] + 1]})
            # if the cell is activated and still alive
elif 0 <= val[1] < val[0]:
cell.update({k: [val[0], val[1] + 1]})
for i in range(4):
wy, wx = dy[i] + k[0], dx[i] + k[1]
                    # if an existing cell was spawned this very hour with a smaller life value, overwrite it
if (wy, wx) not in keys and cell.get((wy, wx)) and cell.get((wy, wx))[0] == -cell.get((wy, wx))[1] and cell.get((wy, wx))[0] < val[0]:
cell.update({(wy, wx): [val[0], -val[0]]})
                    # if the target position holds no cell and is not dead
                    elif not cell.get((wy, wx)) and (wy, wx) not in deadcell:
cell.update({(wy, wx): [val[0], -val[0]]})
if val[0] == val[1] +1:
cell.pop(k)
deadcell.add(k)
for _ in range(K):
clone()
count = len(cell)
    print('#{} {}'.format(tc, count))
| [
"[email protected]"
] | |
c6786c9dbf4949ae65f9b5146056f5c8542d97d3 | 76658a65823ea493038c1d037eb0bc1eda6733d3 | /chapter8/decorator.py | 1af2950bea033ac2d44c7197d914df3b0ee7bdea | [] | no_license | Asunqingwen/OOR_Edition3 | 2f14446f26a6615aea58920f67f6656c74257c4c | 92481b15fc365f8d5b903f6e5eb0974e9ff2af33 | refs/heads/master | 2022-08-15T05:49:33.619528 | 2020-01-13T01:53:47 | 2020-01-13T01:53:47 | 230,414,693 | 0 | 0 | null | 2022-07-29T23:04:04 | 2019-12-27T09:33:05 | Python | UTF-8 | Python | false | false | 933 | py | # -*- coding: utf-8 -*-
# @Time : 2020/1/7 0007 14:40
# @Author : 没有蜡笔的小新
# @E-mail : [email protected]
# @FileName: decorator.py
# @Software: PyCharm
# @Blog :https://blog.csdn.net/Asunqingwen
# @GitHub :https://github.com/Asunqingwen
# @WebSite : labixiaoxin.me
import time
def log_calls(func):
def wrapper(*args, **kwargs):
now = time.time()
print(
"Calling {0} with {1} and {2}".format(
func.__name__, args, kwargs
)
)
return_value = func(*args, **kwargs)
print(
"Executed {0} in {1}ms".format(func.__name__, time.time() - now)
)
return return_value
return wrapper
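# Note: a production version would usually apply functools.wraps(func) to
# `wrapper` so the decorated function keeps its original __name__ and __doc__.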
@log_calls
def test1(a, b, c):
print("\ttest1 called")
@log_calls
def test2(a, b):
print("\ttest2 called")
@log_calls
def test3(a, b):
print("\ttest3 called")
time.sleep(1)
# test1 = log_calls(test1)
# test2 = log_calls(test2)
# test3 = log_calls(test3)
test1(1, 2, 3)
test2(4, b=5)
test3(6, 7)
| [
"[email protected]"
] | |
aa7d5492970481f8d5ecd055913b223e31a05a4a | f93ea26173e6b72ff46b3abb2a5250bfb0636cdd | /tests/test_multiple.py | fc19b1449f2d61c628ba768b08192cf784420648 | [
"MIT"
] | permissive | eng-tools/eqsig | 53d1dc695ffbe132a7fef871d825d9b7011f821c | 8a70f4c7152bc0f0901d457b6acbca256d1a6473 | refs/heads/master | 2023-02-26T06:58:43.243878 | 2022-08-16T03:23:04 | 2022-08-16T03:23:04 | 125,842,866 | 22 | 10 | MIT | 2023-02-08T00:41:12 | 2018-03-19T10:46:43 | Python | UTF-8 | Python | false | false | 720 | py | import numpy as np
from eqsig import multiple
def test_same_start():
time = np.linspace(0, 102, 10200)
acc = np.sin(time)
dt = 0.01
cluster = multiple.Cluster([acc, acc + 0.3], dt=dt)
cluster.same_start()
diff = np.sum(cluster.values_by_index(0) - cluster.values_by_index(1))
assert diff < 1.0e-10, diff
def test_time_match():
time = np.linspace(0, 102, 1020)
acc = np.sin(time)
dt = 0.01
cluster = multiple.Cluster([acc[:-6], acc[6:]], dt=dt)
cluster.time_match(verbose=0)
diff = np.sum(cluster.values_by_index(0)[6:-5] - cluster.values_by_index(1)[6:-5])
assert diff == 0.0, diff
if __name__ == '__main__':
test_same_start()
# test_time_match()
| [
"[email protected]"
] | |
bec4c93a740437135214e47923fb069380ba88e1 | eb61d62ca1f6f0123e3771105f5dfbbd6115138d | /.history/19-22-07-21_20210905233329.py | 1c3e07808a594a922ce4ec611612db0be63d70cf | [] | no_license | Alopezm5/CORRECTO-2 | e0f14bcc3a88c0e222d10e3261e68532008bc42e | 223613f1fb04dce3fac9f82f243cb2f22fe100f3 | refs/heads/main | 2023-07-29T06:52:48.147424 | 2021-09-12T20:33:27 | 2021-09-12T20:33:27 | 388,995,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,447 | py | class MENU ():
def __init__(self,titulo,opciones=[]):
self.titulo = titulo
self.opciones = opciones
def menu(self):
print(self.titulo)
for opcion in self.opciones:
print(opcion)
opc=input("Elije opcion [1 ..... {}]:".format(len(self.opciones)))
return opc
opc=" "
while opc!="5":
menu1=MENU("Menú Principal" , ["1)Calculadora","2)Numeros","3)Listas","4)Cadenas","5)Salir"])
opc=menu1.menu()
if opc=="1":
opc1=" "
while opc !=""
menu1=MENU("Menú Calculadora",["1)Suma","2)Resta","3)Multiplicacion" , "4) División" , "5) Salir" ])
opc1=menu1.menu()
if opc1 == "1" :
print("Opcion Suma")
n1=int(input("Ingresar n1: "))
n2=int(input("Ingresar n2: "))
suma=n1+n2
print("{} + {} = {}".format( n1 , n2 , suma ))
elif opc1 == "2" :
print ( "Opcion Resta" )
n1 = int ( input ( "Ingresar n1:" ))
n2 = int ( input ( "Ingresar n2:" ))
resta = n1 - n2
print ( "{} - {} = {}".format( n1 , n2 , resta ))
elif opc1 == "3" :
print ( "Opcion Multiplicacion" )
n1 = int ( input ( "Ingresar n1:" ))
n2 = int ( input ( "Ingresar n2:" ))
multiplicacion = n1 * n2
print ( "{} * {} = {}".format( n1 , n2 , multiplicacion ))
elif opc1 == "4" :
print ( "Opcion Division" )
n1 = int ( input ( "Ingresar n1:" ))
n2 = int ( input ( "Ingresar n2:" ))
division = n1 / n2
print ( "{} / {} = {}".format( n1 , n2 , division ))
elif opc1 == "5" :
print ( "Opcion Salir" )
elif opc == "2" :
menu2 = MENU ( "Menú Numero" , [ "1) Perfecto" , "2) Primo" , "3) Salir" ])
opc2 = input ( "Elije opcion [1 ..... 3]:" )
elif opc == "3" :
print ( "Listas de menú" )
elif opc == "4" :
print ( "Menú Cadenas" )
elif opc == "5" :
print ( "Menú Salir" )
else:
print ( "Opcion no valida" )
| [
"[email protected]"
] | |
f448fb85f2fa97dfe22916d51c24dde06f126299 | 910c97ce255f39af7ef949664b4346e8cb5d6a0e | /managerlib/dblib/.svn/text-base/db_cloudfs_account.py.svn-base | a35cee46cf8948ff6bed248f5925041ed5a0724e | [] | no_license | sun3shines/manager_monitor | f3742a4fde95b456f51d0a18feb78f3d4048c560 | f49d741203d8476f2249a49d90fecc86143ac622 | refs/heads/master | 2021-01-17T06:47:14.375088 | 2016-04-29T06:43:05 | 2016-04-29T06:43:05 | 57,361,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | # -*- coding: utf-8 -*-
from urllib import unquote
from managerlib.db.db_account import account2id,insert_account,account_exists
from managerlib.db.table.stobj import delete_stobj
# atdelete -> db_cloudfs_account_delete
# atput -> db_cloudfs_account_put
# atexists -> db_cloudfs_account_exists
# athead -> db_cloudfs_account_head
# atmeta -> db_cloudfs_account_meta
# atget -> db_cloudfs_account_get
# atpost -> db_cloudfs_account_post
def db_cloudfs_account_delete(newPath,conn):
aid = account2id(conn,newPath)
return delete_stobj(conn,aid)
def db_cloudfs_account_put(newPath,conn):
return insert_account(conn,newPath)
def db_cloudfs_account_exists(newPath,conn):
return account_exists(conn,newPath)
######################################
# The handlers below are stubs: head/meta/get/post on an account are
# currently no-ops that always report success with an empty message.
def db_cloudfs_account_head(path):
    return True, ''
def db_cloudfs_account_meta(path):
    return True, ''
def db_cloudfs_account_get(path):
    return True, ''
def db_cloudfs_account_post(path):
return True,'' | [
"[email protected]"
] | ||
0fbc76d0300d598b7281028957299fbf30355f1c | 24de7e00fc6888efe2540d078d4bada6b21174ae | /glue_jupyter/ipyvolume/volume.py | c14a3500bd5b6ef0f413a9b7f7c63bf6c5dcc0f6 | [] | no_license | NoahLiot/glue-jupyter | 542e136f0688c98380b46ab8d7cb68dd577fc57b | 012703c603c5eae7c99ab93d832eb60f307e3c95 | refs/heads/master | 2020-03-27T05:15:10.485726 | 2018-08-09T10:16:29 | 2018-08-09T10:16:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,775 | py | import ipyvolume as ipv
import ipywidgets as widgets
import traitlets
from IPython.display import display
import numpy as np
import matplotlib.colors
from .scatter import IpyvolumeScatterLayerArtist
from ..utils import reduce_size
#from glue_vispy_viewers.common.layer_state import VispyLayerState
from glue_vispy_viewers.volume.layer_state import VolumeLayerState
from glue.core.data_combo_helper import ComponentIDComboHelper
from glue.external.echo import (CallbackProperty, SelectionCallbackProperty)
from glue.core.data import Subset
from glue.core.exceptions import IncompatibleAttribute
from ..link import link, dlink, calculation, link_component_id_to_select_widget, on_change
class IpyvolumeLayerState(VolumeLayerState):
opacity_scale = CallbackProperty()
render_method = CallbackProperty()
lighting = CallbackProperty()
max_resolution = CallbackProperty()
vmin = CallbackProperty()
vmax = CallbackProperty()
clamp_min = CallbackProperty()
clamp_max = CallbackProperty()
# attribute = SelectionCallbackProperty()
def __init__(self, layer=None, **kwargs):
super(IpyvolumeLayerState, self).__init__(layer=layer)
self.opacity_scale = 0.1
self.render_method = 'NORMAL'
self.lighting = True
self.max_resolution = 256
self.vmin = 0.
self.vmax = 1.
self.clamp_min = False
self.clamp_max = False
from glue_vispy_viewers.common.layer_artist import VispyLayerArtist
def _transfer_function_rgba(color, N=256, max_opacity=1):
r, g, b = matplotlib.colors.to_rgb(color)
data = np.zeros((N, 4), dtype=np.float32)
ramp = np.linspace(0, 1, N)
data[...,0] = r
data[...,1] = g
data[...,2] = b
data[...,3] = ramp*max_opacity
return data
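
# Quick illustration of the helper above (kept as a comment so it has no side
# effects; float formatting approximate):
# >>> _transfer_function_rgba("red", N=4)
# array([[1.  , 0.  , 0.  , 0.  ],
#        [1.  , 0.  , 0.  , 0.33],
#        [1.  , 0.  , 0.  , 0.67],
#        [1.  , 0.  , 0.  , 1.  ]], dtype=float32)
# i.e. constant RGB for the layer color with a linear opacity ramp, which is
# what the ipyvolume TransferFunction below consumes.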
class IpyvolumeVolumeLayerArtist(VispyLayerArtist):
def __init__(self, ipyvolume_viewer=None, state=None, layer=None, layer_state=None):
super(IpyvolumeVolumeLayerArtist, self).__init__(layer)
self.layer = layer or layer_state.layer
self.ipyvolume_viewer = ipyvolume_viewer
self.figure = self.ipyvolume_viewer.figure
self._viewer_state = ipyvolume_viewer.state
assert ipyvolume_viewer.state == state
self.state = layer_state or IpyvolumeLayerState(layer=self.layer)
self.transfer_function = ipv.TransferFunction(rgba=_transfer_function_rgba(self.state.color))
if self.state not in self._viewer_state.layers:
self._viewer_state.layers.append(self.state)
#ipv.figure(self.ipyvolume_viewer.figure)
self.volume = None
self.last_shape = None
def clear(self):
pass
def redraw(self):
pass
def update(self):
# print(self.layer)
if isinstance(self.layer, Subset):
try:
mask = self.layer.to_mask()
except IncompatibleAttribute:
# The following includes a call to self.clear()
self.disable("Subset cannot be applied to this data")
return
else:
self._enabled = True
# if self.state.subset_mode == 'outline':
# data = mask.astype(np.float32)
# else:
data = self.layer.data[self.state.attribute].astype(np.float32)
data *= mask
else:
data = self.layer[self.state.attribute]
#data = self.layer.data[self.state.attribute].astype(np.float32)
#print(data, data.shape, self.state.attribute)
finite_mask = np.isfinite(data)
finite_data = data[finite_mask]
finite_mask_normalized = finite_data - finite_data.min()
finite_mask_normalized = finite_mask_normalized / finite_mask_normalized.max()
data_min, data_max = np.nanmin(data), np.nanmax(data) #np.percentile(finite_data, 1), np.percentile(finite_data, 99)
# data_min, data_max = 0, 1
# self.state.data_min = data_min
# self.state.data_max = data_max
#data_min, data_max = None, None
self.last_shape = shape = data.shape
if self.volume is None:
with self.figure:
self.volume = ipv.volshow(data, data_min=data_min, data_max=data_max, extent=[[0, shape[0]], [0, shape[1]], [0, shape[2]]], controls=False,
tf=self.transfer_function)#, volume_rendering_method=self.state.render_method)
else:
self.ipyvolume_viewer.figure.volume_data_original = data
self.ipyvolume_viewer.figure.volume_data_min = data_min
self.ipyvolume_viewer.figure.volume_data_max = data_max
def _update_transfer_function(self):
self.transfer_function.rgba = _transfer_function_rgba(self.state.color, max_opacity=self.state.alpha)
def create_widgets(self):
self.widget_lighting = widgets.Checkbox(description='lighting', value=self.state.lighting)
link((self.state, 'lighting'), (self.widget_lighting, 'value'))
link((self.state, 'lighting'), (self.figure, 'volume_rendering_lighting'))
render_methods = 'NORMAL MAX_INTENSITY'.split()
self.widget_render_method = widgets.Dropdown(options=render_methods, value=self.state.render_method, description='method')
link((self.state, 'render_method'), (self.widget_render_method, 'value'))
link((self.state, 'render_method'), (self.figure, 'volume_rendering_method'))
self.size_options = [32, 64, 128, 128+64, 256, 256+128, 512]
options = [(str(k), k) for k in self.size_options]
self.widget_max_resolution = widgets.Dropdown(options=options, value=128, description='max resolution')
link((self.state, 'max_resolution'), (self.widget_max_resolution, 'value'))
link((self.state, 'max_resolution'), (self.figure, 'volume_data_max_shape'))
#on_change([(self.state, 'max_resolution')])(self.update)
self.widget_data_min = widgets.FloatSlider(description='min', min=0, max=1, value=self.state.vmin, step=0.001)
link((self.state, 'vmin'), (self.widget_data_min, 'value'))
link((self.state, 'vmin'), (self.figure, 'volume_show_min'))
link((self.ipyvolume_viewer.figure, 'volume_data_min'), (self.widget_data_min, 'min'))
link((self.ipyvolume_viewer.figure, 'volume_data_max'), (self.widget_data_min, 'max'))
self.widget_data_max = widgets.FloatSlider(description='max', min=0, max=1, value=self.state.vmax, step=0.001)
link((self.state, 'vmax'), (self.widget_data_max, 'value'))
link((self.state, 'vmax'), (self.figure, 'volume_show_max'))
link((self.ipyvolume_viewer.figure, 'volume_data_min'), (self.widget_data_max, 'min'))
link((self.ipyvolume_viewer.figure, 'volume_data_max'), (self.widget_data_max, 'max'))
self.widget_clamp_min = widgets.Checkbox(description='clamp minimum', value=self.state.clamp_min)
link((self.state, 'clamp_min'), (self.widget_clamp_min, 'value'))
link((self.state, 'clamp_min'), (self.figure, 'volume_clamp_min'))
self.widget_clamp_max = widgets.Checkbox(description='clamp maximum', value=self.state.clamp_max)
link((self.state, 'clamp_max'), (self.widget_clamp_max, 'value'))
link((self.state, 'clamp_max'), (self.figure, 'volume_clamp_max'))
self.widget_color = widgets.ColorPicker(value=self.state.color, description='color')
link((self.state, 'color'), (self.widget_color, 'value'))
self.widget_opacity = widgets.FloatSlider(description='opacity', min=0, max=1, value=self.state.alpha, step=0.001)
link((self.state, 'alpha'), (self.widget_opacity, 'value'))
self.widget_opacity_scale = widgets.FloatLogSlider(description='opacity scale', base=10, min=-3, max=3, value=self.state.opacity_scale, step=0.01)
link((self.state, 'opacity_scale'), (self.widget_opacity_scale, 'value'))
link((self.state, 'opacity_scale'), (self.figure, 'opacity_scale'))
on_change([(self.state, 'color', 'alpha')])(self._update_transfer_function)
self.widget_reset_zoom = widgets.Button(description="Reset zoom")
def reset_zoom(*ignore):
with self.figure:
if self.last_shape is not None:
ipv.xlim(0, self.last_shape[0])
ipv.ylim(0, self.last_shape[1])
ipv.zlim(0, self.last_shape[2])
self.widget_reset_zoom.on_click(reset_zoom)
return widgets.VBox([self.widget_render_method, self.widget_lighting, self.widget_data_min,
self.widget_data_max, self.widget_clamp_min, self.widget_clamp_max,
self.widget_max_resolution, self.widget_reset_zoom, self.widget_color, self.widget_opacity, self.widget_opacity_scale])
| [
"[email protected]"
] | |
c50124e6e37ed41624bd020b878be1a0fd742599 | 61a21ed2dcdfe9a43588c5582eea38ce8fdfcbf2 | /akshare/stock/stock_dzjy.py | b132c81a9b31b7160848277fdb6ca9908ccb012b | [
"MIT"
] | permissive | huanghyw/akshare | 44187c6c56872d499651bb62c178ee837c776388 | ed84e937773c0420cc003793d74b73e64223e08b | refs/heads/master | 2023-04-22T07:06:08.929307 | 2021-05-02T16:05:59 | 2021-05-02T16:05:59 | 319,346,216 | 13 | 5 | MIT | 2021-05-02T16:05:59 | 2020-12-07T14:32:08 | null | UTF-8 | Python | false | false | 15,145 | py | # -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2020/12/4 20:31
Desc: Eastmoney - Data Center - Block Trading - Market Statistics
http://data.eastmoney.com/dzjy/dzjy_sctj.aspx
"""
import demjson
import pandas as pd
import requests
def stock_dzjy_sctj() -> pd.DataFrame:
"""
    Eastmoney - Data Center - Block Trading - Market Statistics
    http://data.eastmoney.com/dzjy/dzjy_sctj.aspx
    :return: market statistics table
:rtype: pandas.DataFrame
"""
url = "http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get"
params = {
"type": "DZJYSCTJ",
"token": "70f12f2f4f091e459a279469fe49eca5",
"cmd": "",
"st": "TDATE",
"sr": "-1",
"p": "1",
"ps": "50000",
"js": "var xoqCPdgn={pages:(tp),data:(x)}",
"rt": "53569504",
}
r = requests.get(url, params=params)
data_text = r.text
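    # The endpoint echoes the "js" template from the params above with (tp)
    # and (x) filled in, e.g.
    #   var xoqCPdgn={pages:12,data:[{...}, ...]}
    # so everything after the first "=" is a JS object literal, which demjson
    # (unlike the strict stdlib json module) can decode.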
data_json = demjson.decode(data_text.split("=")[1])
temp_df = pd.DataFrame(data_json["data"])
temp_df.columns = [
"交易日期",
"上证指数",
"上证指数涨跌幅",
"大宗交易成交总额",
"溢价成交总额",
"溢价成交总额占比",
"折价成交总额",
"折价成交总额占比",
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"])
temp_df["上证指数"] = round(temp_df["上证指数"], 2)
temp_df["上证指数涨跌幅"] = round(temp_df["上证指数涨跌幅"], 4)
temp_df["大宗交易成交总额"] = round(temp_df["大宗交易成交总额"].astype(float), 2)
temp_df["溢价成交总额"] = round(temp_df["溢价成交总额"].astype(float), 2)
temp_df["溢价成交总额占比"] = round(temp_df["溢价成交总额占比"].astype(float), 4)
temp_df["折价成交总额"] = round(temp_df["折价成交总额"].astype(float), 2)
temp_df["折价成交总额占比"] = round(temp_df["折价成交总额占比"].astype(float), 4)
return temp_df
def stock_dzjy_mrmx(symbol: str = '债券', start_date: str = '2020-12-04', end_date: str = '2020-12-04') -> pd.DataFrame:
"""
    Eastmoney - Data Center - Block Trading - Daily Details
    http://data.eastmoney.com/dzjy/dzjy_mrmxa.aspx
    :param symbol: choice of {'A股', 'B股', '债券'} (A shares, B shares, bonds)
    :type symbol: str
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :return: daily details
:rtype: pandas.DataFrame
"""
symbol_map = {
'A股': 'EQA',
'B股': 'EQB',
'债券': 'BD0',
}
url = "http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get"
params = {
"type": "DZJYXQ",
"token": "70f12f2f4f091e459a279469fe49eca5",
"cmd": "",
"st": "SECUCODE",
"sr": "1",
"p": "1",
"ps": "5000",
"js": "var kBPzKdtj={pages:(tp),data:(x)}",
'filter': f"(Stype='{symbol_map[symbol]}')(TDATE>=^{start_date}^ and TDATE<=^{end_date}^)",
"rt": "53569504",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text.split("=")[1])
temp_df = pd.DataFrame(data_json["data"])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
if symbol in {'A股'}:
temp_df.columns = [
"序号",
"交易日期",
"证券代码",
"证券简称",
"成交价",
"成交量",
"成交额",
"_",
"买方营业部",
"_",
"卖方营业部",
"_",
"_",
"涨跌幅",
"收盘价",
"_",
"折溢率",
"成交额/流通市值",
"_",
"_",
"_",
"_",
"_",
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"])
temp_df = temp_df[[
"序号",
"交易日期",
"证券代码",
"证券简称",
"涨跌幅",
"收盘价",
"成交价",
"折溢率",
"成交量",
"成交额",
"成交额/流通市值",
"买方营业部",
"卖方营业部",
]]
return temp_df
if symbol in {'B股'}:
temp_df.columns = [
"序号",
"交易日期",
"证券代码",
"证券简称",
"成交价",
"成交量",
"成交额",
"_",
"买方营业部",
"_",
"卖方营业部",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"])
temp_df = temp_df[[
"序号",
"交易日期",
"证券代码",
"证券简称",
"成交价",
"成交量",
"成交额",
"买方营业部",
"卖方营业部",
]]
return temp_df
if symbol in {'债券'}:
temp_df.columns = [
"序号",
"交易日期",
"证券代码",
"证券简称",
"成交价",
"成交量",
"成交额",
"_",
"买方营业部",
"_",
"卖方营业部",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"])
temp_df = temp_df[[
"序号",
"交易日期",
"证券代码",
"证券简称",
"成交价",
"成交量",
"成交额",
"买方营业部",
"卖方营业部",
]]
return temp_df
def stock_dzjy_mrtj(start_date: str = '2020-12-04', end_date: str = '2020-12-04') -> pd.DataFrame:
"""
    Eastmoney - Data Center - Block Trading - Daily Statistics
    http://data.eastmoney.com/dzjy/dzjy_mrtj.aspx
    :param start_date: start date
    :type start_date: str
    :param end_date: end date
    :type end_date: str
    :return: daily statistics
:rtype: pandas.DataFrame
"""
url = "http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get"
params = {
"type": "DZJYGGTJ",
"token": "70f12f2f4f091e459a279469fe49eca5",
"cmd": "",
"st": "Cjeltszb",
"sr": "-1",
"p": "1",
"ps": "50000",
"js": "var xoqCPdgn={pages:(tp),data:(x)}",
'filter': f'(TDATE>=^{start_date}^ and TDATE<=^{end_date}^)',
"rt": "53569504",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text.split("=")[1])
temp_df = pd.DataFrame(data_json["data"])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
"序号",
"交易日期",
"证券代码",
"证券简称",
"涨跌幅",
"收盘价",
"成交均价",
"折溢率",
"成交笔数",
"成交总额",
"成交总量",
"_",
"成交总额/流通市值",
"_",
"_",
"_",
"_",
]
temp_df["交易日期"] = pd.to_datetime(temp_df["交易日期"])
temp_df = temp_df[[
"序号",
"交易日期",
"证券代码",
"证券简称",
"涨跌幅",
"收盘价",
"成交均价",
"折溢率",
"成交笔数",
"成交总量",
"成交总额",
"成交总额/流通市值",
]]
return temp_df
def stock_dzjy_hygtj(period: str = '近三月') -> pd.DataFrame:
"""
    Eastmoney - Data Center - Block Trading - Active A-Share Statistics
    http://data.eastmoney.com/dzjy/dzjy_hygtj.aspx
    :param period: choice of {'近一月', '近三月', '近六月', '近一年'} (last 1/3/6/12 months)
    :type period: str
    :return: active A-share statistics
:rtype: pandas.DataFrame
"""
period_map = {
'近一月': '1',
'近三月': '3',
'近六月': '6',
'近一年': '12',
}
url = "http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get"
params = {
"type": "DZJY_HHGGTJ",
"token": "70f12f2f4f091e459a279469fe49eca5",
"cmd": "",
"st": "SBSumCount",
"sr": "-1",
"p": "1",
"ps": "50000",
"js": "var xoqCPdgn={pages:(tp),data:(x)}",
'filter': f'(TYPE={period_map[period]})',
"rt": "53569504",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text.split("=")[1])
temp_df = pd.DataFrame(data_json["data"])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
"序号",
"_",
"最近上榜日",
"证券代码",
"证券简称",
"涨跌幅",
"最新价",
"上榜次数-总计",
"上榜次数-溢价",
"上榜次数-折价",
"总成交额",
"_",
"折溢率",
"成交总额/流通市值",
"上榜日后平均涨跌幅-1日",
"上榜日后平均涨跌幅-5日",
"上榜日后平均涨跌幅-10日",
"上榜日后平均涨跌幅-20日",
"_",
"_",
"_",
"_",
]
temp_df["最近上榜日"] = pd.to_datetime(temp_df["最近上榜日"])
temp_df = temp_df[[
"序号",
"证券代码",
"证券简称",
"最新价",
"涨跌幅",
"最近上榜日",
"上榜次数-总计",
"上榜次数-溢价",
"上榜次数-折价",
"总成交额",
"折溢率",
"成交总额/流通市值",
"上榜日后平均涨跌幅-1日",
"上榜日后平均涨跌幅-5日",
"上榜日后平均涨跌幅-10日",
"上榜日后平均涨跌幅-20日",
]]
return temp_df
def stock_dzjy_hyyybtj(period: str = '近3日') -> pd.DataFrame:
"""
    Eastmoney - Data Center - Block Trading - Active Brokerage Branch Statistics
    http://data.eastmoney.com/dzjy/dzjy_hyyybtj.aspx
    :param period: choice of {'当前交易日', '近3日', '近5日', '近10日', '近30日'} (current trading day, last 3/5/10/30 days)
    :type period: str
    :return: active brokerage branch statistics
:rtype: pandas.DataFrame
"""
period_map = {
'当前交易日': '1',
'近3日': '3',
'近5日': '5',
'近10日': '10',
'近30日': '30',
}
url = "http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get"
params = {
"type": "DZJY_HHYYBTJ",
"token": "70f12f2f4f091e459a279469fe49eca5",
"cmd": "",
"st": "BCount",
"sr": "-1",
"p": "1",
"ps": "50000",
"js": "var xoqCPdgn={pages:(tp),data:(x)}",
'filter': f'(TYPE={period_map[period]})',
"rt": "53569504",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text.split("=")[1])
temp_df = pd.DataFrame(data_json["data"])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
"序号",
"_",
"最近上榜日",
"_",
"营业部名称",
"次数总计-买入",
"次数总计-卖出",
"成交金额统计-买入",
"成交金额统计-卖出",
"成交金额统计-净买入额",
"买入的股票",
]
temp_df["最近上榜日"] = pd.to_datetime(temp_df["最近上榜日"])
temp_df = temp_df[[
"序号",
"最近上榜日",
"营业部名称",
"次数总计-买入",
"次数总计-卖出",
"成交金额统计-买入",
"成交金额统计-卖出",
"成交金额统计-净买入额",
"买入的股票",
]]
return temp_df
def stock_dzjy_yybph(period: str = '近三月') -> pd.DataFrame:
"""
    Eastmoney - Data Center - Block Trading - Brokerage Branch Ranking
    http://data.eastmoney.com/dzjy/dzjy_yybph.aspx
    :param period: choice of {'近一月', '近三月', '近六月', '近一年'} (last 1/3/6/12 months)
    :type period: str
    :return: brokerage branch ranking
:rtype: pandas.DataFrame
"""
period_map = {
'近一月': '1',
'近三月': '3',
'近六月': '6',
'近一年': '12',
}
url = "http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get"
params = {
"type": "DZJY_YYBHB",
"token": "70f12f2f4f091e459a279469fe49eca5",
"cmd": "",
"st": "BCount",
"sr": "-1",
"p": "1",
"ps": "50000",
"js": "var xoqCPdgn={pages:(tp),data:(x)}",
'filter': f'(TYPE={period_map[period]})',
"rt": "53569504",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text.split("=")[1])
temp_df = pd.DataFrame(data_json["data"])
temp_df.reset_index(inplace=True)
temp_df['index'] = range(1, len(temp_df)+1)
temp_df.columns = [
"序号",
"_",
"_",
"营业部名称",
"上榜后1天-买入次数",
"上榜后1天-平均涨幅",
"_",
"_",
"上榜后1天-上涨概率",
"上榜后5天-平均涨幅",
"_",
"_",
"上榜后5天-上涨概率",
"上榜后10天-平均涨幅",
"_",
"_",
"上榜后10天-上涨概率",
"上榜后20天-平均涨幅",
"_",
"_",
"上榜后20天-上涨概率"
]
temp_df = temp_df[[
"序号",
"营业部名称",
"上榜后1天-买入次数",
"上榜后1天-平均涨幅",
"上榜后1天-上涨概率",
"上榜后5天-平均涨幅",
"上榜后5天-上涨概率",
"上榜后10天-平均涨幅",
"上榜后10天-上涨概率",
"上榜后20天-平均涨幅",
"上榜后20天-上涨概率"
]]
return temp_df
if __name__ == "__main__":
stock_dzjy_sctj_df = stock_dzjy_sctj()
print(stock_dzjy_sctj_df)
stock_dzjy_mrmx_df = stock_dzjy_mrmx(symbol='债券', start_date='2020-12-04', end_date='2020-12-04')
print(stock_dzjy_mrmx_df)
stock_dzjy_mrtj_df = stock_dzjy_mrtj(start_date='2020-12-04', end_date='2020-12-04')
print(stock_dzjy_mrtj_df)
stock_dzjy_hygtj_df = stock_dzjy_hygtj(period='近三月')
print(stock_dzjy_hygtj_df)
stock_dzjy_hyyybtj_df = stock_dzjy_hyyybtj(period='近3日')
print(stock_dzjy_hyyybtj_df)
stock_dzjy_yybph_df = stock_dzjy_yybph(period='近三月')
print(stock_dzjy_yybph_df)
| [
"[email protected]"
] | |
2576f671397537847ead9f33f72b37b62d262c6c | 61004e474b7b2ad0071c16766f0f7874f04f9466 | /examples/dataflow-xml-pubsub-to-gcs/python/beamPubSubXml2Gcs.py | 714dcc12868880a5d623a66cf9fb253735ed5a8a | [
"Apache-2.0"
] | permissive | GoogleCloudPlatform/professional-services | eb79751efae765a8c691a745e520f44f51bd715c | 0f51121b945bd74c7f667e74e8861fceda87565c | refs/heads/main | 2023-09-05T02:57:33.328973 | 2023-08-30T14:40:30 | 2023-08-30T14:40:30 | 91,730,359 | 2,626 | 1,381 | Apache-2.0 | 2023-09-14T20:13:42 | 2017-05-18T19:29:27 | Python | UTF-8 | Python | false | false | 6,666 | py | # Copyright 2023 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from datetime import datetime
import logging
import json
import random
from apache_beam import DoFn, GroupByKey, io, ParDo, Pipeline, \
PTransform, WindowInto, WithKeys, Map
from apache_beam.io import fileio
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.window import FixedWindows
import xml.etree.ElementTree as ET
class GroupMessagesByFixedWindows(PTransform):
"""A composite transform that groups Pub/Sub messages based on publish time
and outputs a list of tuples, each containing a message and its publish
time.
"""
def __init__(self, window_size, num_shards=5):
# Set window size to 60 seconds * window_size.
self.window_size = int(window_size * 60)
self.num_shards = num_shards
def expand(self, pcoll):
        return (
            pcoll
            # Bind window info to each element using the element timestamp
            # (i.e. the Pub/Sub publish time).
            | "Window into fixed intervals"
            >> WindowInto(FixedWindows(self.window_size))
            | "Add timestamp to windowed elements" >> ParDo(AddTimestamp())
            # Assign a random key to each windowed element based on the
            # number of shards.
            | "Add key" >> WithKeys(
                lambda _: random.randint(0, self.num_shards - 1)
            )
            # Group windowed elements by key. All the elements in the same
            # window must fit in memory for this. If not, you need to use
            # `beam.util.BatchElements`.
            | "Group by key" >> GroupByKey()
            | "Drop shard key after grouping" >> Map(lambda element: element[1])
        )
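
# Illustrative note (not from the original sample): each element this
# transform emits is the grouped value list of one (window, shard-key) pair,
# roughly
#   [{"ts": "...", "tags": [...], "text": [...]}, ...]
# with up to `num_shards` such lists per window; the pipeline below then
# serializes each list with json.dumps and writes it to GCS.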
class AddTimestamp(DoFn):
def process(self, element, publish_time=DoFn.TimestampParam):
"""Processes each parsed element by extracting the message body and its
received time into a tuple.
"""
yield (
{
"ts": datetime.utcfromtimestamp(float(publish_time)).
strftime("%Y-%m-%d %H:%M:%S.%f")
} | element
)
class ParseXML(DoFn):
def process(self, message_body):
"""Parse all tags and attributes from an XML and serialize them to a
dict for later storage."""
try:
parsedXml = ET.fromstring(message_body)
allTags = []
allTagsText = []
for element in parsedXml:
allTags.append(element.tag)
allTagsText.append(element.text)
yield {"tags": allTags, "text": allTagsText}
except Exception as e:
yield {"error": str(e), "raw_contents": message_body}
def run(project_id,
input_topic,
gcs_path,
window_size,
num_shards,
runner,
region,
pipeline_args=None):
# Set `save_main_session` to True so DoFns can access globally imported
# modules.
input_topic = "projects/{0}/topics/{1}".format(project_id, input_topic)
if gcs_path[-1] == "/":
gcs_path = gcs_path[:-1]
output_path = "{0}/output/".format(gcs_path)
provided_args = {
"project": project_id,
"runner": runner,
"region": region,
"staging_location": "{0}/staging/".format(gcs_path),
"temp_location": "{0}/temp/".format(gcs_path),
"streaming": True,
"save_main_session": True
}
pipeline_options = PipelineOptions(
pipeline_args, **provided_args
)
with Pipeline(options=pipeline_options) as pipeline:
(
pipeline
# Because `timestamp_attribute` is unspecified in `ReadFromPubSub`,
# Beam binds the publish time returned by the Pub/Sub server for
# each message to the element's timestamp parameter, accessible via
# `DoFn.TimestampParam`.
# https://beam.apache.org/releases/pydoc/current/apache_beam.io.gcp.pubsub.html#apache_beam.io.gcp.pubsub.ReadFromPubSub
# https://cloud.google.com/pubsub/docs/stream-messages-dataflow#set_up_your_pubsub_project
| "Read from Pub/Sub" >> io.ReadFromPubSub(topic=input_topic)
| "Parse XML tags and attributes" >> ParDo(ParseXML())
| "Window into" >> GroupMessagesByFixedWindows(window_size,
num_shards)
| "Serialize" >> Map(json.dumps, indent = 2)
| "Write to GCS" >> fileio.WriteToFiles(path=output_path, shards=0)
)
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
"--project_id",
help="The GCP project that hosts the PubSub and Dataflow.",
)
parser.add_argument(
"--input_topic_id",
help="The Cloud Pub/Sub topic to read from.",
)
parser.add_argument(
"--runner",
help="""The beam runner to be used. For cloud Dataflow:
'DataflowRunner'. For local debugging: 'DirectRunner'.
[Defaults to: 'DataflowRunner']""",
default='DataflowRunner',
)
parser.add_argument(
"--region",
help="The GCP region for Dataflow. [Defaults to: 'us-central1']",
default='us-central1',
)
parser.add_argument(
"--window_size",
type=float,
default=1.0,
help="Output file's window size in minutes. [Defaults to: 1.0]",
)
parser.add_argument(
"--gcs_path",
help="Path of the output GCS file including the prefix.",
)
parser.add_argument(
"--num_shards",
type=int,
default=5,
help="""Number of shards to use when writing windowed elements to GCS.
[Defaults to: 5]""",
)
known_args, pipeline_args = parser.parse_known_args()
run(
known_args.project_id,
known_args.input_topic_id,
known_args.gcs_path,
known_args.window_size,
known_args.num_shards,
known_args.runner,
known_args.region,
pipeline_args,
)
| [
"[email protected]"
] | |
b03ff75b410cad50551f9771c230a5908f250464 | 8a62bbff9378187a898f336532bb49de18cb88e4 | /2020-rnn-transducer/configs/rna3c-lm4a.convtrain.switchout6.l2a_1e_4.nohdf.encbottle256.attwb5_am.dec1la-n128.decdrop03.decwdrop03.pretrain_less2_rep6.mlr50.emit2.fl2.rep.fixmask.ctcalignfix-ctcalign-p0-4la.chunk60.encctc.devtrain.config | b61cbb234d7f86453b238052a65f98e61fbba6dc | [] | no_license | rwth-i6/returnn-experiments | e2cdecb67febe646d702282ced8c290f1dd8edd0 | a46021329c030af361e0becb25ea92afca9610ce | refs/heads/master | 2023-06-08T08:56:11.891782 | 2023-05-30T12:46:45 | 2023-05-30T12:46:45 | 67,426,132 | 159 | 52 | null | 2023-05-30T12:46:46 | 2016-09-05T14:07:48 | Python | UTF-8 | Python | false | false | 45,596 | config | #!crnn/rnn.py
# kate: syntax python;
# -*- mode: python -*-
# sublime: syntax 'Packages/Python Improved/PythonImproved.tmLanguage'
# vim:set expandtab tabstop=4 fenc=utf-8 ff=unix ft=python:
# via:
# /u/irie/setups/switchboard/2018-02-13--end2end-zeyer/config-train/bpe_1k.multihead-mlp-h1.red8.enc6l.encdrop03.decbs.ls01.pretrain2.nbd07.config
# Kazuki BPE1k baseline, from Interspeech paper.
import os
import numpy
from subprocess import check_output, CalledProcessError
from TFUtil import DimensionTag
# task
use_tensorflow = True
task = config.value("task", "train")
device = "gpu"
multiprocessing = True
update_on_device = True
debug_mode = False
if int(os.environ.get("DEBUG", "0")):
print("** DEBUG MODE")
debug_mode = True
if config.has("beam_size"):
beam_size = config.int("beam_size", 0)
print("** beam_size %i" % beam_size)
else:
if task == "train":
beam_size = 4
else:
beam_size = 12
_cf_cache = {}
def cf(filename):
"""Cache manager"""
if filename in _cf_cache:
return _cf_cache[filename]
if debug_mode or check_output(["hostname"]).strip().decode("utf8") in ["cluster-cn-211", "sulfid"]:
print("use local file: %s" % filename)
return filename # for debugging
try:
cached_fn = check_output(["cf", filename]).strip().decode("utf8")
except CalledProcessError:
print("Cache manager: Error occured, using local file")
return filename
assert os.path.exists(cached_fn)
_cf_cache[filename] = cached_fn
return cached_fn
# data
target = "bpe"
target_num_labels = 1030
targetb_num_labels = target_num_labels + 1 # with blank
targetb_blank_idx = target_num_labels
time_tag = DimensionTag(kind=DimensionTag.Types.Spatial, description="time")
output_len_tag = DimensionTag(kind=DimensionTag.Types.Spatial, description="output-len") # it's downsampled time
# use "same_dim_tags_as": {"t": time_tag} if same time tag ("data" and "alignment"). e.g. for RNA. not for RNN-T.
extern_data = {
"data": {"dim": 40, "same_dim_tags_as": {"t": time_tag}}, # Gammatone 40-dim
"alignment": {"dim": targetb_num_labels, "sparse": True, "same_dim_tags_as": {"t": output_len_tag}},
#"align_score": {"shape": (1,), "dtype": "float32"},
}
if task != "train":
# During train, we add this via the network (from prev alignment, or linear seg). Otherwise it's not available.
extern_data["targetb"] = {"dim": targetb_num_labels, "sparse": True, "available_for_inference": False}
extern_data[target] = {"dim": target_num_labels, "sparse": True} # must not be used for chunked training
EpochSplit = 6
_import_baseline_setup = "ctcalign.prior0.lstm4la.withchar.lrkeyfix"
_alignment = "%s.epoch-150" % _import_baseline_setup
def get_sprint_dataset(data):
assert data in {"train", "devtrain", "cv", "dev", "hub5e_01", "rt03s"}
epoch_split = {"train": EpochSplit}.get(data, 1)
corpus_name = {"cv": "train", "devtrain": "train"}.get(data, data) # train, dev, hub5e_01, rt03s
hdf_files = None
if data in {"train", "cv", "devtrain"}:
hdf_files = ["base/dump-align/data/%s.data-%s.hdf" % (_alignment, {"cv": "dev", "devtrain": "train"}.get(data, data))]
# see /u/tuske/work/ASR/switchboard/corpus/readme
# and zoltans mail https://mail.google.com/mail/u/0/#inbox/152891802cbb2b40
files = {}
files["config"] = "config/training.config"
files["corpus"] = "/work/asr3/irie/data/switchboard/corpora/%s.corpus.gz" % corpus_name
if data in {"train", "cv", "devtrain"}:
files["segments"] = "dependencies/seg_%s" % {"train":"train", "cv":"cv_head3000", "devtrain": "train_head3000"}[data]
files["features"] = "/u/tuske/work/ASR/switchboard/feature.extraction/gt40_40/data/gt.%s.bundle" % corpus_name
for k, v in sorted(files.items()):
assert os.path.exists(v), "%s %r does not exist" % (k, v)
estimated_num_seqs = {"train": 227047, "cv": 3000, "devtrain": 3000} # wc -l segment-file
args = [
"--config=" + files["config"],
lambda: "--*.corpus.file=" + cf(files["corpus"]),
lambda: "--*.corpus.segments.file=" + (cf(files["segments"]) if "segments" in files else ""),
lambda: "--*.feature-cache-path=" + cf(files["features"]),
"--*.log-channel.file=/dev/null",
"--*.window-size=1",
]
if not hdf_files:
args += [
"--*.corpus.segment-order-shuffle=true",
"--*.segment-order-sort-by-time-length=true",
"--*.segment-order-sort-by-time-length-chunk-size=%i" % {"train": epoch_split * 1000}.get(data, -1),
]
d = {
"class": "ExternSprintDataset", "sprintTrainerExecPath": "sprint-executables/nn-trainer",
"sprintConfigStr": args,
"suppress_load_seqs_print": True, # less verbose
}
d.update(sprint_interface_dataset_opts)
partition_epochs_opts = {
"partition_epoch": epoch_split,
"estimated_num_seqs": (estimated_num_seqs[data] // epoch_split) if data in estimated_num_seqs else None,
}
if hdf_files:
align_opts = {
"class": "HDFDataset", "files": hdf_files,
"use_cache_manager": True,
"seq_list_filter_file": files["segments"], # otherwise not right selection
#"unique_seq_tags": True # dev set can exist multiple times
}
align_opts.update(partition_epochs_opts) # this dataset will control the seq list
if data == "train":
align_opts["seq_ordering"] = "laplace:%i" % (estimated_num_seqs[data] // 1000)
align_opts["seq_order_seq_lens_file"] = "/u/zeyer/setups/switchboard/dataset/data/seq-lens.train.txt.gz"
d = {
"class": "MetaDataset",
"datasets": {"sprint": d, "align": align_opts},
"data_map": {
"data": ("sprint", "data"),
# target: ("sprint", target),
"alignment": ("align", "data"),
#"align_score": ("align", "scores")
},
"seq_order_control_dataset": "align", # it must support get_all_tags
}
else:
d.update(partition_epochs_opts)
return d
sprint_interface_dataset_opts = {
"input_stddev": 3.,
"bpe": {
'bpe_file': '/work/asr3/irie/data/switchboard/subword_clean/ready/swbd_clean.bpe_code_1k',
'vocab_file': '/work/asr3/irie/data/switchboard/subword_clean/ready/vocab.swbd_clean.bpe_code_1k',
# 'seq_postfix': [0] # no EOS needed for RNN-T
}}
train = get_sprint_dataset("train")
dev = get_sprint_dataset("cv")
eval_datasets = {"devtrain": get_sprint_dataset("devtrain")}
cache_size = "0"
window = 1
# Note: We control the warmup in the pretrain construction.
learning_rate = 0.001
min_learning_rate = learning_rate / 50.
def summary(name, x):
"""
:param str name:
:param tf.Tensor x: (batch,time,feature)
"""
import tensorflow as tf
# tf.summary.image wants [batch_size, height, width, channels],
# we have (batch, time, feature).
img = tf.expand_dims(x, axis=3) # (batch,time,feature,1)
img = tf.transpose(img, [0, 2, 1, 3]) # (batch,feature,time,1)
tf.summary.image(name, img, max_outputs=10)
tf.summary.scalar("%s_max_abs" % name, tf.reduce_max(tf.abs(x)))
mean = tf.reduce_mean(x)
tf.summary.scalar("%s_mean" % name, mean)
stddev = tf.sqrt(tf.reduce_mean(tf.square(x - mean)))
tf.summary.scalar("%s_stddev" % name, stddev)
tf.summary.histogram("%s_hist" % name, tf.reduce_max(tf.abs(x), axis=2))
def _mask(x, batch_axis, axis, pos, max_amount, mask_value=0.):
"""
:param tf.Tensor x: (batch,time,[feature])
:param int batch_axis:
:param int axis:
:param tf.Tensor pos: (batch,)
:param int|tf.Tensor max_amount: inclusive
:param float|int mask_value:
"""
import tensorflow as tf
ndim = x.get_shape().ndims
n_batch = tf.shape(x)[batch_axis]
dim = tf.shape(x)[axis]
amount = tf.random_uniform(shape=(n_batch,), minval=1, maxval=max_amount + 1, dtype=tf.int32)
pos2 = tf.minimum(pos + amount, dim)
idxs = tf.expand_dims(tf.range(0, dim), 0) # (1,dim)
pos_bc = tf.expand_dims(pos, 1) # (batch,1)
pos2_bc = tf.expand_dims(pos2, 1) # (batch,1)
cond = tf.logical_and(tf.greater_equal(idxs, pos_bc), tf.less(idxs, pos2_bc)) # (batch,dim)
if batch_axis > axis:
cond = tf.transpose(cond) # (dim,batch)
cond = tf.reshape(cond, [tf.shape(x)[i] if i in (batch_axis, axis) else 1 for i in range(ndim)])
from TFUtil import where_bc
x = where_bc(cond, mask_value, x)
return x
def random_mask(x, batch_axis, axis, min_num, max_num, max_dims, mask_value=0.):
"""
:param tf.Tensor x: (batch,time,feature)
:param int batch_axis:
:param int axis:
:param int|tf.Tensor min_num:
:param int|tf.Tensor max_num: inclusive
:param int|tf.Tensor max_dims: inclusive
:param float|int mask_value:
"""
import tensorflow as tf
n_batch = tf.shape(x)[batch_axis]
if isinstance(min_num, int) and isinstance(max_num, int) and min_num == max_num:
num = min_num
else:
num = tf.random_uniform(shape=(n_batch,), minval=min_num, maxval=max_num + 1, dtype=tf.int32)
# https://github.com/tensorflow/tensorflow/issues/9260
# https://timvieira.github.io/blog/post/2014/08/01/gumbel-max-trick-and-weighted-reservoir-sampling/
z = -tf.log(-tf.log(tf.random_uniform((n_batch, tf.shape(x)[axis]), 0, 1)))
_, indices = tf.nn.top_k(z, num if isinstance(num, int) else tf.reduce_max(num))
# indices should be sorted, and of shape (batch,num), entries (int32) in [0,dim)
# indices = tf.Print(indices, ["indices", indices, tf.shape(indices)])
if isinstance(num, int):
for i in range(num):
x = _mask(x, batch_axis=batch_axis, axis=axis, pos=indices[:, i], max_amount=max_dims, mask_value=mask_value)
else:
_, x = tf.while_loop(
cond=lambda i, _: tf.less(i, tf.reduce_max(num)),
body=lambda i, x: (
i + 1,
tf.where(
tf.less(i, num),
_mask(x, batch_axis=batch_axis, axis=axis, pos=indices[:, i], max_amount=max_dims, mask_value=mask_value),
x)),
loop_vars=(0, x))
return x
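
# Side note on the sampling above (illustration only; this helper is not used
# by the config): the -log(-log(uniform)) noise plus top-k is the Gumbel-max
# trick for drawing k distinct positions without replacement. The same idea in
# plain NumPy:
def _demo_gumbel_topk_positions(dim, k, rng=numpy.random):
    """Sample k distinct indices in [0, dim) via the Gumbel-max trick."""
    z = -numpy.log(-numpy.log(rng.uniform(0., 1., size=(dim,))))  # Gumbel noise
    return numpy.argsort(-z)[:k]  # top-k of i.i.d. Gumbel draws = k uniform picks w/o replacement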
def transform(data, network, time_factor=1):
x = data.placeholder
import tensorflow as tf
# summary("features", x)
step = network.global_train_step
step1 = tf.where(tf.greater_equal(step, 1000), 1, 0)
step2 = tf.where(tf.greater_equal(step, 2000), 1, 0)
def get_masked():
x_masked = x
x_masked = random_mask(
x_masked, batch_axis=data.batch_dim_axis, axis=data.time_dim_axis,
min_num=step1 + step2, max_num=tf.maximum(tf.shape(x)[data.time_dim_axis] // 100, 2) * (1 + step1 + step2 * 2),
max_dims=20 // time_factor)
x_masked = random_mask(
x_masked, batch_axis=data.batch_dim_axis, axis=data.feature_dim_axis,
min_num=step1 + step2, max_num=2 + step1 + step2 * 2,
max_dims=data.dim // 5)
#summary("features_mask", x_masked)
return x_masked
x = network.cond_on_train(get_masked, lambda: x)
return x
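
# Worked example for the masking schedule above (assuming a 300-frame input
# and time_factor=1, as used for the "source" layer): before step 1000 up to
# max(300 // 100, 2) = 3 time masks, up to 6 after step 1000 and up to 12
# after step 2000, each at most 20 frames wide; plus up to 2/3/5 feature
# masks of at most 40 // 5 = 8 channels each.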
def switchout_target(self, source, **kwargs):
import tensorflow as tf
from TFUtil import where_bc
network = self.network
time_factor = 6
data = source(0, as_data=True)
assert data.is_batch_major # just not implemented otherwise
x = data.placeholder
def get_switched():
x_ = x
shape = tf.shape(x)
n_batch = tf.shape(x)[data.batch_dim_axis]
n_time = tf.shape(x)[data.time_dim_axis]
take_rnd_mask = tf.less(tf.random_uniform(shape=shape, minval=0., maxval=1.), 0.05)
take_blank_mask = tf.less(tf.random_uniform(shape=shape, minval=0., maxval=1.), 0.5)
rnd_label = tf.random_uniform(shape=shape, minval=0, maxval=target_num_labels, dtype=tf.int32)
rnd_label = where_bc(take_blank_mask, targetb_blank_idx, rnd_label)
x_ = where_bc(take_rnd_mask, rnd_label, x_)
x_ = random_mask(
x_, batch_axis=data.batch_dim_axis, axis=data.time_dim_axis,
min_num=0, max_num=tf.maximum(tf.shape(x)[data.time_dim_axis] // (50 // time_factor), 1),
max_dims=20 // time_factor,
mask_value=targetb_blank_idx)
#x_ = tf.Print(x_, ["switch", x[0], "to", x_[0]], summarize=100)
return x_
x = network.cond_on_train(get_switched, lambda: x)
return x
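
# Sketch of the effect (illustration): roughly 5% of the alignment frames are
# replaced, half of those with blank and half with a random BPE label, and on
# top of that a few random time spans are overwritten entirely with blank --
# a label-level analogue of SpecAugment ("switchout").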
def targetb_linear(source, **kwargs):
from TFUtil import get_rnnt_linear_aligned_output
enc = source(1, as_data=True, auto_convert=False)
dec = source(0, as_data=True, auto_convert=False)
enc_lens = enc.get_sequence_lengths()
dec_lens = dec.get_sequence_lengths()
out, out_lens = get_rnnt_linear_aligned_output(
input_lens=enc_lens,
target_lens=dec_lens, targets=dec.get_placeholder_as_batch_major(),
blank_label_idx=targetb_blank_idx,
targets_consume_time=True)
return out
def targetb_linear_out(sources, **kwargs):
from TFUtil import Data
enc = sources[1].output
dec = sources[0].output
size = enc.get_sequence_lengths() # + dec.get_sequence_lengths()
#output_len_tag.set_tag_on_size_tensor(size)
return Data(name="targetb_linear", sparse=True, dim=targetb_num_labels, size_placeholder={0: size})
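
# Illustration (the exact positions are up to
# TFUtil.get_rnnt_linear_aligned_output; this is just the idea): with
# targets_consume_time=True the alignment has exactly the encoder length, and
# the targets are spread evenly over it, e.g. input length 6, targets [a, b]
# and blank "_" give something like [_, a, _, _, b, _].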
def targetb_search_or_fallback(source, **kwargs):
import tensorflow as tf
from TFUtil import where_bc
ts_linear = source(0) # (B,T)
ts_search = source(1) # (B,T)
l = source(2, auto_convert=False) # (B,)
return where_bc(tf.less(l[:, None], 0.01), ts_search, ts_linear)
def targetb_recomb_train(layer, batch_dim, scores_in, scores_base, base_beam_in, end_flags, **kwargs):
"""
:param ChoiceLayer layer:
:param tf.Tensor batch_dim: scalar
    :param tf.Tensor scores_in: (batch,base_beam_in,dim). log prob frame distribution
    :param tf.Tensor scores_base: (batch,base_beam_in,1). existing beam scores
:param tf.Tensor end_flags: (batch,base_beam_in)
:param tf.Tensor base_beam_in: int32 scalar, 1 or prev beam size
:rtype: tf.Tensor
:return: (batch,base_beam_in,dim), combined scores
"""
import tensorflow as tf
from TFUtil import where_bc, nd_indices, tile_transposed
scores = scores_in + scores_base # (batch,beam,dim)
dim = layer.output.dim
u = layer.explicit_search_sources[0].output # prev:u actually. [B*beam], pos in target [0..decT-1]
assert u.shape == ()
u_t = tf.reshape(tf.reshape(u.placeholder, (batch_dim, -1))[:,:base_beam_in], (-1,)) # u beam might differ from base_beam_in
targets = layer.network.parent_net.extern_data.data[target] # BPE targets, [B,decT]
assert targets.shape == (None,) and targets.is_batch_major
target_lens = targets.get_sequence_lengths() # [B]
target_lens_exp = tile_transposed(target_lens, axis=0, multiples=base_beam_in) # [B*beam]
missing_targets = target_lens_exp - u_t # [B*beam]
allow_target = tf.greater(missing_targets, 0) # [B*beam]
targets_exp = tile_transposed(targets.placeholder, axis=0, multiples=base_beam_in) # [B*beam,decT]
targets_u = tf.gather_nd(targets_exp, indices=nd_indices(where_bc(allow_target, u_t, 0))) # [B*beam]
targets_u = tf.reshape(targets_u, (batch_dim, base_beam_in)) # (batch,beam)
allow_target = tf.reshape(allow_target, (batch_dim, base_beam_in)) # (batch,beam)
#t = layer.explicit_search_sources[1].output # prev:t actually. [B*beam], pos in encoder [0..encT-1]
#assert t.shape == ()
#t_t = tf.reshape(tf.reshape(t.placeholder, (batch_dim, -1))[:,:base_beam_in], (-1,)) # t beam might differ from base_beam_in
t_t = layer.network.get_rec_step_index() - 1 # scalar
inputs = layer.network.parent_net.get_layer("encoder").output # encoder, [B,encT]
input_lens = inputs.get_sequence_lengths() # [B]
input_lens_exp = tile_transposed(input_lens, axis=0, multiples=base_beam_in) # [B*beam]
allow_blank = tf.less(missing_targets, input_lens_exp - t_t) # [B*beam]
allow_blank = tf.reshape(allow_blank, (batch_dim, base_beam_in)) # (batch,beam)
dim_idxs = tf.range(dim)[None,None,:] # (1,1,dim)
masked_scores = where_bc(
tf.logical_or(
tf.logical_and(tf.equal(dim_idxs, targetb_blank_idx), allow_blank[:,:,None]),
tf.logical_and(tf.equal(dim_idxs, targets_u[:,:,None]), allow_target[:,:,None])),
scores, float("-inf"))
return where_bc(end_flags[:,:,None], scores, masked_scores)
def get_vocab_tf():
from GeneratingDataset import Vocabulary
import TFUtil
import tensorflow as tf
vocab = Vocabulary.create_vocab(**sprint_interface_dataset_opts["bpe"])
labels = vocab.labels # bpe labels ("@@" at end, or not), excluding blank
labels = [(l + " ").replace("@@ ", "") for l in labels] + [""]
labels_t = TFUtil.get_shared_vocab(labels)
return labels_t
def get_vocab_sym(i):
"""
:param tf.Tensor i: e.g. [B], int32
:return: same shape as input, string
:rtype: tf.Tensor
"""
import tensorflow as tf
return tf.gather(params=get_vocab_tf(), indices=i)
def out_str(source, **kwargs):
# ["prev:out_str", "output_emit", "output"]
import tensorflow as tf
from TFUtil import where_bc
return source(0) + where_bc(source(1), get_vocab_sym(source(2)), tf.constant(""))
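
# Example (illustrative): get_vocab_tf() maps BPE labels like ["hel@@", "lo"]
# to ["hel", "lo "] (plus "" for blank), so accumulating with out_str over the
# emitted frames turns that label sequence into the plain string "hello ".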
def get_filtered_score_op(verbose=False):
cpp_code = """
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/resource_op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/version.h"
#include <cmath>
#include <map>
#include <set>
#include <string>
#include <tuple>
using namespace tensorflow;
REGISTER_OP("GetFilteredScore")
.Input("prev_str: string")
.Input("scores: float32")
.Input("labels: string")
.Output("new_scores: float32")
.SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
c->set_output(0, c->input(1));
return Status::OK();
});
class GetFilteredScoreOp : public OpKernel {
public:
using OpKernel::OpKernel;
void Compute(OpKernelContext* context) override {
const Tensor* prev_str = &context->input(0);
const Tensor* scores = &context->input(1);
const Tensor* labels = &context->input(2);
int n_batch = prev_str->shape().dim_size(0);
int n_beam = prev_str->shape().dim_size(1);
Tensor* ret;
OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({n_batch, n_beam}), &ret));
for(int bat = 0; bat < n_batch; ++bat)
for(int hyp = 0; hyp < n_beam; ++hyp)
ret->tensor<float, 2>()(bat, hyp) = scores->tensor<float, 2>()(bat, hyp);
for(int bat = 0; bat < n_batch; ++bat) {
std::map<std::string, std::set<int> > new_hyps; // seq -> set of hyp idx
for(int hyp = 0; hyp < n_beam; ++hyp) {
auto& seq_set = new_hyps[prev_str->tensor<string, 2>()(bat, hyp)];
seq_set.insert(hyp);
}
for(const auto& items : new_hyps) {
if(std::get<1>(items).size() > 1) {
float best_score = 0.;
int best_idx = -1;
for(int idx : std::get<1>(items)) {
float score = scores->tensor<float, 2>()(bat, idx);
if(score > best_score || best_idx == -1) {
best_score = score;
best_idx = idx;
}
}
float sum_score = 0.;
for(int idx : std::get<1>(items)) {
float score = scores->tensor<float, 2>()(bat, idx);
sum_score += expf(score - best_score);
}
sum_score = logf(sum_score) + best_score;
for(int idx : std::get<1>(items)) {
if(idx != best_idx)
ret->tensor<float, 2>()(bat, idx) = -std::numeric_limits<float>::infinity();
else
ret->tensor<float, 2>()(bat, idx) = sum_score;
}
}
}
}
}
};
REGISTER_KERNEL_BUILDER(Name("GetFilteredScore").Device(DEVICE_CPU), GetFilteredScoreOp);
"""
from TFUtil import OpCodeCompiler
compiler = OpCodeCompiler(
base_name="GetFilteredScore", code_version=1, code=cpp_code,
is_cpp=True, use_cuda_if_available=False, verbose=verbose)
tf_mod = compiler.load_tf_module()
return tf_mod.get_filtered_score
def get_filtered_score_cpp(prev_str, scores, labels):
"""
:param tf.Tensor prev_str: (batch,beam)
:param tf.Tensor scores: (batch,beam)
:param list[bytes] labels: len (dim)
:return: scores with logsumexp at best, others -inf, (batch,beam)
:rtype: tf.Tensor
"""
import TFUtil
labels_t = TFUtil.get_shared_vocab(labels)
return get_filtered_score_op()(prev_str, scores, labels_t)
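
# What the op computes per batch entry (sketch): hypotheses whose prev_str is
# identical are merged; the best hypothesis of each group keeps
# logsumexp(group scores) and all others are set to -inf, e.g. two hyps with
# the same string and scores [-1.0, -1.2] become
# [log(exp(-1.0) + exp(-1.2)), -inf].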
def targetb_recomb_recog(layer, batch_dim, scores_in, scores_base, base_beam_in, end_flags, **kwargs):
"""
:param ChoiceLayer layer:
:param tf.Tensor batch_dim: scalar
    :param tf.Tensor scores_in: (batch,base_beam_in,dim). log prob frame distribution
    :param tf.Tensor scores_base: (batch,base_beam_in,1). existing beam scores
:param tf.Tensor end_flags: (batch,base_beam_in)
:param tf.Tensor base_beam_in: int32 scalar, 1 or prev beam size
:rtype: tf.Tensor
:return: (batch,base_beam_in,dim), combined scores
"""
import tensorflow as tf
from TFUtil import where_bc, nd_indices, tile_transposed
dim = layer.output.dim
prev_str = layer.explicit_search_sources[0].output # [B*beam], str
prev_str_t = tf.reshape(prev_str.placeholder, (batch_dim, -1))[:,:base_beam_in]
prev_out = layer.explicit_search_sources[1].output # [B*beam], int32
prev_out_t = tf.reshape(prev_out.placeholder, (batch_dim, -1))[:,:base_beam_in]
from GeneratingDataset import Vocabulary
import TFUtil
import tensorflow as tf
vocab = Vocabulary.create_vocab(**sprint_interface_dataset_opts["bpe"])
labels = vocab.labels # bpe labels ("@@" at end, or not), excluding blank
labels = [(l + " ").replace("@@ ", "").encode("utf8") for l in labels] + [b""]
# Pre-filter approx (should be much faster), sum approx (better).
scores_base = tf.reshape(get_filtered_score_cpp(prev_str_t, tf.reshape(scores_base, (batch_dim, base_beam_in)), labels), (batch_dim, base_beam_in, 1))
scores = scores_in + scores_base # (batch,beam,dim)
# Mask -> max approx, in all possible options, slow.
#mask = get_score_mask_cpp(prev_str_t, prev_out_t, scores, labels)
#masked_scores = where_bc(mask, scores, float("-inf"))
# Sum approx in all possible options, slow.
#masked_scores = get_new_score_cpp(prev_str_t, prev_out_t, scores, labels)
#scores = where_bc(end_flags[:,:,None], scores, masked_scores)
return scores
StoreAlignmentUpToEpoch = 10 * EpochSplit # 0 based, exclusive
AlignmentFilenamePattern = "net-model/alignments.%i.hdf"
def get_most_recent_align_hdf_files(epoch0):
"""
:param int epoch0: 0-based (sub) epoch
:return: filenames or None if there is nothing completed yet
:rtype: list[str]|None
"""
if epoch0 < EpochSplit:
return None
if epoch0 > StoreAlignmentUpToEpoch:
epoch0 = StoreAlignmentUpToEpoch # first epoch after
i = ((epoch0 - EpochSplit) // EpochSplit) * EpochSplit
return [AlignmentFilenamePattern % j for j in range(i, i + EpochSplit)]
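
# Worked example (EpochSplit = 6): epoch0 = 13 gives
# i = ((13 - 6) // 6) * 6 = 6, i.e. the HDF files of sub-epochs 6..11 -- the
# most recent fully completed sweep over the training data.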
#import_model_train_epoch1 = "base/data-train/base2.conv2l.specaug4a/net-model/network.160"
#_train_setup_dir = "data-train/base2.conv2l.specaug4a"
#model = _train_setup_dir + "/net-model/network"
preload_from_files = {
#"base": {
# "init_for_train": True,
# "ignore_missing": True,
# "filename": "/u/zeyer/setups/switchboard/2018-10-02--e2e-bpe1k/data-train/base2.conv2l.specaug4a/net-model/network.160",
#},
#"encoder": {
# "init_for_train": True,
# "ignore_missing": True,
# "filename": "/u/zeyer/setups/switchboard/2017-12-11--returnn/data-train/#dropout01.l2_1e_2.6l.n500.inpstddev3.fl2.max_seqs100.grad_noise03.nadam.lr05e_3.nbm6.nbrl.grad_clip_inf.nbm3.run1/net-model/network.077",
#},
#"encoder": {
# "init_for_train": True,
# "ignore_missing": True,
# "ignore_params_prefixes": {"output/"},
# "filename": "/u/zeyer/setups/switchboard/2019-10-22--e2e-bpe1k/data-train/%s/net-model/network.pretrain.250" % _import_baseline_setup,
#}
}
#lm_model_filename = "/work/asr3/irie/experiments/lm/switchboard/2018-01-23--lmbpe-zeyer/data-train/bpe1k_clean_i256_m2048_m2048.sgd_b16_lr0_cl2.newbobabs.d0.2/net-model/network.023"
def get_net_dict(pretrain_idx):
"""
:param int|None pretrain_idx: starts at 0. note that this has a default repetition factor of 6
:return: net_dict or None if pretrain should stop
:rtype: dict[str,dict[str]|int]|None
"""
# Note: epoch0 is 0-based here! I.e. in contrast to elsewhere, where it is 1-based.
# Also, we never use #repetition here, such that this is correct.
# This is important because of sub-epochs and storing the HDF files,
# to know exactly which HDF files cover the dataset completely.
epoch0 = pretrain_idx
net_dict = {}
# network
# (also defined by num_inputs & num_outputs)
EncKeyTotalDim = 200
AttNumHeads = 1 # must be 1 for hard-att
AttentionDropout = 0.1
EncKeyPerHeadDim = EncKeyTotalDim // AttNumHeads
EncValueTotalDim = 2048
EncValuePerHeadDim = EncValueTotalDim // AttNumHeads
LstmDim = EncValueTotalDim // 2
l2 = 0.0001
have_existing_align = True # only in training, and only in pretrain, and only after the first epoch
if pretrain_idx is not None:
net_dict["#config"] = {}
# Do this in the very beginning.
#lr_warmup = [0.0] * EpochSplit # first collect alignments with existing model, no training
lr_warmup = list(numpy.linspace(learning_rate * 0.1, learning_rate, num=10))
#lr_warmup += [learning_rate] * 20
if pretrain_idx < len(lr_warmup):
net_dict["#config"]["learning_rate"] = lr_warmup[pretrain_idx]
#if pretrain_idx >= EpochSplit + EpochSplit // 2:
# net_dict["#config"]["param_variational_noise"] = 0.1
#pretrain_idx -= len(lr_warmup)
use_targetb_search_as_target = False # not have_existing_align or epoch0 < StoreAlignmentUpToEpoch
keep_linear_align = False # epoch0 is not None and epoch0 < EpochSplit * 2
# We import the model, thus no growing.
start_num_lstm_layers = 2
final_num_lstm_layers = 6
num_lstm_layers = final_num_lstm_layers
if pretrain_idx is not None:
pretrain_idx = max(pretrain_idx, 0) // 6 # Repeat a bit.
num_lstm_layers = pretrain_idx + start_num_lstm_layers
pretrain_idx = num_lstm_layers - final_num_lstm_layers
num_lstm_layers = min(num_lstm_layers, final_num_lstm_layers)
if final_num_lstm_layers > start_num_lstm_layers:
start_dim_factor = 0.5
grow_frac = 1.0 - float(final_num_lstm_layers - num_lstm_layers) / (final_num_lstm_layers - start_num_lstm_layers)
dim_frac = start_dim_factor + (1.0 - start_dim_factor) * grow_frac
else:
dim_frac = 1.
time_reduction = [3, 2] if num_lstm_layers >= 3 else [6]
if pretrain_idx is not None and pretrain_idx <= 1 and "learning_rate" not in net_dict["#config"]:
# Fixed learning rate for the beginning.
net_dict["#config"]["learning_rate"] = learning_rate
net_dict["#info"] = {
"epoch0": epoch0, # Set this here such that a new construction for every pretrain idx is enforced in all cases.
"num_lstm_layers": num_lstm_layers,
"dim_frac": dim_frac,
"have_existing_align": have_existing_align,
"use_targetb_search_as_target": use_targetb_search_as_target,
"keep_linear_align": keep_linear_align,
}
# We use this pretrain construction during the whole training time (epoch0 > num_epochs).
if pretrain_idx is not None and epoch0 % EpochSplit == 0 and epoch0 > num_epochs:
# Stop pretraining now.
return None
net_dict.update({
"source": {"class": "eval", "eval": "self.network.get_config().typed_value('transform')(source(0, as_data=True), network=self.network)"},
"source0": {"class": "split_dims", "axis": "F", "dims": (-1, 1), "from": "source"}, # (T,40,1)
# Lingvo: ep.conv_filter_shapes = [(3, 3, 1, 32), (3, 3, 32, 32)], ep.conv_filter_strides = [(2, 2), (2, 2)]
"conv0": {"class": "conv", "from": "source0", "padding": "same", "filter_size": (3, 3), "n_out": 32, "activation": None, "with_bias": True}, # (T,40,32)
"conv0p": {"class": "pool", "mode": "max", "padding": "same", "pool_size": (1, 2), "from": "conv0"}, # (T,20,32)
"conv1": {"class": "conv", "from": "conv0p", "padding": "same", "filter_size": (3, 3), "n_out": 32, "activation": None, "with_bias": True}, # (T,20,32)
"conv1p": {"class": "pool", "mode": "max", "padding": "same", "pool_size": (1, 2), "from": "conv1"}, # (T,10,32)
"conv_merged": {"class": "merge_dims", "from": "conv1p", "axes": "static"}, # (T,320)
# Encoder LSTMs added below, resulting in "encoder0".
#"encoder": {"class": "postfix_in_time", "postfix": 0.0, "from": "encoder0"},
"encoder": {"class": "linear", "from": "encoder0", "n_out": 256, "activation": None},
"enc_ctx0": {"class": "linear", "from": "encoder", "activation": None, "with_bias": False, "n_out": EncKeyTotalDim},
"enc_ctx_win": {"class": "window", "from": "enc_ctx0", "window_size": 5}, # [B,T,W,D]
"enc_val": {"class": "copy", "from": "encoder"},
"enc_val_win": {"class": "window", "from": "enc_val", "window_size": 5}, # [B,T,W,D]
"enc_seq_len": {"class": "length", "from": "encoder", "sparse": True},
# for task "search" / search_output_layer
"output_wo_b0": {
"class": "masked_computation", "unit": {"class": "copy"},
"from": "output", "mask": "output/output_emit"},
"output_wo_b": {"class": "reinterpret_data", "from": "output_wo_b0", "set_sparse_dim": target_num_labels},
"decision": {
"class": "decide", "from": "output_wo_b", "loss": "edit_distance", "target": target,
'only_on_search': True},
"targetb_linear": {
"class": "eval", "from": ["data:%s" % target, "encoder"], "eval": targetb_linear,
"out_type": targetb_linear_out},
# Target for decoder ('output') with search ("extra.search") in training.
# The layer name must be smaller than "t_target" such that this is created first.
"1_targetb_base": {
"class": "copy",
"from": "existing_alignment", # if have_existing_align else "targetb_linear",
"register_as_extern_data": "targetb_base" if task == "train" else None},
"2_targetb_target": {
"class": "eval",
"from": "targetb_search_or_fallback" if use_targetb_search_as_target else "data:targetb_base",
"eval": "source(0)",
"register_as_extern_data": "targetb" if task == "train" else None},
"ctc_out": {"class": "softmax", "from": "encoder", "with_bias": False, "n_out": targetb_num_labels},
#"ctc_out_prior": {"class": "reduce", "mode": "mean", "axes": "bt", "from": "ctc_out"},
## log-likelihood: combine out + prior
"ctc_out_scores": {
"class": "eval", "from": ["ctc_out"],
"eval": "safe_log(source(0))",
#"eval": "safe_log(source(0)) * am_scale - tf.stop_gradient(safe_log(source(1)) * prior_scale)",
#"eval_locals": {
#"am_scale": 1.0, # WrapEpochValue(lambda epoch: numpy.clip(0.05 * epoch, 0.1, 0.3)),
#"prior_scale": 0.5 # WrapEpochValue(lambda epoch: 0.5 * numpy.clip(0.05 * epoch, 0.1, 0.3))
#}
},
"_target_masked": {"class": "masked_computation",
"mask": "output/output_emit",
"from": "output",
"unit": {"class": "copy"}},
"3_target_masked": {
"class": "reinterpret_data", "from": "_target_masked",
"set_sparse_dim": target_num_labels, # we masked blank away
"enforce_batch_major": True, # ctc not implemented otherwise...
"register_as_extern_data": "targetb_masked" if task == "train" else None},
"ctc": {"class": "copy", "from": "ctc_out_scores",
"loss": "ctc" if task == "train" else None,
"target": "targetb_masked" if task == "train" else None,
"loss_opts": {
"beam_width": 1, "use_native": True, "output_in_log_space": True,
"ctc_opts": {"logits_normalize": False}} if task == "train" else None
},
#"ctc_align": {"class": "forced_align", "from": "ctc_out_scores", "input_type": "log_prob",
#"align_target": "data:%s" % target, "topology": "ctc"},
})
if have_existing_align:
net_dict.update({
# This should be compatible to t_linear or t_search.
"existing_alignment": {
"class": "reinterpret_data", "from": "data:alignment",
"set_sparse": True, # not sure what the HDF gives us
"set_sparse_dim": targetb_num_labels,
"size_base": "encoder", # for RNA...
},
# This should be compatible to search_score.
#"existing_align_score": {
# "class": "squeeze", "from": "data:align_score", "axis": "f",
# "loss": "as_is", "loss_scale": 0
# }
})
# Add encoder BLSTM stack.
src = "conv_merged"
if num_lstm_layers >= 1:
net_dict.update({
"lstm0_fw": {"class": "rec", "unit": "nativelstm2", "n_out": int(LstmDim * dim_frac), "L2": l2, "direction": 1, "from": src, "trainable": True},
"lstm0_bw": {"class": "rec", "unit": "nativelstm2", "n_out": int(LstmDim * dim_frac), "L2": l2, "direction": -1, "from": src, "trainable": True}})
src = ["lstm0_fw", "lstm0_bw"]
for i in range(1, num_lstm_layers):
red = time_reduction[i - 1] if (i - 1) < len(time_reduction) else 1
net_dict.update({
"lstm%i_pool" % (i - 1): {"class": "pool", "mode": "max", "padding": "same", "pool_size": (red,), "from": src}})
src = "lstm%i_pool" % (i - 1)
net_dict.update({
"lstm%i_fw" % i: {"class": "rec", "unit": "nativelstm2", "n_out": int(LstmDim * dim_frac), "L2": l2, "direction": 1, "from": src, "dropout": 0.3 * dim_frac, "trainable": True},
"lstm%i_bw" % i: {"class": "rec", "unit": "nativelstm2", "n_out": int(LstmDim * dim_frac), "L2": l2, "direction": -1, "from": src, "dropout": 0.3 * dim_frac, "trainable": True}})
src = ["lstm%i_fw" % i, "lstm%i_bw" % i]
net_dict["encoder0"] = {"class": "copy", "from": src} # dim: EncValueTotalDim
def get_output_dict(train, search, targetb, beam_size=beam_size):
return {
"class": "rec",
"from": "encoder",
"include_eos": True,
"back_prop": (task == "train") and train,
"unit": {
#"am": {"class": "gather_nd", "from": "base:encoder", "position": "prev:t"}, # [B,D]
"am": {"class": "copy", "from": "data:source"},
# could make more efficient...
"enc_ctx_win": {"class": "gather_nd", "from": "base:enc_ctx_win", "position": ":i"}, # [B,W,D]
"enc_val_win": {"class": "gather_nd", "from": "base:enc_val_win", "position": ":i"}, # [B,W,D]
"att_query": {"class": "linear", "from": "am", "activation": None, "with_bias": False, "n_out": EncKeyTotalDim},
'att_energy': {"class": "dot", "red1": "f", "red2": "f", "var1": "static:0", "var2": None,
"from": ['enc_ctx_win', 'att_query']}, # (B, W)
'att_weights0': {"class": "softmax_over_spatial", "axis": "static:0", "from": 'att_energy',
"energy_factor": EncKeyPerHeadDim ** -0.5}, # (B, W)
'att_weights1': {"class": "dropout", "dropout_noise_shape": {"*": None},
"from": 'att_weights0', "dropout": AttentionDropout},
"att_weights": {"class": "merge_dims", "from": "att_weights1", "axes": "except_time"},
'att': {"class": "dot", "from": ['att_weights', 'enc_val_win'],
"red1": "static:0", "red2": "static:0", "var1": None, "var2": "f"}, # (B, V)
"prev_out_non_blank": {
"class": "reinterpret_data", "from": "prev:output_", "set_sparse_dim": target_num_labels},
"lm_masked": {"class": "masked_computation",
"mask": "prev:output_emit",
"from": "prev_out_non_blank", # in decoding
"unit": {
"class": "subnetwork", "from": "data",
"subnetwork": {
"input_embed": {"class": "linear", "activation": None, "with_bias": False, "from": "data", "n_out": 621},
"lstm0": {"class": "rec", "unit": "nativelstm2", "n_out": LstmDim, "from": ["input_embed", "base:att"]},
"output": {"class": "copy", "from": "lstm0"}
}}},
"lm_embed_masked": {"class": "copy", "from": "lm_masked"},
"lm_embed_unmask": {"class": "unmask", "from": "lm_embed_masked", "mask": "prev:output_emit"},
"lm": {"class": "copy", "from": "lm_embed_unmask"}, # [B,L]
"prev_label_masked": {"class": "masked_computation",
"mask": "prev:output_emit",
"from": "prev_out_non_blank", # in decoding
"unit": {"class": "linear", "activation": None, "n_out": 256}},
"prev_label_unmask": {"class": "unmask", "from": "prev_label_masked", "mask": "prev:output_emit"},
"prev_out_embed": {"class": "linear", "from": "prev:output_", "activation": None, "n_out": 128},
"s": {"class": "rec", "unit": "nativelstm2", "from": ["am", "prev_out_embed", "lm"], "n_out": 128, "L2": l2, "dropout": 0.3, "unit_opts": {"rec_weight_dropout": 0.3}},
"readout_in": {"class": "linear", "from": ["s", "att", "lm"], "activation": None, "n_out": 1000},
"readout": {"class": "reduce_out", "mode": "max", "num_pieces": 2, "from": "readout_in"},
"label_log_prob": {
"class": "linear", "from": "readout", "activation": "log_softmax", "dropout": 0.3, "n_out": target_num_labels},
"label_prob": {
"class": "activation", "from": "label_log_prob", "activation": "exp"},
"emit_prob0": {"class": "linear", "from": "s", "activation": None, "n_out": 1, "is_output_layer": True},
"emit_log_prob": {"class": "activation", "from": "emit_prob0", "activation": "log_sigmoid"},
"blank_log_prob": {"class": "eval", "from": "emit_prob0", "eval": "tf.log_sigmoid(-source(0))"},
"label_emit_log_prob": {"class": "combine", "kind": "add", "from": ["label_log_prob", "emit_log_prob"]}, # 1 gets broadcasted
"output_log_prob": {"class": "copy", "from": ["label_emit_log_prob", "blank_log_prob"]},
"output_prob": {
"class": "activation", "from": "output_log_prob", "activation": "exp",
"target": targetb, "loss": "ce", "loss_opts": {"focal_loss_factor": 2.0}
},
#"output_ce": {
# "class": "loss", "from": "output_prob", "target_": "layer:output", "loss_": "ce", "loss_opts_": {"label_smoothing": 0.1},
# "loss": "as_is" if train else None, "loss_scale": 0 if train else None},
#"output_err": {"class": "copy", "from": "output_ce/error", "loss": "as_is" if train else None, "loss_scale": 0 if train else None},
#"output_ce_blank": {"class": "eval", "from": "output_ce", "eval": "source(0) * 0.03"}, # non-blank/blank factor
#"loss": {"class": "switch", "condition": "output_is_blank", "true_from": "output_ce_blank", "false_from": "output_ce", "loss": "as_is" if train else None},
'output': {
'class': 'choice', 'target': targetb, 'beam_size': beam_size,
'from': "output_log_prob", "input_type": "log_prob",
"initial_output": 0,
"cheating": "exclusive" if task == "train" else None,
#"explicit_search_sources": ["prev:u"] if task == "train" else None,
#"custom_score_combine": targetb_recomb_train if task == "train" else None
"explicit_search_sources": ["prev:out_str", "prev:output"] if task == "search" else None,
"custom_score_combine": targetb_recomb_recog if task == "search" else None
},
"output_": {
"class": "eval", "from": "output", "eval": switchout_target, "initial_output": 0,
} if task == "train" else {"class": "copy", "from": "output", "initial_output": 0},
"out_str": {
"class": "eval", "from": ["prev:out_str", "output_emit", "output"],
"initial_output": None, "out_type": {"shape": (), "dtype": "string"},
"eval": out_str},
"output_is_not_blank": {"class": "compare", "from": "output_", "value": targetb_blank_idx, "kind": "not_equal", "initial_output": True},
"output_is_diff_to_before": {"class": "compare", "from": ["output_", "prev:output_"], "kind": "not_equal"},
# We allow repetitions of the output label. This "output_emit" is True on the first label but False otherwise, and False on blank.
"output_emit": {
"class": "eval", "from": ["output_is_not_blank", "output_is_diff_to_before"],
"is_output_layer": True, "initial_output": True,
"eval": "tf.logical_and(source(0), source(1))"},
"const0": {"class": "constant", "value": 0, "collocate_with": "du"},
"const1": {"class": "constant", "value": 1, "collocate_with": "du"},
# pos in target, [B]
"du": {"class": "switch", "condition": "output_emit", "true_from": "const1", "false_from": "const0"},
"u": {"class": "combine", "from": ["prev:u", "du"], "kind": "add", "initial_output": 0},
#"end": {"class": "compare", "from": ["t", "base:enc_seq_len"], "kind": "greater_equal"},
},
"target": targetb,
"size_target": targetb if task == "train" else None,
"max_seq_len": "max_len_from('base:encoder') * 2"}
net_dict["output"] = get_output_dict(train=True, search=(task != "train"), targetb="targetb")
return net_dict
network = get_net_dict(pretrain_idx=None)
search_output_layer = "decision"
debug_print_layer_output_template = True
# trainer
batching = "random"
# Seq-length 'data' Stats:
# 37867 seqs
# Mean: 447.397258827
# Std dev: 350.353162012
# Min/max: 15 / 2103
# Seq-length 'bpe' Stats:
# 37867 seqs
# Mean: 14.1077719386
# Std dev: 13.3402518828
# Min/max: 2 / 82
log_batch_size = True
batch_size = 10000
max_seqs = 200
#max_seq_length = {"bpe": 75}
_time_red = 6
_chunk_size = 60
chunking = ({
"data": _chunk_size * _time_red,
"alignment": _chunk_size,
}, {
"data": _chunk_size * _time_red // 2,
"alignment": _chunk_size // 2,
})
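# "data" chunks are _time_red times longer than "alignment" chunks so that
# both cover the same time span after the encoder's time reduction.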
# chunking_variance ...
# min_chunk_size ...
def custom_construction_algo(idx, net_dict):
# For debugging, use: python3 ./crnn/Pretrain.py config...
return get_net_dict(pretrain_idx=idx)
# No repetitions here. We explicitly do that in the construction.
pretrain = {"copy_param_mode": "subset", "construction_algo": custom_construction_algo}
num_epochs = 150
model = "net-model/network"
cleanup_old_models = True
gradient_clip = 0
#gradient_clip_global_norm = 1.0
adam = True
optimizer_epsilon = 1e-8
accum_grad_multiple_step = 2
#debug_add_check_numerics_ops = True
#debug_add_check_numerics_on_output = True
stop_on_nonfinite_train_score = False
tf_log_memory_usage = True
gradient_noise = 0.0
# lr set above
learning_rate_control = "newbob_multi_epoch"
learning_rate_control_error_measure = "dev_error_output/output_prob"
learning_rate_control_relative_error_relative_lr = True
learning_rate_control_min_num_epochs_per_new_lr = 3
use_learning_rate_control_always = True
newbob_multi_num_epochs = 6
newbob_multi_update_interval = 1
newbob_learning_rate_decay = 0.7
learning_rate_file = "newbob.data"
# log
#log = "| /u/zeyer/dotfiles/system-tools/bin/mt-cat.py >> log/crnn.seq-train.%s.log" % task
log = "log/crnn.%s.log" % task
log_verbosity = 5
| [
"[email protected]"
] | |
a7ee7d01542a6a2c392badd3a337b82978760149 | 978228e0c7291e6dad04a49ac8fdbd2e17322b6b | /PythonAutomats/Tinder/song_of_the_day_tinder.py | 71674fd8278ca4cc0f848d30fdfb6f961e6da1bc | [] | no_license | tdworowy/PythonAutomats | f5edbea96b53e1e452f16457ba44a31e7750d7ab | 84d30db03b5be936092622d446b0fc9834dfa2de | refs/heads/master | 2022-08-27T21:12:24.963091 | 2022-08-15T08:39:46 | 2022-08-15T08:39:46 | 73,401,085 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | import sys
from random import choice
from Api.Songs import ApiAdapter
from Chrome_Driver_Folder.driver_path import get_driver_path
from Songs.last_fm_parser import FOLDER_PATH, update_songs_distribution
from Tinder.tinder_Api import TinderMessageBot, TinderAdapter
from Utils.decorators import log_exception
from Youtube.Youtube_bot_requests import get_youtube_url
from selenium import webdriver
@log_exception()
def main(login, password, names):
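    # Pick a random song title from the scraped last.fm list, resolve it to a
    # YouTube URL, and send that link to the given Tinder matches.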
update_songs_distribution()
tinder_bot = TinderMessageBot()
chrome_driver_path = get_driver_path() + '\\chromedriver.exe'
driver = webdriver.Chrome(chrome_driver_path)
with open(FOLDER_PATH, 'r') as f:
songs_list = f.read()
songs_list = songs_list.split("\n")
song_title = choice(songs_list)
adapter = TinderAdapter(tiderBot=tinder_bot, name='tomasz.dworowy', receivers=names, driver=driver)
song = ApiAdapter(adapter)
song.my_logging.log().info("Get random song")
song.login(login, password)
url = get_youtube_url(song_title.strip())
song.sent_messages([url])
if __name__ == '__main__':
user = sys.argv[1]
passw = sys.argv[2] + " " + sys.argv[3]
nams = sys.argv[4]
# namesList = ['Ilona','Carol']
names = [nams]
main(user, passw, names)
| [
"[email protected]"
] | |
3988f947afc8104c3abbc1371fb28d19b7677e15 | ac6e4102dfb49a4e49de0e2766feb6e80ab0b5c2 | /h1/model/website_project_instance_create.py | 40e2620a7c7b0b2bb04d8a2d6fc79057e3b28627 | [
"MIT"
] | permissive | hyperonecom/h1-client-python | df01f05ad295121e3dd391a3274c41e2f5b88e53 | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | refs/heads/master | 2023-04-05T01:51:31.637002 | 2021-03-29T00:05:41 | 2021-03-29T00:05:41 | 319,309,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,357 | py | """
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from h1.model.tag_array import TagArray
from h1.model.website_env import WebsiteEnv
globals()['TagArray'] = TagArray
globals()['WebsiteEnv'] = WebsiteEnv
class WebsiteProjectInstanceCreate(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'name': (str,), # noqa: E501
'service': (str,), # noqa: E501
'image': (str,), # noqa: E501
'source': (str,), # noqa: E501
'env': ([WebsiteEnv],), # noqa: E501
'tag': (TagArray,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'service': 'service', # noqa: E501
'image': 'image', # noqa: E501
'source': 'source', # noqa: E501
'env': 'env', # noqa: E501
'tag': 'tag', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, service, image, *args, **kwargs): # noqa: E501
"""WebsiteProjectInstanceCreate - a model defined in OpenAPI
Args:
name (str):
service (str):
image (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
source (str): [optional] # noqa: E501
env ([WebsiteEnv]): [optional] # noqa: E501
tag (TagArray): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.service = service
self.image = image
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| [
"[email protected]"
] | |
aefae7fca95e26baee3b1888e265657b3105ea73 | 2c3f13857d4a915410de5ac9547745eb2769db5f | /eval/e5/scrape_so.py | 60e0110c53ba3c6434b54a83d7b9f733bf2aff48 | [] | no_license | andrewhead/StackSkim | 43a4cf769645bb70202075f8077fa4d5d7be2a4b | 9ac11705ff82aa978d1a87177059e665f4e5ebef | refs/heads/master | 2020-06-03T16:15:15.127268 | 2016-01-16T17:16:36 | 2016-01-16T17:16:36 | 50,692,945 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,928 | py | #! /usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
import logging
import subprocess
import requests
import json
logging.basicConfig(level=logging.INFO, format="%(message)s")
SCRAPE_DIR = "scrape"
QUESTIONS_OUTFILE = 'questions.json'
ANSWERS_OUTFILE = 'answers.json'
ANSWERS = [
{'question': 3190798, 'answer': 3219849},
{'question': 14104228, 'answer': 14104369},
{'question': 364015, 'answer': 364074},
{'question': 12872387, 'answer': 12872443},
{'question': 8115087, 'answer': 8115155},
{'question': 25221352, 'answer': 25222518},
{'question': 12591832, 'answer': 12591905},
{'question': 27391812, 'answer': 27414401},
{'question': 4683639, 'answer': 4684210},
{'question': 5362732, 'answer': 5362764},
{'question': 27045539, 'answer': 27523599},
{'question': 19618268, 'answer': 19618531},
{'question': 19087332, 'answer': 19087631},
{'question': 26759118, 'answer': 26759193},
{'question': 5931223, 'answer': 5931253},
{'question': 4362491, 'answer': 4362514},
{'question': 2187821, 'answer': 2187834},
{'question': 9505971, 'answer': 9506077},
{'question': 23740288, 'answer': 23740522},
{'question': 9003288, 'answer': 9003638},
]
QUESTIONS = [
4980414,
18851438,
22705019,
2592798,
21219150,
14917510,
2130446,
6233805,
27436551,
17828552,
3929301,
27889586,
9893851,
23877406,
1283646,
23438583,
12332532,
17383236,
25356695,
15234524,
]
def fetch_questions(question_ids):
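    # Query the StackExchange /questions endpoint, following pagination until
    # 'has_more' is False, and accumulate all returned items.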
question_param = ';'.join([str(q) for q in question_ids])
answer_url = 'https://api.stackexchange.com/2.2/questions/' + question_param
more_questions = True
page_number = 1
all_questions = {'items': []}
while more_questions:
questions = requests.get(answer_url, params={
'site': 'stackoverflow',
'page': page_number,
'pagesize': '100',
'filter': '!9YdnSJ*_S',
}).json()
more_questions = questions['has_more']
page_number += 1
all_questions['items'].extend(questions['items'])
return all_questions
def fetch_answers(answer_ids):
answer_param = ';'.join([str(a) for a in answer_ids])
answer_url = 'https://api.stackexchange.com/2.2/answers/' + answer_param
more_answers = True
page_number = 1
all_answers = {'items': []}
while more_answers:
answers = requests.get(answer_url, params={
'site': 'stackoverflow',
'page': page_number,
'pagesize': '100',
'filter': '!9YdnSM68i',
}).json()
more_answers = answers['has_more']
page_number += 1
all_answers['items'].extend(answers['items'])
return all_answers
def wget_address(address):
subprocess.call([
"wget",
"-P", SCRAPE_DIR, # output to a scrape directory
"--adjust-extension", # download HTML pages with .html extension
"-nc", # don't download the same file twice
"-w", "1", # wait 1s between requests
"-p", "-k", # for downloading stylesheets (doesn't work?)
address,
])
def main():
''' Get ground truth answers from StackExchange API. '''
questions = fetch_questions(QUESTIONS)
answers = fetch_answers([a['answer'] for a in ANSWERS])
with open(QUESTIONS_OUTFILE, 'w') as qof:
json.dump(questions, qof, indent=2)
with open(ANSWERS_OUTFILE, 'w') as aof:
json.dump(answers, aof, indent=2)
''' StackOverflow content gets fetched to folder "stackoverflow.com" '''
for q in QUESTIONS:
wget_address("http://www.stackoverflow.com/questions/%d" % q)
for a in ANSWERS:
wget_address("www.stackoverflow.com/a/%d/%d" % (a['question'], a['answer']))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
26d5bc1377b3152d42dffb7588d782be1a251f02 | 55fefb8017a97d049f035f6771e1dfb6e7eb94a2 | /investmtype/migrations/0002_auto_20180912_1801.py | 7a085e28a5f0cfda3dea08219c2bed6a64ba48cb | [] | no_license | niravhjoshi/DjangoE2ISAapi | d90df84d1788e2f6a0335f707438afc543b02d56 | ff74799bdb122dbc3067b3f131663c0d932355c4 | refs/heads/master | 2020-03-25T18:01:04.356469 | 2019-04-23T12:02:32 | 2019-04-23T12:02:32 | 144,008,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-09-12 12:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('investmtype', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='investtypes',
old_name='U_id',
new_name='UserName',
),
]
| [
"[email protected]"
] | |
a79b7926d63cc865874100159fc42f3ad2fd8148 | 3899dd3debab668ef0c4b91c12127e714bdf3d6d | /venv/Lib/site-packages/tensorflow/contrib/tpu/python/ops/tpu_ops.py | 2208c6894ebed35b51b8734a9349641c00d2b1bc | [] | no_license | SphericalPotatoInVacuum/CNNDDDD | b2f79521581a15d522d8bb52f81b731a3c6a4db4 | 03c5c0e7cb922f53f31025b7dd78287a19392824 | refs/heads/master | 2020-04-21T16:10:25.909319 | 2019-02-08T06:04:42 | 2019-02-08T06:04:42 | 169,691,960 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 16,803 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Operations for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
if platform.system() != "Windows":
# pylint: disable=wildcard-import,unused-import,g-import-not-at-top
from tensorflow.contrib.tpu.ops import gen_tpu_ops
from tensorflow.contrib.tpu.ops.gen_tpu_ops import *
from tensorflow.contrib.util import loader
from tensorflow.python.platform import resource_loader
# pylint: enable=wildcard-import,unused-import,g-import-not-at-top
_tpu_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_tpu_ops.so"))
def _create_default_group_assignment():
num_shards = tpu_function.get_tpu_context().number_of_shards
if num_shards is None:
logging.warning(
"cross_replica_sum should be used within a tpu_shard_context, but "
"got unset number_of_shards. Assuming 1.")
num_shards = 1
group_assignment = [list(range(num_shards))]
return group_assignment
def all_to_all(x,
concat_dimension,
split_dimension,
split_count,
group_assignment=None,
name=None):
"""Exchange data across TPU replicas.
Args:
x: The local tensor.
concat_dimension: The dimension number to concatenate.
split_dimension: The dimension number to split.
    split_count: The number of splits; this number must equal the sub-group
      size (group_assignment.get_shape()[1]).
group_assignment: Optional 2d int32 lists with shape [num_groups,
num_replicas_per_group]. `group_assignment[i]` represents the replica
ids in the ith subgroup.
name: Optional op name.
Returns:
A `Tensor` which is concatenated by data from different replicas.
"""
if group_assignment is None:
group_assignment = _create_default_group_assignment()
return gen_tpu_ops.all_to_all(
x,
group_assignment,
concat_dimension=concat_dimension,
split_dimension=split_dimension,
split_count=split_count,
name=name)
@ops.RegisterGradient("AllToAll")
def _all_to_all_grad(op, grad):
  # The gradient of an all-to-all is also an all-to-all, but with the
  # split_dimension and concat_dimension swapped.
  # The gradient with respect to group_assignment is None.
return [
gen_tpu_ops.all_to_all(
grad,
op.inputs[1],
concat_dimension=op.get_attr("split_dimension"),
split_dimension=op.get_attr("concat_dimension"),
split_count=op.get_attr("split_count")), None
]
def cross_replica_sum(x, group_assignment=None, name=None):
"""Sum the input tensor across replicas according to group_assignment.
Args:
x: The local tensor to the sum.
group_assignment: Optional 2d int32 lists with shape [num_groups,
num_replicas_per_group]. `group_assignment[i]` represents the replica
ids in the ith subgroup.
name: Optional op name.
Returns:
A `Tensor` which is summed across replicas.
"""
if group_assignment is None:
group_assignment = _create_default_group_assignment()
return gen_tpu_ops.cross_replica_sum(x, group_assignment, name=name)
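# Illustrative usage sketch (an assumption, not from the original source):
# with eight replicas split into two independent groups of four, each group
# sums only among its own members:
#   summed = cross_replica_sum(x, group_assignment=[[0, 1, 2, 3],
#                                                   [4, 5, 6, 7]])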
def collective_permute(x, source_target_pairs, name=None):
"""Permute the input tensor across replicas given source_target_pairs.
For each source_target_pair <a, b>, we send replica a's input to replica b.
Each replica id must only appear once in the source column. Also it must
only appear once in the target column.
  For any replica id not in the target column, this op returns a zero tensor
  with the same shape and dtype as the input x.
For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing
source_target_pairs=`[[0,1],[1,2],[2,3]]` gets the outputs:
`[0, A, B, C]`.
Args:
x: The local tensor to be permuted.
source_target_pairs: 2d int lists with shape [num_pairs, 2].
source_target_pairs[i][0] represents the source replica id and
source_target_pairs[i][1] represents the target replica id.
name: Optional op name.
Returns:
A `Tensor` which is permuted.
"""
return gen_tpu_ops.collective_permute(x, source_target_pairs, name=name)
@ops.RegisterGradient("CrossReplicaSum")
def _cross_replica_sum_grad(op, grad):
# The gradient of a cross replica sum is also a cross-replica sum.
  # The gradient with respect to group_assignment is None.
return [gen_tpu_ops.cross_replica_sum(grad, op.inputs[1]), None]
# This extra type checking exists to give a more helpful error message in
# the common case that uint8 and int64 values are infed. Remove when both
# types are supported.
_SUPPORTED_INFEED_DTYPES = set([
dtypes.bool, dtypes.int32, dtypes.int64, dtypes.bfloat16, dtypes.float32,
dtypes.complex64
])
def infeed_dequeue(dtype, shape, name=None):
"""A placeholder op for a value that will be fed into the computation.
Args:
dtype: A `tf.DType`. The type of elements in the tensor.
shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
A tensor that will be provided using the infeed mechanism.
Raises:
TypeError: If 'dtype` is not a supported infeed type.
"""
if dtype not in _SUPPORTED_INFEED_DTYPES:
raise TypeError(
"{} is not a supported TPU infeed type. Supported types are: "
"{}".format(dtype, list(_SUPPORTED_INFEED_DTYPES)))
return gen_tpu_ops.infeed_dequeue(dtype, shape, name=name)
# pylint: disable=redefined-outer-name
def infeed_dequeue_tuple(dtypes, shapes, name=None):
"""A placeholder op for values fed into the TPU simultaneously as a tuple.
Args:
dtypes: A list of `tf.DType`s that has length `>= 1`.
The element types of each element in `outputs`.
shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
The shapes of each tensor in `outputs`.
name: A name for the operation (optional).
Returns:
A list of `Tensor` objects of type `dtypes`.
A list of tensors that will be provided using the infeed mechanism.
Raises:
TypeError: If a type in 'dtypes` is not a supported infeed type.
"""
for dtype in dtypes:
if dtype not in _SUPPORTED_INFEED_DTYPES:
raise TypeError(
"{} is not a supported TPU infeed type. Supported types are: "
"{}".format(dtype, list(_SUPPORTED_INFEED_DTYPES)))
return gen_tpu_ops.infeed_dequeue_tuple(dtypes, shapes, name=name)
# pylint: enable=redefined-outer-name
# pylint: disable=protected-access
def send_tpu_embedding_gradients(inputs,
config,
learning_rates=None,
name=None):
"""A placeholder op for feeding per-sample gradients to the embedding layer.
Args:
inputs: A TensorList of gradients with which to update embedding tables.
Contains one tensor per embedding table in the model.
config: Serialized TPUEmbeddingConfiguration proto.
learning_rates: A TensorList of float32 scalars, one for each embedding
table, containing the learning rates for each table when dynamic
learning rate is enabled through the OptimizationParameters in
TPUEmbeddingConfiguration. When the learning rate is constant, the list
should be empty (optional).
name: A name for the operation (optional).
Returns:
A SendTPUEmbeddingGradients operation.
"""
if learning_rates is None:
learning_rates = []
return gen_tpu_ops._send_tpu_embedding_gradients(
inputs=inputs, learning_rates=learning_rates, config=config, name=name)
send_tpu_embedding_gradients.__doc__ = (
gen_tpu_ops._send_tpu_embedding_gradients.__doc__)
# pylint: disable=protected-access
def enqueue_tpu_embedding_integer_batch(batch,
device_ordinal,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
batch: A list of 1D tensors, one for each embedding table, containing the
indices into the tables.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingIntegerBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops._enqueue_tpu_embedding_integer_batch(
batch=batch,
device_ordinal=device_ordinal,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_integer_batch.__doc__ = (
gen_tpu_ops._enqueue_tpu_embedding_integer_batch.__doc__)
# pylint: disable=protected-access
def enqueue_tpu_embedding_sparse_batch(sample_indices,
embedding_indices,
aggregation_weights,
device_ordinal,
combiners=None,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
sample_indices: A list of rank 1 Tensors specifying the training example
and feature to which the corresponding embedding_indices and
aggregation_weights values belong. sample_indices[i] must equal b * nf +
f, where nf is the number of features from the corresponding table, f is
in [0, nf), and b is in [0, batch size).
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables.
aggregation_weights: A list of rank 1 Tensors containing per sample --
i.e. per (training example, feature) -- aggregation weights.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
combiners: A list of string scalars, one for each embedding table that
specify how to normalize the embedding activations after weighted
summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is
invalid to have the sum of the weights be 0 for 'mean' or the sum of the
squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default
is to use 'sum' for all tables (optional).
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingSparseBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops._enqueue_tpu_embedding_sparse_batch(
sample_indices=sample_indices,
embedding_indices=embedding_indices,
aggregation_weights=aggregation_weights,
device_ordinal=device_ordinal,
combiners=combiners,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_sparse_batch.__doc__ = (
gen_tpu_ops._enqueue_tpu_embedding_sparse_batch.__doc__)
# pylint: disable=protected-access
def enqueue_tpu_embedding_sparse_tensor_batch(sample_indices,
embedding_indices,
aggregation_weights,
table_ids,
device_ordinal,
combiners=None,
mode_override=None,
name=None):
"""A placeholder op for enqueueing embedding IDs to the TPU.
Args:
sample_indices: A list of rank 1 Tensors specifying the training example
to which the corresponding embedding_indices and aggregation_weights
values
belong. It corresponds to sp_ids.indices[:,0] in
embedding_lookup_sparse().
embedding_indices: A list of rank 1 Tensors, indices into the embedding
tables. It corresponds to sp_ids.values in embedding_lookup_sparse().
aggregation_weights: A list of rank 1 Tensors containing per training
example aggregation weights. It corresponds to sp_weights.values in
embedding_lookup_sparse().
table_ids: A list of integers specifying the identifier of the embedding
table (offset of TableDescriptor in the TPUEmbeddingConfiguration) to
lookup the corresponding input. The ith input is looked up using
table_ids[i]. The size of the table_ids list must be equal to that of
sample_indices, embedding_indices and aggregation_weights.
device_ordinal: The TPU device to use. Should be >= 0 and less than the
number of TPU cores in the task on which the node is placed.
combiners: A list of string scalars, one for each embedding table that
specify how to normalize the embedding activations after weighted
summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is
invalid to have the sum of the weights be 0 for 'mean' or the sum of the
squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default
is to use 'sum' for all tables (optional).
mode_override: A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified',
'inference', 'training', 'backward_pass_only'}. When set to
'unspecified', the mode set in TPUEmbeddingConfiguration is used,
otherwise mode_override is used (optional).
name: A name for the operation (optional).
Returns:
An EnqueueTPUEmbeddingSparseTensorBatch operation.
"""
if mode_override is None:
mode_override = "unspecified"
return gen_tpu_ops._enqueue_tpu_embedding_sparse_tensor_batch(
sample_indices=sample_indices,
embedding_indices=embedding_indices,
aggregation_weights=aggregation_weights,
table_ids=table_ids,
device_ordinal=device_ordinal,
combiners=combiners,
mode_override=mode_override,
name=name)
enqueue_tpu_embedding_sparse_tensor_batch.__doc__ = (
gen_tpu_ops._enqueue_tpu_embedding_sparse_tensor_batch.__doc__)
else:
# We have already built the appropriate libraries into the binary via CMake
# if we have built contrib, so we don't need this
pass
| [
"[email protected]"
] | |
8985832a9acfc77b38e220afc8b2162c9d8ceccd | 30736dab9d8e682e5603d4803349144a5f6a84fb | /sdk/cognitiveservices/azure-cognitiveservices-search-websearch/azure/cognitiveservices/search/websearch/models/ranking_ranking_group_py3.py | 1a865ff2a79a51e178c6aa333a5a293db4672915 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | montgomp/azure-sdk-for-python | 6fcaffc59f4321852aa71109691e94ad38c66464 | 0ffb0b0de095b97cbc5b69309bbce0a3b91d3eb4 | refs/heads/master | 2020-12-06T11:08:01.683369 | 2020-01-07T23:24:42 | 2020-01-07T23:24:42 | 232,445,563 | 1 | 0 | MIT | 2020-01-08T00:45:33 | 2020-01-08T00:45:33 | null | UTF-8 | Python | false | false | 1,174 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RankingRankingGroup(Model):
"""Defines a search results group, such as mainline.
All required parameters must be populated in order to send to Azure.
:param items: Required. A list of search result items to display in the
group.
:type items:
list[~azure.cognitiveservices.search.websearch.models.RankingRankingItem]
"""
_validation = {
'items': {'required': True},
}
_attribute_map = {
'items': {'key': 'items', 'type': '[RankingRankingItem]'},
}
def __init__(self, *, items, **kwargs) -> None:
super(RankingRankingGroup, self).__init__(**kwargs)
self.items = items
| [
"[email protected]"
] | |
36c5af91c16098ac83d608a74a948a0ebdc15c5d | 396f93d8e73c419ef82a94174815a2cecbb8334b | /.history/tester2_20200321213612.py | acc2caebe079475068d5af0e9ab3be845b0ef9ba | [] | no_license | mirfarzam/ArtificialIntelligence-HeuristicAlgorithm-TabuSearch | 8c73d9448b916009c9431526864a4441fdeb682a | 90b2dca920c85cddd7c1b3335344ac7b10a9b061 | refs/heads/master | 2021-03-26T21:16:42.561068 | 2020-04-17T21:44:26 | 2020-04-17T21:44:26 | 247,750,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | import os
import subprocess
import re
from datetime import datetime
import time
numberOfTests = 10
tabuIteration = '5'
tabuDuration = '0'
numberOfCities = '10'
final_solution = []
print(f"\n\nTest for Tabu Search with this config: \n\tIterations : {tabuIteration} \n\tDuration(Tabu Memory): {tabuDuration} \n\tNumber of Cities: {numberOfCities}")
for i in range(0, numberOfTests):
process = subprocess.Popen(['./algo_tabou.exe', tabuIteration, tabuDuration, numberOfCities, 'distances_entre_villes_{}.txt'.format(numberOfCities)],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
result = stdout
result = re.sub(r'\s', ' ', str(result))
solution = (re.findall(r'([0-9]{4}) km', result))[-1]
final_solution.append(int(solution))
coverage = re.findall(r'On est dans un minimum local a l\'iteration ([0-9]+) ->', result)
if coverage != []:
coverage = int(coverage[0])+ 1
else:
coverage = 5
number_of_solution_before_coverage = coverage
print('best found solution is {} and found in interation {}, number of solutions before coverage : {}'.format(solution, coverage, number_of_solution_before_coverage))
time.sleep( 1 )
print("Summery:")
optimum_result = list(filter(lambda x: x == '3473', final_solution))
print(f'number of ')
| [
"[email protected]"
] | |
fa1d5887fe6ef08cd2f3a5e63792396cc36a1d52 | 934235f70a390a3ba0d7b464cddd10872f31cda3 | /rango/server/.history/tango_with_django/tango_with_django/settings_20210102125402.py | 2b4db67f09db82b87456b7e41dffde57e8cabd20 | [] | no_license | deji100/Projects | 6919041ba23e77a5c74e5ab7692bfcee38ececcb | 17e64d954d1d7805be57ec5d8d4344e4944889e6 | refs/heads/master | 2023-04-30T05:25:03.143303 | 2021-05-20T15:00:43 | 2021-05-20T15:00:43 | 338,844,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,283 | py | """
Django settings for tango_with_django project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2l8=-3%*fjd#4)kl488nssgt%zu1$#l%)q=j3*#=ztcd9)hqq#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tango_with_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tango_with_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
AUTH_USER_MODEL = 'rango.U'
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'rango/static')
]
MEDIA_URL = '/images/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'rango/static/images')
"[email protected]"
] | |
9a16095b8acd59b41e7e8caefad9a50cec5f1b3d | 1f51c064f70a8ee303ebe3a64eb8e685f40aee88 | /lib/core/test_engine.py | 6c4d85439a526e58a2606752eb9c6071d5845b0e | [] | no_license | cxiang26/CVPR_2018_WAD | b35e74b238c214985810aca9d592b551deaf7638 | c1c631488349015d27b77437a840057833095af2 | refs/heads/master | 2020-03-24T23:29:51.861906 | 2018-08-01T10:11:27 | 2018-08-01T10:11:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,203 | py | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Test a Detectron network on an imdb (image database)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import cv2
import datetime
import logging
import numpy as np
import os
import yaml
import torch
from core.config import cfg
from core.test import im_detect_all
from datasets import task_evaluation
from datasets.json_dataset import JsonDataset
from modeling import model_builder
import nn as mynn
import utils.env as envu
import utils.net as net_utils
import utils.subprocess as subprocess_utils
import utils.vis as vis_utils
from utils.io import save_object, load_object
from utils.timer import Timer
logger = logging.getLogger(__name__)
def get_eval_functions():
# Determine which parent or child function should handle inference
if cfg.MODEL.RPN_ONLY:
raise NotImplementedError
# child_func = generate_rpn_on_range
# parent_func = generate_rpn_on_dataset
else:
# Generic case that handles all network types other than RPN-only nets
# and RetinaNet
child_func = test_net
parent_func = test_net_on_dataset
return parent_func, child_func
def get_inference_dataset(index, is_parent=True):
assert is_parent or len(cfg.TEST.DATASETS) == 1, \
'The child inference process can only work on a single dataset'
dataset_name = cfg.TEST.DATASETS[index]
if cfg.TEST.PRECOMPUTED_PROPOSALS:
assert is_parent or len(cfg.TEST.PROPOSAL_FILES) == 1, \
'The child inference process can only work on a single proposal file'
assert len(cfg.TEST.PROPOSAL_FILES) == len(cfg.TEST.DATASETS), \
'If proposals are used, one proposal file must be specified for ' \
'each dataset'
proposal_file = cfg.TEST.PROPOSAL_FILES[index]
else:
proposal_file = None
return dataset_name, proposal_file
def run_inference(
args, ind_range=None,
multi_gpu_testing=False, gpu_id=0,
check_expected_results=False,
dataset_dir=None):
parent_func, child_func = get_eval_functions()
is_parent = ind_range is None
def result_getter():
if is_parent:
# Parent case:
# In this case we're either running inference on the entire dataset in a
# single process or (if multi_gpu_testing is True) using this process to
# launch subprocesses that each run inference on a range of the dataset
all_results = {}
for i in range(len(cfg.TEST.DATASETS)):
dataset_name, proposal_file = get_inference_dataset(i)
output_dir = args.output_dir
results = parent_func(
args,
dataset_name,
proposal_file,
output_dir,
multi_gpu=multi_gpu_testing,
dataset_dir=dataset_dir
)
all_results.update(results)
return all_results
else:
# Subprocess child case:
# In this case test_net was called via subprocess.Popen to execute on a
# range of inputs on a single dataset
dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
output_dir = args.output_dir
return child_func(
args,
dataset_name,
proposal_file,
output_dir,
ind_range=ind_range,
gpu_id=gpu_id
)
all_results = result_getter()
if check_expected_results and is_parent:
task_evaluation.check_expected_results(
all_results,
atol=cfg.EXPECTED_RESULTS_ATOL,
rtol=cfg.EXPECTED_RESULTS_RTOL
)
task_evaluation.log_copy_paste_friendly_results(all_results)
return all_results
def run_inference_wad(args, ind_range=None, multi_gpu_testing=False, gpu_id=0):
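    # Same flow as run_inference above, but expected-results checking always
    # runs (note the unconditional `if True:` below).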
parent_func, child_func = get_eval_functions()
is_parent = ind_range is None
def result_getter():
if is_parent:
# Parent case:
# In this case we're either running inference on the entire dataset in a
# single process or (if multi_gpu_testing is True) using this process to
# launch subprocesses that each run inference on a range of the dataset
all_results = {}
for i in range(len(cfg.TEST.DATASETS)):
dataset_name, proposal_file = get_inference_dataset(i)
output_dir = args.output_dir
results = parent_func(
args,
dataset_name,
proposal_file,
output_dir,
multi_gpu=multi_gpu_testing
)
all_results.update(results)
return all_results
else:
# Subprocess child case:
# In this case test_net was called via subprocess.Popen to execute on a
# range of inputs on a single dataset
dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)
output_dir = args.output_dir
return child_func(
args,
dataset_name,
proposal_file,
output_dir,
ind_range=ind_range,
gpu_id=gpu_id
)
all_results = result_getter()
#if check_expected_results and is_parent:
if True:
task_evaluation.check_expected_results(
all_results,
atol=cfg.EXPECTED_RESULTS_ATOL,
rtol=cfg.EXPECTED_RESULTS_RTOL
)
task_evaluation.log_copy_paste_friendly_results(all_results)
return all_results
def test_net_on_dataset(
args,
dataset_name,
proposal_file,
output_dir,
multi_gpu=False,
gpu_id=0):
"""Run inference on a dataset."""
dataset = JsonDataset(dataset_name, args.dataset_dir)
test_timer = Timer()
test_timer.tic()
if multi_gpu:
num_images = len(dataset.get_roidb(gt=True))
all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(
args, dataset_name, proposal_file, num_images, output_dir
)
else:
all_boxes, all_segms, all_keyps = test_net(
args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id
)
test_timer.toc()
logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))
results = task_evaluation.evaluate_all(
dataset, all_boxes, all_segms, all_keyps, output_dir
)
return results
def multi_gpu_test_net_on_dataset(
args, dataset_name, proposal_file, num_images, output_dir):
"""Multi-gpu inference on a dataset."""
binary_dir = envu.get_runtime_dir()
binary_ext = envu.get_py_bin_ext()
binary = os.path.join(binary_dir, args.test_net_file + binary_ext)
assert os.path.exists(binary), 'Binary \'{}\' not found'.format(binary)
# Pass the target dataset and proposal file (if any) via the command line
opts = ['TEST.DATASETS', '("{}",)'.format(dataset_name)]
if proposal_file:
opts += ['TEST.PROPOSAL_FILES', '("{}",)'.format(proposal_file)]
# Run inference in parallel in subprocesses
# Outputs will be a list of outputs from each subprocess, where the output
# of each subprocess is the dictionary saved by test_net().
outputs = subprocess_utils.process_in_parallel(
'detection', num_images, binary, output_dir,
args.load_ckpt, opts)
# Collate the results from each subprocess
all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
for det_data in outputs:
all_boxes_batch = det_data['all_boxes']
all_segms_batch = det_data['all_segms']
all_keyps_batch = det_data['all_keyps']
for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):
all_boxes[cls_idx] += all_boxes_batch[cls_idx]
all_segms[cls_idx] += all_segms_batch[cls_idx]
all_keyps[cls_idx] += all_keyps_batch[cls_idx]
det_file = os.path.join(output_dir, 'detections.pkl')
cfg_yaml = yaml.dump(cfg)
save_object(
dict(
all_boxes=all_boxes,
all_segms=all_segms,
all_keyps=all_keyps,
cfg=cfg_yaml
), det_file
)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
return all_boxes, all_segms, all_keyps
def test_net(
args,
dataset_name,
proposal_file,
output_dir,
ind_range=None,
gpu_id=0):
"""Run inference on all images in a dataset or over an index range of images
in a dataset using a single GPU.
"""
assert not cfg.MODEL.RPN_ONLY, \
'Use rpn_generate to generate proposals from RPN-only models'
dataset = JsonDataset(dataset_name, args.dataset_dir)
timers = defaultdict(Timer)
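    # Detections are cached to a file whose name encodes the index range and
    # NMS settings, so different test configurations don't overwrite each
    # other's results.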
if ind_range is not None:
if cfg.TEST.SOFT_NMS.ENABLED:
det_name = 'detection_range_%s_%s_soft_nms.pkl' % tuple(ind_range)
else:
det_name = 'detection_range_(%d_%d)_nms_%.1f.pkl' % (ind_range[0], ind_range[1], cfg.TEST.NMS)
else:
det_name = 'detections.pkl'
det_file = os.path.join(output_dir, det_name)
roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(dataset, proposal_file, ind_range)
num_images = len(roidb)
image_ids = []
num_classes = cfg.MODEL.NUM_CLASSES
all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
for i, entry in enumerate(roidb):
image_ids.append(entry['image'])
args.image_ids = image_ids
# If we have already computed the boxes
if os.path.exists(det_file):
obj = load_object(det_file)
all_boxes, all_segms, all_keyps = obj['all_boxes'], obj['all_segms'], obj['all_keyps']
else:
model = initialize_model_from_cfg(args, gpu_id=gpu_id)
for i, entry in enumerate(roidb):
if cfg.TEST.PRECOMPUTED_PROPOSALS:
# The roidb may contain ground-truth rois (for example, if the roidb
# comes from the training or val split). We only want to evaluate
# detection on the *non*-ground-truth rois. We select only the rois
# that have the gt_classes field set to 0, which means there's no
# ground truth.
box_proposals = entry['boxes'][entry['gt_classes'] == 0]
if len(box_proposals) == 0:
continue
else:
# Faster R-CNN type models generate proposals on-the-fly with an
# in-network RPN; 1-stage models don't require proposals.
box_proposals = None
im = cv2.imread(entry['image'])
cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(model, im, box_proposals, timers)
extend_results(i, all_boxes, cls_boxes_i)
if cls_segms_i is not None:
extend_results(i, all_segms, cls_segms_i)
if cls_keyps_i is not None:
extend_results(i, all_keyps, cls_keyps_i)
if i % 10 == 0: # Reduce log file size
ave_total_time = np.sum([t.average_time for t in timers.values()])
eta_seconds = ave_total_time * (num_images - i - 1)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
det_time = (
timers['im_detect_bbox'].average_time +
timers['im_detect_mask'].average_time +
timers['im_detect_keypoints'].average_time
)
misc_time = (
timers['misc_bbox'].average_time +
timers['misc_mask'].average_time +
timers['misc_keypoints'].average_time
)
logger.info(
(
'im_detect: range [{:d}, {:d}] of {:d}: '
'{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'
).format(
start_ind + 1, end_ind, total_num_images, start_ind + i + 1,
start_ind + num_images, det_time, misc_time, eta
)
)
if cfg.VIS:
im_name = os.path.splitext(os.path.basename(entry['image']))[0]
vis_utils.vis_one_image_cvpr2018_wad(
im[:, :, ::-1],
'{:d}_{:s}'.format(i, im_name),
os.path.join(output_dir, 'vis'),
cls_boxes_i,
segms=cls_segms_i,
keypoints=cls_keyps_i,
thresh=0.5,
box_alpha=0.8,
dataset=dataset.WAD_CVPR2018,
show_class=True
)
cfg_yaml = yaml.dump(cfg)
save_object(
dict(
all_boxes=all_boxes,
all_segms=all_segms,
all_keyps=all_keyps,
cfg=cfg_yaml
), det_file
)
logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
results = task_evaluation.evaluate_all(dataset, all_boxes, all_segms, all_keyps, output_dir, args)
return results
def initialize_model_from_cfg(args, gpu_id=0):
"""Initialize a model from the global cfg. Loads test-time weights and
set to evaluation mode.
"""
model = model_builder.Generalized_RCNN()
model.eval()
if args.cuda:
model.cuda()
if args.load_ckpt:
load_name = args.load_ckpt
logger.info("loading checkpoint %s", load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(model, checkpoint['model'])
model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)
return model
def get_roidb_and_dataset(dataset, proposal_file, ind_range):
"""Get the roidb for the dataset specified in the global cfg. Optionally
restrict it to a range of indices if ind_range is a pair of integers.
"""
if cfg.TEST.PRECOMPUTED_PROPOSALS:
assert proposal_file, 'No proposal file given'
roidb = dataset.get_roidb(
proposal_file=proposal_file,
proposal_limit=cfg.TEST.PROPOSAL_LIMIT
)
else:
roidb = dataset.get_roidb(gt=True)
dataset.WAD_CVPR2018.roidb = roidb
if ind_range is not None:
total_num_images = len(roidb)
start, end = ind_range
roidb = roidb[start:end]
else:
start = 0
end = len(roidb)
total_num_images = end
return roidb, dataset, start, end, total_num_images
def empty_results(num_classes, num_images):
"""Return empty results lists for boxes, masks, and keypoints.
Box detections are collected into:
all_boxes[cls][image] = N x 5 array with columns (x1, y1, x2, y2, score)
Instance mask predictions are collected into:
all_segms[cls][image] = [...] list of COCO RLE encoded masks that are in
1:1 correspondence with the boxes in all_boxes[cls][image]
Keypoint predictions are collected into:
all_keyps[cls][image] = [...] list of keypoints results, each encoded as
a 3D array (#rois, 4, #keypoints) with the 4 rows corresponding to
[x, y, logit, prob] (See: utils.keypoints.heatmaps_to_keypoints).
Keypoints are recorded for person (cls = 1); they are in 1:1
correspondence with the boxes in all_boxes[cls][image].
"""
# Note: do not be tempted to use [[] * N], which gives N references to the
# *same* empty list.
all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
all_segms = [[[] for _ in range(num_images)] for _ in range(num_classes)]
all_keyps = [[[] for _ in range(num_images)] for _ in range(num_classes)]
return all_boxes, all_segms, all_keyps
def extend_results(index, all_res, im_res):
"""Add results for an image to the set of all results at the specified
index.
"""
# Skip cls_idx 0 (__background__)
for cls_idx in range(1, len(im_res)):
all_res[cls_idx][index] = im_res[cls_idx] | [
"[email protected]"
] | |
83958d051cfb91f802ae0406167170b153d49d14 | 98dc91f742e13ff4007ffade532c801ce40c6105 | /userblog/apps.py | d579959ef1b454682d87a7d8f0f44a52a4288990 | [] | no_license | divyajonna/My_First_Task | 6149d0da008e549e9f0d8ad52a90fe36e15775e3 | c8c5007a2eb9112cecd5b1e6df1ea449be4c02d9 | refs/heads/master | 2021-05-16T15:51:05.676726 | 2018-02-12T05:58:21 | 2018-02-12T05:58:21 | 119,347,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class UserblogConfig(AppConfig):
name = 'userblog'
| [
"[email protected]"
] | |
8e994b7fb88bd07b8dd63b38805b854bd33fb1a1 | 6d9fbe6e6a2abfd8455e92f6dba67a5f02d87f41 | /lib/phonenumbers/data/region_KG.py | 43602a368e7acf704db78ca08cc9b7ebbdb58f5d | [] | no_license | JamesBrace/InfluenceUWebLaunch | 549d0b48ff3259b139cb891a19cb8b5382ffe2c8 | 332d25940e4b1b45a7a2a8200f77c8413543b199 | refs/heads/master | 2021-09-04T04:08:47.594900 | 2018-01-15T16:49:29 | 2018-01-15T16:49:29 | 80,778,825 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,904 | py | """Auto-generated file, do not edit by hand. KG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_KG = PhoneMetadata(id='KG', country_code=996, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[235-8]\\d{8,9}', possible_number_pattern='\\d{5,10}', possible_length=(9, 10), possible_length_local_only=(5, 6)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:3(?:1(?:[256]\\d|3[1-9]|47)|2(?:22|3[0-479]|6[0-7])|4(?:22|5[6-9]|6\\d)|5(?:22|3[4-7]|59|6\\d)|6(?:22|5[35-7]|6\\d)|7(?:22|3[468]|4[1-9]|59|[67]\\d)|9(?:22|4[1-8]|6\\d))|6(?:09|12|2[2-4])\\d)\\d{5}', example_number='312123456', possible_length=(9,), possible_length_local_only=(5, 6)),
mobile=PhoneNumberDesc(national_number_pattern='(?:20[0-35]|5[124-7]\\d|7[07]\\d)\\d{6}', possible_number_pattern='\\d{9}', example_number='700123456', possible_length=(9,)),
toll_free=PhoneNumberDesc(national_number_pattern='800\\d{6,7}', possible_number_pattern='\\d{9,10}', example_number='800123456', possible_length=(9, 10)),
premium_rate=PhoneNumberDesc(),
shared_cost=PhoneNumberDesc(),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc(),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['[25-7]|31[25]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{4})(\\d{5})', format='\\1 \\2', leading_digits_pattern=['3(?:1[36]|[2-9])'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d)(\\d{3})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['8'], national_prefix_formatting_rule='0\\1')])
| [
"[email protected]"
] | |
3ddac5d0043c1c26ff2578686412fd41f81052d7 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/eis.py | e4c8c63e6e63638a617334a48198e48d02f03318 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
            print(' '.join(lineRemaining))
        else:
            print()
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'eIS':
printFunction(data[1:])
else:
                print('ERROR')
return
if __name__ == '__main__':
    main(sys.argv[1])
| [
"[email protected]"
] | |
22c577217301c191b4d44fd8db9ea4e06b3e4e6d | d8673a8b9a34292b54285edbf900d7e130ec39b8 | /instant_generator/migrations/0006_auto_20200514_1040.py | 16f69bd9642ca9b3b15c7801742a1d7161ef6bae | [] | no_license | AzeezBello/toolx | 7383f43e500f300062193d8b43254c0b7af53dbf | daf6a7d585a4b72ace47b24ec86828fc6a2d2982 | refs/heads/master | 2022-03-07T10:12:32.915043 | 2022-02-16T00:01:18 | 2022-02-16T00:01:18 | 253,473,631 | 0 | 0 | null | 2022-02-16T00:02:45 | 2020-04-06T11:11:21 | HTML | UTF-8 | Python | false | false | 426 | py | # Generated by Django 2.2.9 on 2020-05-14 09:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instant_generator', '0005_auto_20200425_0018'),
]
operations = [
migrations.AlterField(
model_name='instantgenerator',
name='Identify_the_Problem_Your_Audience_Have',
field=models.TextField(),
),
]
| [
"[email protected]"
] | |
c41a8a3c97568c0f098dbb43a50bcf87af787237 | b742abb440174ec59f56a334f14871f3accc1743 | /util/IpUtil.py | ab987e3cf1d64a0ca66c52b6985f7680bc9bd341 | [] | no_license | sunshineywz123/baymax | 00e92b71b599df6ce39902652b78167a51100002 | 25518474cb09644c34febaf556fe8a0449dc7da4 | refs/heads/master | 2021-12-30T06:37:13.224221 | 2018-02-07T08:25:03 | 2018-02-07T08:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | # -*- coding: utf-8 -*-
__author__ = 'likangwei'
import socket
def get_ip_address():
    return socket.gethostbyname(socket.gethostname())  # get the local IP address
def get_host_name():
return socket.gethostname()
print(get_ip_address())
| [
"[email protected]"
] | |
89c8c28987a6cd74eaa104ef9ddd9e566b65636f | 7dfabdddeb5b8f1628e445cdb6d536958c8bc85b | /pcdet/models/dense_heads/anchor_head_single_fpn_range.py | d406aacffd4357d40fa54af9f62af187b582c228 | [
"Apache-2.0"
] | permissive | vehxianfish/SRDAN_Open | d6ba16ebc201c9651fac16bc30f57dc3a740041f | 47c1bd9d2369d8e486b18a7aea220af7324c9011 | refs/heads/master | 2023-08-15T10:36:56.483018 | 2021-09-25T03:35:53 | 2021-09-25T03:35:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,341 | py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .anchor_head_template import AnchorHeadTemplate
class GradReverse(torch.autograd.Function):
def __init__(self, lambd):
self.lambd = lambd
def forward(self, x):
return x.view_as(x)
def backward(self, grad_output):
return (grad_output * self.lambd)
def grad_reverse(x, lambd):
return GradReverse(lambd)(x)
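# Equivalent sketch using the modern torch.autograd.Function API (a reviewer
# assumption, not the author's original code): the legacy GradReverse above
# relies on the pre-0.4 instance-based autograd interface, which current
# PyTorch releases no longer accept. Callers here pass a negative `lambd`
# (e.g. l*-1) to obtain the actual gradient reversal.
class _GradReverseFn(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, lambd):
        ctx.lambd = lambd
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # scale (and, for negative lambd, reverse) the gradient;
        # no gradient flows to lambd itself
        return grad_output * ctx.lambd, None

def grad_reverse_modern(x, lambd):
    return _GradReverseFn.apply(x, lambd)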
class PatchAttentionLayer(nn.Module):
def __init__(self, num_channels, kernel_size, kernel_size2=0, no_sigmoid=False, detach=False):
super(PatchAttentionLayer, self).__init__()
if kernel_size2 == 0:
kernel_size2 = kernel_size
self.kernel_size = kernel_size
self.kernel_size2 = kernel_size2
self.patch_matrix = nn.Parameter(torch.randn(1, kernel_size, kernel_size2), requires_grad=True)
# self.patch_conv = nn.Conv2d(num_channels, 1, kernel_size, kernel_size) n, 126, 126
self.sigmoid = nn.Sigmoid()
# self.no_sigmoid = no_sigmoid
# self.detach = detach
def forward(self, input_tensor):
#2, 512, 126, 126
# print("kernel_size", self.kernel_size, self.kernel_size2)
# print("input_tensor", input_tensor.shape)
bt, c, h, w = input_tensor.size()
# print("bt, c, h, w", bt, c, h, w)
# print("input_tensor", input_tensor.shape)
# # patch_tensor = self.patch_conv(input_tensor)
# print("self.patch_matrix", self.patch_matrix.shape)
# print("self.patch_matrix.repeat(bt*c, 1, 1)", self.patch_matrix.repeat(bt*c, 1, 1).shape)
# if self.no_sigmoid:
# input_tensor = input_tensor.contiguous().view(-1, h, w) #
# input_tensor = input_tensor * self.patch_matrix.repeat(bt*c, 1, 1)
# input_tensor = input_tensor.view(bt, c, h, w)
# else:
input_tensor = input_tensor.view(-1, h, w) #
att_matrix = self.patch_matrix.repeat(bt*c, 1, 1)
# print("att_matrix", att_matrix.shape)
# if self.detach:
# att_matrix = att_matrix.detach()
input_tensor = input_tensor * att_matrix
# z = x * att_matrix.detach()
# z = x.detach() * att_matrix
input_tensor = self.sigmoid(input_tensor).view(bt, c, h, w)
return input_tensor
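# Minimal shape walk-through for PatchAttentionLayer (illustrative sketch only;
# the toy sizes below are not the model's real ones). The layer learns one
# scalar weight per spatial cell, shared across batch and channels.
def _demo_patch_attention():
    layer = PatchAttentionLayer(num_channels=8, kernel_size=6, kernel_size2=6)
    x = torch.randn(2, 8, 6, 6)  # (batch, channels, H, W); H/W must match the kernel sizes
    out = layer(x)
    assert out.shape == x.shape
    return out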
class SpatialSELayer(nn.Module):
"""
Re-implementation of SE block -- squeezing spatially and exciting channel-wise described in:
*Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, MICCAI 2018*
"""
def __init__(self, num_channels):
"""
:param num_channels: No of input channels
"""
super(SpatialSELayer, self).__init__()
self.conv = nn.Conv2d(num_channels, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor, weights=None):
"""
:param weights: weights for few shot learning
:param input_tensor: X, shape = (batch_size, num_channels, H, W)
:return: output_tensor
"""
# spatial squeeze
batch_size, channel, a, b = input_tensor.size()
# print("input_tensor.size()", input_tensor.size()) #2, 512, 126, 126
if weights is not None:
weights = torch.mean(weights, dim=0)
weights = weights.view(1, channel, 1, 1)
out = F.conv2d(input_tensor, weights)
else:
out = self.conv(input_tensor)
# print("out.size()", out.size()) #2, 1, 126, 126
squeeze_tensor = self.sigmoid(out)
# print("squeeze_tensor.size()", squeeze_tensor.size()) # 2, 1, 126, 126
# spatial excitation
squeeze_tensor = squeeze_tensor.view(batch_size, 1, a, b)
# print("squeeze_tensor 2.size()", squeeze_tensor.size()) # 2, 1, 126, 126
output_tensor = torch.mul(input_tensor, squeeze_tensor)
# print("output_tensor 2.size()", output_tensor.size()) #2, 512, 126, 126
#output_tensor = torch.mul(input_tensor, squeeze_tensor)
return output_tensor
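# Quick sketch of the spatial squeeze-and-excite above (toy sizes, for
# illustration only): a 1x1 conv squeezes the channels into a single map and
# its sigmoid re-weights every spatial position of the input.
def _demo_spatial_se():
    se = SpatialSELayer(num_channels=8)
    x = torch.randn(2, 8, 5, 5)
    out = se(x)
    assert out.shape == x.shape
    return out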
class netD_pixel(nn.Module):
def __init__(self, input_channels=256, context=False):
super(netD_pixel, self).__init__()
self.conv1 = nn.Conv2d(input_channels, 256, kernel_size=1, stride=1,
padding=0, bias=False)
self.conv2 = nn.Conv2d(256, 128, kernel_size=1, stride=1,
padding=0, bias=False)
self.conv3 = nn.Conv2d(128, 1, kernel_size=1, stride=1,
padding=0, bias=False)
self.context = context
self._init_weights()
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
weight initalizer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
#m.bias.data.zero_()
normal_init(self.conv1, 0, 0.01)
normal_init(self.conv2, 0, 0.01)
normal_init(self.conv3, 0, 0.01)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
if self.context:
feat = F.avg_pool2d(x, (x.size(2), x.size(3)))
x = self.conv3(x)
return F.sigmoid(x),feat
else:
x = self.conv3(x)
return F.sigmoid(x)
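# Hedged sketch of the pixel-level domain discriminator above: it keeps the
# spatial layout and emits one domain probability per location (sizes here
# are toy values, not the detector's real feature map).
def _demo_netD_pixel():
    d = netD_pixel(input_channels=256, context=False)
    feat = torch.randn(2, 256, 16, 16)
    prob = d(feat)  # (2, 1, 16, 16), values in (0, 1)
    assert prob.shape == (2, 1, 16, 16)
    return prob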
class ChannelSELayer(nn.Module):
"""
Re-implementation of Squeeze-and-Excitation (SE) block described in:
*Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*
"""
def __init__(self, num_channels, reduction_ratio=2):
"""
:param num_channels: No of input channels
:param reduction_ratio: By how much should the num_channels should be reduced
"""
super(ChannelSELayer, self).__init__()
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor):
"""
:param input_tensor: X, shape = (batch_size, num_channels, H, W)
:return: output tensor
"""
batch_size, num_channels, H, W = input_tensor.size() #2, 512, 126, 126
# Average along each channel
squeeze_tensor = input_tensor.view(batch_size, num_channels, -1).mean(dim=2) #2, 512, 126*126(1)
# channel excitation
fc_out_1 = self.relu(self.fc1(squeeze_tensor))
fc_out_2 = self.sigmoid(self.fc2(fc_out_1))
a, b = squeeze_tensor.size()
output_tensor = torch.mul(input_tensor, fc_out_2.view(a, b, 1, 1))
return output_tensor
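# Channel squeeze-and-excite sketch (illustrative only): global average
# pooling squeezes each channel to a scalar, two FC layers turn those into
# per-channel gates in (0, 1), and the gates rescale the input.
def _demo_channel_se():
    se = ChannelSELayer(num_channels=8, reduction_ratio=2)
    x = torch.randn(2, 8, 5, 5)
    out = se(x)
    assert out.shape == x.shape
    return out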
class AnchorHeadSingleFPNRange(AnchorHeadTemplate):
def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
predict_boxes_when_training=True, nusc=False, input_channels_fpn=None, num_fpn_up=0, num_fpn_downup=0, fpn_layers=[], **kwargs):
super().__init__(
model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,
predict_boxes_when_training=predict_boxes_when_training, nusc=nusc,
num_fpn_up=num_fpn_up, num_fpn_downup=num_fpn_downup, fpn_layers=fpn_layers
)
self.num_anchors_per_location = sum(self.num_anchors_per_location)
self.voxel_det_seconv_attention = self.model_cfg.get('VOXEL_DET_SECONV_ATTENTION', False)
self.voxel_det_se_attention = self.model_cfg.get('VOXEL_DET_SE_ATTENTION', False)
self.voxel_det_patch_attention = self.model_cfg.get('VOXEL_DET_PATCH_ATTENTION', False)
self.voxel_dom_seconv_attention = self.model_cfg.get('VOXEL_DOM_SECONV_ATTENTION', False)
self.voxel_dom_se_attention = self.model_cfg.get('VOXEL_DOM_SE_ATTENTION', False)
self.voxel_dom_patch_attention = self.model_cfg.get('VOXEL_DOM_PATCH_ATTENTION', False)
self.joint_attention = self.model_cfg.get('VOXEL_DETDOM_JOINT_ATTENTION', False)
self.dom_patch_first = self.model_cfg.get('DOM_PATCH_FIRST', False)
if self.range_guidance:
if self.range_guidance_dom_only:
input_channels_dom = input_channels + 2
else:
input_channels = input_channels + 2
input_channels_dom = input_channels
else:
input_channels_dom = input_channels
if self.joint_two_dom:
if self.dom_patch_first:
input_channels_dom_joint = input_channels
else:
input_channels_dom_joint = input_channels + 2
self.conv_cls = nn.Conv2d(
input_channels, self.num_anchors_per_location * self.num_class,
kernel_size=1
)
self.conv_box = nn.Conv2d(
input_channels, self.num_anchors_per_location * self.box_coder.code_size,
kernel_size=1
)
self.rangeinv = self.model_cfg.get('RANGE_INV', False)
self.keep_x = self.model_cfg.get('KEEP_X', False)
self.keep_y = self.model_cfg.get('KEEP_Y', False)
self.keep_xy = self.model_cfg.get('KEEP_XY', False)
self.center_xy = self.model_cfg.get('CENTER_XY', False)
self.rm_thresh = self.model_cfg.get('RM_THRESH', 0)
if self.rangeinv:
self.conv_range = nn.Conv2d(
input_channels, 1,
kernel_size=1
)
#nn.Sequential(
if self.voxel_dom_patch_attention:
self.att_patch_layer = PatchAttentionLayer(512, self.model_cfg.PATCH_SIZE, self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE))
if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
self.conv_dir_cls = nn.Conv2d(
input_channels,
self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
kernel_size=1
)
else:
self.conv_dir_cls = None
dom_fc1, dom_fc2 = self.model_cfg.get('DOM_FC', [1024, 1024])
# print("dom_fc ", dom_fc1, dom_fc2)
if self.model_cfg.get('USE_DOMAIN_CLASSIFIER', None) is not None:
if self.range_guidance_new_conv_dom:
# print("input_channels_dom", input_channels_dom)
self.conv_dom_layers = netD_pixel(input_channels=input_channels_dom, context=self.range_guidance_new_conv_dom_context) #
if self.sep_two_dom:
self.domain_pool = nn.AdaptiveAvgPool2d(1)
self.domain_classifier = nn.Sequential(nn.Linear(input_channels_dom_sep, dom_fc1),
nn.ReLU(True), nn.Dropout(),
nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),
nn.Dropout(), nn.Linear(dom_fc2, 1))
if self.joint_two_dom:
self.domain_pool = nn.AdaptiveAvgPool2d(1)
self.domain_classifier = nn.Sequential(nn.Linear(input_channels_dom_joint, dom_fc1),
nn.ReLU(True), nn.Dropout(),
nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),
nn.Dropout(), nn.Linear(dom_fc2, 1))
else:
self.domain_pool = nn.AdaptiveAvgPool2d(1)
self.domain_classifier = nn.Sequential(nn.Linear(input_channels_dom, dom_fc1),
nn.ReLU(True), nn.Dropout(),
nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),
nn.Dropout(), nn.Linear(dom_fc2, 1))
######## FPN detector #########
self.input_channels_fpn = input_channels_fpn
self.input_channels_dom_fpn = {}
self.conv_cls_fpn = nn.ModuleDict()
self.conv_box_fpn = nn.ModuleDict()
self.att_spatial_se_layer_fpn = nn.ModuleDict()
self.att_se_layer_fpn = nn.ModuleDict()
self.att_patch_layer_fpn = nn.ModuleDict()
self.att_spatial_se_layer_det_fpn = nn.ModuleDict()
self.att_se_layer_det_fpn = nn.ModuleDict()
self.att_patch_layer_det_fpn = nn.ModuleDict()
if self.joint_two_dom:
self.input_channels_dom_joint_fpn = {}
for layer in self.fpn_layers:
if self.range_guidance:
if self.range_guidance_dom_only:
self.input_channels_dom_fpn[layer] = input_channels_fpn[layer] + 2
else:
self.input_channels_fpn[layer] = input_channels_fpn[layer] + 2
self.input_channels_dom_fpn[layer] = self.input_channels_fpn[layer]
else:
self.input_channels_dom_fpn[layer] = input_channels_fpn[layer]
if self.joint_two_dom:
if self.dom_patch_first:
self.input_channels_dom_joint_fpn[layer] = input_channels_fpn[layer]
else:
self.input_channels_dom_joint_fpn[layer] = input_channels_fpn[layer] + 2
for layer in self.fpn_layers:
# if self.range_guidance:
# if self.range_guidance_dom_only:
# self.input_channels_dom_fpn[layer] = self.input_channels_fpn[layer] + 2
# else:
# self.input_channels_fpn[layer] = self.input_channels_fpn[layer] + 2
# self.input_channels_dom_fpn[layer] = self.input_channels_fpn[layer]
# else:
# self.input_channels_dom_fpn[layer] = self.input_channels_fpn[layer]
# if self.voxel_det_seconv_attention and not self.joint_attention:
# self.att_spatial_se_layer_det_fpn[layer] = SpatialSELayer(self.input_channels_fpn[layer])
# if self.voxel_det_se_attention and not self.joint_attention:
# self.att_se_layer_det_fpn[layer] = ChannelSELayer(self.input_channels_fpn[layer])
if self.voxel_det_patch_attention and not self.joint_attention:
self.att_patch_layer_det_fpn[layer] = PatchAttentionLayer(self.input_channels_fpn[layer], self.model_cfg.PATCH_SIZE, self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE))
# print("self.input_channels_fpn[layer]", self.input_channels_fpn[layer])
if self.voxel_dom_seconv_attention:
self.att_spatial_se_layer_fpn[layer] = SpatialSELayer(self.input_channels_fpn[layer])
if self.voxel_dom_se_attention:
self.att_se_layer_fpn[layer] = ChannelSELayer(self.input_channels_fpn[layer])
if self.voxel_dom_patch_attention:
self.att_patch_layer_fpn[layer] = PatchAttentionLayer(self.input_channels_fpn[layer], self.model_cfg.PATCH_SIZE_FPN[layer], self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE))
self.num_anchors_per_location_fpn[layer] = sum(self.num_anchors_per_location_fpn[layer]) # 2, 7
self.conv_cls_fpn[layer] = nn.Conv2d(
self.input_channels_fpn[layer], self.num_anchors_per_location_fpn[layer] * self.num_class,
kernel_size=1
)# 512 -> 2
self.conv_box_fpn[layer] = nn.Conv2d(
self.input_channels_fpn[layer], self.num_anchors_per_location_fpn[layer] * self.box_coder.code_size,
kernel_size=1
)# 512 -> 14
######### fpn dir clf #########
if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
self.conv_dir_cls_fpn = nn.ModuleDict()
for layer in self.fpn_layers:
self.conv_dir_cls_fpn[layer] = nn.Conv2d(
self.input_channels_fpn[layer],
self.num_anchors_per_location_fpn[layer] * self.model_cfg.NUM_DIR_BINS,
kernel_size=1
)
else:
for layer in self.fpn_layers:
self.conv_dir_cls_fpn[layer] = None
# print("USE_DOMAIN_CLASSIFIER", self.model_cfg.get('USE_DOMAIN_CLASSIFIER', None))
if self.model_cfg.get('USE_DOMAIN_CLASSIFIER', None):
self.domain_pool_fpn = nn.ModuleDict()
self.domain_classifier_fpn = nn.ModuleDict()
self.conv_dom_layers_fpn = nn.ModuleDict()
for layer in self.fpn_layers:
# self.domain_pool_fpn[layer] = nn.AdaptiveAvgPool2d(1)
# self.domain_classifier_fpn[layer] = nn.Sequential(nn.Linear(self.input_channels_dom_fpn[layer], dom_fc1),
# nn.ReLU(True), nn.Dropout(),
# nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),
# nn.Dropout(), nn.Linear(dom_fc2, 1))
if self.range_guidance_new_conv_dom:
self.conv_dom_layers_fpn[layer] = netD_pixel(input_channels=self.input_channels_dom_fpn[layer], context=self.range_guidance_new_conv_dom_context)
if self.joint_two_dom:
self.domain_pool_fpn[layer] = nn.AdaptiveAvgPool2d(1)
self.domain_classifier_fpn[layer] = nn.Sequential(nn.Linear(self.input_channels_dom_joint_fpn[layer], dom_fc1),
nn.ReLU(True), nn.Dropout(),
nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),
nn.Dropout(), nn.Linear(dom_fc2, 1))
else:
self.domain_pool_fpn[layer] = nn.AdaptiveAvgPool2d(1)
self.domain_classifier_fpn[layer] = nn.Sequential(nn.Linear(self.input_channels_dom_fpn[layer], 1024),
nn.ReLU(True), nn.Dropout(),
nn.Linear(1024, 1024), nn.ReLU(True),
nn.Dropout(), nn.Linear(1024, 1))
if self.range_guidance:
if self.fov:
total_range_x = self.model_cfg.PATCH_SIZE
total_range_y = self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE)
half_range_x = int(total_range_x * 0.5)
self.x_range_matrix = torch.abs(torch.arange(0, total_range_y, 1).float()).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(1,1, total_range_x, 1).cuda()
# print('x_range', x_range)
self.y_range_matrix = torch.abs(torch.arange(-half_range_x, half_range_x, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(1,1,1,total_range_y).cuda()
if self.range_guidance_dist:
joint_range_matrix = torch.stack((self.x_range_matrix,self.y_range_matrix),dim=-1).view(-1,2)
center_matrix = torch.tensor([(half_range_x, 0)]).float().cuda()
self.range_matrix = torch.cdist(joint_range_matrix,center_matrix).cuda().view(1,1,total_range_x, total_range_y)
else:
total_range_x = self.model_cfg.PATCH_SIZE
total_range_y = self.model_cfg.get('PATCH_SIZE2', self.model_cfg.PATCH_SIZE)
half_range_x = int(total_range_x * 0.5)
half_range_y = int(total_range_y * 0.5)
self.x_range_matrix = torch.abs(torch.arange(-half_range_y, half_range_y, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(1,1, total_range_x, 1).cuda()
self.y_range_matrix = torch.abs(torch.arange(-half_range_x, half_range_x, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(1,1,1,total_range_y).cuda()
if self.range_guidance_dist:
joint_range_matrix = torch.stack((self.x_range_matrix,self.y_range_matrix),dim=-1).view(-1,2)
center_matrix = torch.tensor([(0., 0.)]).float().cuda()
self.range_matrix = torch.cdist(joint_range_matrix,center_matrix).view(1,1,total_range_x, total_range_y)
self.x_range_matrix_fpn = {}
self.y_range_matrix_fpn = {}
self.range_matrix_fpn = {}
for layer in self.fpn_layers:
if self.fov:
total_range_x = self.model_cfg.PATCH_SIZE_FPN[layer]
total_range_y = self.model_cfg.get('PATCH_SIZE_FPN2', self.model_cfg.PATCH_SIZE_FPN)[layer]
half_range_x = int(total_range_x * 0.5)
self.x_range_matrix_fpn[layer] = torch.abs(torch.arange(0, total_range_y, 1).float()).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(1,1, total_range_x, 1).cuda()
# print('x_range', x_range)
self.y_range_matrix_fpn[layer] = torch.abs(torch.arange(-half_range_x, half_range_x, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(1,1,1,total_range_y).cuda()
if self.range_guidance_dist:
joint_range_matrix = torch.stack((self.x_range_matrix_fpn[layer],self.y_range_matrix_fpn[layer]),dim=-1).view(-1,2)
center_matrix = torch.tensor([(half_range_x, 0)]).float().cuda()
self.range_matrix_fpn[layer] = torch.cdist(joint_range_matrix,center_matrix).cuda().view(1,1,total_range_x, total_range_y)
else:
total_range_x = self.model_cfg.PATCH_SIZE_FPN[layer]
total_range_y = self.model_cfg.get('PATCH_SIZE_FPN2', self.model_cfg.PATCH_SIZE_FPN)[layer]
half_range_x = int(total_range_x * 0.5)
half_range_y = int(total_range_y * 0.5)
self.x_range_matrix_fpn[layer] = torch.abs(torch.arange(-half_range_y, half_range_y, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(1,1, total_range_x, 1).cuda()
self.y_range_matrix_fpn[layer] = torch.abs(torch.arange(-half_range_x, half_range_x, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(1,1,1,total_range_y).cuda()
if self.range_guidance_dist:
joint_range_matrix = torch.stack((self.x_range_matrix_fpn[layer],self.y_range_matrix_fpn[layer]),dim=-1).view(-1,2)
center_matrix = torch.tensor([(0, 0)]).float().cuda()
self.range_matrix_fpn[layer] = torch.cdist(joint_range_matrix,center_matrix).cuda().view(1,1,total_range_x, total_range_y)
self.domain_pool = nn.AdaptiveAvgPool2d(1)
self.init_weights()
def init_weights(self):
pi = 0.01
nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)
for layer in self.fpn_layers:
nn.init.constant_(self.conv_cls_fpn[layer].bias, -np.log((1 - pi) / pi))
nn.init.normal_(self.conv_box_fpn[layer].weight, mean=0, std=0.001)
def forward(self, data_dict):
t_mode = data_dict['t_mode']
l = data_dict['l']
if 'pseudo' in t_mode:
pseudo = True
else:
pseudo = False
spatial_features_2d = data_dict['spatial_features_2d']
# print("spatial_features_2d", spatial_features_2d.shape) 126
# print('range ctx',self.range_guidance)
if t_mode == 'tsne':
return_dict = {}
spatial_features_2d = data_dict[f'spatial_features_2d']
return_dict[f'tsne_spatial_features_2d'] = self.domain_pool(spatial_features_2d)
if self.voxel_dom_patch_attention and self.dom_patch_first:
spatial_features_2d = self.att_patch_layer(spatial_features_2d)
return_dict['tsne_spatial_features_2d_PMA_First'] = self.domain_pool(spatial_features_2d)
if self.range_guidance and self.range_guidance_dom_only:
total_range = spatial_features_2d.shape[-1]
half_range = int(spatial_features_2d.shape[-1] * 0.5)
x_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1, total_range, 1).cuda()
y_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1,1,total_range).cuda()
spatial_features_2d = torch.cat((spatial_features_2d, x_range, y_range), dim=1)
return_dict['tsne_spatial_features_2d_RCD'] = self.domain_pool(spatial_features_2d)
if self.voxel_dom_patch_attention and not self.dom_patch_first:
spatial_features_2d = self.att_patch_layer(spatial_features_2d)
return_dict['tsne_spatial_features_2d_PMA_Late'] = self.domain_pool(spatial_features_2d)
for l in self.fpn_layers:
spatial_features_2d = data_dict[f'spatial_features_2d_fpn{l}']
return_dict[f'tsne_spatial_features_2d_fpn{l}'] = self.domain_pool(spatial_features_2d)
if self.voxel_dom_patch_attention and self.dom_patch_first:
spatial_features_2d = self.att_patch_layer_fpn[l](spatial_features_2d)
return_dict['tsne_spatial_features_2d_PMA_First_fpn{l}'] = self.domain_pool(spatial_features_2d)
if self.range_guidance and self.range_guidance_dom_only:
total_range = spatial_features_2d.shape[-1]
half_range = int(spatial_features_2d.shape[-1] * 0.5)
x_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1, total_range, 1).cuda()
y_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1,1,total_range).cuda()
spatial_features_2d = torch.cat((spatial_features_2d, x_range, y_range), dim=1)
return_dict['tsne_spatial_features_2d_RCD_fpn{l}'] = self.domain_pool(spatial_features_2d)
if self.voxel_dom_patch_attention and not self.dom_patch_first:
spatial_features_2d = self.att_patch_layer_fpn[l](spatial_features_2d)
return_dict['tsne_spatial_features_2d_PMA_Late_fpn{l}'] = self.domain_pool(spatial_features_2d)
return return_dict
# self.range_da = 2
# mid_dim = int(spatial_features_2d.shape[-1]/2.)
# range_interval = int(spatial_features_2d.shape[-1]/(2*self.range_da))
# start_dim = {}
# mid1_dim = {}
# mid2_dim = {}
# end_dim = {}
# interval_idx = {}
# interval_feat = {}
# if self.keep_xy:
# interval_feat2 = {}
# # for each range 0,1,2,3 (4)
# for n in range(0+self.remove_near_range, self.range_da-self.remove_far_range): # no0,1
# start_dim[n] = mid_dim - range_interval*(n+1) # 2-1=1, 2-2=0
# mid1_dim[n] = mid_dim - range_interval*n # 2-0=2 2-1=1 #int(spatial_features_2d.shape[-1]/2.)
# mid2_dim[n] = mid_dim + range_interval*n # 2+0=2 2+1=3
# end_dim[n] = mid_dim + range_interval*(n+1) # 2+1=3 2+2=4
# interval_idx[n] = torch.LongTensor([i for i in range(start_dim[n], mid1_dim[n])]+[i for i in range(mid2_dim[n], end_dim[n])])
# feat1 = spatial_features_2d[:,:,:,interval_idx[n]]
# feat1 = self.domain_pool(feat1).view(feat1.size(0), -1)
# data_dict[f'spatial_features_2d_x_{n}'] = feat1
# feat2 = spatial_features_2d[:,:,interval_idx[n],:]
# feat2 = self.domain_pool(feat2).view(feat2.size(0), -1)
# data_dict[f'spatial_features_2d_y_{n}'] = feat2
#
if not self.fpn_only:
spatial_features_2d_det = spatial_features_2d
#### Dom ####
if 'dom_img' in t_mode:
if t_mode == 'dom_img_src':
dom_src = True
elif t_mode == 'dom_img_tgt':
dom_src = False
else:
dom_src = None
if self.voxel_dom_patch_attention and self.dom_patch_first:
spatial_features_2d = self.att_patch_layer(spatial_features_2d)
if self.joint_two_dom:
x_pool2 = self.domain_pool(spatial_features_2d).view(spatial_features_2d.size(0), -1)
x_reverse2 = grad_reverse(x_pool2, l*-1)
dom_img_preds2 = self.domain_classifier(x_reverse2)#.
if self.range_guidance and self.range_guidance_dom_only:
if self.range_guidance_dist:
spatial_features_2d = torch.cat((spatial_features_2d, self.range_matrix.repeat(spatial_features_2d.shape[0],1,1,1)), dim=1)
else:
spatial_features_2d = torch.cat((spatial_features_2d, self.x_range_matrix.repeat(spatial_features_2d.shape[0],1,1,1), self.y_range_matrix.repeat(spatial_features_2d.shape[0],1,1,1)), dim=1)
if self.range_guidance_new_conv_dom:
x_reverse_dom = grad_reverse(spatial_features_2d, l*-1)
dom_img_preds = self.conv_dom_layers(x_reverse_dom)
if self.dom_squeeze:
dom_img_preds = dom_img_preds.squeeze(-1)
self.forward_ret_dict['dom_img_preds'] = dom_img_preds
else:
x_pool = self.domain_pool(spatial_features_2d).view(spatial_features_2d.size(0), -1)
# x_pool_joint = x_pool
x_reverse = grad_reverse(x_pool, l)
dom_head_context = self.domain_classifier[:-2](x_reverse)#.squeeze(-1)
if 'dom_img_det' in t_mode:
data_dict['dom_head_context'] = dom_head_context
dom_img_preds = self.domain_classifier[-2:](dom_head_context)#.squeeze(-1)
if self.dom_squeeze:
dom_img_preds = dom_img_preds.squeeze(-1)
self.forward_ret_dict['dom_img_preds'] = dom_img_preds
if self.voxel_dom_patch_attention and not self.dom_patch_first:
spatial_features_2d = self.att_patch_layer(spatial_features_2d)
if self.joint_two_dom:
x_pool = self.domain_pool(spatial_features_2d).view(spatial_features_2d.size(0), -1)
x_reverse = grad_reverse(x_pool, l*-1)
dom_img_preds2 = self.domain_classifier(x_reverse)#.squeeze(-1)
self.forward_ret_dict['dom_img_preds2'] = dom_img_preds2
if self.training:
targets_dict_dom = self.assign_targets(
gt_boxes=data_dict['gt_boxes'],
dom_src=dom_src,
pseudo=pseudo
)
self.forward_ret_dict.update(targets_dict_dom)
#### det ####
if self.voxel_det_patch_attention:
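            # NOTE (reviewer assumption): `att_patch_layer_det` is never defined in
            # this class -- enabling VOXEL_DET_PATCH_ATTENTION without the joint
            # attention path would raise AttributeError unless the base template
            # provides the attribute.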
spatial_features_2d_det = self.att_patch_layer_det(spatial_features_2d_det)
cls_preds = self.conv_cls(spatial_features_2d_det)
box_preds = self.conv_box(spatial_features_2d_det)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
self.forward_ret_dict['cls_preds'] = cls_preds
self.forward_ret_dict['box_preds'] = box_preds
if self.conv_dir_cls is not None:
dir_cls_preds = self.conv_dir_cls(spatial_features_2d_det)
dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
else:
dir_cls_preds = None
if self.training:
if pseudo:
pseudo_weights = data_dict['pseudo_weights']
else:
pseudo_weights = None
# print("gt_classes", data_dict['gt_classes'].shape)
# print("gt_classes", data_dict['gt_classes'])
# print("pseudo_weights", pseudo_weights)
targets_dict = self.assign_targets(
gt_boxes=data_dict['gt_boxes'],
pseudo=pseudo,
pseudo_weights=pseudo_weights
)
self.forward_ret_dict.update(targets_dict)
if not self.training or self.predict_boxes_when_training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=data_dict['batch_size'],
cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
)
data_dict['batch_cls_preds'] = batch_cls_preds
data_dict['batch_box_preds'] = batch_box_preds
data_dict['cls_preds_normalized'] = False
##### fpn dom #####
if self.num_fpn_up + self.num_fpn_downup > 0:
# print("fpn")
for layer in self.fpn_layers:
spatial_features_2d = data_dict[f'spatial_features_2d_fpn{layer}']
spatial_features_2d_det = spatial_features_2d
#### FPN Dom ####
if 'dom_img' in t_mode:
if t_mode == 'dom_img_src':
dom_src = True
elif t_mode == 'dom_img_tgt':
dom_src = False
else:
dom_src = None
if self.voxel_dom_patch_attention and self.dom_patch_first:
spatial_features_2d = self.att_patch_layer_fpn[layer](spatial_features_2d)
if self.joint_two_dom:
x_pool2 = self.domain_pool_fpn[layer](spatial_features_2d).view(spatial_features_2d.size(0), -1)
x_reverse2 = grad_reverse(x_pool2, l*-1)
dom_img_preds2 = self.domain_classifier_fpn[layer](x_reverse2)#.
if self.range_guidance and self.range_guidance_dom_only:
if self.range_guidance_dist:
spatial_features_2d = torch.cat((spatial_features_2d, self.range_matrix_fpn[layer].repeat(spatial_features_2d.shape[0],1,1,1)), dim=1)
else:
spatial_features_2d = torch.cat((spatial_features_2d, self.x_range_matrix_fpn[layer].repeat(spatial_features_2d.shape[0],1,1,1), self.y_range_matrix_fpn[layer].repeat(spatial_features_2d.shape[0],1,1,1)), dim=1)
# x_pool = self.domain_pool_fpn[layer](spatial_features_2d).view(spatial_features_2d.size(0), -1)
# x_reverse = grad_reverse(x_pool, l*-1)
# dom_img_preds = self.domain_classifier_fpn[layer](x_reverse).squeeze(-1)
if self.range_guidance_new_conv_dom:
x_reverse_dom = grad_reverse(spatial_features_2d, l*-1)
dom_img_preds = self.conv_dom_layers_fpn[layer](x_reverse_dom)
if self.dom_squeeze:
dom_img_preds = dom_img_preds.squeeze(-1)
self.forward_ret_dict[f'dom_img_preds_fpn{layer}'] = dom_img_preds
else:
x_pool = self.domain_pool_fpn[layer](spatial_features_2d).view(spatial_features_2d.size(0), -1)
# x_pool_joint = x_pool
x_reverse = grad_reverse(x_pool, l)
dom_head_context = self.domain_classifier_fpn[layer][:-2](x_reverse)#.squeeze(-1)
if 'dom_img_det' in t_mode:
data_dict[f'dom_head_context_fpn{layer}'] = dom_head_context
dom_img_preds = self.domain_classifier_fpn[layer][-2:](dom_head_context)#.squeeze(-1)
if self.dom_squeeze:
dom_img_preds = dom_img_preds.squeeze(-1)
self.forward_ret_dict[f'dom_img_preds_fpn{layer}'] = dom_img_preds
if self.voxel_dom_patch_attention and not self.dom_patch_first:
spatial_features_2d = self.att_patch_layer_fpn[layer](spatial_features_2d)
if self.joint_two_dom:
x_pool2 = self.domain_pool_fpn[layer](spatial_features_2d).view(spatial_features_2d.size(0), -1)
x_reverse2 = grad_reverse(x_pool2, l*-1)
dom_img_preds2 = self.domain_classifier_fpn[layer](x_reverse2)#.squeeze(-1)
self.forward_ret_dict[f'dom_img_preds2_fpn{layer}'] = dom_img_preds2
if self.training:
targets_dict_dom = self.assign_targets(
gt_boxes=data_dict['gt_boxes'],
dom_src=dom_src,
pseudo=pseudo,
fpn_layer=layer
)
self.forward_ret_dict.update(targets_dict_dom)
# print("layer", layer)
# print("spatial_features_2d", spatial_features_2d.shape)
# if self.joint_attention:
# if self.voxel_det_seconv_attention and self.voxel_det_se_attention:
# spatial_features_2d_out = torch.max(self.att_spatial_se_layer_fpn[layer](spatial_features_2d), self.att_se_layer_fpn[layer](spatial_features_2d))
# spatial_features_2d_det = spatial_features_2d_out
# elif self.voxel_det_seconv_attention:
# # print("spatial_features_2d before", spatial_features_2d.shape)
# spatial_features_2d_det = self.att_spatial_se_layer_fpn[layer](spatial_features_2d)
# elif self.voxel_det_se_attention:
# spatial_features_2d_det = self.att_se_layer_fpn[layer](spatial_features_2d)
# else:
# spatial_features_2d_det = spatial_features_2d
# else:
# if self.voxel_det_seconv_attention and self.voxel_det_se_attention:
# spatial_features_2d_out = torch.max(self.att_spatial_se_layer_det_fpn[layer](spatial_features_2d), self.att_se_layer_det_fpn[layer](spatial_features_2d))
# spatial_features_2d_det = spatial_features_2d_out
# elif self.voxel_det_seconv_attention:
# # print("spatial_features_2d before", spatial_features_2d.shape)
# spatial_features_2d_det = self.att_spatial_se_layer_det_fpn[layer](spatial_features_2d)
# elif self.voxel_det_se_attention:
# spatial_features_2d_det = self.att_se_layer_det_fpn[layer](spatial_features_2d)
# else:
# spatial_features_2d_det = spatial_features_2d
cls_preds = self.conv_cls_fpn[layer](spatial_features_2d_det)
box_preds = self.conv_box_fpn[layer](spatial_features_2d_det)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
# print("cls_preds2", cls_preds.shape) # 1, 252, 252, 2
# print("box_preds2", box_preds.shape) # 1, 252, 252, 14
self.forward_ret_dict[f'cls_preds_fpn{layer}'] = cls_preds
self.forward_ret_dict[f'box_preds_fpn{layer}'] = box_preds
if self.conv_dir_cls is not None:
dir_cls_preds = self.conv_dir_cls_fpn[layer](spatial_features_2d_det)
dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
self.forward_ret_dict[f'dir_cls_preds_fpn{layer}'] = dir_cls_preds
else:
dir_cls_preds = None
if self.training:
if pseudo:
pseudo_weights = data_dict['pseudo_weights']
else:
pseudo_weights = None
targets_dict_fpn = self.assign_targets(
gt_boxes=data_dict['gt_boxes'],
pseudo=pseudo,
pseudo_weights=pseudo_weights,
fpn_layer=layer
)
self.forward_ret_dict.update(targets_dict_fpn)
if not self.training or self.predict_boxes_when_training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=data_dict['batch_size'],
cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds,
fpn_layer=layer
)
data_dict[f'batch_cls_preds_fpn{layer}'] = batch_cls_preds
data_dict[f'batch_box_preds_fpn{layer}'] = batch_box_preds
data_dict[f'cls_preds_normalized_fpn{layer}'] = False
# print("data_dict fpn", data_dict[f'batch_cls_preds_fpn{layer}'])
        return data_dict
| [
"[email protected]"
] | |
ecc1f1d2cd3a78e2dc7168fd9d413dc5f440fbe5 | 2b0eab74af8d23244ff11699830f9bb10fbd717a | /fac/migrations/0088_folder_custom_form_data.py | 2743e0be6a25e3364aacb4075b2da20e62c2de44 | [] | no_license | alexandrenorman/mixeur | c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b | 95d21cd6036a99c5f399b700a5426e9e2e17e878 | refs/heads/main | 2023-03-13T23:50:11.800627 | 2021-03-07T15:49:15 | 2021-03-07T15:49:15 | 345,384,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | # Generated by Django 2.2.17 on 2020-11-24 10:54
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('fac', '0087_project_custom_form_data'),
]
operations = [
migrations.AddField(
model_name='folder',
name='custom_form_data',
field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
7570fb3bb7d13173c86545c05e5965ec64f35c4c | 1b1190a127a21bfd096529d3a3f20d05151b2276 | /opsweb4/common/urls.py | d7b6911ed326a13da88a4b690cf0f28bab08413b | [] | no_license | cucy/zrdops4 | a9d37d9dd7cfba5a3ecb1180094621754d97c5e9 | f0609e90e5a944d30a65d6918b2bfcfd63a92c26 | refs/heads/master | 2021-01-19T14:39:54.315360 | 2017-09-24T04:01:27 | 2017-09-24T04:01:27 | 88,176,943 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView
from common.views import success_view, error_view, IndexView
urlpatterns = [
    # generic views
    # home page
url(r'^$', IndexView.as_view(), name='index'),
url(r'^success/$', success_view, name='success'),
url(r'^error/$', error_view, name='error'),
]
| [
"[email protected]"
] | |
5fbe5a1bfc9e491ef9f1597760db1d4c54b41b56 | f7ca89772fc3b19424f537895957cbf3fcafece3 | /nusa/version.py | b855b1a6a59a93d4da0487fc4064f1fdbc94ba7d | [
"MIT"
] | permissive | OrganizationUsername/nusa | d829d341da09bb7d10e7d4f630d63333c96ed2e0 | 05623a72b892330e4b0e059a03ac4614da934ce9 | refs/heads/master | 2022-12-20T11:51:14.221422 | 2020-10-07T19:10:25 | 2020-10-07T19:10:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | """
NuSA (Numerical Structural Analysis in Python)
Author: Pedro Jorge De Los Santos
E-mail: [email protected]
License: MIT License
"""
__version__ = "0.3.dev0"
| [
"[email protected]"
] | |
5d85feea87db2cd7b9f58a5a1fb29a01ab6a72f2 | 32bae996c594282515193ecb2357feb4f3004944 | /sap/cli/datadefinition.py | 8ec403e875616f73661a190f86cd5fe392adeaa4 | [
"Apache-2.0"
] | permissive | corani/sapcli | 2559c59073d64285dfe606c0afe491826d2a72ea | 7e81776afa08be0c454c0f86754c8a9b5d938522 | refs/heads/master | 2022-11-13T12:27:44.016007 | 2020-06-08T11:19:40 | 2020-06-08T11:20:09 | 273,421,126 | 1 | 0 | Apache-2.0 | 2020-06-19T06:29:13 | 2020-06-19T06:29:12 | null | UTF-8 | Python | false | false | 954 | py | """ADT proxy for Data Definition (CDS)"""
import sap.adt
import sap.adt.wb
import sap.cli.core
import sap.cli.object
import sap.cli.wb
class CommandGroup(sap.cli.core.CommandGroup):
"""Adapter converting command line parameters to sap.adt.DataDefinition
    method calls.
"""
def __init__(self):
super(CommandGroup, self).__init__('ddl')
@CommandGroup.argument('name')
@CommandGroup.command()
def read(connection, args):
"""Prints it out based on command line configuration.
"""
ddl = sap.adt.DataDefinition(connection, args.name)
print(ddl.text)
@CommandGroup.argument('name', nargs='+')
@CommandGroup.command()
def activate(connection, args):
"""Actives the given class.
"""
activator = sap.cli.wb.ObjectActivationWorker()
activated_items = ((name, sap.adt.DataDefinition(connection, name)) for name in args.name)
sap.cli.object.activate_object_list(activator, activated_items, count=len(args.name))
| [
"[email protected]"
] | |
9a9deb90c6765a6bc613f4f39852d1b3fc01d628 | 0f850c7a6746174c3ecc20884e6dc7b2aa6bcce1 | /model.py | ca0e76717541a32c12900d843baad3651fbc293f | [] | no_license | wnd2da/launcher_gateone | 9e1831637606885374dcfeca36637b20fdcfcbbb | 601b00cdaf179e7cdd96e63f9aa8abd427f52cf9 | refs/heads/master | 2022-12-04T13:38:14.346940 | 2020-08-23T04:02:27 | 2020-08-23T04:02:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | # -*- coding: utf-8 -*-
#########################################################
# python
import os
import traceback
import json
# third-party
# sjva shared modules
from framework.logger import get_logger
from framework import db, app, path_app_root
# package-local imports
from .plugin import package_name, logger
db_file = os.path.join(path_app_root, 'data', 'db', '%s.db' % package_name)
app.config['SQLALCHEMY_BINDS'][package_name] = 'sqlite:///%s' % (db_file)
class ModelSetting(db.Model):
__tablename__ = 'plugin_%s_setting' % package_name
__table_args__ = {'mysql_collate': 'utf8_general_ci'}
__bind_key__ = package_name
id = db.Column(db.Integer, primary_key=True)
key = db.Column(db.String(100), unique=True, nullable=False)
value = db.Column(db.String, nullable=False)
def __init__(self, key, value):
self.key = key
self.value = value
def __repr__(self):
return repr(self.as_dict())
def as_dict(self):
return {x.name: getattr(self, x.name) for x in self.__table__.columns}
#########################################################
| [
"[email protected]"
] | |
30b010d2049deb9889ac634c2f45af497d8f3046 | 5a281cb78335e06c631181720546f6876005d4e5 | /ec2-api-8.0.0/ec2api/tests/unit/test_customer_gateway.py | e4dfd8ae776ef4561c265897a509d233ecc7cf74 | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 5,305 | py | # Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from ec2api.tests.unit import base
from ec2api.tests.unit import fakes
from ec2api.tests.unit import matchers
from ec2api.tests.unit import tools
class CustomerGatewayTestCase(base.ApiTestCase):
def test_create_customer_gateway(self):
self.db_api.add_item.side_effect = (
tools.get_db_api_add_item(fakes.ID_EC2_CUSTOMER_GATEWAY_2))
resp = self.execute('CreateCustomerGateway',
{'IpAddress': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_2,
'Type': 'ipsec.1'})
self.assertEqual({'customerGateway': fakes.EC2_CUSTOMER_GATEWAY_2},
resp)
self.db_api.add_item.assert_called_once_with(
mock.ANY, 'cgw',
{'ip_address': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_2})
resp = self.execute('CreateCustomerGateway',
{'IpAddress': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_2,
'Type': 'ipsec.1',
'BgpAsn': '65000'})
self.assertEqual({'customerGateway': fakes.EC2_CUSTOMER_GATEWAY_2},
resp)
def test_create_customer_gateway_idempotent(self):
self.set_mock_db_items(fakes.DB_CUSTOMER_GATEWAY_1)
resp = self.execute('CreateCustomerGateway',
{'IpAddress': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_1,
'Type': 'ipsec.1'})
self.assertEqual({'customerGateway': fakes.EC2_CUSTOMER_GATEWAY_1},
resp)
self.assertFalse(self.db_api.add_item.called)
resp = self.execute('CreateCustomerGateway',
{'IpAddress': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_1,
'Type': 'ipsec.1',
'BgpAsn': '65000'})
self.assertEqual({'customerGateway': fakes.EC2_CUSTOMER_GATEWAY_1},
resp)
self.assertFalse(self.db_api.add_item.called)
def test_create_customer_gateway_invalid_parameters(self):
self.assert_execution_error(
'Unsupported',
'CreateCustomerGateway',
{'IpAddress': fakes.IP_CUSTOMER_GATEWAY_ADDRESS_1,
'Type': 'ipsec.1',
'BgpAsn': '456'})
def test_delete_customer_gateway(self):
self.set_mock_db_items(fakes.DB_CUSTOMER_GATEWAY_2)
resp = self.execute(
'DeleteCustomerGateway',
{'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_2})
self.assertEqual({'return': True}, resp)
self.db_api.delete_item.assert_called_once_with(
mock.ANY, fakes.ID_EC2_CUSTOMER_GATEWAY_2)
def test_delete_customer_gateway_invalid_parameters(self):
self.set_mock_db_items()
self.assert_execution_error(
'InvalidCustomerGatewayID.NotFound',
'DeleteCustomerGateway',
{'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_2})
self.assertFalse(self.db_api.delete_item.called)
self.set_mock_db_items(fakes.DB_CUSTOMER_GATEWAY_1,
fakes.DB_VPN_CONNECTION_1)
self.assert_execution_error(
'IncorrectState',
'DeleteCustomerGateway',
{'CustomerGatewayId': fakes.ID_EC2_CUSTOMER_GATEWAY_1})
self.assertFalse(self.db_api.delete_item.called)
def test_describe_customer_gateways(self):
self.set_mock_db_items(fakes.DB_CUSTOMER_GATEWAY_1,
fakes.DB_CUSTOMER_GATEWAY_2)
resp = self.execute('DescribeCustomerGateways', {})
self.assertThat(resp['customerGatewaySet'],
matchers.ListMatches([fakes.EC2_CUSTOMER_GATEWAY_1,
fakes.EC2_CUSTOMER_GATEWAY_2]))
resp = self.execute(
'DescribeCustomerGateways',
{'CustomerGatewayId.1': fakes.ID_EC2_CUSTOMER_GATEWAY_2})
self.assertThat(
resp['customerGatewaySet'],
matchers.ListMatches([fakes.EC2_CUSTOMER_GATEWAY_2]))
self.db_api.get_items_by_ids.assert_called_once_with(
mock.ANY, set([fakes.ID_EC2_CUSTOMER_GATEWAY_2]))
self.check_filtering(
'DescribeCustomerGateways', 'customerGatewaySet',
[('bgp-asn', 65000),
('customer-gateway-id', fakes.ID_EC2_CUSTOMER_GATEWAY_2),
('ip-address', fakes.IP_CUSTOMER_GATEWAY_ADDRESS_2),
('state', 'available'),
('type', 'ipsec.1')])
self.check_tag_support(
'DescribeCustomerGateways', 'customerGatewaySet',
fakes.ID_EC2_CUSTOMER_GATEWAY_2, 'customerGatewayId')
| [
"Wayne [email protected]"
] | Wayne [email protected] |
60a83fbe0010450f8a95ef1138802bc95b62c8fa | 67b5c4a03c3da2808054cfabc4001f05c7fdac49 | /dataset/cifar_dataset.py | 7e78aa20301bc7fdaf7dd7f7f646b65b59594f87 | [] | no_license | dannieldwt/deep_learning_algorithm | 411b1ffef4fdea1e0a42a09bee82c68bab17bffc | e2a37a378c88e20560ef6c0e8187a751905a51b1 | refs/heads/master | 2022-04-10T03:46:19.788919 | 2020-01-18T14:16:14 | 2020-01-18T14:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,403 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 11 17:54:50 2019
@author: ubuntu
"""
import pickle
import numpy as np
from dataset.base_dataset import BasePytorchDataset
class Cifar10Dataset(BasePytorchDataset):
"""原版数据集地址http://www.cs.toronto.edu/~kriz/cifar.html
cifar10: 10个类别,每个类别6000张
cifar100: 100个类别,每个类别600张
单张图片为RGB 32x32的小图,总计60,000张,其中50,000张训练集,10,000张测试集
该数据集没有索引,所以只能一次性加载到内存
输入:data_type(数据集类型),包括train训练集和test测试集
输出:n,h,w,c (bgr格式), 所有图片源数据都统一用这种格式(包括voc/coco)
"""
def __init__(self, root_path='/home/ubuntu/MyDatasets/cifar-10-batches-py/',
data_type='train', img_transform=None, label_transform=None, bbox_transform=None, **kwargs):
super().__init__()
self.img_transform = img_transform
self.label_transform = label_transform
self.bbox_transform = bbox_transform
train_path = [root_path + 'data_batch_1',
root_path + 'data_batch_2',
root_path + 'data_batch_3',
root_path + 'data_batch_4',
root_path + 'data_batch_5']
test_path = [root_path + 'test_batch']
if data_type == 'train':
self.path = train_path
elif data_type == 'test':
self.path = test_path
else:
raise ValueError('wrong data type, only support train/test.')
self.meta_path = root_path + 'batches.meta'
dataset = self.get_dataset()
self.imgs = dataset['data']
self.labels = dataset['target']
self.bboxes = dataset.get('bbox', None)
self.CLASSES = dataset['target_names']
def get_dataset(self):
datas = []
labels = []
        # load the label names
        with open(self.meta_path, 'rb') as f:  # following the python snippet on the official CIFAR page
            meta = pickle.load(f, encoding='bytes')
            label_names = [label.decode('utf-8') for label in meta[b'label_names']]  # decode() strips the b'' prefix
        # load the data batches
        for path in self.path:
            with open(path, 'rb') as f:
                batch = pickle.load(f, encoding='bytes')
                data = batch[b'data']    # (10000, 3072)
                label = np.array(batch[b'labels'])    # (10000,)
                datas.append(data)
                labels.append(label)
cat_datas = np.concatenate(datas, axis=0) # (n, 3072)->(50000,3072)
cat_labels = np.concatenate(labels) # (n,)->(50000,)
        # split out R/G/B to form (C,H,W); the original byte order follows the official description
        cat_datas = cat_datas.reshape(-1, 3, 32, 32).transpose(0,2,3,1)[...,[2,1,0]] # (b,c,h,w)->(b,h,w,c), rgb->bgr
        # return the data as an sklearn-style dict
dataset = {}
dataset['data'] = cat_datas
dataset['target'] = cat_labels
dataset['target_names'] = label_names
return dataset
def __getitem__(self, idx):
"""常规数据集传出的是多个变量,这里改为传出dict,再在定制collate中处理堆叠
注意:要求传出的为OrderedDict,这样在自定义collate_fn中不会出错。
"""
data_dict = {}
img = self.imgs[idx]
label = self.labels[idx]
if self.label_transform is not None:
label = self.label_transform(label)
if self.img_transform is not None:
            img, ori_shape, scale_shape, pad_shape, scale_factor, flip = self.img_transform(img)  # the img_transform returns (img, ori_shape, scale_shape, pad_shape, scale_factor, flip)
img_meta = dict(ori_shape = ori_shape,
scale_shape = scale_shape,
pad_shape = pad_shape,
scale_factor = scale_factor,
flip = flip)
data_dict = dict(img = img,
img_meta = img_meta,
gt_labels = label,
stack_list = ['img'])
return data_dict
def __len__(self):
return len(self.imgs)
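# Small numpy sketch (illustrative, not part of the original loader) of the
# reshape/transpose/channel-swap trick used in get_dataset above: CIFAR stores
# each image as 3072 bytes ordered R-plane, then G-plane, then B-plane.
def _demo_cifar_layout():
    flat = np.arange(2 * 3072).reshape(2, 3072)          # two fake images
    chw = flat.reshape(-1, 3, 32, 32)                    # (n, c, h, w), channels R, G, B
    hwc_bgr = chw.transpose(0, 2, 3, 1)[..., [2, 1, 0]]  # (n, h, w, c), channels B, G, R
    assert hwc_bgr.shape == (2, 32, 32, 3)
    assert hwc_bgr[0, 0, 0, 0] == chw[0, 2, 0, 0]        # the B plane now comes first
    return hwc_bgr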
class Cifar100Dataset(Cifar10Dataset):
"""原版数据集地址http://www.cs.toronto.edu/~kriz/cifar.html
cifar10: 10个类别,每个类别6000张
cifar100: 100个类别,每个类别600张
单张图片为RGB 32x32的小图,总计60,000张,其中50,000张训练集,10,000张测试集
"""
def __init__(self, root_path='../dataset/source/cifar100/', data_type='train',
norm=None, label_transform_dict=None, one_hot=None,
binary=None, shuffle=None):
train_path = [root_path + 'train']
test_path = [root_path + 'test']
if data_type == 'train':
self.path = train_path
elif data_type == 'test':
self.path = test_path
else:
raise ValueError('wrong data type, only support train/test.')
self.meta_path = [root_path + 'meta']
dataset = self.get_dataset()
self.imgs = dataset['data']
self.labels = dataset['target']
self.CLASSES = dataset['target_names']
| [
"[email protected]"
] | |
f5472639f09abf2e99dd3af4f8b4e77684efe070 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit2433.py | e2e631040a9f7275ba486935a2c37c61f5898b14 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,149 | py | # qubit number=4
# total number=39
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
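# Worked sanity check for the two helpers above (illustrative values only):
# bitwise_dot counts the matching 1-bits mod 2, while bitwise_xor xors
# position-wise and returns the result reversed, matching qiskit's
# little-endian bit ordering.
def _demo_bitwise_helpers():
    assert bitwise_dot("111", "101") == "0"    # 1 + 0 + 1 = 2 -> even parity
    assert bitwise_dot("110", "100") == "1"    # single overlap -> odd parity
    assert bitwise_xor("110", "010") == "001"  # xor gives 100, then reversed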
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=16
prog.cz(input_qubit[0],input_qubit[3]) # number=17
prog.rx(-0.5686282702997527,input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=18
prog.h(input_qubit[3]) # number=26
prog.cz(input_qubit[0],input_qubit[3]) # number=27
prog.h(input_qubit[3]) # number=28
prog.x(input_qubit[3]) # number=21
prog.rx(0.4241150082346221,input_qubit[2]) # number=33
prog.cx(input_qubit[0],input_qubit[3]) # number=22
prog.cx(input_qubit[0],input_qubit[3]) # number=12
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=23
prog.cz(input_qubit[1],input_qubit[2]) # number=24
prog.h(input_qubit[2]) # number=25
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=34
prog.cz(input_qubit[2],input_qubit[0]) # number=35
prog.h(input_qubit[0]) # number=36
prog.z(input_qubit[2]) # number=30
prog.cx(input_qubit[2],input_qubit[0]) # number=31
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[0]) # number=14
prog.y(input_qubit[0]) # number=15
prog.swap(input_qubit[2],input_qubit[0]) # number=37
prog.swap(input_qubit[2],input_qubit[0]) # number=38
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2433.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
47bfcf0b643a946c7fdd64357993ae722e0afc73 | 14324a581c4f22c7ea8a96cc79725cdb84960e43 | /trees/utils/Standard_CART/CART_Continuous.py | 3b7032ee5bb900794c9dc9104f4b7ad9640fa519 | [] | no_license | dogger123/django-treeapi | a3f141f87bb515e4af4f820a80daf6bacc40199d | 942da122d6c9909c21321a1aea2849428ba47120 | refs/heads/master | 2020-05-22T13:20:22.798164 | 2019-05-13T06:45:13 | 2019-05-13T06:45:13 | 186,357,054 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,772 | py | import operator
import re
import json
import pymysql
#------------------------ I. Data loading ----------------------------
def readFromCSV(filename='data'):
f = open(filename+".csv")
datas = []
    # start reading the dataset
    for line in f:
        newline = line.strip('\n').split(',')
        # append the parsed row to datas
        datas.append(newline)
f.close()
#print(datas)
    labels = datas[0][:len(datas[1])-1]  # trim labels to the column count of row 2; labels always has one column fewer than the dataset
    dataSet = datas[1:]  # the data rows start from the second line
return dataSet,labels
def readFromDB(tablename='',fields=[]):
db=pymysql.connect("127.0.0.1","root","","subtreeapi",charset="utf8")
cur = db.cursor()
sql = "select "
for value in fields:
sql+="`%s`," % (value)
sql = sql[:-1]
sql += "from %s where `%s` != '-' " % (tablename,fields[-1])
cur.execute(sql)
#tmpdata = list(cur.fetchall())
dataSet = []
    # the DB driver returns tuples; convert the 2-D tuple into lists row by row
for line in list(cur.fetchall()):
dataSet.append(list(line))
for i in range(len(dataSet)):
        # the last column needs no cleaning -- it always has a value and only needs discretization
for j in range(len(dataSet[i])-1):
if dataSet[i][j] == '-':
dataSet[i][j] = 0.0
dataSet[i][j] = float(dataSet[i][j])
dataSet[i][-1] = resultDiscretization(dataSet[i][-1])
    # trim labels to the dataset's column count; always one column fewer than the dataset
labels = fields[:len(dataSet[0])-1]
print(sql,"\n",dataSet[:10],"\n",labels)
return dataSet,labels
def resultDiscretization(origin):
origin = float(origin)
    # bands are open-ended at the extremes; half-open intervals avoid the
    # original gaps where fractional scores such as 89.5 fell through
    if origin >= 90:
        return "excellent"
    elif origin >= 80:
        return "good"
    elif origin >= 60:
        return "pass"
else:
return "failed"
#------------------------ II. Core algorithm ----------------------------
def calGini(dataSet):
numEntries = len(dataSet)
labelCounts={}
for featVec in dataSet:
currentLabel = featVec[-1]
if currentLabel not in labelCounts.keys(): labelCounts[currentLabel] = 0
labelCounts[currentLabel] += 1
gini=1
for label in labelCounts.keys():
prop=float(labelCounts[label])/numEntries
gini -=prop*prop
return gini
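# Hand-checked Gini example (illustrative rows): a node split 2/2 between two
# classes scores 1 - (0.5^2 + 0.5^2) = 0.5, while a pure node scores 0.
def _demo_gini():
    mixed = [[1.0, 'pass'], [2.0, 'pass'], [3.0, 'failed'], [4.0, 'failed']]
    pure = [[1.0, 'pass'], [2.0, 'pass']]
    assert abs(calGini(mixed) - 0.5) < 1e-9
    assert calGini(pure) == 0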
def splitDataSet(dataSet, axis, value,threshold):
retDataSet = []
if threshold == 'lt':
for featVec in dataSet:
if featVec[axis] <= value:
retDataSet.append(featVec)
else:
for featVec in dataSet:
if featVec[axis] > value:
retDataSet.append(featVec)
return retDataSet
def majorityCnt(classList):
classCount={}
for vote in classList:
if vote not in classCount.keys(): classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
def chooseBestFeatureToSplit(dataSet):
numFeatures = len(dataSet[0]) - 1
bestGiniGain = 1.0; bestFeature = -1;bsetValue=""
for i in range(numFeatures): # iterate over the features
featList = [example[i] for example in dataSet]# extract this feature's column
uniqueVals = list(set(featList)) # the set of values taken by this feature
uniqueVals.sort()
for value in uniqueVals:# try every candidate split value
GiniGain = 0.0
# gain of the left (<=) branch
left_subDataSet = splitDataSet(dataSet, i, value,'lt')
left_prob = len(left_subDataSet)/float(len(dataSet))
GiniGain += left_prob * calGini(left_subDataSet)
# print left_prob,calGini(left_subDataSet),
# gain of the right (>) branch
right_subDataSet = splitDataSet(dataSet, i, value,'gt')
right_prob = len(right_subDataSet)/float(len(dataSet))
GiniGain += right_prob * calGini(right_subDataSet)
# print right_prob,calGini(right_subDataSet),
# print GiniGain
if (GiniGain < bestGiniGain): # is this the best split so far?
bestGiniGain = GiniGain # record the best gain, feature and split value
bestFeature = i
bsetValue=value
return bestFeature,bsetValue
def createTree(dataSet,labels,text):
classList = [example[-1] for example in dataSet]
# print dataSet
if classList.count(classList[0]) == len(classList):
return {"name":classList[0],"col":"null","text":text,"children":"null"}#所有的类别都一样,就不用再划分了
if len(dataSet) == 1: # no features left to split on, so pick the branch class by majority vote
return {"name":majorityCnt(classList),"col":"null","text":text,"children":"null"}
bestFeat,bsetValue = chooseBestFeatureToSplit(dataSet)
# print bestFeat,bsetValue,labels
bestFeatLabel = labels[bestFeat]
if bestFeat==-1:
return {"name":majorityCnt(classList),"col":"null","text":text,"children":"null"}
myTree = {"name":bestFeatLabel,"col":bestFeat,"text":text,"children":[{},{}]}
featValues = [example[bestFeat] for example in dataSet]
uniqueVals = list(set(featValues))
subLabels = labels[:]
# print bsetValue
#myTree[bestFeatLabel][bestFeatLabel+'<='+str(round(float(bsetValue),3))] = createTree(splitDataSet(dataSet, bestFeat, bsetValue,'lt'),subLabels)
myTree["children"][0] = createTree(splitDataSet(dataSet,bestFeat,bsetValue,'lt'),subLabels,'<='+str(round(float(bsetValue),3)))
#myTree[bestFeatLabel][bestFeatLabel+'>'+str(round(float(bsetValue),3))] = createTree(splitDataSet(dataSet, bestFeat, bsetValue,'gt'),subLabels)
myTree["children"][1] = createTree(splitDataSet(dataSet, bestFeat, bsetValue,'gt'),subLabels,'>'+str(round(float(bsetValue),3)))
return myTree
#---------------------- III. Classification & evaluation --------------------------
# classifying on continuous features differs from the discrete case
def classify(jsonTree,observation):
# leaf node
if jsonTree["children"]=="null":
return jsonTree["name"]
# internal (branch) node
else:
# look up the observation's value in this node's feature column
v=float(observation[jsonTree['col']])#nameToIndex(jsonTree['name'],labels)]
branch=None
# route the observation by the numeric threshold embedded in the first child's edge text
threshold = re.findall(r"\d+\.?\d*",jsonTree["children"][0]["text"])[0]
if v <= float(threshold):
branch=jsonTree["children"][0]
else:
branch=jsonTree["children"][1]
return classify(branch,observation)
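# Example (added for clarity, with a hypothetical split): if the root splits
# column 2 at 3.05, an observation whose column-2 value is 2.9 follows
# children[0] ("<=3.05") and one with 3.2 follows children[1] (">3.05")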
def checkAccuracy(jsonTree,observations):
total = float(len(observations))
if total <= 0:return 0
correct = 0.0
for observation in observations:
print("data:",observation[:-1])
result = classify(jsonTree,observation)
print("result: T:",observation[-1],"|P:",result)
if str(result) == str(observation[-1]):
correct += 1.0
return correct/total
#-------------------------- O. Top-level control functions --------------------------
#--------------------------------------------------------------------
# build a tree from the given data source
#dataSource:{csv,db} target:{dictTree,json,db}
def GenerateCART(dataSource="csv",sourceName="",target="dictTree",fields=[]):
'''Common variable definitions'''
# the dataset (2-D array)
dataSet = []
# header / feature names (1-D array)
labels = []
# the dict tree
dictTree = {}
'''Read the data source'''
if dataSource == 'csv':
dataSet,labels = readFromCSV(sourceName)
print('labels:',labels,'\n')
print('dataSet:',dataSet,'\n')
elif dataSource == "db":
dataSet,labels = readFromDB(sourceName,fields)
print('labels:',labels,'\n')
print('dataSet:',dataSet,'\n')
else:
print( "please specify the dataSource, csv or db" )
return "please specify the dataSource, csv or db"
'''Train the tree'''
dictTree = createTree(dataSet,labels,"null")
'''Output the tree'''
if target == "dictTree":
print(dictTree)
return dictTree
elif target == "db":
pass
elif target == "json":
print(json.dumps(dictTree))
return (json.dumps(dictTree))
else:
print( "please specify the target, csv or db" )
return "please specify the target, csv or db"
# Classify a new observation. observation is the data point; dictTree is the trained dict tree
def Classify(dictTree,observation):
return classify(dictTree,observation)
# Batch-classify a set of new observations. observations is the dataset; dictTree is the trained dict tree
def ClassifyAll(dictTree,observations):
classes = []
for observation in observations:
classes.append(classify(dictTree,observation))
return classes
# Compute the accuracy
def CheckAccuracy(jsonTree,dataSource,sourceName,fields=[]):
'''Read the data source'''
if dataSource == 'csv':
dataSet,labels = readFromCSV(sourceName)
#print('labels:',labels,'\n')
#print('dataSet:',dataSet,'\n')
elif dataSource == "db":
dataSet,labels = readFromDB(sourceName,fields) # fields must list the DB columns when dataSource == "db"
print('dataSet:',dataSet,'\n')
else:
print( "please specify the dataSource, csv or db" )
return "please specify the dataSource, csv or db"
return checkAccuracy(jsonTree,dataSet)
#---------------------- Run directly as a script --------------------------
if __name__ == '__main__':
#dictTree = GenerateCART("csv","datasets/iris_train","dictTree")
dictTreeJson = GenerateCART("db","personal_transcripts_cs","json",['English','CET4','CET6','AdvancedMath','LinearAlgebra','ProbabilityTheory','DataStructure','DataBase','ComputerNetwork','OperatingSystem','CompositionPrinciple','CppProgramming','ProgrammingPractice','JavaProgramming','CSorSE','NCRE_CPP2'])
print(dictTreeJson)
#dataSet,labels =
#readFromDB('personal_transcripts_cs',['English','CET4','CET6','AdvancedMath','LinearAlgebra','ProbabilityTheory','DataStructure','DataBase','ComputerNetwork','OperatingSystem','CompositionPrinciple','CppProgramming','ProgrammingPractice','JavaProgramming','CSorSE','NCRE_CPP2'])
#print('ACC:{:.2%}'.format(checkAccuracy(dictTree,dataSet)))
#readFromDB('personal_transcripts_cs',['English','CET4','CET6','AdvancedMath','LinearAlgebra','ProbabilityTheory','DataStructure','DataBase','ComputerNetwork','OperatingSystem','CompositionPrinciple','CppProgramming','ProgrammingPractice','JavaProgramming','CSorSE','NCRE_CPP2'])
#---------------------- Other reference functions --------------------------
def createTree_old(dataSet,labels):
classList = [example[-1] for example in dataSet]
# print dataSet
if classList.count(classList[0]) == len(classList):
return classList[0]#所有的类别都一样,就不用再划分了
if len(dataSet) == 1: #如果没有继续可以划分的特征,就多数表决决定分支的类别
return majorityCnt(classList)
bestFeat,bsetValue = chooseBestFeatureToSplit(dataSet)
# print bestFeat,bsetValue,labels
bestFeatLabel = labels[bestFeat]
if bestFeat==-1:
return majorityCnt(classList)
myTree = {bestFeatLabel:{}}
featValues = [example[bestFeat] for example in dataSet]
uniqueVals = list(set(featValues))
subLabels = labels[:]
# print bsetValue
myTree[bestFeatLabel][bestFeatLabel+'<='+str(round(float(bsetValue),3))] = createTree(splitDataSet(dataSet, bestFeat, bsetValue,'lt'),subLabels)
myTree[bestFeatLabel][bestFeatLabel+'>'+str(round(float(bsetValue),3))] = createTree(splitDataSet(dataSet, bestFeat, bsetValue,'gt'),subLabels)
return myTree
| [
"="
] | = |
bf46db0b0b805194fe8e6a4f4fd9f6d2f6736559 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/hoework01/gettop10frommaoyam01_20200626130014.py | 2cde60173d6d8422f33af36b5ca36b8c888624ad | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 4,247 | py | # Use the requests and bs4 libraries to crawl the Maoyan top-10 movies' names, genres and release dates, and save them to a CSV file in the utf-8 character set
import requests
from bs4 import BeautifulSoup as bs
maoyanUrl = "https://maoyan.com/board/4"
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
header = {
'Content-Type': 'text/plain; charset=UTF-8',
'Cookie' : '__mta=251934006.1593072991075.1593100662316.1593100664951.15; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; mojo-session-id={"id":"435818e6a726415f46defffa27f7abc6","time":1593100221937}; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100665; mojo-trace-id=17; _lxsdk_s=172ec2bff67-0c2-e9f-c64%7C%7C24__mta=251934006.1593072991075.1593100690175.1593100868002.17; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593075275703.1593078726963.7; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593100868; _lxsdk_s=172ee2f4a3e-1c2-3a1-5a4%7C%7C1__mta=251934006.1593072991075.1593133988033.1593140260525.19; uuid_n_v=v1; uuid=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; _csrf=8557626db9b655cf9050ae7e5b2aab69278c8061c21eca95e1c3cf2130b0b64c; _lxsdk_cuid=172ea8cb247c8-0a73066b1c0a8b-4353760-100200-172ea8cb248c8; _lxsdk=2395D3F0B6BC11EA9F28E30FF5FFF73C9A16AE2FA53A448DA75AEAA9D715CB59; mojo-uuid=c457eacb7c1eb59d3d2f6c1f8d75b9c9; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1593072989,1593073002; _lx_utm=utm_source%3Dgoogle%26utm_medium%3Dorganic; __mta=251934006.1593072991075.1593134712257.1593134712989.9; mojo-session-id={"id":"b78cc9fcb57a627220ec165f84d9d5a9","time":1593140260318}; mojo-trace-id=1; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1593140260; _lxsdk_s=172ee8f28d1-560-08-4aa%7C%7C3',
# 'Host' : 'http://www.baidu.com',
'Origin': 'https://maoyan.com',
'Referer': 'https://maoyan.com/board/4',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}
def get_urls(url, headers):
response = requests.get(url,headers=headers)
bs_info = bs(response.text,"html.parser")
import re
films_url = []
for tag in bs_info.find_all('div',):
for tag_p in tag.find_all('a',href=re.compile('/films/')) :
# collect links to the top-10 movies' detail pages
films_url.append(url + tag_p.get('href'))
urls = set(films_url)
return urls
import pandas
# fetch the detail pages
def get_page_info(urls,header):
films_content = []
for url in urls:
content = get_page_content(url,header)
films_content.append(content)
return films_content
# fetch the details of a single movie
def get_page_content(url,header):
import re
response = requests.get(url, headers=header)
bs_info = bs(response.text,'html.parser')
# print(response.text)
for atag in bs_info.find_all('div',attrs={'class':'banner'}):
# print(atag.text)
film_name = atag.find('h1').text + atag.find('div',attrs = {'class' : 'ename ellipses'}).text
print(film_name)
return "test"
def main():
#urls = get_urls(maoyanUrl,header)
#contents = get_page_info(urls,header)
#print(urls)
page_1 = 'https://maoyan.com/films/1375'
get_page_content(page_1,header)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
bae48855210e299852060c2088e67bd9f761ceaa | f8b5aafac15f408a48fabf853a918015c927e6fe | /bk_tomo/venv/venv27/lib/python2.7/site-packages/ansible/plugins/connection/docker.py | f49d12bcc74208905d92af64bbc82e6a9ebf119d | [] | no_license | to30/tmp | bda1ac0ca3fc61e96c2a1c491367b698d7e97937 | ec809683970af6787728c2c41f161f416155982a | refs/heads/master | 2021-01-01T04:25:52.040770 | 2016-05-13T16:34:59 | 2016-05-13T16:34:59 | 58,756,087 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,357 | py | # Based on the chroot connection plugin by Maykel Moya
#
# Connection plugin for configuring docker containers
# (c) 2014, Lorin Hochstein
# (c) 2015, Leendert Brouwer
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# Maintainer: Leendert Brouwer (https://github.com/objectified)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import os.path
import pipes
import subprocess
import re
from distutils.version import LooseVersion
import ansible.constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.plugins.connection import ConnectionBase
from ansible.utils.unicode import to_bytes
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
BUFSIZE = 65536
class Connection(ConnectionBase):
''' Local docker based connections '''
transport = 'docker'
has_pipelining = True
# su currently has an undiagnosed issue with calculating the file
# checksums (so copy, for instance, doesn't work right)
# Have to look into that before re-enabling this
become_methods = frozenset(C.BECOME_METHODS).difference(('su',))
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
# Note: docker supports running as non-root in some configurations.
# (For instance, setting the UNIX socket file to be readable and
# writable by a specific UNIX group and then putting users into that
# group). Therefore we don't check that the user is root when using
# this connection. But if the user is getting a permission denied
# error it probably means that docker on their system is only
# configured to be connected to by root and they are not running as
# root.
if 'docker_command' in kwargs:
self.docker_cmd = kwargs['docker_command']
else:
self.docker_cmd = distutils.spawn.find_executable('docker')
if not self.docker_cmd:
raise AnsibleError("docker command not found in PATH")
self.can_copy_bothways = False
docker_version = self._get_docker_version()
if LooseVersion(docker_version) < LooseVersion('1.3'):
raise AnsibleError('docker connection type requires docker 1.3 or higher')
# Docker cp in 1.8.0 sets the owner and group to root rather than the
# user that the docker container is set to use by default.
#if LooseVersion(docker_version) >= LooseVersion('1.8.0'):
# self.can_copy_bothways = True
@staticmethod
def _sanitize_version(version):
return re.sub('[^0-9a-zA-Z\.]', '', version)
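        # e.g. _sanitize_version("'1.8.1'\n") -> '1.8.1'; the quotes come from
        # the --format "'{{.Server.Version}}'" call in _get_docker_version below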
def _get_docker_version(self):
cmd = [self.docker_cmd, 'version']
cmd_output = subprocess.check_output(cmd)
for line in cmd_output.split('\n'):
if line.startswith('Server version:'): # old docker versions
return self._sanitize_version(line.split()[2])
# no result yet, must be newer Docker version
new_docker_cmd = [
self.docker_cmd,
'version', '--format', "'{{.Server.Version}}'"
]
cmd_output = subprocess.check_output(new_docker_cmd)
return self._sanitize_version(cmd_output)
def _connect(self, port=None):
""" Connect to the container. Nothing to do """
super(Connection, self)._connect()
if not self._connected:
display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
self._play_context.remote_user, host=self._play_context.remote_addr)
)
self._connected = True
def exec_command(self, cmd, in_data=None, sudoable=False):
""" Run a command on the docker host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
# -i is needed to keep stdin open which allows pipelining to work
local_cmd = [self.docker_cmd, "exec", '-u', self._play_context.remote_user, '-i', self._play_context.remote_addr, executable, '-c', cmd]
display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr)
local_cmd = [to_bytes(i, errors='strict') for i in local_cmd]
p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(in_data)
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
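        # e.g. _prefix_login_path('foo/../bar') -> '/bar'; relative paths are
        # rooted at '/' (see the docstring above) and then normalized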
def put_file(self, in_path, out_path):
""" Transfer a file from local to docker container """
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
out_path = self._prefix_login_path(out_path)
if not os.path.exists(to_bytes(in_path, errors='strict')):
raise AnsibleFileNotFound(
"file or module does not exist: %s" % in_path)
if self.can_copy_bothways:
# only docker >= 1.8.1 can do this natively
args = [ self.docker_cmd, "cp", in_path, "%s:%s" % (self._play_context.remote_addr, out_path) ]
args = [to_bytes(i, errors='strict') for i in args]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
else:
out_path = pipes.quote(out_path)
# Older docker doesn't have native support for copying files into
# running containers, so we use docker exec to implement this
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
args = [self.docker_cmd, "exec", '-u', self._play_context.remote_user, "-i", self._play_context.remote_addr, executable, "-c",
"dd of=%s bs=%s" % (out_path, BUFSIZE)]
args = [to_bytes(i, errors='strict') for i in args]
with open(to_bytes(in_path, errors='strict'), 'rb') as in_file:
try:
p = subprocess.Popen(args, stdin=in_file,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
raise AnsibleError("docker connection with docker < 1.8.1 requires dd command in the chroot")
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
in_path = self._prefix_login_path(in_path)
# out_path is the final file path, but docker takes a directory, not a
# file path
out_dir = os.path.dirname(out_path)
args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
args = [to_bytes(i, errors='strict') for i in args]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
# Rename if needed
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
if actual_out_path != out_path:
os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
def close(self):
""" Terminate the connection. Nothing to do for Docker"""
super(Connection, self).close()
self._connected = False
| [
"[email protected]"
] | |
9fce71be596e167cbdbd8b915b98f9c62c586643 | a90aa4871684f6f24aa5b0daf2ece384418c748b | /basic/python/2_applica/3_scrapy/2_process/multiprocess.py | c422da3970f695c4de502a064be611270dd06869 | [] | no_license | Martians/code | fed5735b106963de79b18cc546624893665066cd | 653e2c595f4ac011aed7102ca26b842d4f6beaaf | refs/heads/master | 2021-07-11T19:22:24.858037 | 2019-02-22T13:04:55 | 2019-02-22T13:04:55 | 110,106,407 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,604 | py | # coding=utf-8
'''
Scheme: record URL info in a database and fetch with multiple local processes (lesson 3)
Reference: https://github.com/hezhen/spider-course-4/multi-process
## Install
pip install ConfigParser
~/.pyenv/versions/3.6.5/lib/python3.6$ cp configparser.py ConfigParser.py
pip install mysql-connector
## Question
1. How should encoding problems that occur during download be handled?
2. How should failed downloads be handled? Retries are needed so no information
   is lost (a sketch of one approach follows the imports below).
3. How to probe the maximum per-IP concurrency and fetch rate a site allows, and with what strategy?
'''
import hashlib
import os
import threading
from dbmanager import DBConnector
from lxml import etree
import time
import urllib3
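# Minimal sketch (not part of the original file) of one answer to "Question 2"
# in the docstring above: retry a failed fetch with exponential backoff before
# giving up. The function name and parameters are hypothetical and unrelated
# to dbmanager's API.
def fetch_with_retry(http, url, headers, retries=3, delay=1.0):
    for attempt in range(retries):
        try:
            return http.request("GET", url, headers=headers).data
        except Exception as err:
            print("retry {} for {}: {}".format(attempt + 1, url, err))
            time.sleep(delay * (2 ** attempt))  # back off: 1s, 2s, 4s, ...
    return b""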
'''
## Program notes
1. Multiple processes and threads fetch at the same time; the database layer resolves conflicts automatically.
2. Full resume-from-checkpoint is not supported:
   a task left in the "downloading" state will not run again after a restart.
3. Sites already fetched are recorded in the database, so no separate Bloom filter is needed.
4. By default the program wipes all data at startup; comment that out before running several processes together.
## Actual logic (the same for every thread)
1. Take a task URL: either one in the "downloading" state that has timed out, or one in the "new" state.
2. Set the task state to "downloading" and record a timeout.
3. Download the task, parse child URLs, and add them to the database.
4. Once the task fully completes, set its state to "done".
'''
dest_url = "http://www.mafengwo.cn"
#dest_url = "http://www.sohu.com"
class Crawling:
request_headers = {
'host': dest_url[7:],
'connection': "keep-alive",
'cache-control': "no-cache",
'upgrade-insecure-requests': "1",
'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36",
'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
'accept-language': "zh-CN,en-US;q=0.8,en;q=0.6"
}
max_level = 3
dir_name = "download"
if not os.path.exists(dir_name):
os.mkdir(dir_name)
def file_path(url):
return "{0}/{1}{2}".format(crawler.dir_name, url[7:].replace('/', '_'), "" if url.endswith(".html") else ".html")
''' Download and parse the current page, pushing the URLs found into the queue
'''
def get_page(url, index, depth):
try:
path = file_path(url)
print("try to download [{0}] - {1}".format(depth, url))
http = urllib3.PoolManager()
response = http.request("GET", url, headers = crawler.request_headers)
page = response.data
if page != b"":
file = open(path, "wb+")
file.write(page)
file.close()
else:
print("-- zero length for [{}]".format(url))
return
except urllib3.exceptions.HTTPError as err:
print("download {}, urllib3 err: {}".format(url, err))
return
except IOError as err:
print("download {}, IOError err: {}".format(url, err))
return
except Exception as err:
print("download {}, excpetion err: {}".format(url, err))
return
''' limit the crawl depth here
'''
if depth > crawler.max_level:
print("url [{}], exceed depth {}".format(url, depth))
else:
parse_page(url, index, depth, page)
dbmanager.fininsh(index)
return
def parse_page(url, index, depth, page):
try:
''' page is a bytes object
'''
html = etree.HTML(page.lower().decode('utf-8'))
hrefs = html.xpath(u"//a")
except Exception as err:
print("length: {}, parse {}, err: {}".format(len(page), url, err))
time.sleep(0.5)
return
for href in hrefs:
try:
if 'href' in href.attrib:
val = href.attrib['href']
if val.find('javascript') != -1:
continue
if val.startswith("http://") is False:
if val.startswith('/'):
val = dest_url + val
else:
continue
if val.startswith(dest_url) is False:
continue
if val[-1] == '/':
val = val[0:-1]
dbmanager.enqueue(val, depth + 1)
except ValueError as err:
print("parse {}, err: {}".format(url, err))
return
total_thread = 3
threads = []
start_time = time.time()
crawler = Crawling()
dbmanager = DBConnector(total_thread)
dbmanager.enqueue(dest_url, 0)
task = dbmanager.dequeue()
get_page(task['url'], task['index'], task['depth'])
CRAWL_DELAY = 0.6
while True:
task = dbmanager.dequeue()
if task is None:
for t in threads:
t.join()
break
while True:
for t in threads:
if not t.is_alive():
threads.remove(t)
if len(threads) >= total_thread:
time.sleep(0.6)
continue
try:
t = threading.Thread(target=get_page, name=None,
args=(task['url'], task['index'], task['depth']))
t.setDaemon(True)
t.start()
threads.append(t)
time.sleep(CRAWL_DELAY)
break
except:
print("fail to start thread")
exit(0)
| [
"[email protected]"
] | |
47c7f38df6616f44f27f20887988c3be622a9975 | 0a21d5e72b4afbabcbf4ec0d65ea84cd8d6159c7 | /Contest/weekly-contest-179/A.py | ebd41441c424c583e8fdc4369c9093db724bcbc3 | [] | no_license | LuoJiaji/LeetCode-Demo | 193f27ba36c93f9030435874a145c63a81d3c0f8 | 78e6e87c01848a1dc71b7dc0716029ece5f35863 | refs/heads/master | 2020-06-24T03:03:33.366537 | 2020-04-05T02:09:41 | 2020-04-05T02:09:41 | 198,830,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | class Solution(object):
def generateTheString(self, n):
"""
:type n: int
:rtype: str
"""
ans = ''
if n % 2 == 1:
ans = 'a'*n
else:
ans = 'a'*(n-1) + 'b'
return ans
res = Solution().generateTheString(4)
print(res)
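# e.g. n=4 yields 'aaab' (three 'a' and one 'b' -- every character count is odd);
# for odd n the string 'a'*n already satisfies the constraint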
| [
"[email protected]"
] | |
61a48c806bd338b16b2b38e7ad2975e6901b5b30 | a7c0cc71e6da4615eca2c3d75117dad5b8dce8d3 | /export.py | 177102ba06cd51436420f1940dfd4e0c28205977 | [
"Apache-2.0"
] | permissive | CTFd/CTFd | 4b75207aeea3ed8d761cc6269c27a070693ab3ec | d8f0b9e602fca109cabe1895e847d39a46ce7429 | refs/heads/master | 2023-09-01T19:19:19.767862 | 2023-08-29T18:46:53 | 2023-08-29T18:46:53 | 28,681,142 | 4,593 | 2,273 | Apache-2.0 | 2023-09-13T18:24:37 | 2015-01-01T05:36:55 | Python | UTF-8 | Python | false | false | 770 | py | from CTFd import create_app
from CTFd.utils import config
from CTFd.utils.exports import export_ctf
import datetime
import sys
import shutil
app = create_app()
with app.app_context():
print(
"This file will be deleted in CTFd v4.0. Switch to using `python manage.py export_ctf`"
)
backup = export_ctf()
if len(sys.argv) > 1:
with open(sys.argv[1], "wb") as target:
shutil.copyfileobj(backup, target)
else:
ctf_name = config.ctf_name()
day = datetime.datetime.now().strftime("%Y-%m-%d_%T")
full_name = "{}.{}.zip".format(ctf_name, day)
with open(full_name, "wb") as target:
shutil.copyfileobj(backup, target)
print("Exported {filename}".format(filename=full_name))
| [
"[email protected]"
] | |
41d081c5e295fcddccd0310aa469c46bfcbe51c5 | a5a81bc468d2b9d6a87c49701441cf6dacdaf039 | /tester/grabAndPlotLimits.py | 61aedf06e2b49f6165e7a313ba5233e52cf45152 | [] | no_license | truggles/CombineHarvester | 396833e57afe2a203c9c4ee5117b87c3397eb795 | 8721ef6752006999c014a86bb13e81402cda0662 | refs/heads/master | 2020-04-05T08:19:11.877201 | 2016-02-01T14:39:17 | 2016-02-01T14:39:17 | 50,054,454 | 1 | 1 | null | 2016-01-20T19:26:20 | 2016-01-20T19:26:20 | null | UTF-8 | Python | false | false | 3,210 | py | import ROOT
import array
from ROOT import gPad
masses1 = [80, 90, 100, 110, 120, 130, 140, 160, 180, 600, 900, 1000, 1200, 1500, 2900, 3200]
ROOT.gROOT.SetBatch(True)
def plotLimits( signal, channel ) :
#if channel == 'em' or channel == 'tt' or channel == 'mt' : masses = masses1
masses = masses1
#limits = [[0 for x in range(5)] for x in range( len(masses) )]
limits = [[0 for x in range( len(masses) )] for x in range(5)]
#limits = [array.array( 'd', []) for x in range(5)]
#print limits
mCnt = 0
for mass in masses :
if channel != 'll' :
f = ROOT.TFile('%s/%s/%i/higgsCombineTest.Asymptotic.mH%i.root' % (signal, channel, mass, mass), 'r')
else :
f = ROOT.TFile('%s/higgsCombineLL.Asymptotic.mH%i.root' % (signal, mass), 'r')
t = f.Get('limit')
print "Channel: ",channel," Mass: ",mass
i = 0
for row in t :
if row.quantileExpected == -1 : continue
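            # the five remaining rows per mass point are assumed to arrive ordered
            # by quantileExpected (-2 sigma, -1 sigma, median, +1 sigma, +2 sigma);
            # quantileExpected == -1 marks the observed limit and is skipped above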
#print "Sig: ",row.quantileExpected," limit: ",row.limit
limits[i][mCnt] = row.limit
#limits[i].append( row.limit )
i += 1
mCnt += 1
n = len(masses)
neg2 = ROOT.TGraph( len(masses))
neg1 = ROOT.TGraph( len(masses))
med = ROOT.TGraph( len(masses))
pos1 = ROOT.TGraph( len(masses))
pos2 = ROOT.TGraph( len(masses))
midShade = ROOT.TGraph( len(masses)*2)
outShade = ROOT.TGraph( len(masses)*2)
for i in range( len(masses) ) :
neg2.SetPoint( i, masses[i], limits[0][i] )
neg1.SetPoint( i, masses[i], limits[1][i] )
med.SetPoint( i, masses[i], limits[2][i] )
pos1.SetPoint( i, masses[i], limits[3][i] )
pos2.SetPoint( i, masses[i], limits[4][i] )
midShade.SetPoint( i, masses[i],limits[3][i] )
midShade.SetPoint( n+i, masses[n-i-1],limits[1][n-i-1] )
outShade.SetPoint( i, masses[i],limits[4][i] )
outShade.SetPoint( n+i, masses[n-i-1],limits[0][n-i-1] )
outShade.SetFillStyle(1001)
outShade.SetFillColor(5)
midShade.SetFillStyle(1001)
midShade.SetFillColor(3)
c2 = ROOT.TCanvas( 'c2', 'c2', 600, 600 )
p1 = ROOT.TPad( 'p1', 'p1', 0, 0, 1, 1)
p1.Draw()
p1.cd()
med.SetLineStyle(2)
outShade.GetXaxis().SetTitle('Visible Mass (GeV)')
outShade.GetXaxis().SetTitleOffset( outShade.GetXaxis().GetTitleOffset() * 1.3 )
outShade.GetYaxis().SetTitle('95% CL limit on #sigma(gg#phi) x BR(#phi#rightarrow #tau#tau) [pb]')
outShade.GetYaxis().SetTitleOffset( outShade.GetYaxis().GetTitleOffset() * 1.3 )
outShade.SetTitle('Expected Limits A/H #rightarrow #tau#tau: Channel %s' % channel)
outShade.Draw('Af')
midShade.Draw('f')
med.Draw('l')
p1.SetLogy()
p1.SetLogx()
c2.SaveAs('/afs/cern.ch/user/t/truggles/www/limits/Limits_%s_%s.png' % (signal, channel) )
print "PNG files saved here: /afs/cern.ch/user/t/truggles/www/limits/"
print "They are visible online at: http://truggles.web.cern.ch/truggles/limits/"
channels = ['em', 'tt', 'mt', 'll']
signals = ['ggH', 'bbH']
for signal in signals :
for channel in channels :
plotLimits( signal, channel )
| [
"[email protected]"
] | |
9af6e42d83edadd6a400d931ac653f5aef03369c | c4cde4e3538b455e558f8896f90f05ea39408a4d | /helpers/rovershare.py | f6da69a9b6a767b6821333845c72ca02aac1d2d7 | [] | no_license | sumsted/rover | 0380b26b3eb0bf6426d28221f077169bb740456d | d658c35d45cf887b23e299d3ff570d9cdc856ab2 | refs/heads/master | 2021-01-01T20:01:37.512806 | 2017-12-04T12:16:04 | 2017-12-04T12:16:04 | 98,741,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,941 | py | from datetime import datetime
import json
import time
import redis
from helpers import settings
# todo make singleton
class RoverShare:
host = settings.share.host
port = settings.share.port
command_queue_key = settings.share.command_queue_key
sense_key = settings.share.sense_key
sense_queue_key = settings.share.sense_queue_key
encoders_key = settings.share.encoders_key
encoders_queue_key = settings.share.encoders_queue_key
ultrasonic_key = settings.share.ultrasonic_key
ultrasonic_queue_key = settings.share.ultrasonic_queue_key
gps_key = settings.share.gps_key
gps_queue_key = settings.share.gps_queue_key
led_queue_key = settings.share.led_queue_key
status_list_key = settings.share.status_list_key
map_hash_key = settings.share.map_hash_key
def __init__(self):
self.r = redis.Redis(RoverShare.host, RoverShare.port)
def clear_all_queues(self):
self.clear_commands()
self.clear_encoders_queue()
self.clear_led_queue()
self.clear_sense_queue()
self.clear_ultra_queue()
self.clear_gps_queue()
def push_all_end(self):
self.push_command('end')
self.push_encoders('end')
self.push_led('end')
self.push_sense('end')
self.push_ultrasonic('end')
def delay(self, duration=None):
time.sleep(duration or settings.share.delay)
###############################
# used by main controller
def clear_commands(self):
return self.r.delete(RoverShare.command_queue_key)
def push_command(self, command, speed=None, heading=None, distance=None, angle=None):
command = {'command': command, 'speed': speed, 'heading': heading, 'distance': distance, 'angle': angle}
serial_json = json.dumps(command)
return self.r.lpush(RoverShare.command_queue_key, serial_json)
def pop_command(self):
try:
serial_json = self.r.rpop(RoverShare.command_queue_key).decode()
return json.loads(serial_json)
except AttributeError:
return None
def pull_list_n_command(self, num=10):
try:
return [x.decode() for x in self.r.lrange(RoverShare.command_queue_key, 0, num - 1)]
except AttributeError:
return None
###############################
# used by sense hat controller
def clear_sense_queue(self):
return self.r.delete(RoverShare.sense_queue_key)
def push_sense(self, command, heading=None, correction=None):
command = {'command': command, 'heading': heading, 'correction': correction}
serial_json = json.dumps(command)
result = self.r.lpush(RoverShare.sense_queue_key, serial_json)
self.delay()
return result
def pop_sense(self):
try:
serial_json = self.r.rpop(RoverShare.sense_queue_key).decode()
return json.loads(serial_json)
except AttributeError:
return None
###############################
# used by encoders controller
def clear_encoders_queue(self):
return self.r.delete(RoverShare.encoders_queue_key)
def push_encoders(self, command, parameter):
command = {'command': command, 'parameter': parameter}
serial_json = json.dumps(command)
result = self.r.lpush(RoverShare.encoders_queue_key, serial_json)
self.delay()
return result
def pop_encoders(self):
try:
serial_json = self.r.rpop(RoverShare.encoders_queue_key).decode()
return json.loads(serial_json)
except AttributeError:
return None
###############################
# used by led controller
def clear_led_queue(self):
return self.r.delete(RoverShare.led_queue_key)
def push_led(self, command, parameter):
command = {'command': command, 'parameter': parameter}
serial_json = json.dumps(command)
result = self.r.lpush(RoverShare.led_queue_key, serial_json)
self.delay()
return result
def pop_led(self):
try:
serial_json = self.r.rpop(RoverShare.led_queue_key).decode()
return json.loads(serial_json)
except AttributeError:
return None
###############################
# used by ultra controller
def clear_ultra_queue(self):
return self.r.delete(RoverShare.ultrasonic_queue_key)
def push_ultrasonic(self, command, parameter):
command = {'command': command, 'parameter': parameter}
serial_json = json.dumps(command)
result = self.r.lpush(RoverShare.ultrasonic_queue_key, serial_json)
self.delay()
return result
def pop_ultrasonic(self):
try:
serial_json = self.r.rpop(RoverShare.ultrasonic_queue_key).decode()
return json.loads(serial_json)
except AttributeError:
return None
###############################
# used by gps controller
def clear_gps_queue(self):
return self.r.delete(RoverShare.gps_queue_key)
def push_gps(self, command, destination_lat=None, destination_lon=None, destination_three_words=None):
command = {'command': command, 'destination_lat': destination_lat, 'destination_lon': destination_lon,
'destination_three_words': destination_three_words}
serial_json = json.dumps(command)
result = self.r.lpush(RoverShare.gps_queue_key, serial_json)
self.delay()
return result
def pop_gps(self):
try:
serial_json = self.r.rpop(RoverShare.gps_queue_key).decode()
return json.loads(serial_json)
except AttributeError:
return None
###############################
# sense hat state
def update_sense(self, sense):
serial_json = json.dumps(sense)
return self.r.set(RoverShare.sense_key, serial_json)
def get_sense(self):
try:
serial_json = self.r.get(RoverShare.sense_key).decode()
return json.loads(serial_json)
except AttributeError:
return None
###############################
# ultrasonic state
def update_ultrasonic(self, ultrasonic):
serial_json = json.dumps(ultrasonic)
return self.r.set(RoverShare.ultrasonic_key, serial_json)
def get_ultrasonic(self):
try:
serial_json = self.r.get(RoverShare.ultrasonic_key).decode()
return json.loads(serial_json)
except AttributeError:
return None
###############################
# gps state
def update_gps(self, gps):
serial_json = json.dumps(gps)
return self.r.set(RoverShare.gps_key, serial_json)
def get_gps(self):
try:
serial_json = self.r.get(RoverShare.gps_key).decode()
return json.loads(serial_json)
except AttributeError:
return None
###############################
# encoder state
def update_encoders(self, encoders):
serial_json = json.dumps(encoders)
return self.r.set(RoverShare.encoders_key, serial_json)
def get_encoders(self):
try:
serial_json = self.r.get(RoverShare.encoders_key).decode()
return json.loads(serial_json)
except AttributeError:
return None
###############################
# status list
def clear_status(self):
return self.r.delete(RoverShare.status_list_key)
# todo make status a structure that includes seconds as id
def push_status(self, status):
now = datetime.now().strftime("%Y%m%d.%H%M.%S.%f")
msg = '%s %s' % (now, status or '** chirp chirp **')
print(msg)
return self.r.lpush(RoverShare.status_list_key, msg)
# todo then add a way to pull since passing in id and limit to 100
def pull_last_status(self):
try:
return self.r.lindex(RoverShare.status_list_key, 0).decode()
except AttributeError:
return None
def pull_list_n_status(self, num=10):
try:
return [x.decode() for x in self.r.lrange(RoverShare.status_list_key, 0, num - 1)]
except AttributeError:
return None
###############################
# map hash
def clear_map(self):
return self.r.delete(RoverShare.map_hash_key)
def add_map(self, x, y, val):
key = '%08d_%08d' % (x, y)
serial_json = json.dumps(val)
return self.r.hset(RoverShare.map_hash_key, key, serial_json)
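    # Example (added for clarity): add_map(3, 7, v) stores v under the hash
    # field '00000003_00000007'; get_map() recovers x and y from the fixed-width
    # 8-digit slices on either side of the underscore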
def get_map(self):
try:
hm = self.r.hgetall(RoverShare.map_hash_key)
m = {}
for k, v in hm.items():
xy = k.decode()
x = int(xy[0:8])
y = int(xy[9:17])
map_val = v.decode()
try:
m[x][y] = json.loads(map_val)
except KeyError:
m[x] = {}
m[x][y] = json.loads(map_val)
return m
except AttributeError:
return None
if __name__ == '__main__':
# push 3 commands
rs = RoverShare()
# print('command')
# print(rs.clear_commands())
# print(rs.push_command('forward', 50, 40, 30))
# print(rs.push_command('stop', 0))
# print(rs.push_command('rotate', 45, 80, 20))
#
# print(rs.pop_command())
# print(rs.pop_command())
# print(rs.pop_command())
print('sensors')
print(rs.update_sense(
{
'temperature': 0.0,
'pressure': 0.0,
'humidity': 0.0,
'temperature_base': 0.0,
'pressure_base': 0.0,
'humidity_base': 0.0,
'temperature_delta': 0.0,
'pressure_delta': 0.0,
'humidity_delta': 0.0,
'pitch': 0.0,
'roll': 0.0,
'yaw': 0.0,
'yaw_delta': 0.0,
'pitch_delta': 0.0,
'roll_delta': 0.0,
'pitch_base': 0.0,
'roll_base': 0.0,
'yaw_base': 0.0,
'direction': 0.0,
'direction_delta': 0.0,
'direction_base': 0.0,
'direction_deviation': 0.0
}
))
print(rs.get_sense())
print('encoders')
print(rs.update_encoders({
'ticks': 0,
'ticks_base': 0,
'ticks_delta': 0,
'distance': 0
}))
print('ultrasonic')
print(rs.update_ultrasonic({
'left': 0.0,
'lower': 0.0,
'front': 70.0,
'right': 0.0,
'lower_deviation': 0.0
}))
# print('status')
# print(rs.clear_status())
# for i in range(20):
# print(rs.push_status("status %d" % i))
#
# rs.pull_last_status()
# for s in rs.pull_list_n_status(10):
# print(s)
#
# rs.pull_last_status()
# for s in rs.pull_list_n_status(5):
# print(s)
| [
"[email protected]"
] | |
14b7aae7767aad49409ab0944c50817bd8fbdaee | 70ab3ee89cafa7f4882a6944e6ec335210875d30 | /run_single_basketball_leyou.py | 6ccf32d3874091547ad3bd0b9bab6d23628562ba | [] | no_license | SXL5519/caipiao1.0_1 | 3fa1fecd00576c36f37e6af21f0fe9b326289a6a | 2db4387c5bad536cce99417041fbd34a699aa2cc | refs/heads/master | 2021-10-10T07:39:45.127902 | 2019-01-08T09:32:47 | 2019-01-08T09:32:59 | 164,613,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | import unittest,time
from HTMLTestRunner_jpg import HTMLTestRunner
from modle.function import send_mail,screen_shot,logfile
case_dir = "./test_case/Leyou"
pattern="*_single_basketball_*.py"
discover = unittest.defaultTestLoader.discover(case_dir,pattern)
logfile()
if __name__ =='__main__':
# format the date/time string
times = time.strftime("%Y%m%d%H%M%S")
report_file="./report/leyou/LeyouCP_single_basketball_case_"+times+".html"
fp = open(report_file,"wb")
runner = HTMLTestRunner(stream=fp,
title="乐优竞蓝单关——自动化测试报告1",
description="运行环境:win7 Chrome")
try:
runner.run(discover)
except:
print('运行列表错误')
finally:
fp.close()
send_mail(report_file) | [
"[email protected]"
] | |
52e5f02be0348d286eaa45d1457e68ff9698b3b6 | d83118503614bb83ad8edb72dda7f449a1226f8b | /src/dprj/platinumegg/app/cabaret/views/mgr/kpi/battleeventjoindaily.py | 579ff996a58470e10b0a683fa40021637adbad39 | [] | no_license | hitandaway100/caba | 686fe4390e182e158cd9714c90024a082deb8c69 | 492bf477ac00c380f2b2758c86b46aa7e58bbad9 | refs/heads/master | 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | # -*- coding: utf-8 -*-
from platinumegg.app.cabaret.views.mgr.kpi.base import KpiHandler
from platinumegg.app.cabaret.util.api import BackendApi
import settings
from platinumegg.app.cabaret.models.battleevent.BattleEvent import BattleEventMaster
from platinumegg.app.cabaret.views.mgr.model_edit import AppModelChoiceField
import os
from django import forms
class Handler(KpiHandler):
"""バトルイベント参加率.
"""
class Form(forms.Form):
eventid = AppModelChoiceField(BattleEventMaster, required=False, label=u'イベントID')
def getTitle(self):
return u'バトルイベント日別参加数'
def getKpiName(self):
eventid = self.request.get("eventid")
if not eventid or not str(eventid).isdigit():
config = BackendApi.get_current_battleeventconfig(self.getModelMgr(), using=settings.DB_READONLY)
eventid = config.mid
self.__eventid = eventid
return 'battleeventjoindaily_%03d' % int(eventid)
def getOptionalForm(self):
form = Handler.Form()
form.eventid = self.__eventid
return form
def makeFileDataTable(self, dirpath, filelist):
tabledata = []
for filedata in filelist:
filepath = os.path.join(dirpath, filedata['filename'])
f = None
data = None
try:
f = open(filepath)
data = f.read()
f.close()
except:
if f:
f.close()
raise
tabledata.append(data.split(','))
return tabledata
def main(request):
return Handler.run(request)
| [
"[email protected]"
] | |
58c7ff2a6067216bc0386f2a43ff30ae25929f09 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/ankiandroid/testcase/interestcases/testcase1_3_0_3_3_004.py | a03762f34cada6a568fc0fd1d438871d03d00428 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,309 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.ichi2.anki',
'appActivity' : 'com.ichi2.anki.IntentHandler',
'resetKeyboard' : True,
'androidCoverage' : 'com.ichi2.anki/com.ichi2.anki.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
return
def scrollToFindElement(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1) :
for temp in elements :
if temp.get_attribute("enabled") == "true" :
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.55, 0.5, 0.2)
else :
return element
for i in range(0, 4, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1):
for temp in elements:
if temp.get_attribute("enabled") == "true":
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.2, 0.5, 0.55)
else :
return element
return
def scrollToClickElement(driver, str) :
element = scrollToFindElement(driver, str)
if element is None :
return
else :
element.click()
def clickInList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
element.click()
else :
if checkWindow(driver) :
driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
def typeText(driver, value) :
element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
element.clear()
element.send_keys(value)
enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
if (enterelement is None) :
if checkWindow(driver):
driver.press_keycode(4)
else :
enterelement.click()
def checkWindow(driver) :
dsize = driver.get_window_size()
nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size
if dsize['height'] > nsize['height']:
return True
else :
return False
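# checkWindow (above) reports True when the root FrameLayout is shorter than
# the full screen, i.e. a dialog or popup window is likely open; callers
# respond by sending keycode 4 (back) to dismiss it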
# preference setting and exit
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
os.popen("adb shell am start -n com.ichi2.anki/com.ichi2.anki.Preferences -a test")
scrollToClickElement(driver, "new UiSelector().text(\"AnkiDroid directory\")")
typeText(driver,"random")
driver.press_keycode(4)
time.sleep(2)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"0_3_3_004_pre\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
# testcase004
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Navigate up\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Statistics\")", "new UiSelector().className(\"android.widget.CheckedTextView\").instance(2)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"REVIEW COUNT\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"REVIEW TIME\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"REVIEW COUNT\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"FORECAST\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"OVERVIEW\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"FORECAST\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
swipe(driver, 0.5, 0.2, 0.5, 0.8)
element = getElememtBack(driver, "new UiSelector().text(\"Custom study session\")", "new UiSelector().className(\"android.widget.CheckedTextView\").instance(1)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Question\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"Sort field\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Filter suspended\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"0 cards shown\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Question\")", "new UiSelector().className(\"android.widget.TextView\").instance(1)")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"Question\")", "new UiSelector().className(\"android.widget.CheckedTextView\").instance(4)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Navigate up\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"3_004\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'com.ichi2.anki'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
| [
"[email protected]"
] | |
7aac97bd18585740470971643e94c41583982cc6 | b096dbccb31d3bd181259e930816964c71034ff4 | /tests/test_base/test_signature.py | 44e7721141d1ad15178fb3820f4cc6706d18aad4 | [] | no_license | cosphere-org/lily | b68f95720381a69ce0caa5f47fca461b3f5242a9 | f6a8281e10eedcccb86fcf3a26aaf282d91f70f4 | refs/heads/master | 2023-02-18T13:49:03.568989 | 2022-06-30T09:58:23 | 2022-06-30T09:58:23 | 175,789,374 | 6 | 0 | null | 2023-02-15T18:49:10 | 2019-03-15T09:28:05 | Python | UTF-8 | Python | false | false | 3,362 | py |
from datetime import datetime
import string
from unittest.mock import call
from itsdangerous import SignatureExpired, URLSafeTimedSerializer
import pytest
from lily.base import signature
from lily.base.events import EventFactory
#
# sign_payload
#
def test_sign_payload__calls_dumps_correctly(mocker):
dumps_mock = mocker.patch.object(URLSafeTimedSerializer, 'dumps')
signature.sign_payload(
email='[email protected]',
payload='WHATEVER',
secret='personal_secret',
salt='salt_me')
assert dumps_mock.call_args_list == [
call(['[email protected]', 'WHATEVER'], salt='salt_me')]
def test_sign_payload__returns_correct_code(mocker):
mocker.patch.object(
URLSafeTimedSerializer, 'dumps').return_value = 'g67g6f7g'
encoded_payload = signature.sign_payload(
email='[email protected]',
payload='WAT',
secret='personal_secret',
salt='salt_me')
assert encoded_payload == 'g67g6f7g'
#
# verify_payload
#
def test_verify_payload__make_the_right_calls(mocker):
loads_mock = mocker.patch.object(URLSafeTimedSerializer, 'loads')
loads_mock.return_value = ('hi@there', 'HI')
payload = signature.verify_payload(
encoded_payload='rubishtokenwhereareyou',
secret='my_secret',
salt='salt_me',
signer_email='hi@there',
max_age=24)
assert payload == 'HI'
assert loads_mock.call_count == 1
def test_verify_payload__different_secrets_for_encoding_and_decoding():
code = signature.sign_payload(
'[email protected]', 'NO!', 'secret123', 'salt')
with pytest.raises(EventFactory.BrokenRequest) as e:
assert signature.verify_payload(
code, 'my_secret', 'salt', 'hi@there', 120)
assert e.value.event == 'PAYLOAD_VERIFIED_AS_BROKEN'
def test_verify_payload__email_mismatch(mocker):
code = signature.sign_payload(
'[email protected]', 'NO!', 'secret123', 'salt')
with pytest.raises(EventFactory.BrokenRequest) as e:
signature.verify_payload(
code, 'secret123', 'salt', 'hello@there', 120)
assert e.value.event == 'PAYLOAD_VERIFIED_AS_BROKEN_MISMATCHING_EMAILS'
def test_verify_payload__recognizes_expired_token(mocker):
mocker.patch.object(
URLSafeTimedSerializer,
'loads'
).side_effect = SignatureExpired(
'error occured',
date_signed=datetime(2013, 1, 15, 6, 48))
with pytest.raises(EventFactory.BrokenRequest) as e:
signature.verify_payload(
'what.ever', 'personal_secret', 'salt', '[email protected]', 24)
assert e.value.event == 'PAYLOAD_VERIFIED_AS_EXPIRED'
#
# create_secret
#
def test_create_secret__creates_unique_secrets():
secrets = [signature.create_secret() for i in range(1000)]
assert len(secrets) == len(set(secrets))
def test_create_secret__safe_secret():
def assert_is_secret_safe(secret):
assert len(secret) == 64
assert len(set(string.ascii_uppercase) & set(secret)) > 0
assert len(set(string.ascii_lowercase) & set(secret)) > 0
assert len(set(string.digits) & set(secret)) > 0
assert len(set(string.punctuation) & set(secret)) > 0
# uniqueness of characters
assert len(set(secret)) > 30
for i in range(1000):
assert_is_secret_safe(signature.create_secret())
| [
"[email protected]"
] | |
605b8ac68ca36f19a6c83959423f5e17545569cc | 6d9fbe6e6a2abfd8455e92f6dba67a5f02d87f41 | /lib/phonenumbers/shortdata/region_RW.py | 34419d07f20772809e8c065ece534282a63aab08 | [] | no_license | JamesBrace/InfluenceUWebLaunch | 549d0b48ff3259b139cb891a19cb8b5382ffe2c8 | 332d25940e4b1b45a7a2a8200f77c8413543b199 | refs/heads/master | 2021-09-04T04:08:47.594900 | 2018-01-15T16:49:29 | 2018-01-15T16:49:29 | 80,778,825 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | """Auto-generated file, do not edit by hand. RW metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_RW = PhoneMetadata(id='RW', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_number_pattern='\\d{3}', possible_length=(3,)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(),
emergency=PhoneNumberDesc(national_number_pattern='112', possible_number_pattern='\\d{3}', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='112', possible_number_pattern='\\d{3}', example_number='112', possible_length=(3,)),
standard_rate=PhoneNumberDesc(),
carrier_specific=PhoneNumberDesc(),
short_data=True)
| [
"[email protected]"
] | |
b3293d311675fab17255febd8777666c78c11a7c | 42d3e676cb9da325712dd54001a8fdda1661d1e1 | /OutOfAfrica.py | 8459020897373768d0f5f6ef2277657bb5c6f9b3 | [] | no_license | janaobsteter/msprime | 5f139f2b101e7246e53a6d0baaff1f28cf1dfa6c | e96ae69720100d544f69384d19a28fcca07a9c1d | refs/heads/master | 2022-08-03T11:35:51.759317 | 2020-05-25T07:59:49 | 2020-05-25T07:59:49 | 266,719,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,470 | py | import math
import msprime
# First we set out the maximum likelihood values of the various parameters
# given in Table 1.
N_A = 7300
N_B = 2100
N_AF = 12300
N_EU0 = 1000
N_AS0 = 510
# Times are provided in years, so we convert into generations.
generation_time = 25
T_AF = 220e3 / generation_time  # time of the African population size change
T_B = 140e3 / generation_time  # divergence between West African and Eurasian populations
T_EU_AS = 21.2e3 / generation_time  # divergence time between European and East Asian populations
# We need to work out the starting (diploid) population sizes based on
# the growth rates provided for these two populations
r_EU = 0.004
r_AS = 0.0055
N_EU = N_EU0 / math.exp(-r_EU * T_EU_AS)
N_AS = N_AS0 / math.exp(-r_AS * T_EU_AS)
# Migration rates during the various epochs.
m_AF_B = 25e-5
m_AF_EU = 3e-5
m_AF_AS = 1.9e-5
m_EU_AS = 9.6e-5
# Population IDs correspond to their indexes in the population
# configuration array. Therefore, we have 0=YRI, 1=CEU and 2=CHB
# initially.
population_configurations = [
msprime.PopulationConfiguration(
sample_size=0, initial_size=N_AF),
msprime.PopulationConfiguration(
sample_size=1, initial_size=N_EU, growth_rate=r_EU),
msprime.PopulationConfiguration(
sample_size=1, initial_size=N_AS, growth_rate=r_AS)
]
migration_matrix = [
[ 0, m_AF_EU, m_AF_AS],
[m_AF_EU, 0, m_EU_AS],
[m_AF_AS, m_EU_AS, 0],
]
demographic_events = [
# CEU and CHB merge into B with rate changes at T_EU_AS
msprime.MassMigration(
time=T_EU_AS, source=2, destination=1, proportion=1.0),
msprime.MigrationRateChange(time=T_EU_AS, rate=0),
msprime.MigrationRateChange(
time=T_EU_AS, rate=m_AF_B, matrix_index=(0, 1)),
msprime.MigrationRateChange(
time=T_EU_AS, rate=m_AF_B, matrix_index=(1, 0)),
msprime.PopulationParametersChange(
time=T_EU_AS, initial_size=N_B, growth_rate=0, population_id=1),
# Population B merges into YRI at T_B
msprime.MassMigration(
time=T_B, source=1, destination=0, proportion=1.0),
# Size changes to N_A at T_AF
msprime.PopulationParametersChange(
time=T_AF, initial_size=N_A, population_id=0)
]
# Use the demography debugger to print out the demographic history
# that we have just described.
dd = msprime.DemographyDebugger(
population_configurations=population_configurations,
migration_matrix=migration_matrix,
demographic_events=demographic_events)
dd.print_history()
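# A usage sketch for completeness: running a simulation with this demography
# via the msprime 0.x simulate() API. The sequence length, recombination
# rate, mutation rate and seed below are illustrative assumptions, not
# values from the model; uncomment to run.
# tree_sequence = msprime.simulate(
#     population_configurations=population_configurations,
#     migration_matrix=migration_matrix,
#     demographic_events=demographic_events,
#     length=1e6,
#     recombination_rate=2e-8,
#     mutation_rate=2e-8,
#     random_seed=42)
# print("simulated", tree_sequence.num_trees, "marginal trees")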
| [
"[email protected]"
] | |
0f506c20beb65be6f34ecb5fcb1bcacae5c97864 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_Class2922.py | 8052c4b482d939ce0e7613f8c75fb9223b0a120a | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,299 | py | # qubit number=4
# total number=42
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=35
prog.cz(input_qubit[0],input_qubit[3]) # number=36
prog.h(input_qubit[3]) # number=37
prog.h(input_qubit[3]) # number=22
prog.cx(input_qubit[0],input_qubit[3]) # number=32
prog.cx(input_qubit[0],input_qubit[3]) # number=39
prog.x(input_qubit[3]) # number=40
prog.cx(input_qubit[0],input_qubit[3]) # number=41
prog.cx(input_qubit[0],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=19
prog.cz(input_qubit[0],input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=21
prog.z(input_qubit[3]) # number=10
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.h(input_qubit[0]) # number=26
prog.cz(input_qubit[1],input_qubit[0]) # number=27
prog.h(input_qubit[0]) # number=28
prog.z(input_qubit[1]) # number=24
prog.cx(input_qubit[3],input_qubit[2]) # number=38
prog.h(input_qubit[0]) # number=29
prog.cz(input_qubit[1],input_qubit[0]) # number=30
prog.h(input_qubit[0]) # number=31
prog.h(input_qubit[1]) # number=18
prog.rx(2.8902652413026093,input_qubit[2]) # number=13
prog.y(input_qubit[1]) # number=11
prog.y(input_qubit[1]) # number=12
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2922.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
f5bdb35b232eabf76f6685808f87bd5839a6ca32 | 392651974f2e6d006618b64aac6cc2613ba01883 | /accounts/admin.py | 5601dfa75500f8614dcb52e35fbc4181ec32ee53 | [
"MIT",
"Apache-2.0"
] | permissive | davidjrichardson/uwcs-zarya | 654266dc67deaacd6dba2e390c6dbc85a9525d83 | ab0a94540bf496531dd6b13fe7d313badc4a353c | refs/heads/master | 2022-09-29T12:58:14.542328 | 2021-03-10T17:07:06 | 2021-03-10T21:24:29 | 63,612,819 | 7 | 6 | MIT | 2022-08-30T20:57:36 | 2016-07-18T15:04:54 | Python | UTF-8 | Python | false | false | 1,088 | py | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from accounts.models import CompsocUser, ShellAccount, DatabaseAccount, ExecPlacement, ExecPosition
class CompsocUserInline(admin.StackedInline):
model = CompsocUser
class ShellAccountInline(admin.StackedInline):
model = ShellAccount
class DatabaseAccountInline(admin.StackedInline):
model = DatabaseAccount
class CompsocUserAdmin(BaseUserAdmin):
inlines = [
CompsocUserInline,
ShellAccountInline,
DatabaseAccountInline
]
def nickname(self, obj):
return CompsocUser.objects.get(user=obj).nickname
CompsocUserAdmin.list_display = ('username', 'nickname', 'email', 'first_name', 'last_name', 'is_staff')
CompsocUserAdmin.search_fields = ('username', 'compsocuser__nickname', 'first_name', 'last_name', 'email')
admin.site.unregister(get_user_model())
admin.site.register(get_user_model(), CompsocUserAdmin)
admin.site.register(ExecPosition)
admin.site.register(ExecPlacement)
| [
"[email protected]"
] | |
77764321c46b85ea547f1665e7b53ed0b2e9e1d9 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/468/usersdata/301/111541/submittedfiles/Av2_Parte3.py | a5ede4cca77b944784daf1e730e8b2f44e7d0ec2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | # -*- coding: utf-8 -*-
a = []
m = int(input('the number of lists: '))
for i in range(0, m, 1):
    x = int(input('enter the elements: '))
    a.append(x)
c = sum(a)
b = len(a)
print(a)
| [
"[email protected]"
] | |
d16a63615e60bf2d1563cbf42caf63ac028d8eb7 | 56782846ce12a4aa65c0cdd41231f82fb09bb2e2 | /python/14940.py | 08e7b50de71974b1f76b31e33f87fbaefb6d109c | [] | no_license | Zigje9/Algorithm_study | 2eb7255ffae734954944e549ccaab7c4573c3b99 | a6c526b42f8c0e01daf1d699f55c9b0a78741be8 | refs/heads/master | 2023-08-14T02:08:26.395265 | 2021-09-23T01:10:31 | 2021-09-23T01:10:31 | 256,192,531 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | import sys
from collections import deque
N, M = map(int, sys.stdin.readline().split())
move_x = [1, 0, -1, 0]
move_y = [0, -1, 0, 1]
board = []
for _ in range(N):
board.append(list(map(int, sys.stdin.readline().split())))
def get_start():
for i in range(N):
for j in range(M):
if board[i][j] == 2:
return [i, j]
visit = [[0]*M for _ in range(N)]
start_x, start_y = get_start()
def bfs():
    # Breadth-first search from the start cell (marked 2), recording the
    # shortest distance to every reachable land cell (marked 1).
    q = deque()
    q.append([start_x, start_y, 0])
    visit[start_x][start_y] = 2e9  # temporary non-zero marker; reset to 0 after the search
while q:
now_x, now_y, dist = q.popleft()
for i in range(4):
next_x = now_x + move_x[i]
next_y = now_y + move_y[i]
if 0 <= next_x < N and 0 <= next_y < M:
if visit[next_x][next_y] == 0 and board[next_x][next_y] == 1:
visit[next_x][next_y] = dist+1
q.append([next_x, next_y, dist+1])
bfs()
# Land cells (value 1) that BFS never reached are reported as -1.
for i in range(N):
    for j in range(M):
        if visit[i][j] == 0 and board[i][j] == 1:
            visit[i][j] = -1
visit[start_x][start_y] = 0  # restore the start cell's distance to 0
for line in visit:
print(" ".join(map(str, line)))
| [
"[email protected]"
] | |
0f505cac33fd32fb549df9455005938e8b0736e7 | 86e45d6f82210888601a8063c4a491a36c886b60 | /channels/management/commands/runserver.py | 34154b95ace15eaa6c7c0a54bb444472e8d09fae | [
"BSD-3-Clause"
] | permissive | wengole/channels | 68dfb1776f05bd5e8bafd82299b6274fbf932f4e | 21b54e7db8f08a61af934a56d6832a3065d37676 | refs/heads/master | 2021-01-15T15:54:04.794407 | 2015-11-07T10:49:47 | 2015-11-07T12:47:26 | 45,731,612 | 0 | 0 | null | 2015-11-07T10:15:27 | 2015-11-07T10:15:27 | null | UTF-8 | Python | false | false | 1,851 | py | import django
import threading
from django.core.management.commands.runserver import Command as RunserverCommand
from django.core.management import CommandError
from channels import channel_backends, DEFAULT_CHANNEL_BACKEND
from channels.worker import Worker
from channels.adapters import UrlConsumer
from channels.interfaces.wsgi import WSGIInterface
class Command(RunserverCommand):
def get_handler(self, *args, **options):
"""
Returns the default WSGI handler for the runner.
"""
return WSGIInterface(self.channel_backend)
def run(self, *args, **options):
# Run the rest
return super(Command, self).run(*args, **options)
def inner_run(self, *args, **options):
# Check a handler is registered for http reqs
self.channel_backend = channel_backends[DEFAULT_CHANNEL_BACKEND]
if not self.channel_backend.registry.consumer_for_channel("http.request"):
# Register the default one
self.channel_backend.registry.add_consumer(UrlConsumer(), ["http.request"])
# Note that this is the right one on the console
self.stdout.write("Worker thread running, channels enabled")
if self.channel_backend.local_only:
self.stdout.write("Local channel backend detected, no remote channels support")
# Launch a worker thread
worker = WorkerThread(self.channel_backend)
worker.daemon = True
worker.start()
# Run rest of inner run
super(Command, self).inner_run(*args, **options)
class WorkerThread(threading.Thread):
"""
Class that runs a worker
"""
def __init__(self, channel_backend):
super(WorkerThread, self).__init__()
self.channel_backend = channel_backend
def run(self):
Worker(channel_backend=self.channel_backend).run()
| [
"[email protected]"
] | |
19615e4337da149ca2ed1b253545b7f5e66843d1 | 4f0beffcf200dd0b42cc192a4a2b9c2d0f0a2a42 | /rewrite/dsl/_dyacc.py | c7de0002f3dc35150c1a3e729ff2b20f0947c9d7 | [
"BSD-2-Clause"
] | permissive | rebcabin/pyrewrite | d11a2385920a8b375e9f3f7a4cc81ed8a178cf28 | 226d139ff361b095d46a58c65bc345cd18443008 | refs/heads/master | 2020-03-26T00:09:40.835150 | 2013-02-13T16:46:24 | 2013-02-13T16:46:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,270 | py |
# /home/stephen/continuum/pyrewrite/rewrite/dsl/_dyacc.py
# This file is automatically generated. Do not edit.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = 'o\xdc).WL\x02\xb9\xbb\xdb=\xab\x84j\xd8l'
_lr_action_items = {'NAME':([0,2,3,4,5,6,7,8,9,12,13,14,15,16,17,18,19,20,21,22,24,25,26,27,28,29,31,34,37,38,39,40,41,42,44,45,46,47,50,51,52,54,55,58,59,60,],[1,-3,-4,1,11,21,27,-1,-2,-20,21,-17,-21,-34,-19,21,21,-16,-24,-23,-18,-15,-22,-24,-6,-12,11,-14,21,50,21,27,27,27,-31,21,-28,21,-24,-13,-5,-10,-7,-25,21,-11,]),'INT':([6,7,13,18,19,37,38,39,40,41,42,45,47,59,],[22,22,22,22,22,22,22,22,22,22,22,22,22,22,]),'(':([1,6,7,13,18,19,20,21,22,26,27,37,38,39,40,41,42,45,47,50,59,],[5,13,13,13,13,13,37,-24,-23,-22,40,13,13,13,13,13,13,13,13,-24,13,]),']':([12,14,15,16,17,18,19,20,21,22,24,25,26,34,35,36,38,44,46,47,50,51,57,58,],[-20,-17,-21,-34,-19,-35,-35,-16,-24,-23,-18,-15,-22,-14,-29,46,-35,-31,-28,-35,-24,-13,-30,-25,]),',':([10,11,12,13,14,15,16,17,18,19,20,21,22,24,25,26,32,33,34,35,36,37,38,43,44,45,46,47,48,49,50,51,56,57,58,59,61,],[31,-9,-20,-35,-17,-21,-34,-19,-35,-35,-16,-24,-23,-18,-15,-22,45,-32,-14,-29,47,-35,-35,31,-31,-35,-28,-35,-26,59,-24,-13,45,47,-25,-35,59,]),'INCOMB':([7,12,14,15,16,17,20,22,24,26,27,28,29,40,41,42,44,46,53,54,55,58,60,],[-35,-20,-17,-21,-34,-19,-16,-23,-18,-22,-24,41,-12,-35,-35,-35,-31,-28,41,41,41,-25,-11,]),')':([10,11,12,13,14,15,16,17,18,20,21,22,24,25,26,27,29,32,33,34,37,38,40,41,43,44,45,46,48,49,50,51,53,54,56,58,59,60,61,],[30,-9,-20,-35,-17,-21,-34,-19,-35,-16,-24,-23,-18,-15,-22,-24,-12,44,-32,-14,-35,-35,-35,-35,-8,-31,-35,-28,-26,58,-24,-13,60,-10,-33,-25,-35,-11,-27,]),'AS':([6,13,18,19,21,37,39,45,47,59,],[18,18,18,18,38,18,18,18,18,18,]),'STRING':([6,7,13,18,19,37,38,39,40,41,42,45,47,59,],[16,16,16,16,16,16,16,16,16,16,16,16,16,16,]),'ARROW':([6,12,14,15,16,17,18,20,21,22,23,24,25,26,34,38,44,46,50,51,58,],[-35,-20,-17,-21,-34,-19,-35,-16,-24,-23,39,-18,-15,-22,-14,-35,-31,-28,-24,-13,-25,]),'DOUBLE':([6,7,13,18,19,37,38,39,40,41,42,45,47,59,],[26,26,26,26,26,26,26,26,26,26,26,26,26,26,]),'[':([6,7,13,18,19,37,38,39,40,41,42,45,47,59,],[19,19,19,19,19,19,19,19,19,19,19,19,19,19,]),':':([1,],[6,]),'=':([1,30,],[7,42,]),'$end':([2,3,4,7,8,9,12,14,15,16,17,18,20,21,22,24,25,26,27,28,29,34,38,39,41,42,44,46,50,51,52,54,55,58,60,],[-3,-4,0,-35,-1,-2,-20,-17,-21,-34,-19,-35,-16,-24,-23,-18,-15,-22,-24,-6,-12,-14,-35,-35,-35,-35,-31,-28,-24,-13,-5,-10,-7,-25,-11,]),}
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'strategy_args':([5,31,],[10,43,]),'term':([6,7,13,18,19,37,38,39,40,41,42,45,47,59,],[20,20,20,20,20,20,20,20,20,20,20,20,20,20,]),'string':([6,7,13,18,19,37,38,39,40,41,42,45,47,59,],[12,12,12,12,12,12,12,12,12,12,12,12,12,12,]),'tuple':([6,7,13,18,19,37,38,39,40,41,42,45,47,59,],[17,17,17,17,17,17,17,17,17,17,17,17,17,17,]),'expr':([6,13,18,19,37,39,45,47,59,],[23,33,34,35,48,52,33,35,48,]),'strategy_value':([7,40,41,42,],[28,53,54,55,]),'list':([6,7,13,18,19,37,38,39,40,41,42,45,47,59,],[24,24,24,24,24,24,24,24,24,24,24,24,24,24,]),'rule':([0,4,],[2,8,]),'strategy':([0,4,],[3,9,]),'appl':([6,7,13,18,19,37,38,39,40,41,42,45,47,59,],[14,14,14,14,14,14,14,14,14,14,14,14,14,14,]),'list_value':([19,47,],[36,57,]),'value':([6,7,13,18,19,37,38,39,40,41,42,45,47,59,],[25,29,25,25,25,25,51,25,29,29,29,25,25,25,]),'definitions':([0,],[4,]),'tuple_value':([13,45,],[32,56,]),'empty':([6,7,13,18,19,37,38,39,40,41,42,45,47,59,],[15,15,15,15,15,15,15,15,15,15,15,15,15,15,]),'appl_value':([37,59,],[49,61,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> definitions","S'",1,None,None,None),
('definitions -> definitions rule','definitions',2,'p_definitions1','',148),
('definitions -> definitions strategy','definitions',2,'p_definitions1','',149),
('definitions -> rule','definitions',1,'p_definitions2','',153),
('definitions -> strategy','definitions',1,'p_definitions2','',154),
('rule -> NAME : expr ARROW expr','rule',5,'p_rule','',160),
('strategy -> NAME = strategy_value','strategy',3,'p_strategy_def1','',166),
('strategy -> NAME ( strategy_args ) = strategy_value','strategy',6,'p_strategy_def2','',171),
('strategy_args -> strategy_args , strategy_args','strategy_args',3,'p_strategy_args1','',178),
('strategy_args -> NAME','strategy_args',1,'p_strategy_args2','',182),
('strategy_value -> strategy_value INCOMB strategy_value','strategy_value',3,'p_strategy_value1','',188),
('strategy_value -> NAME ( strategy_value )','strategy_value',4,'p_strategy_value2','',198),
('strategy_value -> value','strategy_value',1,'p_strategy_value3','',202),
('expr -> NAME AS value','expr',3,'p_expr1','',209),
('expr -> AS expr','expr',2,'p_expr2','',214),
('expr -> value','expr',1,'p_expr3','',218),
('value -> term','value',1,'p_value','',224),
('value -> appl','value',1,'p_value','',225),
('value -> list','value',1,'p_value','',226),
('value -> tuple','value',1,'p_value','',227),
('value -> string','value',1,'p_value','',228),
('value -> empty','value',1,'p_value','',229),
('term -> DOUBLE','term',1,'p_term_double','',235),
('term -> INT','term',1,'p_term_int','',239),
('term -> NAME','term',1,'p_term_term','',243),
('appl -> term ( appl_value )','appl',4,'p_appl','',249),
('appl_value -> expr','appl_value',1,'p_appl_value1','',253),
('appl_value -> appl_value , appl_value','appl_value',3,'p_appl_value2','',260),
('list -> [ list_value ]','list',3,'p_list','',266),
('list_value -> expr','list_value',1,'p_list_value1','',270),
('list_value -> list_value , list_value','list_value',3,'p_list_value2','',277),
('tuple -> ( tuple_value )','tuple',3,'p_tuple','',283),
('tuple_value -> expr','tuple_value',1,'p_tuple_value1','',287),
('tuple_value -> tuple_value , tuple_value','tuple_value',3,'p_tuple_value2','',294),
('string -> STRING','string',1,'p_string','',300),
('empty -> <empty>','empty',0,'p_empty','',306),
]
| [
"[email protected]"
] | |
18ca4fe49c3a373eff2d58cd4615322d002593fb | d3efc82dfa61fb82e47c82d52c838b38b076084c | /crossmarketetf/crossmarket_redemption_HA/YW_CETFSS_SHSH_044.py | 51dcf600f3979f6907a6dc09c820765634ed9b6c | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,671 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test")
from crossmarketetf.cetfservice.cetf_main_service import *
from crossmarketetf.cetfservice.cetf_get_components_asset import *
from crossmarketetf.cetfservice.cetf_utils import *
from crossmarketetf.cetfmysql.query_cetf_components_code import *
from mysql.QueryOrderErrorMsg import queryOrderErrorMsg
from service.mainService import *
from mysql.getUpOrDownPrice import getUpPrice
class YW_CETFSS_SHSH_044(xtp_test_case):
def test_YW_CETFSS_SHSH_044(self):
        # ----------- ETF redemption -------------
        title = 'Redeem a real ETF - enough redeemable securities, sufficient funds, estimated cash balance > 0'
        # Define the expected values for the current test case.
        # Expected status (期望状态): initial, unfilled, fully filled, rejected, cancel-rejected, internal cancel
        # xtp_ID and cancel_xtpID default to 0 and need no change.
        # The Chinese dict keys/values below are consumed by the test framework
        # and must stay as-is; English glosses are given in trailing comments.
        case_goal = {
            '期望状态': '全成',  # expected status: fully filled
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',  # order generated: yes
            '是否是撤废': '否',  # is cancel-reject: no
            'xtp_ID': 0,
            'cancel_xtpID': 0,
}
logger.warning(title)
unit_info = {
            'ticker': '530580',  # ETF code
            'etf_unit': 10.0,  # number of ETF redemption units
            'etf_unit_sell': 1.0,  # number of ETF units to sell
            'component_unit_sell': 10.0  # number of component-stock units to sell
}
        # ----------- Query component-stock positions before ETF redemption -------------
component_stk_info = cetf_get_all_component_stk(Api,unit_info['ticker'])
        # Query the ETF's minimum creation/redemption unit
unit_number = query_creation_redem_unit(unit_info['ticker'])
        # ETF redemption quantity
quantity = int(unit_info['etf_unit'] * unit_number)
        # Define the order parameters ------------------------------------------
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker':
unit_info['ticker'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_REDEMPTION'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'quantity':
quantity,
'position_effect':
Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
g_func.cetf_parm_init(case_goal['期望状态'])
rs1 = cetf_service_test(Api, case_goal, wt_reqs,component_stk_info)
etf_creation_log(case_goal, rs1)
self.assertEqual(rs1['用例测试结果'], True)
        # -------- Secondary market: sell the ETF -----------
        case_goal['期望状态'] = '废单'  # expected status: rejected order
        case_goal['errorID'] = 11010121
        case_goal['errorMSG'] = queryOrderErrorMsg(11010121)
        # Quantity of ETF to sell on the secondary market
quantity = int(unit_info['etf_unit_sell'] * unit_number)
quantity_list = split_etf_quantity(quantity)
        # Query the limit-up price
limitup_px = getUpPrice(unit_info['ticker'])
rs2 = {}
for etf_quantity in quantity_list:
wt_reqs_etf = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker':
unit_info['ticker'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price':
limitup_px,
'quantity':
etf_quantity,
'position_effect':
Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
rs2 = serviceTest(Api, case_goal, wt_reqs_etf)
if rs2['用例测试结果'] is False:
etf_sell_log(case_goal, rs2)
self.assertEqual(rs2['用例测试结果'], True)
return
etf_sell_log(case_goal, rs2)
time.sleep(2)
        # ------------ Sell the component stocks on the secondary market -----------
        case_goal['期望状态'] = '全成'  # expected status: fully filled
        case_goal['errorID'] = 0
        case_goal['errorMSG'] = ''
        # Query ETF component codes, quantities, cash-substitution flags, etc.
etf_components = query_cetf_components_info(unit_info['ticker'],1)
        # A component_unit_sell of 100 or more is an absolute sell quantity; below 100 it is a multiple of the component share count
rs3 = {}
for component_info in etf_components:
substitute_flag = component_info[1]
if substitute_flag in (0,1):
stk_code = component_info[0]
components_share = component_info[2]
quantity = (int(unit_info['component_unit_sell'])
if unit_info['component_unit_sell'] >= 100 else
int(components_share * unit_info['component_unit_sell']))
limitup_px = getUpPrice(stk_code)
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker':
stk_code,
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price':
limitup_px,
'quantity':
quantity,
'position_effect':
Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
rs3 = serviceTest(Api, case_goal, wt_reqs)
if rs3['用例测试结果'] is False:
etf_components_sell_log(case_goal, rs3)
self.assertEqual(rs3['用例测试结果'], True)
etf_components_sell_log(case_goal, rs3)
self.assertEqual(rs3['用例测试结果'], True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0266bfd3dabb66fbc32785187d7dd62dcd182a82 | 5399dd4580ea3f528753bc8b52a981743d62f8bb | /ML/m39_pickle.py | 957e4037bd4d833cbf848e3087f487fa8f253ce3 | [] | no_license | iwillbeaprogramer/Study | 3ac7c118ffe3981d78b4ad263cb62432eae13970 | 3bfe571da5bbfc545b994e5878e217f9306bde14 | refs/heads/main | 2023-05-07T16:31:05.564973 | 2021-05-27T14:50:00 | 2021-05-27T14:50:00 | 324,044,441 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | # eval_set
from xgboost import XGBClassifier,XGBRegressor
from sklearn.datasets import load_boston, load_breast_cancer
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import r2_score,accuracy_score,mean_squared_error
datasets = load_boston()
x = datasets.data
y = datasets.target
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.15,shuffle=True,random_state = 66)
model = XGBRegressor(n_estimators = 1000,learning_rate = 0.01,n_jobs=8)
model.fit(x_train,y_train,verbose=1,eval_metric = ['rmse','logloss'],eval_set = [(x_train,y_train),(x_test,y_test)],early_stopping_rounds=10)
aaa = model.score(x_test,y_test)
print(aaa)
y_pred = model.predict(x_test)
r2 = r2_score(y_test,y_pred)
rmse = mean_squared_error(y_test,y_pred)**0.5
print("r2 : ",r2)
print("rmse : ",rmse)
result = model.evals_result()
print(result)
# save the model
import pickle
# pickle.dump(model, open("../data/xgb_save/m39.pickle.dat","wb"))
# print("save complete")
print("loading the model")
model2 = pickle.load(open('../data/xgb_save/m39.pickle.dat','rb'))
print('model loaded')
r22 = model2.score(x_test,y_test)  # score the reloaded model
print('r22 : ',r22)
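# Alternative sketch: recent xgboost versions expose a native persistence
# API on the sklearn wrapper, avoiding pickle's Python-version coupling.
# The file path below is an illustrative assumption; uncomment to use.
# model.save_model('../data/xgb_save/m39.model')
# model3 = XGBRegressor()
# model3.load_model('../data/xgb_save/m39.model')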
| [
"[email protected]"
] | |
2816391722086df3dfeffc573cf0446551c2149b | 4bed9030031fc99f6ea3d5267bd9e773f54320f8 | /sparse/repos/MaayanLab/clustergrammer-widget/setup.py | 0dd34fb3172e27c6f17267a1c21facac86ecae9f | [
"BSD-3-Clause"
] | permissive | yuvipanda/mybinder.org-analytics | c5f4b939541d29727bc8d3c023b4d140de756f69 | 7b654e3e21dea790505c626d688aa15640ea5808 | refs/heads/master | 2021-06-13T05:49:12.447172 | 2018-12-22T21:48:12 | 2018-12-22T21:48:12 | 162,839,358 | 1 | 1 | BSD-3-Clause | 2021-06-10T21:05:50 | 2018-12-22T20:01:52 | Jupyter Notebook | UTF-8 | Python | false | false | 5,515 | py | from __future__ import print_function
from setuptools import setup, find_packages, Command
from setuptools.command.sdist import sdist
from setuptools.command.build_py import build_py
from setuptools.command.egg_info import egg_info
from subprocess import check_call
import os
import sys
import platform
here = os.path.dirname(os.path.abspath(__file__))
node_root = os.path.join(here, 'js')
is_repo = os.path.exists(os.path.join(here, '.git'))
npm_path = os.pathsep.join([
os.path.join(node_root, 'node_modules', '.bin'),
os.environ.get('PATH', os.defpath),
])
from distutils import log
log.set_verbosity(log.DEBUG)
log.info('setup.py entered')
log.info('$PATH=%s' % os.environ['PATH'])
LONG_DESCRIPTION = 'clustergrammer_widget'
def js_prerelease(command, strict=False):
"""decorator for building minified js/css prior to another command"""
class DecoratedCommand(command):
def run(self):
jsdeps = self.distribution.get_command_obj('jsdeps')
if not is_repo and all(os.path.exists(t) for t in jsdeps.targets):
# sdist, nothing to do
command.run(self)
return
try:
self.distribution.run_command('jsdeps')
except Exception as e:
missing = [t for t in jsdeps.targets if not os.path.exists(t)]
if strict or missing:
log.warn('rebuilding js and css failed')
if missing:
log.error('missing files: %s' % missing)
raise e
else:
log.warn('rebuilding js and css failed (not a problem)')
log.warn(str(e))
command.run(self)
update_package_data(self.distribution)
return DecoratedCommand
def update_package_data(distribution):
"""update package_data to catch changes during setup"""
build_py = distribution.get_command_obj('build_py')
# distribution.package_data = find_package_data()
# re-init build_py options which load package_data
build_py.finalize_options()
class NPM(Command):
description = 'install package.json dependencies using npm'
user_options = []
node_modules = os.path.join(node_root, 'node_modules')
targets = [
os.path.join(here, 'clustergrammer_widget', 'static', 'extension.js'),
os.path.join(here, 'clustergrammer_widget', 'static', 'index.js')
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def has_npm(self):
try:
check_call(['npm', '--version'])
return True
except:
return False
def should_run_npm_install(self):
package_json = os.path.join(node_root, 'package.json')
node_modules_exists = os.path.exists(self.node_modules)
return self.has_npm()
def run(self):
has_npm = self.has_npm()
if not has_npm:
log.error("`npm` unavailable. If you're running this command using sudo, make sure `npm` is available to sudo")
env = os.environ.copy()
env['PATH'] = npm_path
if self.should_run_npm_install():
log.info("Installing build dependencies with npm. This may take a while...")
check_call(['npm', 'install'], cwd=node_root, stdout=sys.stdout, stderr=sys.stderr)
os.utime(self.node_modules, None)
for t in self.targets:
if not os.path.exists(t):
msg = 'Missing file: %s' % t
if not has_npm:
msg += '\nnpm is required to build a development version of widgetsnbextension'
raise ValueError(msg)
# update package data in case this created new files
update_package_data(self.distribution)
version_ns = {}
with open(os.path.join(here, 'clustergrammer_widget', '_version.py')) as f:
exec(f.read(), {}, version_ns)
setup_args = {
'name': 'clustergrammer_widget',
'version': version_ns['__version__'],
'description': 'clustergrammer_widget',
'long_description': LONG_DESCRIPTION,
'include_package_data': True,
'data_files': [
('share/jupyter/nbextensions/clustergrammer_widget', [
'clustergrammer_widget/static/extension.js',
'clustergrammer_widget/static/index.js',
'clustergrammer_widget/static/index.js.map',
]),
],
'install_requires': [
'ipywidgets>=5.1.5',
],
'packages': find_packages(),
'zip_safe': False,
'cmdclass': {
'build_py': js_prerelease(build_py),
'egg_info': js_prerelease(egg_info),
'sdist': js_prerelease(sdist, strict=True),
'jsdeps': NPM,
},
'author': 'Nicolas Fernandez',
'author_email': '[email protected]',
'url': 'http://jupyter.org',
'keywords': [
'ipython',
'jupyter',
'widgets',
],
'classifiers': [
'Development Status :: 4 - Beta',
'Framework :: IPython',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Multimedia :: Graphics',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
}
setup(**setup_args)
| [
"[email protected]"
] | |
73c6fbe202adb37cfaa60701dbf155423a36249b | a775bb0ef2347a91aa1e6236f0e6eae6512a84ad | /src/robosub2019/armer.py | cbf0ec2d09da56ca925e8bb07392e777ba257059 | [] | no_license | Tartan-AUV/tartan-sub | 66376e163681bd7dac47c62e41669c0c842badc0 | 0a05156a887cdd6467813a358973cc23e6e55227 | refs/heads/master | 2020-04-06T16:45:04.396871 | 2019-10-19T17:46:37 | 2019-10-19T17:46:37 | 157,632,649 | 0 | 4 | null | 2019-11-03T20:26:47 | 2018-11-15T01:10:48 | Python | UTF-8 | Python | false | false | 855 | py | #!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
class Armer(object):
def __init__(self, run_config):
self.config = run_config
self.pub = rospy.Publisher(self.config.arming_topic, Bool, queue_size=1)
self.sub = rospy.Subscriber(self.config.arming_topic, Bool, self.callback)
self.armed = False
self.rate = rospy.Rate(1) # 1Hz
def arm(self):
while not rospy.is_shutdown() and not self.armed:
msg = Bool()
msg.data = True
self.pub.publish(msg)
self.rate.sleep()
return
def callback(self, msg):
self.armed = msg.data
def disarm(self):
while not rospy.is_shutdown():
msg = Bool()
msg.data = False
self.pub.publish(msg)
self.rate.sleep()
return
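# Usage sketch; assumes a run_config object exposing `arming_topic`, as
# used above, and an illustrative node name. Kept commented out so that
# importing this module stays side-effect free.
# if __name__ == '__main__':
#     rospy.init_node('armer_example')
#     armer = Armer(run_config)
#     armer.arm()     # blocks until the armed echo arrives on the topic
#     armer.disarm()  # note: loops until rospy shutdown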
| [
"[email protected]"
] | |
0b330a3afbbf55128aa6e962b16c14c7c7eaf126 | 99b0631baa2fd9ab2455d848b47febf581916272 | /study_code/learn_nonlocal.py | 108dc21fa3015ab9051ced4645bbf529a466aeb5 | [] | no_license | seceast/PyProjects | a934e366cb619f2610d75b9a0fb47d818814a4de | 7be7193b4126ce920a3d3ffa4ef5d8743b3fa7d1 | refs/heads/master | 2023-03-07T22:23:21.229489 | 2021-02-25T05:37:58 | 2021-02-25T05:37:58 | 265,480,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | """
-*- coding: utf-8 -*-
@author: yangyd
@file: learn_nonlocal.py
@time: 2019/10/14 0014 14:33
"""
def outer():
    b = 10
    def inner():
        # Declare b from the enclosing function's scope; without this
        # declaration b could be read here but not reassigned.
        nonlocal b
        print(f'old_var = {b}')
        b = 20
        print(f'new_var = {b}')
    inner()
outer()
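# For contrast: without the nonlocal declaration, assigning to b inside
# inner() creates a brand-new local variable and the enclosing b is left
# untouched.
def outer_without_nonlocal():
    b = 10
    def inner():
        b = 20  # a new local b; the enclosing b stays 10
        print(f'inner b = {b}')
    inner()
    print(f'enclosing b is still {b}')
outer_without_nonlocal()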
| [
"[email protected]"
] | |
10dc28f9618ba9669fbf73bb52f0188b41ca1653 | 4bc2d855558ccb962991f997e9779919031687dd | /capstone/causalmodel/migrations/0026_suggestedintervention_is_priority.py | fa89dff57a9a21ae61b0079b3a2bd148a737e427 | [] | no_license | jmblontoc/Likha-Capstone | 80081e44b7ad6457eb776432e623c6db8b7a17e2 | e1c32911b58cd1419c8e1a554ac32210456d201d | refs/heads/master | 2022-12-10T03:26:32.946638 | 2018-12-09T04:33:10 | 2018-12-09T04:33:10 | 134,726,142 | 0 | 1 | null | 2022-11-25T23:52:42 | 2018-05-24T14:21:36 | Python | UTF-8 | Python | false | false | 413 | py | # Generated by Django 2.1b1 on 2018-11-01 11:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('causalmodel', '0025_suggestedintervention'),
]
operations = [
migrations.AddField(
model_name='suggestedintervention',
name='is_priority',
field=models.BooleanField(default=True),
),
]
| [
"[email protected]"
] | |
3f52cb7f354134071ca7b9aa0ee2b810466b0d7b | d76cf9484f93c1822c71c87980e6f76a2c0f5786 | /application/api/superclass/superclasses.py | e73ead1271653b8a6b572471b3917a103589597d | [] | no_license | chris-hamberg/twitter | a02b0278eab609d696a84f39ea771a5936379561 | b806de4e073a0f4791cde93111c69c4d44b6844a | refs/heads/master | 2022-12-14T19:06:26.221176 | 2019-12-13T21:44:23 | 2019-12-13T21:44:23 | 172,611,261 | 0 | 0 | null | 2022-12-08T04:51:18 | 2019-02-26T01:02:13 | Python | UTF-8 | Python | false | false | 4,721 | py | try:
from twitter.application.api.superclass.abstract_base_class import AbstractBase
from twitter.application.subprocess.base64encode import base64encode
from twitter.application.subprocess.connection import requests
except ModuleNotFoundError as main:
from application.api.superclass.abstract_base_class import AbstractBase
from application.subprocess.base64encode import base64encode
from application.subprocess.connection import requests
finally:
from requests import get, post
import inspect
# ----------------------------------------------------------------------- #
'''
Special classes for handling edge cases, as they occur in the api definition
'''
# ----------------------------------------------------------------------- #
class Base64(AbstractBase):
'''
update_profile_image, and update_profile_banner classes inherit from
this class.
'''
def __init__(self):
super().__init__()
self._method = post
self._endpoint = None
self._key = None
#NOTE do the base64 encoding procedure !!!
def __call__(self, **params):
self._data = base64encode(params)
try:
# NOTE direct_messages changed state
###### by adding the content-type
###### field to headers.
###### try to pop content-type.
self._headers.pop('content-type')
except KeyError as good:
pass
return AbstractBase.__call__(self, **params)
def __repr__(self): raise NotImplementedError
class Collision(AbstractBase):
'''
For some reason the API was written to have name collisions.
    collections/entries had to be renamed to collections/entries_method
list/members had to be renamed to list/members_method
this superclass corrects the urls and endpoints for these classes
'''
def __init__(self):
super().__init__()
surrogate = self.__class__.__name__
biological = surrogate.split('_')[0]
self._url = self.url.replace(surrogate, biological)
self._endpoint = self._endpoint.split('_')[0]
def __repr__(self): raise NotImplementedError
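    # Hypothetical illustration: a subclass named entries_method(Collision)
    # (the name is assumed, mirroring the docstring above) would have the
    # "_method" suffix stripped, so its URL and endpoint resolve to the
    # real "entries" endpoint.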
class Empty(AbstractBase):
'''
settings, and remove_profile_banner classes inherit from this class.
'''
def __init__(self):
super().__init__()
del self._params, self._data
    def __call__(self): #NOTE neither of these subclasses takes any args
return AbstractBase.__call__(self)
def __repr__(self):
return ' PARAMETERS: None'
class Media(AbstractBase):
def __init__(self):
super().__init__()
self._method = post
self._url = self.url.replace('api', 'upload')
self._url = self.url.replace(self.__class__.__name__.lower(),
'upload')
self._endpoint = self.endpoint.replace(self.__class__.__name__.lower(),
'upload')
try:
self._headers.pop('content-type')
except KeyError as good: pass
def __call__(self, **params):
if self.__class__.__name__ != 'upload':
params.update({'command': self.__class__.__name__})
return AbstractBase.__call__(self, **params)
def __repr__(self): raise NotImplementedError
class Numeric(AbstractBase):
'''
retweets, retweet, and unretweet classes inherit from this class.
All other status module classes are children of AbstractBase.
'''
def __init__(self, suffix=None):
# because the retweets class is a special edge case; where
# str.rstrip('.json') removes the trailing 's' from 'retweets',
# we need the following conditional statement to handle that
# deformation.
if not suffix:
suffix = '/{id}.json'
super().__init__()
self._method = post
self._url = self._url.rstrip('.json') + suffix
def __call__(self, **params): #NOTE these endpoints are of a special form.
url = self.url
self._url = self.url.format_map(params)
response = AbstractBase.__call__(self, **params)
self._url = url
return response
def __repr__(self): raise NotImplementedError
class Ternary(AbstractBase):
def __init__(self):
self._method = None
super().__init__()
frame = inspect.currentframe()
fpath = inspect.getouterframes(frame, 2)[9][1]
module = fpath.split('/')[-1].split('.')[0]
self._module = module
url = self.url.split('/')
url.insert(-2, self.parent)
url = '/'.join(url)
self._url = url
self._endpoint = '/'+self.parent+self._endpoint
def __repr__(self): raise NotImplementedError
| [
"[email protected]"
] | |
387a27431bd3d1af9f529f413813a0a29f54f3d5 | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/logic/v20150801preview/get_integration_account_certificate.py | 8d5432fc9120d562473d196d12733d96b4a0883b | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,163 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetIntegrationAccountCertificateResult',
'AwaitableGetIntegrationAccountCertificateResult',
'get_integration_account_certificate',
]
@pulumi.output_type
class GetIntegrationAccountCertificateResult:
def __init__(__self__, changed_time=None, created_time=None, key=None, location=None, metadata=None, name=None, public_certificate=None, tags=None, type=None):
if changed_time and not isinstance(changed_time, str):
raise TypeError("Expected argument 'changed_time' to be a str")
pulumi.set(__self__, "changed_time", changed_time)
if created_time and not isinstance(created_time, str):
raise TypeError("Expected argument 'created_time' to be a str")
pulumi.set(__self__, "created_time", created_time)
if key and not isinstance(key, dict):
raise TypeError("Expected argument 'key' to be a dict")
pulumi.set(__self__, "key", key)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if metadata and not isinstance(metadata, dict):
raise TypeError("Expected argument 'metadata' to be a dict")
pulumi.set(__self__, "metadata", metadata)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if public_certificate and not isinstance(public_certificate, str):
raise TypeError("Expected argument 'public_certificate' to be a str")
pulumi.set(__self__, "public_certificate", public_certificate)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="changedTime")
def changed_time(self) -> str:
"""
The changed time.
"""
return pulumi.get(self, "changed_time")
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> str:
"""
The created time.
"""
return pulumi.get(self, "created_time")
@property
@pulumi.getter
def key(self) -> Optional['outputs.KeyVaultKeyReferenceResponse']:
"""
The key details in the key vault.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def metadata(self) -> Optional[Mapping[str, Any]]:
"""
The metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publicCertificate")
def public_certificate(self) -> Optional[str]:
"""
The public certificate.
"""
return pulumi.get(self, "public_certificate")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
The resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetIntegrationAccountCertificateResult(GetIntegrationAccountCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIntegrationAccountCertificateResult(
changed_time=self.changed_time,
created_time=self.created_time,
key=self.key,
location=self.location,
metadata=self.metadata,
name=self.name,
public_certificate=self.public_certificate,
tags=self.tags,
type=self.type)
def get_integration_account_certificate(certificate_name: Optional[str] = None,
integration_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIntegrationAccountCertificateResult:
"""
Use this data source to access information about an existing resource.
:param str certificate_name: The integration account certificate name.
:param str integration_account_name: The integration account name.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['certificateName'] = certificate_name
__args__['integrationAccountName'] = integration_account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:logic/v20150801preview:getIntegrationAccountCertificate', __args__, opts=opts, typ=GetIntegrationAccountCertificateResult).value
return AwaitableGetIntegrationAccountCertificateResult(
changed_time=__ret__.changed_time,
created_time=__ret__.created_time,
key=__ret__.key,
location=__ret__.location,
metadata=__ret__.metadata,
name=__ret__.name,
public_certificate=__ret__.public_certificate,
tags=__ret__.tags,
type=__ret__.type)
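# Usage sketch (the resource names below are illustrative assumptions):
# cert = get_integration_account_certificate(
#     certificate_name="example-cert",
#     integration_account_name="example-account",
#     resource_group_name="example-rg")
# pulumi.export("publicCertificate", cert.public_certificate)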
| [
"[email protected]"
] |