Dataset schema (column → type / range):

| column | type / range |
|---|---|
| blob_id | string, length 40 |
| directory_id | string, length 40 |
| path | string, length 3–616 |
| content_id | string, length 40 |
| detected_licenses | sequence, length 0–112 |
| license_type | string, 2 classes |
| repo_name | string, length 5–115 |
| snapshot_id | string, length 40 |
| revision_id | string, length 40 |
| branch_name | string, 777 classes |
| visit_date | timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64, 4.92k – 681M, nullable (⌀) |
| star_events_count | int64, 0 – 209k |
| fork_events_count | int64, 0 – 110k |
| gha_license_id | string, 22 classes |
| gha_event_created_at | timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string, 149 classes |
| src_encoding | string, 26 classes |
| language | string, 1 class |
| is_vendor | bool, 2 classes |
| is_generated | bool, 2 classes |
| length_bytes | int64, 3 – 10.2M |
| extension | string, 188 classes |
| content | string, length 3 – 10.2M |
| authors | sequence, length 1 |
| author_id | string, length 1–132 |
72e86ba0148bbe1b9d9f1b565316ff71b35a5b3d | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/W_w_Mgt_to_Cx_Cy_focus_Z_ok/Sob_k15_s001_EroM/pyr_Tcrop255_pad60_jit15/pyr_2s/L3/step10_a.py | f0aef80d3b5947a605591d698c2bed71ebb8c14f | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,323 | py |
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                            ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")                     ### split the path; next we find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")               ### find which level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])   ### locate the kong_model2 dir
import sys                                                            ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer        ### the -1 converts a length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]    ### [7:] used to strip the "step1x_" prefix; meaningful names are worth keeping, so it was changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]    ### [5:] used to strip the "mask_" prefix (added only because a Python module name cannot start with a digit); the automatic ordering turned out to be acceptable, so it was changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir)    ### example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
import time
start_time = time.time()    ### needed by the "build exps cost time" print in the __main__ block below
from step06_a_datas_obj import *
from step09_2side_L3 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_modules = [module for module in sys.modules if "step09" in module]
for rm_module in rm_modules: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir decides the folder "one level above" result_dir! A nested exp_dir works fine too~
For example, with exp_dir = "6_mask_unet/name_of_your_choice", every result_dir lives under:
    6_mask_unet/name_of_your_choice/result_a
    6_mask_unet/name_of_your_choice/result_b
    6_mask_unet/name_of_your_choice/...
'''
use_db_obj = type8_blender_kong_doc3d_in_W_and_I_gt_F
use_loss_obj = [G_sobel_k15_erose_M_loss_info_builder.set_loss_target("UNet_Cx").copy(), G_sobel_k15_erose_M_loss_info_builder.set_loss_target("UNet_Cy").copy()]    ### the z, y, x order follows step07_b_0b_Multi_UNet
#############################################################
### Build an empty Exp_builder so result_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900) .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_1side_1__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900) .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900) .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900) .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900) .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900) .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900) .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900) .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900) .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900) .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900) .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Press F5 directly, or run "python step10_b1_exp_obj_load_and_train_and_test.py" with nothing after it, so execution does not fall through to the code below meant for step10_b_subprocess.py~~~
ch032_1side_1__2side_1.build().run()
# print('no argument')
sys.exit()
    ### The code below is for step10_b_subprocess.py; it is equivalent to running "python step10_b1_exp_obj_load_and_train_and_test.py <some_exp>.build().run()" from cmd
eval(sys.argv[1])
| [
"[email protected]"
] | |
278703b3e7899ad71128c9b1bcfa67f9a6245b9f | 306de77289d118f819c9ded4807a91268b7cc58b | /models/resnet_prm/resnet_PRM15.py | 2e6ac31e1d098a27709dbe75724d728d3a46a6bb | [] | no_license | ma-xu/NANet | 67252d8e3108f16049b9ebdb5a8c4a473b117874 | 5c894ebf05fed54a437e1b9b2d5dfc7a947f0775 | refs/heads/master | 2022-03-28T01:59:11.409632 | 2020-01-23T04:41:28 | 2020-01-23T04:41:28 | 225,984,431 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,777 | py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torch.nn.parameter import Parameter
import torch
import torch.nn.functional as F
from torch.nn import init
from torch.autograd import Variable
from collections import OrderedDict
import math
import time
"""
Actually the SGE + maxvalue
"""
__all__ = ['prm15_resnet18','prm15_resnet34','prm15_resnet50','prm15_resnet101','prm15_resnet152']
"""
group is the number of selected points.
"""
class PRMLayer(nn.Module):
def __init__(self,groups=64,mode='dotproduct'):
super(PRMLayer, self).__init__()
self.mode = mode
self.groups = groups
self.max_pool = nn.AdaptiveMaxPool2d(1,return_indices=True)
self.weight = Parameter(torch.zeros(1,self.groups,1,1))
self.bias = Parameter(torch.ones(1,self.groups,1,1))
self.sig = nn.Sigmoid()
self.gap = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
b,c,h,w = x.size()
# Similarity function
        query_value, _ = self.get_key_position(x, self.groups) # query per sample, shape [b, c, 1, 1]; the discarded position map is [b*groups, 2, 1, 1]
query_value = query_value.view(b*self.groups,-1,1,1)
x_value = x.view(b*self.groups,-1,h,w)
similarity = self.get_similarity(x_value, query_value, mode=self.mode)
similarity = similarity.view(b,self.groups,h,w)
context = similarity.view(b, self.groups, -1)
context = context - context.mean(dim=2, keepdim=True)
std = context.std(dim=2, keepdim=True) + 1e-5
context = (context/std).view(b,self.groups,h,w)
# affine function
context = context * self.weight + self.bias
context = context.view(b*self.groups,1,h,w)\
.expand(b*self.groups, c//self.groups, h, w).reshape(b,c,h,w)
value = x*self.sig(context)
return value
def get_key_position(self, key,groups):
b,c,h,w = key.size()
value = key.view(b*groups,c//groups,h,w)
sumvalue = value.sum(dim=1,keepdim=True)
maxvalue,maxposition = self.max_pool(sumvalue)
t_position = torch.cat((maxposition//w,maxposition % w),dim=1)
t_value = value[torch.arange(b*groups),:,t_position[:,0,0,0],t_position[:,1,0,0]]
t_value = t_value.view(b, c, 1, 1)
t_mean = self.gap(key)
return t_value+t_mean, t_position
def get_similarity(self,query, key_value, mode='dotproduct'):
if mode == 'dotproduct':
similarity = torch.matmul(key_value.permute(0, 2, 1), query)
elif mode == 'l1norm':
similarity = -(abs(query - key_value)).sum(dim=1)
elif mode == 'gaussian':
            # Gaussian similarity (not recommended: too sensitive to noise)
similarity = torch.exp(torch.matmul(key_value.permute(0, 2, 1), query))
similarity[similarity == float("Inf")] = 0
similarity[similarity <= 1e-9] = 1e-9
else:
similarity = torch.matmul(key_value.permute(0, 2, 1), query)
return similarity
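# Usage sketch (added; not part of the original file): PRMLayer is
# shape-preserving, and the channel count must be divisible by `groups`
# (64 by default):
#
#     layer = PRMLayer(groups=64)
#     x = torch.randn(2, 256, 14, 14)    # 256 channels -> 4 per group
#     y = layer(x)                       # -> torch.Size([2, 256, 14, 14])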
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.prm = PRMLayer()
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.prm(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.prm = PRMLayer()
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.prm(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):
super(ResNet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def prm15_resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def prm15_resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def prm15_resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def prm15_resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
def prm15_resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
def demo():
st = time.perf_counter()
for i in range(1):
net = prm15_resnet50(num_classes=1000)
y = net(torch.randn(2, 3, 224,224))
print(i)
print("CPU time: {}".format(time.perf_counter() - st))
def demo2():
st = time.perf_counter()
for i in range(1):
net = prm15_resnet50(num_classes=1000).cuda()
y = net(torch.randn(2, 3, 224,224).cuda())
print(i)
# print("Allocated: {}".format(torch.cuda.memory_allocated()))
print("GPU time: {}".format(time.perf_counter() - st))
# demo()
# demo2()
| [
"[email protected]"
] | |
be04c205e071f2ef645ebbaa5a3f066e0a5d660e | 0d834835098f86c153367956cb2513377c91074c | /basic/html_table/table.py | d9fa2816db8db4c537b7c2ddd956d17b2a82d654 | [] | no_license | klandon94/flask_fundamentals | a63c2c6b5567627203299a685db5f73a8d749051 | ee8cd8976b504a03c15696019c2091951a3a5a73 | refs/heads/master | 2020-03-21T06:14:53.013649 | 2018-06-21T18:20:36 | 2018-06-21T18:20:36 | 138,207,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | from flask import Flask,render_template as render
app = Flask(__name__)
@app.route('/')
def table():
users = (
{'first_name': 'LeBron','last_name':'James'},
{'first_name': 'Kobe','last_name':'Bryant'},
{'first_name': 'Michael','last_name':'Jordan'},
{'first_name': 'Tim','last_name':'Duncan'}
)
return render('table.html', data=users)
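# Hedged sketch (added; templates/table.html is not part of this dump): the
# template would iterate the tuple passed in as `data`, e.g.
#
#   <table>
#     {% for user in data %}
#     <tr><td>{{ user.first_name }}</td><td>{{ user.last_name }}</td></tr>
#     {% endfor %}
#   </table>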
if __name__ == "__main__":
    app.run(debug=True)
| [
"[email protected]"
] | |
dcec49309b9d2263807ea94a99e6251ff33b8e55 | 7b55cfc4ffa7678e4c7b8f2312831ebbd549e54f | /proj1/tests/other-tests/oskis-angels_tests/regexperts_tests/correct/expr_mod.py | 44a804c0817239e5cab49272e2545d5101bace4a | [] | no_license | czchen1/cs164-projects | 0d330efef85421e611a436b165428ba0ddfb3512 | a04cafbcaafd32e518227dacf89a6d7837bf9f57 | refs/heads/master | 2020-03-27T04:03:31.727524 | 2018-08-23T21:43:46 | 2018-08-23T21:43:46 | 145,909,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | print 2 % 2
print 1 % 2 % 4    # % is left-associative: (1 % 2) % 4 -> 1
print ((1 % (2)))  # 1
| [
"[email protected]"
] | |
95bec86aa8cf1ceab3c433e3060644fb568c10d4 | e25b2bed99e7bec09a5ecdc2baee33c90b1d5fcf | /day1/chapter3/string-method.py | 27c2ccb534327e0c08b62b890b74bd2ccdd2853b | [] | no_license | alabiansolution/python-wknd1902 | 6d579be040a130c54850396490961493e7eb3663 | cb492085a522681a447bcb3668a30c851dec1247 | refs/heads/master | 2020-06-08T11:46:33.514204 | 2019-06-22T12:02:11 | 2019-06-22T12:02:11 | 193,223,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py |
name = ' Benedict '
remove_space = name.strip()
print(len(remove_space))
| [
"[email protected]"
] | |
616ac500554e2c805a0e873d7f50ac2c6131a9dd | 1ea0e2b4f064ba0de45a73c527ee89a36771e8fc | /src/sentry/web/frontend/project_plugins.py | 30451ad0baa6661f163b889f712f71df204c9932 | [
"BSD-2-Clause"
] | permissive | atlassian/sentry | 6775e59c317f20f96982e91c2b3c88c02ecbb56b | b937615079d7b24dc225a83b99b1b65da932fc66 | refs/heads/master | 2023-08-27T15:45:47.699173 | 2017-09-18T22:14:55 | 2017-09-18T22:14:55 | 103,999,066 | 1 | 5 | BSD-3-Clause | 2023-04-01T07:49:37 | 2017-09-18T22:38:18 | Python | UTF-8 | Python | false | false | 995 | py | from __future__ import absolute_import
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from sentry.plugins import plugins
from sentry.web.frontend.base import ProjectView
class ProjectPluginsView(ProjectView):
required_scope = 'project:write'
def handle(self, request, organization, team, project):
if request.POST:
enabled = set(request.POST.getlist('plugin'))
for plugin in plugins.configurable_for_project(project, version=None):
if plugin.slug in enabled:
plugin.enable(project)
else:
plugin.disable(project)
messages.add_message(
request, messages.SUCCESS, _('Your settings were saved successfully.')
)
return self.redirect(request.path)
context = {
'page': 'plugins',
}
return self.respond('sentry/projects/plugins/list.html', context)
| [
"[email protected]"
] | |
b27aa8ef9e31b0a9620b8601e53177fa99afc77b | 0354d8e29fcbb65a06525bcac1f55fd08288b6e0 | /clients/python-flask/generated/swagger_server/models/extension_class_impllinks.py | 43b79fcdb2ef700ae5858714220dd95fce07aa70 | [
"MIT"
] | permissive | zhiwei55/swaggy-jenkins | cdc52956a40e947067415cec8d2da1425b3d7670 | 678b5477f5f9f00022b176c34b840055fb1b0a77 | refs/heads/master | 2020-03-06T20:38:53.012467 | 2018-02-19T01:53:33 | 2018-02-19T01:54:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,322 | py | # coding: utf-8
from __future__ import absolute_import
from swagger_server.models.link import Link
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class ExtensionClassImpllinks(Model):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, _self: Link=None, _class: str=None):
"""
ExtensionClassImpllinks - a model defined in Swagger
:param _self: The _self of this ExtensionClassImpllinks.
:type _self: Link
:param _class: The _class of this ExtensionClassImpllinks.
:type _class: str
"""
self.swagger_types = {
'_self': Link,
'_class': str
}
self.attribute_map = {
'_self': 'self',
'_class': '_class'
}
self.__self = _self
self.__class = _class
@classmethod
def from_dict(cls, dikt) -> 'ExtensionClassImpllinks':
"""
Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The ExtensionClassImpllinks of this ExtensionClassImpllinks.
:rtype: ExtensionClassImpllinks
"""
return deserialize_model(dikt, cls)
@property
def _self(self) -> Link:
"""
Gets the _self of this ExtensionClassImpllinks.
:return: The _self of this ExtensionClassImpllinks.
:rtype: Link
"""
return self.__self
@_self.setter
def _self(self, _self: Link):
"""
Sets the _self of this ExtensionClassImpllinks.
:param _self: The _self of this ExtensionClassImpllinks.
:type _self: Link
"""
self.__self = _self
@property
def _class(self) -> str:
"""
Gets the _class of this ExtensionClassImpllinks.
:return: The _class of this ExtensionClassImpllinks.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class: str):
"""
Sets the _class of this ExtensionClassImpllinks.
:param _class: The _class of this ExtensionClassImpllinks.
:type _class: str
"""
self.__class = _class
| [
"[email protected]"
] | |
d445f3dfb6f9d07858e859c69bbd06685a4c2ee4 | 13faa0d553ed6c6a57791db3dfdb2a0580a1695b | /CodeChef/Practice/Beginner/LONGSEQ.py | 54638200608b86934ba675bf7674e2b4f9055ec4 | [] | no_license | kautsiitd/Competitive_Programming | ba968a4764ba7b5f2531d03fb9c53dc1621c2d44 | a0d8ae16646d73c346d9ce334e5b5b09bff67f67 | refs/heads/master | 2021-01-17T13:29:52.407558 | 2017-10-01T09:58:23 | 2017-10-01T09:58:23 | 59,496,650 | 0 | 0 | null | 2017-05-20T17:27:18 | 2016-05-23T15:56:55 | HTML | UTF-8 | Python | false | false | 141 | py | for _ in range(input()):
a = raw_input()
if a.count('0') == 1 or a.count('1') == 1:
print "Yes"
else:
print "No"
| [
"[email protected]"
] | |
4c36bb6674d30229ecd48b9147a7917e4d314c33 | b7cb542ba2a0b00472aedda3a47a42f34ed01feb | /blogApp/admin.py | d6cd851510d7f6c427df2426cb543fe1f321f30b | [] | no_license | sajibuzzaman/Django_Rest_Framework | 1c2f5bc00ef5d4b380cc94d60c66a77bdf69a955 | c83cce921ad41f82c7ac960000ae1c0e2e2c5604 | refs/heads/master | 2023-04-04T14:30:28.609151 | 2021-04-20T11:58:10 | 2021-04-20T11:58:10 | 359,554,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | from django.contrib import admin
from .models import Blog, CommentBlog
class CommentBlogInline(admin.TabularInline):
model = CommentBlog
extra = 0
readonly_fields = ['blog','user', 'comment', 'ip']
class BlogAdmin(admin.ModelAdmin):
list_display = ['title', 'date_posted', 'image_tag']
list_filter = ['date_posted']
inlines = [CommentBlogInline]
list_per_page = 10
class CommentBlogAdmin(admin.ModelAdmin):
readonly_fields = ['blog','user', 'comment', 'ip']
list_display = ['blog', 'status', 'created_at', 'updated_at', 'user']
list_filter = ['status', 'created_at']
list_per_page = 10
# Register your models here.
admin.site.register(Blog, BlogAdmin)
| [
"[email protected]"
] | |
a6d97883d813063e9e64c30383d463c3795377ed | d66993b0383ee7a97c9d5fe761269a3cb8e67e22 | /Ejercicios/EjercicioAños.py | 437838d33b904ab1f6d4be9893aad03aede04c48 | [] | no_license | rramosaveros/CursoPythonCisco | 09828e3d8190490c0dc30861ae241f5222e108d6 | 1508e67873adfcf31b8c78d3f5cb2a0572dfeb1c | refs/heads/master | 2023-06-27T12:07:52.652780 | 2021-08-01T14:19:19 | 2021-08-01T14:19:19 | 391,647,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | año = int(input("Introduzca un año:"))
#
if año <= 1580:
    print("No dentro del período del calendario gregoriano")    # not within the Gregorian calendar period
elif (año%4) != 0:
    print("Año común")       # common year
elif (año % 100) != 0:
    print("Año bisiesto")    # leap year
elif (año % 400) != 0:
    print("Año común")       # common year
else:
    print("Año bisiesto")    # leap year
#
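# Hedged note (added): the same Gregorian rule as a single boolean —
#   es_bisiesto = (año % 4 == 0) and (año % 100 != 0 or año % 400 == 0)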
| [
"[email protected]"
] | |
fe6407849dc00e3827ad92b25a8bd7a7c6f4a31c | 314a687b50622e0377f9829b824b046a18f2421d | /passbook/core/admin.py | 6e91b20800ee5bc367aef45818f109c11b0acf21 | [
"MIT"
] | permissive | fossabot/passbook | a4d13b31bb831adb6392d434cfde3080b7fc797e | cba17f6659404445ac3025f11657d89368cc8b4f | refs/heads/master | 2021-02-14T23:35:52.249963 | 2020-03-04T08:22:04 | 2020-03-04T08:22:04 | 244,845,185 | 0 | 0 | MIT | 2020-03-04T08:21:59 | 2020-03-04T08:21:59 | null | UTF-8 | Python | false | false | 120 | py | """passbook core model admin"""
from passbook.lib.admin import admin_autoregister
admin_autoregister("passbook_core")
| [
"[email protected]"
] | |
9ae571f70426fb357555e597a1796b30b120411c | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_04_01/aio/operations/_operations.py | 523f0e7792ab8919d6a9be76ddbebb382332f883 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 6,073 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_resource_skus_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ResourceSkusOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2019_04_01.aio.ComputeManagementClient`'s
:attr:`resource_skus` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, *, filter: Optional[str] = None, **kwargs: Any) -> AsyncIterable["_models.ResourceSku"]:
"""Gets the list of Microsoft.Compute SKUs available for your Subscription.
:keyword filter: The filter to apply on the operation. Only **location** filter is supported
currently. Default value is None.
:paramtype filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceSku or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_04_01.models.ResourceSku]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2019-04-01"))
cls: ClsType[_models.ResourceSkusResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_resource_skus_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceSkusResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus"}
| [
"[email protected]"
] | |
f44e8f2bbb168252f1c63bf3eaa369e9b9bca476 | ded1edb8ed387f9d1989334b8dd3ee397b36bf8b | /djangorest/api/views.py | 317904089ccab3c785a6d5359f3be0fc1b499057 | [] | no_license | elevenmunki/django_api | c790dfe6c8ab1d8da9454a9fc17d3bb339a7d39c | 3a16214f18d015d4ce21f42a473f9d69274aab80 | refs/heads/master | 2021-07-12T02:51:30.513141 | 2017-09-30T01:16:21 | 2017-09-30T01:16:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from rest_framework import generics
from .serializers import BucketlistSerializer
from .models import Bucketlist
# ListCreateAPIView is a generic view that provides GET (list all) and POST method handlers
class CreateView(generics.ListCreateAPIView):
"""This class defines the create behavior of our rest api."""
queryset = Bucketlist.objects.all()
serializer_class = BucketlistSerializer
def perform_create(self, serializer):
"""Save the post data when creating a new bucketlist."""
serializer.save()
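# Hedged sketch (added; serializers.py is not part of this dump) — a matching
# ModelSerializer might look like this; the field names are assumptions:
#
#   class BucketlistSerializer(serializers.ModelSerializer):
#       class Meta:
#           model = Bucketlist
#           fields = ('id', 'name', 'date_created', 'date_modified')
#           read_only_fields = ('date_created', 'date_modified')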
| [
"[email protected]"
] | |
d07a9c45cb065bb11701d18947cbb54a96100d40 | e3742e43ea3ca59016406d3c4308c21fad07d3d5 | /Basics/Shanbey/P30_章节回顾_2018世界杯各队进球数.py | 3fdd8891e6dc9a305211d850685b3a222863e75c | [] | no_license | yangyang0126/PythonLearning | e499c59ce04e884c3614c6a8c6a5b219234dce6c | 4a8ec4386ecb7609abb56c533131e4c283b628ec | refs/heads/master | 2020-08-10T18:15:18.720145 | 2020-05-24T02:29:19 | 2020-05-24T02:29:19 | 214,393,778 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py | '''
The variable goal_record below holds the scores of all 64 matches of the 2018 FIFA World Cup in Russia (knockout-stage penalty shoot-outs are not included).
Build a dictionary wc_2018_goal recording the goal tally of each of the 32 teams.
'''
goal_record = """Russia 5–0 Saudi Arabia;
Egypt 0–1 Uruguay;
Russia 3–1 Egypt;
Uruguay 1–0 Saudi Arabia;
Uruguay 3–0 Russia;
Saudi Arabia 2–1 Egypt;
Morocco 0–1 Iran;
Portugal 3–3 Spain;
Portugal 1–0 Morocco;
Iran 0–1 Spain;
Iran 1–1 Portugal;
Spain 2–2 Morocco;
France 2–1 Australia;
Peru 0–1 Denmark;
Denmark 1–1 Australia;
France 1–0 Peru;
Denmark 0–0 France;
Australia 0–2 Peru;
Argentina 1–1 Iceland;
Croatia 2–0 Nigeria;
Argentina 0–3 Croatia;
Nigeria 2–0 Iceland;
Nigeria 1–2 Argentina;
Iceland 1–2 Croatia;
Costa Rica 0–1 Serbia;
Brazil 1–1 Switzerland;
Brazil 2–0 Costa Rica;
Serbia 1–2 Switzerland;
Serbia 0–2 Brazil;
Switzerland 2–2 Costa Rica;
Germany 0–1 Mexico;
Sweden 1–0 South Korea;
South Korea 1–2 Mexico;
Germany 2–1 Sweden;
South Korea 2–0 Germany;
Mexico 0–3 Sweden;
Belgium 3–0 Panama;
Tunisia 1–2 England;
Belgium 5–2 Tunisia;
England 6–1 Panama;
England 0–1 Belgium;
Panama 1–2 Tunisia;
Colombia 1–2 Japan;
Poland 1–2 Senegal;
Japan 2–2 Senegal;
Poland 0–3 Colombia;
Japan 0–1 Poland;
Senegal 0–1 Colombia;
France 4–3 Argentina;
Uruguay 2–1 Portugal;
Spain 1–1 Russia;
Croatia 1–1 Denmark;
Brazil 2–0 Mexico;
Belgium 3–2 Japan;
Sweden 1–0 Switzerland;
Colombia 1–1 England;
Uruguay 0–2 France;
Brazil 1–2 Belgium;
Sweden 0–2 England;
Russia 2–2 Croatia;
France 1–0 Belgium;
Croatia 2–1 England;
Belgium 2–0 England;
France 4–2 Croatia"""
team_goal = []
for match in goal_record.split(";\n"):    # one entry per match
    for team in match.split("–"):         # e.g. "England 6" and "1 Panama"
        team_goal.append(team)
wc_2018_goal = {}
for num in range(len(team_goal)):
if num % 2 == 0:
wc_2018_goal[team_goal[num][:-2]] = 0
else:
wc_2018_goal[team_goal[num][2:]] = 0
for num in range(len(team_goal)):
if num % 2 == 0:
wc_2018_goal[team_goal[num][:-2]] += int(team_goal[num][-1])
else:
wc_2018_goal[team_goal[num][2:]] += int(team_goal[num][0])
for key,value in wc_2018_goal.items():
print(key,value)
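# Note (added): the slicing above ([:-2], [-1], [2:], [0]) works only because
# every score in this tournament is a single digit (the maximum here is 6).
# A parse that also survives double-digit scores would split on a space, e.g.
#
#   home_team, home_goals = half.rsplit(" ", 1)   # for entries like "England 6"
#   away_goals, away_team = half.split(" ", 1)    # for entries like "1 Panama"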
| [
"[email protected]"
] | |
50b8d4da6ad76e44d8f29598f31da8e0096c0e67 | a397ac42dab5e68342a412a0b00cbcf401663d13 | /multithreadingTest.py | 5a334e82563ae69481bcbf69394dba9799268153 | [] | no_license | declanoller/rpi_camera1 | b2fdc2798e3c44980a0d09ac1414a65e06ddc821 | 5dff10b9745b660d21a23dd4a9d42be34d1340b1 | refs/heads/master | 2020-03-21T09:58:25.576696 | 2018-08-07T22:13:14 | 2018-08-07T22:13:14 | 138,427,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | from multiprocessing import Pool, Value
import datetime
from time import sleep
import os
def processFile(fName):
dtString = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
print("Processed file "+fName+" at time "+dtString+"\n")
def fileMonitor(dir):
print("entering filemonitor")
processedFiles = []
files = os.listdir(dir)
nFiles = len(files)
print("there are {} files in the directory.".format(nFiles))
while True:
sleep(.5)
files = os.listdir(dir)
if len(files)>nFiles:
print("new files found")
nFiles = len(files)
print("this many files now:",nFiles)
[processFile(file) for file in files if file not in processedFiles]
[processedFiles.append(file) for file in files if file not in processedFiles]
def fileSaver(path):
print("entering filesaver")
while True:
sleep(3)
dtString = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
fName = path+"/"+dtString+".txt"
print("created new file "+fName+" at time "+dtString+"\n")
f = open(fName,'w')
f.write("created new file "+fName+" at time "+dtString+"\n")
f.close()
if len(os.sys.argv)>1:
dir = os.sys.argv[1]
print(dir)
print(len(dir))
print(len((str(dir),)))
pool = Pool(processes=2)
p1 = pool.apply_async(fileMonitor,args=(dir,))
p2 = pool.apply_async(fileSaver,args=(dir,))
    # Both workers loop forever, so get() can never return a value; each call
    # raises TimeoutError after 30 s, which is what ends this demo.
    print(p1.get(timeout=30))
    print(p2.get(timeout=30))
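# Hedged note (added): plain processes would express the "run two loops side
# by side" intent more directly than Pool.apply_async + get(timeout=...), e.g.:
#
#   from multiprocessing import Process
#   p = Process(target=fileMonitor, args=(dir,), daemon=True)
#   p.start()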
| [
"[email protected]"
] | |
bcf1792361959fe827e7c081b4078be6ae08174b | 6580ba5d135c4f33f1a0996953ba2a65f7458a14 | /applications/ji178/models/fdutilities0color.py | 91fd463eb0f338b25751b6a4ed0885b19f611149 | [
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | ali96343/facew2p | 02b038d3853691264a49de3409de21c8a33544b8 | a3881b149045e9caac344402c8fc4e62edadb42f | refs/heads/master | 2021-06-10T17:52:22.200508 | 2021-05-10T23:11:30 | 2021-05-10T23:11:30 | 185,795,614 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,439 | py | #
# table for controller: utilities_color
#
from gluon.contrib.populate import populate
db.define_table('dutilities0color',
Field('f0', label='key', writable = True , length= 1000),
Field('f1', 'text', label='data string', length= 1000),
Field('f2', 'text', label='save data string', length= 1000, default='' ),
)
#
if not db(db.dutilities0color.id ).count():
db.dutilities0color.insert( f0= 'sp247', f1= '(247)Dashboard')
db.dutilities0color.insert( f0= 'sp248', f1= '(248)Components')
db.dutilities0color.insert( f0= 'hf249', f1= '(249)Custom Components:')
db.dutilities0color.insert( f0= 'aa251', f1= '(251)Buttons')
db.dutilities0color.insert( f0= 'aa253', f1= '(253)Cards')
db.dutilities0color.insert( f0= 'sp254', f1= '(254)Utilities')
db.dutilities0color.insert( f0= 'hf255', f1= '(255)Custom Utilities:')
db.dutilities0color.insert( f0= 'aa257', f1= '(257)Colors')
db.dutilities0color.insert( f0= 'aa259', f1= '(259)Borders')
db.dutilities0color.insert( f0= 'aa261', f1= '(261)Animations')
db.dutilities0color.insert( f0= 'aa263', f1= '(263)Other')
db.dutilities0color.insert( f0= 'sp264', f1= '(264)Pages')
db.dutilities0color.insert( f0= 'hf265', f1= '(265)Login Screens:')
db.dutilities0color.insert( f0= 'aa267', f1= '(267)Login')
db.dutilities0color.insert( f0= 'aa269', f1= '(269)Register')
db.dutilities0color.insert( f0= 'aa271', f1= '(271)Forgot Password')
db.dutilities0color.insert( f0= 'hf272', f1= '(272)Other Pages:')
db.dutilities0color.insert( f0= 'aa274', f1= '(274)404 Page')
db.dutilities0color.insert( f0= 'aa276', f1= '(276)Blank Page')
db.dutilities0color.insert( f0= 'sp278', f1= '(278)Charts')
db.dutilities0color.insert( f0= 'sp280', f1= '(280)Tables')
db.dutilities0color.insert( f0= 'pb281', f1= '(281)Search for...')
db.dutilities0color.insert( f0= 'pb282', f1= '(282)Search for...')
db.dutilities0color.insert( f0= 'sx283', f1= '(283)3+')
db.dutilities0color.insert( f0= 'di284', f1= '(284)December 12, 2019')
db.dutilities0color.insert( f0= 'sx285', f1= '(285)A new monthly report is ready to download!')
db.dutilities0color.insert( f0= 'di286', f1= '(286)December 7, 2019')
db.dutilities0color.insert( f0= 'di287', f1= '(287)December 2, 2019')
db.dutilities0color.insert( f0= 'aa288', f1= '(288)Show All Alerts')
    db.dutilities0color.insert( f0= 'di289', f1= '(289)Hi there! I am wondering if you can help me with a problem I\'ve been having.')
db.dutilities0color.insert( f0= 'di290', f1= '(290)Emily Fowler 58m')
db.dutilities0color.insert( f0= 'di291', f1= '(291)I have the photos that you ordered last month, how would you like them sent to you?')
db.dutilities0color.insert( f0= 'di292', f1= '(292)Jae Chun 1d')
    db.dutilities0color.insert( f0= 'di293', f1= '(293)Last month\'s report looks great, I am very happy with the progress so far, keep up the good work!')
    db.dutilities0color.insert( f0= 'di294', f1= '(294)Morgan Alvarez 2d')
    db.dutilities0color.insert( f0= 'di295', f1= '(295)Am I a good boy? The reason I ask is because someone told me that people say this to all dogs, even if they aren\'t good...')
db.dutilities0color.insert( f0= 'di296', f1= '(296)Chicken the Dog 2w')
db.dutilities0color.insert( f0= 'aa297', f1= '(297)Read More Messages')
db.dutilities0color.insert( f0= 'sx298', f1= '(298)Valerie Luna')
db.dutilities0color.insert( f0= 'ha299', f1= '(299)Color Utilities')
    db.dutilities0color.insert( f0= 'pc300', f1= '(300)page. The custom utilities below were created to extend this theme past the default utility classes built into Bootstrap\'s framework.')
db.dutilities0color.insert( f0= 'aa301', f1= '(301)Bootstrap Documentation')
db.dutilities0color.insert( f0= 'hf302', f1= '(302)Custom Text Color Utilities')
db.dutilities0color.insert( f0= 'pc303', f1= '(303).text-gray-100')
db.dutilities0color.insert( f0= 'pc304', f1= '(304).text-gray-200')
db.dutilities0color.insert( f0= 'pc305', f1= '(305).text-gray-300')
db.dutilities0color.insert( f0= 'pc306', f1= '(306).text-gray-400')
db.dutilities0color.insert( f0= 'pc307', f1= '(307).text-gray-500')
db.dutilities0color.insert( f0= 'pc308', f1= '(308).text-gray-600')
db.dutilities0color.insert( f0= 'pc309', f1= '(309).text-gray-700')
db.dutilities0color.insert( f0= 'pc310', f1= '(310).text-gray-800')
db.dutilities0color.insert( f0= 'pc311', f1= '(311).text-gray-900')
db.dutilities0color.insert( f0= 'hf312', f1= '(312)Custom Font Size Utilities')
db.dutilities0color.insert( f0= 'pc313', f1= '(313).text-xs')
db.dutilities0color.insert( f0= 'pc314', f1= '(314).text-lg')
db.dutilities0color.insert( f0= 'hf315', f1= '(315)Custom Background Gradient Utilities')
db.dutilities0color.insert( f0= 'di316', f1= '(316).bg-gradient-primary')
db.dutilities0color.insert( f0= 'di317', f1= '(317).bg-gradient-success')
db.dutilities0color.insert( f0= 'di318', f1= '(318).bg-gradient-info')
db.dutilities0color.insert( f0= 'di319', f1= '(319).bg-gradient-warning')
db.dutilities0color.insert( f0= 'di320', f1= '(320).bg-gradient-danger')
db.dutilities0color.insert( f0= 'hf321', f1= '(321)Custom Grayscale Background Utilities')
db.dutilities0color.insert( f0= 'di322', f1= '(322).bg-gray-100')
db.dutilities0color.insert( f0= 'di323', f1= '(323).bg-gray-200')
db.dutilities0color.insert( f0= 'di324', f1= '(324).bg-gray-300')
db.dutilities0color.insert( f0= 'di325', f1= '(325).bg-gray-400')
db.dutilities0color.insert( f0= 'di326', f1= '(326).bg-gray-500')
db.dutilities0color.insert( f0= 'di327', f1= '(327).bg-gray-600')
db.dutilities0color.insert( f0= 'di328', f1= '(328).bg-gray-700')
db.dutilities0color.insert( f0= 'di329', f1= '(329).bg-gray-800')
db.dutilities0color.insert( f0= 'di330', f1= '(330).bg-gray-900')
db.dutilities0color.insert( f0= 'sp331', f1= '(331)Copyright © Your Website 2019')
db.dutilities0color.insert( f0= 'he332', f1= '(332)Ready to Leave?')
db.dutilities0color.insert( f0= 'sx333', f1= '(333)')
db.dutilities0color.insert( f0= 'di334', f1= '(334)Select Logout below if you are ready to end your current session.')
db.dutilities0color.insert( f0= 'bu335', f1= '(335)Cancel')
db.dutilities0color.insert( f0= 'aa337', f1= '(337)Logout')
db.commit()
#
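# Hedged usage sketch (added; web2py) — a controller could read one entry back:
#
#   row = db(db.dutilities0color.f0 == 'sp247').select().first()
#   text = row.f2 or row.f1    # prefer the saved string, fall back to the original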
| [
"[email protected]"
] | |
7805ee413e99196317a0afddff32012074eee45c | 37d6493969b783755a64b182c588b05f32ff8964 | /cadash/utils.py | f9d2b6c7b1eed9dafe16bca523c8ab54ac9cdbec | [
"Apache-2.0"
] | permissive | harvard-dce/cadash | 97943f726fe73d88ee8677eeb670f4f82ab5c8c5 | 10300ca4ce097d8a633612554d257b939633eeae | refs/heads/master | 2020-04-04T21:42:34.816945 | 2017-07-26T18:31:23 | 2017-07-26T18:31:23 | 59,121,777 | 0 | 1 | null | 2016-09-16T15:58:13 | 2016-05-18T14:13:25 | JavaScript | UTF-8 | Python | false | false | 4,831 | py | # -*- coding: utf-8 -*-
"""Helper utilities and decorators."""
import os
import logging
import logging.config
import platform
import re
import sys
import yaml
from flask import current_app
from flask import flash
from flask import redirect
from flask import request
from flask import url_for
from flask_login import current_user
from functools import wraps
import requests
from requests.auth import HTTPBasicAuth
from cadash import __version__
from cadash.user.models import BaseUser
def flash_errors(form, category='warning'):
"""Flash all errors for a form."""
for field, errors in form.errors.items():
for error in errors:
flash('{0} - {1}'.format(getattr(form, field).label.text, error), category)
# from http://victorlin.me/posts/2012/08/26/good-logging-practice-in-python
def setup_logging(
app,
default_level=logging.INFO):
"""
set up logging config.
:param: app: application obj; relevant app.config['LOG_CONFIG']
which is the full path to the yaml file with configs for logs
:param: default_level: log level for basic config, default=INFO
"""
if os.path.exists(app.config['LOG_CONFIG']):
with open(app.config['LOG_CONFIG'], 'rt') as f:
            config = yaml.load(f.read())  # note: yaml.safe_load is safer for untrusted config files
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
def clean_name(name):
"""
    strip non-alphanumeric characters from `name`.
    replaces each run of non-alphanumerics with an underscore '_' and lower-cases the result
"""
return re.sub('[^0-9a-zA-Z]+', '_', name.strip()).lower()
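# example: clean_name('  Lecture Hall #3 ') -> 'lecture_hall_3'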
def pull_data(url, creds=None):
"""
get text file from `url`.
reads a text file from given url
if basic auth needed, pass args creds['user'] and creds['pwd']
"""
headers = {
'User-Agent': default_useragent(),
'Accept-Encoding': 'gzip, deflate',
'Accept': 'text/html, text/*'
}
au = None
if creds is not None:
if 'user' in creds and 'pwd' in creds:
au = HTTPBasicAuth(creds['user'], creds['pwd'])
headers.update({'X-REQUESTED-AUTH': 'Basic'})
try:
response = requests.get(url, headers=headers, auth=au)
except requests.HTTPError as e:
logger = logging.getLogger(__name__)
logger.warning('data from url(%s) is unavailable. Error: %s' % (url, e))
return None
else:
return response.text
def default_useragent():
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join(
[_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return ' '.join([
'%s/%s' % (__name__, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def fetch_ldap_user(usr, pwd, cli):
"""fetch user in ldap, and the groups user belongs to.
returns a BaseUser object or None if not authenticated or unknown
"""
if cli.is_authenticated(usr, pwd):
u = BaseUser(usr)
groups = cli.fetch_groups(usr)
u.place_in_groups(groups)
return u
else:
return None
def is_authorized_by_groups(user, groups):
"""return True if `user` in any group of list `groups`."""
for g in groups:
if user.is_in_group(g):
return True
return False
def requires_roles(*roles):
def wrapper(f):
@wraps(f)
def wrapped(*args, **kwargs):
if not current_app.config.get('LOGIN_DISABLED'):
if not is_authorized_by_groups(current_user, *roles):
flash('You need to login, or do not have credentials to access this page', 'info')
return redirect(url_for('public.home', next=request.url))
return f(*args, **kwargs)
return wrapped
return wrapper
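# Hedged usage sketch (added; the route and group names are illustrative only):
#
#   @blueprint.route('/admin')
#   @requires_roles(['deadmin'])
#   def admin_page():
#       ...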
| [
"[email protected]"
] | |
4bb2be4d6fdbddbaf576d3a9d74969ce33b1a2ce | f44aa93f92f2ddfa0e3ed6595c0b77c3ab14dde1 | /v7_Modular_events/runner.py | af0a0cf748808975286cabf01c5d5f0c0a51b50e | [] | no_license | michaeljpitcher/Lung-Network-Model | 5b7efd2f4522852e9be415aa3485419226f9fa8f | 8f23dc1c2002bdabc0f8ec5bd5078d64628adcb5 | refs/heads/master | 2020-06-23T05:17:07.357279 | 2017-06-29T15:28:24 | 2017-06-29T15:28:24 | 74,665,064 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | __author__ = "Michael J. Pitcher"
from Models.TB.TBModel3 import TBModel3
from Models.TB.TBClasses import *
from Models.TB.TBEventProbabilityKeys import *
params = {}
params[P_REPLICATION_BACTERIA_FAST] = 0.1
params[P_REPLICATION_BACTERIA_SLOW] = 0.01
params[P_TRANSLOCATE_BRONCHUS_BACTERIA_FAST] = 0.0
params[P_TRANSLOCATE_BRONCHUS_BACTERIA_SLOW] = 0.0
params[P_CHANGE_BACTERIA_FAST_TO_SLOW] = 0.0
params[P_CHANGE_BACTERIA_SLOW_TO_FAST] = 0.0
params[P_INGEST_AND_DESTROY_MACROPHAGE_FAST] = 0.0
params[P_INGEST_AND_DESTROY_MACROPHAGE_SLOW] = 0.0
params[P_RECRUITMENT_BPS_MACROPHAGE] = 0.0
params[P_RECRUITMENT_LYMPH_MACROPHAGE] = 0.0
params[P_DEATH_MACROPHAGE] = 0.0
model = TBModel3(params)
loads_f = {1: 10}
loads_s = {2: 5}
loads_m = {3: 10}
model.load(loads_f, loads_s, loads_m)
import cProfile
p = cProfile.Profile()
p.enable()
model.run(75)
p.disable()
p.print_stats('cumtime')
# model.display_network([BACTERIA_FAST, BACTERIA_SLOW, MACROPHAGE])
| [
"[email protected]"
] | |
b7a9408ba9cddb579b33e13dd708ac9ca84475dd | 3fdf0b58106c7e9bf3f16ee8dd7ec2192757d4d7 | /qa/rpc-tests/maxuploadtarget.py | d9ed5df4700da7bde8219c04d0042958a60ea4cd | [
"MIT"
] | permissive | USDEcoin/usdecoin | 935736c40134fdd22b9248a01a7e323d86615f9e | 50e58ab20db30b5bb6e340512c4789e4bd066ba0 | refs/heads/master | 2021-08-16T07:17:39.074856 | 2020-08-13T08:17:43 | 2020-08-13T08:17:43 | 212,833,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,743 | py | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (> 1 week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("USDED", "usdecoind"),
help="usdecoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
            # Create a basic transaction that will send change back to ourselves after accounting for a fee,
            # and then insert the 128 generated transaction outs in the middle; rawtx[92] is where the number
            # of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
        # test_nodes[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * MAX_BLOCK_SIZE
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
        # 144MB will be reserved for relaying new blocks, so with a 200MB
        # target and ~1MB blocks expect this to succeed for roughly 60 tries.
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
| [
"[email protected]"
] | |
a10f9c0aa132f6863b3adb1a46e00038ae728335 | 029948b3fd0e41d80d66c84d808abff4fcb24ac8 | /dnac_api_client/models/claim_device_request_config_list.py | 15bfc7d92efe92a13a449347e88c0422c99f1119 | [] | no_license | yijxiang/dnac-api-client | 842d1da9e156820942656b8f34342d52c96d3c37 | 256d016e2df8fc1b3fdad6e28f441c6005b43b07 | refs/heads/master | 2021-09-25T21:10:09.502447 | 2018-10-25T14:39:57 | 2018-10-25T14:39:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,060 | py | # coding: utf-8
"""
Cisco DNA Center Platform v. 1.2.x (EFT)
REST API (EFT) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ClaimDeviceRequestConfigList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'config_id': 'str',
'config_parameters': 'list[ClaimDeviceRequestConfigParameters]'
}
attribute_map = {
'config_id': 'configId',
'config_parameters': 'configParameters'
}
def __init__(self, config_id=None, config_parameters=None): # noqa: E501
"""ClaimDeviceRequestConfigList - a model defined in OpenAPI""" # noqa: E501
self._config_id = None
self._config_parameters = None
self.discriminator = None
if config_id is not None:
self.config_id = config_id
if config_parameters is not None:
self.config_parameters = config_parameters
@property
def config_id(self):
"""Gets the config_id of this ClaimDeviceRequestConfigList. # noqa: E501
:return: The config_id of this ClaimDeviceRequestConfigList. # noqa: E501
:rtype: str
"""
return self._config_id
@config_id.setter
def config_id(self, config_id):
"""Sets the config_id of this ClaimDeviceRequestConfigList.
:param config_id: The config_id of this ClaimDeviceRequestConfigList. # noqa: E501
:type: str
"""
self._config_id = config_id
@property
def config_parameters(self):
"""Gets the config_parameters of this ClaimDeviceRequestConfigList. # noqa: E501
:return: The config_parameters of this ClaimDeviceRequestConfigList. # noqa: E501
:rtype: list[ClaimDeviceRequestConfigParameters]
"""
return self._config_parameters
@config_parameters.setter
def config_parameters(self, config_parameters):
"""Sets the config_parameters of this ClaimDeviceRequestConfigList.
:param config_parameters: The config_parameters of this ClaimDeviceRequestConfigList. # noqa: E501
:type: list[ClaimDeviceRequestConfigParameters]
"""
self._config_parameters = config_parameters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClaimDeviceRequestConfigList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
b906a8ee56c5bdb20867a9f5ededa1379458b25e | 1e3461947b86538c384d2faaab9a505912a151fb | /color_transformations.py | 6fa7f8fdaaf8168fd16401c26c13470ae5408884 | [
"MIT"
] | permissive | maxalbert/colormap-selector | c20a53ec36f90637aef11620434cc7811e49cc97 | 43ec6e70058e4b75496def3e49471c76c8684ef3 | refs/heads/master | 2021-01-01T05:50:17.044061 | 2015-01-09T00:08:47 | 2015-01-09T00:08:47 | 28,750,875 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,222 | py | import numpy as np
import matplotlib.colors as mcolors
# NB: despite the name, these are the ICC D50 reference-white values
# (D65 would be roughly [0.9505, 1, 1.0888]); the name is kept unchanged
# so existing callers keep working.
whitepoint_D65 = np.array([0.9642, 1, 0.8249])
A_xyz2rgb = np.array(
[[3.240479, -1.537150, -0.498535],
[-0.969256, 1.875992, 0.041556 ],
[0.055648, -0.204043, 1.057311 ]])
A_rgb2xyz = np.linalg.inv(A_xyz2rgb)
class RGBRangeError(Exception):
pass
def f(t):
if t > (6./29)**3:
return t**(1./3)
else:
return 1./3 * (29./6)**2 * t + 4./29
def f_inv(t):
if t > 6./29:
return t**3
else:
return 3 * (6./29)**2 * (t - 4./29)
def xyz2lab(xyz, whitepoint=whitepoint_D65):
"""
    Convert from XYZ to CIELAB color coordinates.
*Arguments*
xyz: 3-tuple (or other list-like)
*Returns*
        numpy array with entries [L, a, b].
"""
X, Y, Z = xyz
Xw, Yw, Zw = whitepoint
L = 116. * f(Y/Yw) - 16
a = 500. * (f(X/Xw) - f(Y/Yw))
b = 200. * (f(Y/Yw) - f(Z/Zw))
return np.array([L, a, b], dtype=float)
def lab2xyz(lab, whitepoint=whitepoint_D65):
L, a, b = lab
Xw, Yw, Zw = whitepoint
Y = Yw * f_inv(1./116 * (L + 16))
X = Xw * f_inv(1./116 * (L + 16) + 0.002 * a)
Z = Zw * f_inv(1./116 * (L + 16) - 0.005 * b)
return X, Y, Z
def rgb2xyz(rgb):
rgb = np.asarray(rgb)
return np.dot(A_rgb2xyz, rgb)
def xyz2rgb(xyz, assert_valid=False, clip=False):
xyz = np.asarray(xyz)
rgb = np.dot(A_xyz2rgb, xyz)
r, g, b = rgb
if assert_valid and ((r < 0.0 or r > 1.0) or
(g < 0.0 or g > 1.0) or
(b < 0.0 or b > 1.0)):
raise RGBRangeError()
if clip:
rgb = np.clip(rgb, 0., 1.)
return rgb
def rgb2lab(rgb, whitepoint=whitepoint_D65):
return xyz2lab(rgb2xyz(rgb), whitepoint=whitepoint)
def lab2rgb(lab, whitepoint=whitepoint_D65, assert_valid=False, clip=False):
return xyz2rgb(lab2xyz(lab, whitepoint=whitepoint), assert_valid=assert_valid, clip=clip)
def rgb2rgba(rgb):
r, g, b = rgb
return np.array([r, g, b, 1.])
def lab2rgba(lab, whitepoint=whitepoint_D65, assert_valid=False, clip=False):
return rgb2rgba(lab2rgb(lab, whitepoint=whitepoint, assert_valid=assert_valid, clip=clip))
def linear_colormap(pt1, pt2, coordspace='RGB'):
"""
Define a perceptually linear colormap defined through a line in the
CIELab [1] color space. The line is defined by its endpoints `pt1`,
`pt2`. The argument `coordspace` can be either `RGB` (the default)
or `lab` and specifies whether the coordinates of `pt1`, `pt2` are
given in RGB or Lab coordinates.
[1] http://dba.med.sc.edu/price/irf/Adobe_tg/models/cielab.html
"""
if coordspace == 'RGB':
pt1 = np.array(rgb2lab(pt1))
pt2 = np.array(rgb2lab(pt2))
elif coordspace == 'Lab':
pt1 = np.array(pt1)
pt2 = np.array(pt2)
else:
raise ValueError("Argument 'coordspace' must be either 'RGB' "
"or 'Lab'. Got: {}".format(coordspace))
tvals = np.linspace(0, 1, 256)
path_vals = np.array([(1-t) * pt1 + t * pt2 for t in tvals])
cmap_vals = np.array([lab2rgb(pt) for pt in path_vals])
cmap = mcolors.ListedColormap(cmap_vals)
return cmap
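# Example usage (a minimal sketch added for illustration; the endpoint
# colors below are arbitrary assumptions, not values from this module):
if __name__ == "__main__":
    # Round trip: RGB -> Lab -> RGB should recover the input color.
    rgb_in = (0.2, 0.4, 0.6)
    assert np.allclose(lab2rgb(rgb2lab(rgb_in)), rgb_in, atol=1e-6)
    # Perceptually linear colormap between two RGB endpoints.
    cmap = linear_colormap((0.0, 0.0, 0.5), (1.0, 1.0, 0.6), coordspace='RGB')
    print(cmap(0.5))  # RGBA tuple at the midpoint of the colormap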
| [
"[email protected]"
] | |
65ff0d2791ba3bee69abbe3799d327244d350005 | 0b2cc875b84e1b43daa7e0ccabe864ec69278ab0 | /flarestack/cosmo/rates/frb_rates.py | 2d28e7f08dbd287c963a84bddf5dfcccf0f9042b | [
"MIT"
] | permissive | icecube/flarestack | 4ce9f165d8a0dd1b44e8ec2185f216c92fc27d11 | 4d02244e3b92744a08b3c09009cc9aa3ea5e7931 | refs/heads/master | 2023-08-16T16:05:18.492306 | 2023-08-11T23:38:55 | 2023-08-11T23:38:55 | 127,512,114 | 9 | 4 | MIT | 2023-09-13T00:17:44 | 2018-03-31T08:03:47 | Jupyter Notebook | UTF-8 | Python | false | false | 2,479 | py | import logging
from astropy import units as u
from flarestack.cosmo.rates.sfr_rates import get_sfr_evolution
local_frb_rates = {
"bochenek_20": (
7.23 * 10**7.0 / (u.Gpc**3 * u.yr),
(7.23 - 6.13) * 10**7.0 / (u.Gpc**3 * u.yr),
(7.23 + 8.78) * 10**7.0 / (u.Gpc**3 * u.yr),
"https://arxiv.org/abs/2005.10828",
),
}
def get_local_frb_rate(rate_name=None, with_range=False):
"""Returns a local rate of Fast Radio Bursts (FBBs).
:param rate_name: Name of local FRB rate to be used
:param with_range: Boolean to return +/- one sigma range functions alongside central rate
:return: Local rate
"""
if rate_name is None:
logging.info("No rate specified. Assuming default rate.")
rate_name = "bochenek_20"
if rate_name not in local_frb_rates.keys():
raise Exception(
f"Rate name '{rate_name}' not recognised. "
f"The following rates are available: {local_frb_rates.keys()}"
)
else:
local_rate, lower_lim, upper_lim, ref = local_frb_rates[rate_name]
logging.info(f"Loaded rate '{rate_name}' ({ref})")
if with_range:
if lower_lim is None:
raise Exception(
f"No one sigma rate range found for rate '{rate_name}'. "
f"Use a different rate, or set 'with_range=False'."
)
return (
local_rate.to("Mpc-3 yr-1"),
lower_lim.to("Mpc-3 yr-1"),
upper_lim.to("Mpc-3 yr-1"),
)
else:
return local_rate.to("Mpc-3 yr-1")
def get_frb_rate(evolution_name=None, rate_name=None, with_range=False, **kwargs):
"""Returns a local rate of core-collapse supernovae (CCSNe) as a function of redshift.
:param evolution_name: Name of Star Formation evolution to use
:param rate_name: Name of local FRB rate to be used
:param with_range: Boolean to return +/- one sigma range functions alongside central rate
:return: Rate as a function of redshift
"""
normed_evolution = get_sfr_evolution(evolution_name=evolution_name, **kwargs)
local_rate = get_local_frb_rate(rate_name=rate_name, with_range=with_range)
if with_range:
return (
lambda z: local_rate[0] * normed_evolution(z),
lambda z: local_rate[1] * normed_evolution(z),
lambda z: local_rate[2] * normed_evolution(z),
)
else:
return lambda z: local_rate * normed_evolution(z)
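# Example usage (illustrative sketch; assumes the default star-formation
# history from get_sfr_evolution resolves in this flarestack install):
# rate = get_frb_rate(rate_name="bochenek_20")
# print(rate(0.5).to("Gpc-3 yr-1"))  # volumetric FRB rate at redshift 0.5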
| [
"[email protected]"
] | |
4d925c84db3c96226c9de413a722a8e09a7e2080 | 2dd26e031162e75f37ecb1f7dd7f675eeb634c63 | /nemo/collections/nlp/models/language_modeling/bert_lm_model.py | 6b03d86982b016d0606784c76c4d0cdf9f0a2c91 | [
"Apache-2.0"
] | permissive | NVIDIA/NeMo | 1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1 | c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7 | refs/heads/main | 2023-08-21T15:28:04.447838 | 2023-08-21T00:49:36 | 2023-08-21T00:49:36 | 200,722,670 | 7,957 | 1,986 | Apache-2.0 | 2023-09-14T18:49:54 | 2019-08-05T20:16:42 | Python | UTF-8 | Python | false | false | 11,620 | py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Optional
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from nemo.collections.common.losses import AggregatorLoss, CrossEntropyLoss, SmoothedCrossEntropyLoss
from nemo.collections.common.metrics import Perplexity
from nemo.collections.nlp.data.language_modeling.lm_bert_dataset import (
BertPretrainingDataset,
BertPretrainingPreprocessedDataloader,
)
from nemo.collections.nlp.modules.common import BertPretrainingTokenClassifier, SequenceClassifier
from nemo.collections.nlp.modules.common.lm_utils import get_lm_model
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.classes.modelPT import ModelPT
from nemo.core.neural_types import NeuralType
from nemo.utils import logging
__all__ = ["BERTLMModel"]
class BERTLMModel(ModelPT):
"""
BERT language model pretraining.
"""
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
return self.bert_model.input_types
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
output_types_dict = {"mlm_log_probs": self.mlm_classifier.output_types["log_probs"]}
if not self.only_mlm_loss:
output_types_dict["nsp_logits"] = self.nsp_classifier.output_types["logits"]
return output_types_dict
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
vocab_file = None
config_dict = None
config_file = None
if cfg.tokenizer is not None:
if cfg.tokenizer.get('tokenizer_name') and cfg.tokenizer.get('tokenizer_model'):
self._setup_tokenizer(cfg.tokenizer)
if cfg.get('tokenizer.vocab_file'):
vocab_file = self.register_artifact('tokenizer.vocab_file', cfg.tokenizer.vocab_file)
else:
self.tokenizer = None
super().__init__(cfg=cfg, trainer=trainer)
if cfg.get('language_model.config'):
config_dict = OmegaConf.to_container(cfg.language_model.config)
if cfg.get('language_model.config_file'):
config_file = self.register_artifact('language_model.config_file', cfg.language_model.config_file)
self.bert_model = get_lm_model(
config_file=config_file, config_dict=config_dict, vocab_file=vocab_file, trainer=trainer, cfg=cfg,
)
self.hidden_size = self.bert_model.config.hidden_size
self.vocab_size = self.bert_model.config.vocab_size
self.only_mlm_loss = cfg.only_mlm_loss
self.mlm_classifier = BertPretrainingTokenClassifier(
hidden_size=self.hidden_size,
num_classes=self.vocab_size,
num_layers=cfg.num_tok_classification_layers,
activation="gelu",
log_softmax=True,
use_transformer_init=True,
)
self.mlm_loss = SmoothedCrossEntropyLoss()
if not self.only_mlm_loss:
self.nsp_classifier = SequenceClassifier(
hidden_size=self.hidden_size,
num_classes=2,
num_layers=cfg.num_seq_classification_layers,
log_softmax=False,
activation="tanh",
use_transformer_init=True,
)
self.nsp_loss = CrossEntropyLoss()
self.agg_loss = AggregatorLoss(num_inputs=2)
        # tie the weights of the MLM softmax layer and the encoder's word-embedding layer
if (
self.mlm_classifier.mlp.last_linear_layer.weight.shape
!= self.bert_model.embeddings.word_embeddings.weight.shape
):
raise ValueError("Final classification layer does not match embedding layer.")
self.mlm_classifier.mlp.last_linear_layer.weight = self.bert_model.embeddings.word_embeddings.weight
# create extra bias
# setup to track metrics
self.validation_perplexity = Perplexity()
self.setup_optimization(cfg.optim)
@typecheck()
def forward(self, input_ids, attention_mask, token_type_ids):
"""
No special modification required for Lightning, define it as you normally would
in the `nn.Module` in vanilla PyTorch.
"""
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask,
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
mlm_log_probs = self.mlm_classifier(hidden_states=hidden_states)
if self.only_mlm_loss:
return (mlm_log_probs,)
nsp_logits = self.nsp_classifier(hidden_states=hidden_states)
return mlm_log_probs, nsp_logits
def _compute_losses(self, mlm_log_probs, nsp_logits, output_ids, output_mask, labels):
mlm_loss = self.mlm_loss(log_probs=mlm_log_probs, labels=output_ids, output_mask=output_mask)
if self.only_mlm_loss:
loss, nsp_loss = mlm_loss, None
else:
nsp_loss = self.nsp_loss(logits=nsp_logits, labels=labels)
loss = self.agg_loss(loss_1=mlm_loss, loss_2=nsp_loss)
return mlm_loss, nsp_loss, loss
def _parse_forward_outputs(self, forward_outputs):
if self.only_mlm_loss:
return forward_outputs[0], None
else:
return forward_outputs
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, output_ids, output_mask, labels = batch
forward_outputs = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
mlm_log_probs, nsp_logits = self._parse_forward_outputs(forward_outputs)
_, _, loss = self._compute_losses(mlm_log_probs, nsp_logits, output_ids, output_mask, labels)
lr = self._optimizer.param_groups[0]['lr']
self.log('train_loss', loss)
self.log('lr', lr, prog_bar=True)
return {"loss": loss, "lr": lr}
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, output_ids, output_mask, labels = batch
forward_outputs = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
mlm_log_probs, nsp_logits = self._parse_forward_outputs(forward_outputs)
_, _, loss = self._compute_losses(mlm_log_probs, nsp_logits, output_ids, output_mask, labels)
self.validation_perplexity(logits=mlm_log_probs)
loss = {'val_loss': loss}
self.validation_step_outputs.append(loss)
return loss
def on_validation_epoch_end(self):
"""Called at the end of validation to aggregate outputs.
Args:
outputs (list): The individual outputs of each validation step.
Returns:
dict: Validation loss and tensorboard logs.
"""
if self.validation_step_outputs:
avg_loss = torch.stack([x['val_loss'] for x in self.validation_step_outputs]).mean()
perplexity = self.validation_perplexity.compute()
logging.info(f"evaluation perplexity {perplexity.cpu().item()}")
self.log(f'val_loss', avg_loss)
self.validation_step_outputs.clear() # free memory
def setup_training_data(self, train_data_config: Optional[DictConfig]):
self._train_dl = (
self._setup_preprocessed_dataloader(train_data_config)
if self.tokenizer is None
else self._setup_dataloader(train_data_config)
)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
self._validation_dl = (
self._setup_preprocessed_dataloader(val_data_config)
if self.tokenizer is None
else self._setup_dataloader(val_data_config)
)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
pass
def _setup_preprocessed_dataloader(self, cfg: Optional[DictConfig]):
dataset = cfg.data_file
max_predictions_per_seq = cfg.max_predictions_per_seq
batch_size = cfg.batch_size
if os.path.isdir(dataset):
files = [os.path.join(dataset, f) for f in os.listdir(dataset) if os.path.isfile(os.path.join(dataset, f))]
else:
files = [dataset]
files.sort()
dl = BertPretrainingPreprocessedDataloader(
data_files=files, max_predictions_per_seq=max_predictions_per_seq, batch_size=batch_size,
)
return dl
def _setup_tokenizer(self, cfg: DictConfig):
tokenizer = get_tokenizer(
tokenizer_name=cfg.tokenizer_name,
tokenizer_model=cfg.tokenizer_model,
special_tokens=OmegaConf.to_container(cfg.special_tokens) if cfg.special_tokens else None,
vocab_file=cfg.vocab_file,
)
self.tokenizer = tokenizer
def _setup_dataloader(self, cfg: DictConfig):
dataset = BertPretrainingDataset(
tokenizer=self.tokenizer,
data_file=cfg.data_file,
max_seq_length=cfg.max_seq_length,
mask_prob=cfg.mask_prob,
short_seq_prob=cfg.short_seq_prob,
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.get("drop_last", False),
shuffle=cfg.shuffle,
num_workers=cfg.get("num_workers", 0),
)
return dl
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
result.append(
PretrainedModelInfo(
pretrained_model_name="bertbaseuncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/bertbaseuncased/versions/1.0.0rc1/files/bertbaseuncased.nemo",
description="The model was trained EN Wikipedia and BookCorpus on a sequence length of 512.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="bertlargeuncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/bertlargeuncased/versions/1.0.0rc1/files/bertlargeuncased.nemo",
description="The model was trained EN Wikipedia and BookCorpus on a sequence length of 512.",
)
)
return result
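# Example usage (illustrative sketch only; the config keys below mirror the
# fields read in BERTLMModel.__init__ and are assumptions, not an official
# NeMo training recipe):
# from omegaconf import OmegaConf
# cfg = OmegaConf.create({
#     "tokenizer": None,
#     "language_model": {"config": None, "config_file": None},
#     "only_mlm_loss": True,
#     "num_tok_classification_layers": 1,
#     "num_seq_classification_layers": 2,
#     "optim": {"name": "adamw", "lr": 1e-4},
# })
# model = BERTLMModel(cfg=cfg)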
| [
"[email protected]"
] | |
40924b52d9e6ae8604eac8a4ca7597da7cb408e8 | d659f9b546e1622aec6ad10495363da1175f76b9 | /venv/bin/pip3.6 | 141ea44f3073cf7c8da0a1415a61048f0795beba | [] | no_license | modcomlearning/advanced_web2 | 7bb96c823cb606cb567c358a7b8dbb0b4918c2c7 | b701a25092a8ae40187c3af00a837e654fefc52e | refs/heads/master | 2023-01-07T21:09:06.930774 | 2020-11-14T12:08:53 | 2020-11-14T12:08:53 | 310,833,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | 6 | #!/home/jose/PycharmProjects/Advanced_Web_Sat/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
| [
"[email protected]"
] | |
072fcea6ad0ec0fa0e0effea0d1e577a5e7bed48 | 4d99350a527a88110b7bdc7d6766fc32cf66f211 | /OpenGLCffi/GLES1/EXT/OES/EGL_image.py | c7ba140322b6d986cfd1d3a484abbc478fc84042 | [
"MIT"
] | permissive | cydenix/OpenGLCffi | e790ef67c2f6c9877badd5c38b7d58961c8739cd | c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | refs/heads/master | 2021-01-11T07:31:10.591188 | 2017-04-17T11:04:55 | 2017-04-17T11:04:55 | 80,312,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | from OpenGLCffi.GLES1 import params
@params(api='gles1', prms=['target', 'image'])
def glEGLImageTargetTexture2DOES(target, image):
pass
@params(api='gles1', prms=['target', 'image'])
def glEGLImageTargetRenderbufferStorageOES(target, image):
pass
| [
"[email protected]"
] | |
21a3e92d50d50f8afb88ed7e4b8b1700ab8212da | 0178e6a705ee8aa6bb0b0a8512bf5184a9d00ded | /Sungjin/String/5525.py | 63ad63a387c4109d8b3d5474c7e89a5353e0b5c5 | [] | no_license | comojin1994/Algorithm_Study | 0379d513abf30e3f55d6a013e90329bfdfa5adcc | 965c97a9b858565c68ac029f852a1c2218369e0b | refs/heads/master | 2021-08-08T14:55:15.220412 | 2021-07-06T11:54:33 | 2021-07-06T11:54:33 | 206,978,984 | 0 | 1 | null | 2020-05-14T14:06:46 | 2019-09-07T14:23:31 | Python | UTF-8 | Python | false | false | 746 | py | import sys
input = sys.stdin.readline
if __name__ == '__main__':
N = int(input())
M = int(input())
S = input().strip()
P = 'I' + 'OI'*N
start = False
check = 0
cnt, result = 0, 0
for i, s in enumerate(S):
if start:
if s == 'O' and check == 1: check += 1
            elif s == 'I' and check == 2: cnt += 1; check = 1  # finished one "IOI"; this 'I' starts the next
else:
if cnt >= N: result += cnt - N + 1
start = False
check, cnt = 0, 0
if s == 'I': start = True; check = 1
if cnt >= N: result += cnt - N + 1
print(result)
'''
2
25
OOIOIIOIOIOIOIOIOIOIOOIOI
6
4
100
IIOIOIOIOIOIOOOOIOIOIOIOOIIOIOIOIOIOIOIIOIOIOIOOOIIOIOIOIOIOIOOOOIOIOIOIOIOIOOIIIIOIOIOIOIOIIOIOIOIO
11
''' | [
"[email protected]"
] | |
514e0990111b61362819faa91608463b1ba507e2 | e1aac65877d20e8f93e63c8edf6dedd5137b562b | /testPydev/Integer_Roman.py | c3137a45187226f1c865ea1527b30aa13a179744 | [] | no_license | weezer/fun_scripts | 67ccb6cc107c0bdd1b347d9cb1c0bdf934e1a331 | 38b8bd25d6db79044712779f95b26bf09ca0072a | refs/heads/master | 2020-12-25T19:26:21.685571 | 2018-09-20T07:56:25 | 2018-09-20T07:56:25 | 18,510,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | class Solution(object):
def intToRoman(self, num):
"""
:type num: int
:rtype: str
"""
roman_dict = {1: 'I', 4: 'IV', 5: 'V', 9: 'IX', 10: 'X', 40: 'XL',
                      50: 'L', 90: 'XC', 100: 'C', 400: 'CD', 500: 'D', 900: 'CM', 1000: 'M'}  # 90 is 'XC', not 'LC'
r_lst = []
digit = 1
while num > 0:
a_lst = []
reminder = num % 10
reminder *= digit
digit *= 10
num /= 10
if roman_dict.get(reminder) is not None:
r_lst.insert(0, roman_dict.get(reminder))
else:
for minus_num in sorted(roman_dict, reverse = True):
if reminder == 0:
break
if minus_num > reminder:
continue
else:
while reminder >= minus_num:
a_lst.append(roman_dict[minus_num])
reminder -= minus_num
r_lst.insert(0, ''.join(a_lst))
return ''.join(r_lst)
if __name__ == "__main__":
s = Solution()
print s.intToRoman(3999) | [
"[email protected]"
] | |
0d51c3a3f4fe0075d723424ee07d4b36454dd438 | 6bfe6e6c24eeb281a266a2d3fdaac645e79a4a85 | /admin/web/login_controller.py | 9d8a11e94f35b293ab2b7fdd2183bdc7d0fc23cc | [] | no_license | itsumura-h/masonite_admin_install | 606b5fe5f7043919fd955a48485e14d063116837 | a8448eacdee0b4088b84ac2cac6d0fdec46dbe87 | refs/heads/master | 2020-05-31T01:17:21.094154 | 2019-06-30T07:11:13 | 2019-06-30T07:11:13 | 190,047,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,534 | py | import os, pickle, secrets
from datetime import datetime
from bcrypt import checkpw
from masonite.auth import Sign
from masonite.controllers import Controller
from masonite.request import Request
from config.auth import AUTH
from config.admin import LOGIN_CONF
from admin.web.LoginToken import LoginToken
# try:
# from app.models.LoginToken import LoginToken
# except:
# from app.LoginToken import LoginToken
class LoginController(Controller):
def store(self, request: Request):
email = request.input('email')
password = request.input('password')
        user = AUTH['model'].where('email', email).first()
        if user is None:  # unknown email; avoid calling checkpw on None
            return {'login': False}
        login = checkpw(bytes(password, 'utf-8'), bytes(user.password, 'utf-8'))
if login:
hash = LoginToken().login(int(user.id))
return {'login': True, 'token': hash, 'id': user.id, 'name': user.name}
# Delete existing token
# LoginToken.where('admin_user_id', user.id).delete()
# hash = secrets.token_urlsafe()
# login_token = LoginToken()
# login_token.admin_user_id = user.id
# login_token.token = hash
# login_token.save()
# return {'login': True, 'token': hash, 'id': user.id, 'name': user.name}
else:
return {'login': False}
def destroy(self, request: Request):
login_id = request.input('login_id')
# LoginToken.where('admin_user_id', login_id).delete()
LoginToken().logout(int(login_id))
return {}
| [
"[email protected]"
] | |
25b3e881e120ecfab45a93c2d78f861cb64a3cc0 | be1d3bbe87e42b3cc41238697129fc701380c43c | /web_speller/backend/web_speller/blinks/migrations/0001_initial.py | c6eae07327640598d0f11b266afc01ff1b717958 | [] | no_license | Borda/BCI-speller | 051a6bd2a03f9be3e795f628decea39166946cbb | bf80f6c99f4258448199040d81d21f10ba1fd09d | refs/heads/master | 2020-03-21T09:31:28.683073 | 2018-06-25T19:39:00 | 2018-06-25T19:39:00 | 138,404,105 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # Generated by Django 2.0.5 on 2018-06-23 20:20
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BCIDevice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('device_id', models.CharField(max_length=50, unique=True)),
],
),
]
| [
"[email protected]"
] | |
b594bc5ed8b10f098540111dac731858955d269e | 1d5b2b72d322dd154a8efb547290ad5abb1fd098 | /work_dir/py_rpc/test1/client.py | cd739de1ff7000803d3cdcf24557c405ba6060ac | [] | no_license | hxzwd/drafts | 6b593b50cae309c02495a8aff28719f7b636962d | 478f4a4c399ab0c7c3f8f6e22d13131488716e4d | refs/heads/master | 2020-04-28T01:42:58.998610 | 2019-05-05T17:49:48 | 2019-05-05T17:49:48 | 174,868,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py |
import rpyc
host_addr = "localhost"
port = 18812
c = rpyc.connect(host_addr, port)
c.root.func_test("TEST MESSAGE! HELLO")
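# Note: `c.root.func_test` resolves to a method named `exposed_func_test`
# on the rpyc service listening at host_addr:port (server code not shown).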
| [
"="
] | = |
d0340512030ca2327070536ffecf2d04a3c898bf | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/qos/class.py | b2ba681e647deb4419d81561ae32ce1cc0831a60 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,833 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Class(Mo):
"""
The QoS classification traffic descriptor and specifications are used to categorize a packet within a specific group and making the packet accessible for QoS handling in the network.
"""
meta = ClassMeta("cobra.model.qos.Class")
meta.moClassName = "qosClass"
meta.rnFormat = "class-%(prio)s"
meta.category = MoCategory.REGULAR
meta.label = "QOS Class Policy"
meta.writeAccessMask = 0x100100000000001
meta.readAccessMask = 0x100100000000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.qos.Cong")
meta.childClasses.add("cobra.model.tag.Tag")
meta.childClasses.add("cobra.model.qos.RtResQoSPol")
meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childClasses.add("cobra.model.qos.Sched")
meta.childClasses.add("cobra.model.qos.Buffer")
meta.childClasses.add("cobra.model.tag.Annotation")
meta.childClasses.add("cobra.model.qos.Queue")
meta.childClasses.add("cobra.model.qos.PfcPol")
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.qos.RtResQoSPol", "rtresQoSPol"))
meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.qos.PfcPol", "pfcpol-"))
meta.childNamesAndRnPrefix.append(("cobra.model.qos.Buffer", "buffer"))
meta.childNamesAndRnPrefix.append(("cobra.model.qos.Sched", "sched"))
meta.childNamesAndRnPrefix.append(("cobra.model.qos.Queue", "queue"))
meta.childNamesAndRnPrefix.append(("cobra.model.qos.Cong", "cong"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.qos.InstPol")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.rnPrefixes = [
('class-', True),
]
prop = PropMeta("str", "admin", "admin", 261, PropCategory.REGULAR)
prop.label = "Admin State"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "enabled"
prop._addConstant("disabled", "disabled", 2)
prop._addConstant("enabled", "enabled", 1)
meta.props.add("admin", prop)
prop = PropMeta("str", "annotation", "annotation", 37277, PropCategory.REGULAR)
prop.label = "Annotation. Suggested format orchestrator:value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("annotation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "extMngdBy", "extMngdBy", 39416, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "undefined"
prop._addConstant("msc", "msc", 1)
prop._addConstant("undefined", "undefined", 0)
meta.props.add("extMngdBy", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "mtu", "mtu", 262, PropCategory.REGULAR)
prop.label = "MTU"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1500, 9216)]
prop.defaultValue = 9216
prop.defaultValueStr = "9216"
meta.props.add("mtu", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "prio", "prio", 263, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(0, 9)]
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("level1", "level1", 3)
prop._addConstant("level2", "level2", 2)
prop._addConstant("level3", "level3-(default)", 1)
prop._addConstant("level4", "level4", 9)
prop._addConstant("level5", "level5", 8)
prop._addConstant("level6", "level6", 7)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("prio", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
meta.namingProps.append(getattr(meta.props, "prio"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
def __init__(self, parentMoOrDn, prio, markDirty=True, **creationProps):
namingVals = [prio]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
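# Example usage (illustrative sketch; `modir` is assumed to be an already
# logged-in cobra MoDirectory session, and the parent DN is hypothetical):
# from cobra.mit.request import ConfigRequest
# qos_class = Class('uni/infra/qosinst-default', prio='level1', admin='enabled')
# config_req = ConfigRequest()
# config_req.addMo(qos_class)
# modir.commit(config_req)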
# End of package file
# ##################################################
| [
"[email protected]"
] | |
929ef551e4a4ed39cde8a606b1b2829d341e61c9 | 942f0b081d2271978ffe20fbbfa8d687b57e5c02 | /daily_coding_challenges/challenges/mapping_of_digits.py | 53d0d8d458ffa10f31cc7f656983e1ff6e8da511 | [] | no_license | simtb/coding-puzzles | 99762322606bb505d82924d4d5843db1c04aafbd | 9e1d53e35b2117240eb357d7930cdb8cfd891c8e | refs/heads/master | 2021-04-12T15:46:40.181048 | 2021-02-28T23:47:36 | 2021-02-28T23:47:36 | 249,089,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | """
This problem was asked by Yelp.
Given a mapping of digits to letters (as in a phone number), and a digit string, return all possible letters the number could represent. You can assume each valid number in the mapping is a single digit.
For example if {“2”: [“a”, “b”, “c”], 3: [“d”, “e”, “f”], …} then “23” should return [“ad”, “ae”, “af”, “bd”, “be”, “bf”, “cd”, “ce”, “cf"].
"""
from itertools import product
from typing import List
def solution(map_of_digits: dict, digit: str) -> List[str]:
    # Cartesian product of the letter choices for each digit, joined in order.
    if not digit:
        return []
    return [''.join(letters)
            for letters in product(*(map_of_digits[d] for d in digit))]
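if __name__ == "__main__":
    # Quick check against the example in the prompt (mapping is hypothetical).
    mapping = {"2": ["a", "b", "c"], "3": ["d", "e", "f"]}
    print(solution(mapping, "23"))
    # -> ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']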
"[email protected]"
] | |
27eda92427ea2fac869b971a6c59e28db69c8522 | 42fdf741bf64ea2e63d1546bb08356286f994505 | /test_00_vd2ota/rasp30_gen7.py | a401fa45a8e426a8acd4cd76f7a914f9ea38d766 | [] | no_license | skim819/RASP_Workspace_sihwan | 7e3cd403dc3965b8306ec203007490e3ea911e3b | 0799e146586595577c8efa05c647b8cb92b962f4 | refs/heads/master | 2020-12-24T05:22:25.775823 | 2017-04-01T22:15:18 | 2017-04-01T22:15:18 | 41,511,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | self.dev_types =['fgota']*2 + ['ota_buf']*1 + ['ota']*1 + ['cap']*4+ ['nfet']*2 + ['pfet']*2 + ['tgate']*4 + ['mux4_1']*1 +['nmirror']*2+['ladder_blk']*1+ ['c4_blk']*1+['Nagating_blk']*1+['speech']*1+['gnd_out']*1+['vdd_out']*1+['in2in_x1']*1+['in2in_x6']*1+['volt_div']*1+['volt_div_fgota']*1+['integrator']*1+['integrator_nmirror']*1+['INFneuron']*1+['lpf']*1+['nfet_i2v']*1+['pfet_i2v']*1+['nmirror_w_bias']*1+['fgswc_nmirror_w_bias']*1+['i2v_pfet_gatefgota']*1+['mismatch_meas']*1+['mmap_local_swc']*1+['ramp_fe']*1+['sigma_delta_fe']*1+['hhneuron']*1+['ladder_filter']*1+ ['h_rect']*1+['dendiff']*1+['Algo_ADC']*1+['TIA_blk']*1+['ichar_nfet']*1+['bias_gen']*1+['vmm_senseamp1']*1+['vmm_senseamp2']*1+['tgate_so']*1+['vmm4x4_SR']*1+['vmm8x4_SR']*1+['SR4']*1+['vmm4x4_SR2']*1+['vmm4x4']*1+['sftreg']*1+['DAC_sftreg']*1 +['sftreg2']*1+['sftreg3']*1+['sftreg4']*1+['vmm8x4']*1+['vmm8inx8in']*1+['vmm8x4_in']*1+['vmm12x1']*1+['vmm12x1_wowta']*1+['Adaptive_receptor']*1+['ota_vmm']*1 +['nmirror_vmm']*2+['inv_mcab']*1+['Hyst_diff']*1+['Max_detect']*1+['Min_detect']*1+['wta_w_bias']*1+['hhn']*1+['fgswitch']*1+['common_drain']*1+['common_drain_nfet']*1+['hhn_debug']*1+['vd_2ota']*1
| [
"ubuntu@ubuntu-VirtualBox.(none)"
] | ubuntu@ubuntu-VirtualBox.(none) |
37248cd18c25736ef9d16caf5efea05432caf0ab | 22a561e558b90761892b980de0c19c5d852717f3 | /venv/bin/pip | 946e2b8ee48006564a6722bc82572a797b3efa33 | [] | no_license | BillSam/M_shop | 401228536b57ff98d44f26b24fbd7b46122e4be5 | ecb0fb686957193f749413ef4f3b87c4ea046e7d | refs/heads/main | 2023-03-23T12:05:55.313453 | 2021-02-25T11:39:35 | 2021-02-25T11:39:35 | 342,137,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | #!/home/work/Documents/pythhon/M_shop/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
63f8277e6c153482a0737d7d7fc2be4065b35437 | 4926667354fa1f5c8a93336c4d6e2b9f6630836e | /1318.py | 39050eeca9d4be13c770e32b2ee0404761fb2afa | [] | no_license | nascarsayan/lintcode | 343b3f6e7071479f0299dd1dd1d8068cbd7a7d9e | 4da24b9f5f182964a1bdf4beaa8afc17eb7a70f4 | refs/heads/master | 2021-07-13T12:31:45.883179 | 2020-07-20T02:27:53 | 2020-07-20T02:27:53 | 185,825,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | class Solution:
"""
@param nums: the given array
@param k: the given k
@param t: the given t
@return: whether there are two distinct indices i and j in the array such that the absolute difference between nums[i] and nums[j]
is at most t and the absolute difference between i and j is at most k.
"""
def containsNearbyAlmostDuplicate(self, nums, k, t):
# Write your code here
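        # Bucket trick: with bucket width t + 1, any two values in the same
        # bucket differ by at most t, so only the current bucket and its two
        # neighbours need checking; evicting nums[i - k] keeps |i - j| <= k.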
size = len(nums)
bucket, wt = {}, t + 1
for i in range(size):
bi = nums[i] // wt
for bj in range(bi - 1, bi + 2):
if bj in bucket and abs(bucket[bj] - nums[i]) <= t:
return True
bucket[bi] = nums[i]
if i >= k:
del bucket[nums[i - k] // wt]
return False
| [
"[email protected]"
] | |
ded03050bd48a80ff92b49a40d0a4295a2803b38 | bb2add9ca1fdd3e33b2c28d494d497b62af121ae | /setup.py | e2be87663ddf37084b2e742b0b56e5633964c225 | [
"MIT"
] | permissive | wegamekinglc/simpleutils | 4a48b0eae783ad74e3c16454ea04fff88a6b7796 | 1e3c89f72347b94538267c6f66f87c2cabdb18dd | refs/heads/master | 2021-07-26T01:09:27.582528 | 2020-03-17T02:19:28 | 2020-03-17T02:19:28 | 90,547,150 | 0 | 1 | null | 2017-07-22T19:21:26 | 2017-05-07T16:44:07 | Python | UTF-8 | Python | false | false | 357 | py | import io
from setuptools import setup
from setuptools import find_packages
setup(
name='simpleutils',
version='0.2.5',
packages=find_packages(),
url='',
license='MIT',
author='wegamekinglc',
author_email='[email protected]',
install_requires=io.open("requirements.txt", encoding='utf8').read(),
description=''
)
| [
"[email protected]"
] | |
4c981b7947331d05e0ffc54bf6c0a5898cc085f0 | 7c2e677d931a8eb7d7cffc6d54713411abbe83e4 | /AppBuilder9000/AppBuilder9000/ZPYLP0914/HikingApp/migrations/0003_auto_20200925_1746.py | b15a2782dcbe732ed55cad1047fa3495f381341d | [] | no_license | r3bunker/Python_Live_Project | 19e367b3cf74c2279c287fcd3a8a44a27f24041a | d3e06150d7daea6326cc1a4155309d99e4ff6244 | refs/heads/main | 2023-06-12T23:01:50.440371 | 2021-06-16T20:21:03 | 2021-06-16T20:21:03 | 344,883,966 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | # Generated by Django 2.2.5 on 2020-09-26 00:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('HikingApp', '0002_auto_20200923_2326'),
]
operations = [
migrations.AlterField(
model_name='hike_preferences',
name='nick_name',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='HikingApp.create_happ_user'),
),
]
| [
"[email protected]"
] | |
09fce38313a60fd7a92b0eb090a3706cabaa6808 | 62b7c6baaef93a603ecc4eb0c6a140e7b133b577 | /data/modules/exploit/macos/stager/membrane_reverse_tcp/core/handler.py | e25ac93798adf8402e51e6d675d78b3a9bdf9870 | [
"MIT"
] | permissive | sashka3076/ZetaSploit | eca5badfcb796c95aa2c8036b3814d5687a2f7b8 | f5de0c10fb811f4b6f358412f6d848a7da8080fd | refs/heads/main | 2023-03-03T04:43:06.137871 | 2021-02-05T18:37:18 | 2021-02-05T18:37:18 | 336,480,949 | 1 | 0 | MIT | 2021-02-06T07:27:20 | 2021-02-06T07:27:20 | null | UTF-8 | Python | false | false | 2,767 | py | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from data.modules.exploit.macos.stager.membrane_reverse_tcp.core.terminator import terminator
class handler:
def __init__(self, client):
self.client = client
self.terminator = terminator()
def sendterm(self):
terminator = self.terminator.generate_terminator()
self.client.send((terminator + '\x04').encode())
return terminator
def send(self, buffer):
if not isinstance(buffer, bytes):
buffer = buffer.encode()
self.client.send(buffer + '\x04'.encode())
def sendall(self, buffer):
terminator = self.sendterm()
if not isinstance(buffer, bytes):
buffer = buffer.encode()
self.client.send(buffer + '\x04'.encode())
return terminator
def recvstr(self, char='\n'):
result = self.recvall(char)
return result
def recvall(self, terminator):
result = b''
while 1:
data = self.client.recv(1024)
if terminator.encode() in data:
data = data.replace(terminator.encode(), b'')
result += data
break
else:
result += data
return result
def recvfile(self, terminator, input_file):
output_file = open(input_file, "wb")
while 1:
data = self.client.recv(1024)
if terminator.encode() in data:
data = data.replace(terminator.encode(), b'')
output_file.write(data)
break
else:
output_file.write(data)
output_file.close()
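# Example usage (illustrative sketch; assumes an already-connected socket to
# a remote session -- the host/port below are placeholders):
# import socket
# client = socket.create_connection(("127.0.0.1", 4444))
# h = handler(client)
# term = h.sendall("whoami")
# print(h.recvall(term).decode())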
| [
"[email protected]"
] | |
4a36aec0bb29c9ee7b1376c66f7e37711c95c169 | a71fbf421c43fcb34fe7c8000eb807677821683c | /keras/keras06_RMSE.py | 7d109b56293501423f78ec268a42c551f20160cf | [] | no_license | leekyunghun/bit_seoul | ccd96dca3774f259e04b8388e134d6183b974268 | b76a3d5f83b77f5345d61cf3baa68aaefc25cd2a | refs/heads/master | 2023-02-06T08:12:17.768076 | 2020-12-22T13:35:19 | 2020-12-22T13:35:19 | 311,286,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,854 | py | import numpy as np
#1. Data  # linear regression example
x_train = np.array([1,2,3,4,5,6,7,8,9,10])
y_train = np.array([1,2,3,4,5,6,7,8,9,10])
x_test = np.array([11,12,13,14,15])
y_test = np.array([11,12,13,14,15])
x_pred = np.array([16,17,18])
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
#2. Build the model
model = Sequential()                 # declare the model as a Sequential model
model.add(Dense(32, input_dim = 1))  # input_dim = 1 => the input is 1-dimensional   #model.add(Dense(32, input_dim = 1)) gave good results
model.add(Dense(16)) #model.add(Dense(16))
model.add(Dense(8)) #model.add(Dense(8))
model.add(Dense(1)) #model.add(Dense(1))
#3. Compile, train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])  # metrics = evaluation metrics (report values computed during training)
model.fit(x_train, y_train, epochs = 100, batch_size = 1)     # model.fit => train the model
# model.fit(x_train, y_train, epochs = 100)                   # model.fit => train the model
#4. Evaluate, predict
# loss, acc = model.evaluate(x_test, y_test, batch_size = 1)
loss = model.evaluate(x_test, y_test)  # evaluate returns the loss by default; metrics added at compile time are included in the output
print("loss : ", loss)
# print("acc : ", acc)
y_predict = model.predict(x_test)  # check the predicted values
print("results: \n :", y_predict)
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
return np.sqrt(mean_squared_error(y_test, y_predict))
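# RMSE = sqrt(mean squared error); it is in the same units as the targets,
# so here a value near 0 means near-perfect predictions on x_test.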
print("RMSE: ", RMSE(y_test, y_predict)) | [
"[email protected]"
] | |
3f8c227967ee0fd61a7c3e634b1b36f2412658e8 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc013/A/4657608.py | a1c4dd03b11eae7bc30d1cf841829c266962862f | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | N, M, L = map(int, input().split())
P, Q, R = map(int, input().split())
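# Try all six assignments of the divisor triple (P, Q, R) to the axes
# (N, M, L): Res covers the three cyclic permutations, Res2 the other three.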
Res = max((N//P)*(M//Q)*(L//R), (N//Q)*(M//R)*(L//P), (N//R)*(M//P)*(L//Q))
Res2 = max((N//Q)*(M//P)*(L//R),(N//R)*(M//Q)*(L//P), (N//P)*(M//R)*(L//Q))
print(max(Res, Res2)) | [
"[email protected]"
] | |
2d3e682e938895f55504dace0bc7ead4ed580d59 | e13bccceb4c2fefbf8000f9b34195ab434cf1656 | /devel/lib/python2.7/dist-packages/test_rosmaster/__init__.py | cadd6945809a810de2ba0099831fa3a10e41f803 | [] | no_license | QuiN-cy/vacuum-en-band | ab59b718f289ad4e8a1f29e96724250b00bd894d | 48c296199b4a6ade40e084c9980d53ba1611a344 | refs/heads/master | 2023-06-01T12:13:38.664849 | 2021-06-11T15:42:42 | 2021-06-11T15:42:42 | 376,071,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | /home/student/rosws/devel/.private/test_rosmaster/lib/python2.7/dist-packages/test_rosmaster/__init__.py | [
"[email protected]"
] | |
3e50ccd03eaa5a52efbe94b7be2d3d9b816f48b6 | 7889f7f0532db6a7f81e6f8630e399c90438b2b9 | /3.4.3/_downloads/83afb11a0261474b783405dd2737c8b4/marker_path.py | 7a3894c1b84d42fa562aa9292d2baacac6e2009b | [] | no_license | matplotlib/matplotlib.github.com | ef5d23a5bf77cb5af675f1a8273d641e410b2560 | 2a60d39490941a524e5385670d488c86083a032c | refs/heads/main | 2023-08-16T18:46:58.934777 | 2023-08-10T05:07:57 | 2023-08-10T05:08:30 | 1,385,150 | 25 | 59 | null | 2023-08-30T15:59:50 | 2011-02-19T03:27:35 | null | UTF-8 | Python | false | false | 979 | py | """
===========
Marker Path
===========
Using a `~.path.Path` as marker for a `~.axes.Axes.plot`.
"""
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import numpy as np
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the circle with an internal cutout of the star
verts = np.concatenate([circle.vertices, star.vertices[::-1, ...]])
codes = np.concatenate([circle.codes, star.codes])
cut_star = mpath.Path(verts, codes)
plt.plot(np.arange(10)**2, '--r', marker=cut_star, markersize=15)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.path`
# - `matplotlib.path.Path`
# - `matplotlib.path.Path.unit_regular_star`
# - `matplotlib.path.Path.unit_circle`
# - `matplotlib.axes.Axes.plot` / `matplotlib.pyplot.plot`
| [
"[email protected]"
] | |
a0b9d2e0e3821e46cb8e9cf4d8cc75a86df1e90d | b9cd1b9758e58f00335900fd120e1d47c23600ce | /tool/fastq_splitter.py | 29e257c5b5b800ac68560c31136a434849f7e847 | [
"Apache-2.0"
] | permissive | Multiscale-Genomics/mg-process-fastq | 4fb7fef68526237f06312a3f137df031a448731c | 50c7115c0c1a6af48dc34f275e469d1b9eb02999 | refs/heads/master | 2020-04-12T06:46:01.100270 | 2018-11-19T16:05:03 | 2018-11-19T16:05:03 | 64,320,140 | 2 | 4 | Apache-2.0 | 2018-11-16T16:54:54 | 2016-07-27T15:29:25 | Python | UTF-8 | Python | false | false | 9,469 | py | """
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os
import sys
from utils import logger
try:
if hasattr(sys, '_run_from_cmdl') is True:
raise ImportError
from pycompss.api.parameter import FILE_IN, FILE_OUT, IN, OUT
from pycompss.api.task import task
# from pycompss.api.api import compss_wait_on
except ImportError:
logger.warn("[Warning] Cannot import \"pycompss\" API packages.")
logger.warn(" Using mock decorators.")
from utils.dummy_pycompss import FILE_IN, FILE_OUT, IN, OUT # pylint: disable=ungrouped-imports
from utils.dummy_pycompss import task # pylint: disable=ungrouped-imports
# from utils.dummy_pycompss import compss_wait_on # pylint: disable=ungrouped-imports
from basic_modules.tool import Tool
from basic_modules.metadata import Metadata
from tool.fastqreader import fastqreader
from tool.common import common
# ------------------------------------------------------------------------------
class fastq_splitter(Tool): # pylint: disable=invalid-name
"""
Script for splitting up FASTQ files into manageable chunks
"""
def __init__(self, configuration=None):
"""
Initialise the tool with its configuration.
Parameters
----------
configuration : dict
a dictionary containing parameters that define how the operation
should be carried out, which are specific to each Tool.
"""
logger.info("FASTQ Splitter")
Tool.__init__(self)
if configuration is None:
configuration = {}
self.configuration.update(configuration)
@task(
in_file1=FILE_IN, tag=IN,
out_file=FILE_OUT, files_out=OUT,
returns=list)
def single_splitter(self, in_file1, out_file, tag='tmp'): # pylint: disable=no-self-use
"""
Function to divide the FastQ files into separate sub files of 1000000
sequences so that the aligner can run in parallel.
Parameters
----------
in_file1 : str
Location of first FASTQ file
tag : str
DEFAULT = tmp
Tag used to identify the files. Useful if this is getting run
manually on a single machine multiple times to prevent collisions of
file names
Returns
-------
        files_out : list
            List of single-element lists, each naming one of the generated
            FASTQ chunk files.
"""
fqr = fastqreader()
fqr.openFastQ(in_file1)
file_loc_1_tmp = fqr.createOutputFiles(tag)
fastq_chunk_size = 1000000
if "fastq_chunk_size" in self.configuration:
fastq_chunk_size = self.configuration["fastq_chunk_size"]
record1 = fqr.next(1)
count_r3 = 0
tmp_dir = os.path.split(file_loc_1_tmp)[0]
files_out = [[os.path.split(file_loc_1_tmp)[1]]]
while fqr.eof(1) is False:
fqr.writeOutput(record1, 1)
record1 = fqr.next(1)
count_r3 += 1
if count_r3 % fastq_chunk_size == 0:
file_loc_1_new = fqr.incrementOutputFiles()
files_out.append([os.path.split(file_loc_1_new)[1]])
fqr.closeFastQ()
fqr.closeOutputFiles()
untar_idx = True
if "no-untar" in self.configuration and self.configuration["no-untar"] is True:
untar_idx = False
if untar_idx is True:
if os.path.isfile(out_file):
os.remove(out_file)
output_file_pregz = out_file.replace('.tar.gz', '.tar')
common.tar_folder(tmp_dir, output_file_pregz)
common.zip_file(output_file_pregz, 2)
return files_out
@task(
in_file1=FILE_IN, in_file2=FILE_IN, tag=IN,
out_file=FILE_OUT, files_out=OUT,
returns=list)
def paired_splitter(self, in_file1, in_file2, out_file, tag='tmp'): # pylint: disable=no-self-use,too-many-locals,too-many-statements
"""
        Function to divide the paired-end FastQ files into separate sub files
of 1000000 sequences so that the aligner can run in parallel.
Parameters
----------
in_file1 : str
Location of first paired end FASTQ file
in_file2 : str
Location of second paired end FASTQ file
tag : str
DEFAULT = tmp
Tag used to identify the files. Useful if this is getting run
manually on a single machine multiple times to prevent collisions of
file names
Returns
-------
        paired_files : list
            List of lists of paired-end files. Each sub list contains the two
            paired-end files for that subset.
"""
fqr = fastqreader()
fqr.openFastQ(in_file1, in_file2)
file_loc_1_tmp, file_loc_2_tmp = fqr.createOutputFiles(tag)
fastq_chunk_size = 1000000
if "fastq_chunk_size" in self.configuration:
fastq_chunk_size = self.configuration["fastq_chunk_size"]
record1 = fqr.next(1)
record2 = fqr.next(2)
count_r1 = 0
count_r2 = 0
count_r3 = 0
tmp_dir = os.path.split(file_loc_1_tmp)[0]
files_out = [[
os.path.split(file_loc_1_tmp)[1],
os.path.split(file_loc_2_tmp)[1]
]]
while fqr.eof(1) is False and fqr.eof(2) is False:
r1_id = record1["id"].split(" ")
r2_id = record2["id"].split(" ")
if r1_id[0] == r2_id[0]:
fqr.writeOutput(record1, 1)
fqr.writeOutput(record2, 2)
record1 = fqr.next(1)
record2 = fqr.next(2)
count_r1 += 1
count_r2 += 1
count_r3 += 1
elif r1_id[0] < r2_id[0]:
record1 = fqr.next(1)
count_r1 += 1
else:
record2 = fqr.next(2)
count_r2 += 1
if count_r3 % fastq_chunk_size == 0:
file_loc_1_new, file_loc_2_new = fqr.incrementOutputFiles()
files_out.append([
os.path.split(file_loc_1_new)[1],
os.path.split(file_loc_2_new)[1]
])
fqr.closeFastQ()
fqr.closeOutputFiles()
untar_idx = True
if "no-untar" in self.configuration and self.configuration["no-untar"] is True:
untar_idx = False
if untar_idx is True:
if os.path.isfile(out_file):
os.remove(out_file)
output_file_pregz = out_file.replace('.tar.gz', '.tar')
common.tar_folder(tmp_dir, output_file_pregz)
common.zip_file(output_file_pregz, 2)
return files_out
def run(self, input_files, input_metadata, output_files):
"""
The main function to run the splitting of FASTQ files (single or
paired) so that they can aligned in a distributed manner
Parameters
----------
input_files : dict
List of input fastq file locations
        input_metadata : dict
output_files : dict
Returns
-------
output_file : str
Location of compressed (.tar.gz) of the split FASTQ files
output_names : list
List of file names in the compressed file
"""
sources = [input_files["fastq1"]]
if "fastq2" in input_files:
sources.append(input_files["fastq2"])
self.paired_splitter(
input_files["fastq1"], input_files["fastq2"],
output_files["output"]
)
else:
self.single_splitter(
input_files["fastq1"],
output_files["output"],
)
# results = compss_wait_on(results)
fastq_tar_meta = Metadata(
data_type=input_metadata["fastq1"].data_type,
file_type="TAR",
file_path=output_files["output"],
sources=sources,
taxon_id=input_metadata["fastq1"].taxon_id,
meta_data={
"tool": "fastq_splitter"
}
)
return (
{"output": output_files["output"]},
{"output": fastq_tar_meta}
)
# ------------------------------------------------------------------------------
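# Hedged usage sketch (not part of the original module). The paths and the
# Metadata values below are hypothetical placeholders; a real caller would be
# driven by the mg-process-fastq pipeline machinery.
if __name__ == "__main__":
    splitter = fastq_splitter({"fastq_chunk_size": 1000000})
    in_files = {"fastq1": "/tmp/reads_1.fastq"}  # hypothetical input
    in_meta = {"fastq1": Metadata(
        data_type="data_seq", file_type="FASTQ",
        file_path=in_files["fastq1"], sources=[], taxon_id=9606,
        meta_data={})}
    out_files = {"output": "/tmp/reads_split.tar.gz"}  # hypothetical output
    results, results_meta = splitter.run(in_files, in_meta, out_files)
    print(results)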
| [
"[email protected]"
] | |
1c7c39467f29261840f0dd971bee8ef850b22534 | b5eaeded2af4417603d6592f29c81c2426397153 | /catatan/migrations/0002_auto_20201111_0927.py | cb4367a8a60122829cf0c2231a5d667dc6c68e2d | [] | no_license | giko99/sim-labsos | 9871059115be511882c96be10b6baaa9874a86ae | 1711607aa3cb087c1ec5efc2b337841eb0a1f33d | refs/heads/main | 2023-01-10T06:38:17.209459 | 2020-11-12T10:04:40 | 2020-11-12T10:04:40 | 311,870,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | # Generated by Django 2.2 on 2020-11-11 02:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('catatan', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='catatan',
old_name='selang',
new_name='waktu_kegiatan',
),
]
| [
"[email protected]"
] | |
31170f4214bbbc086ecde9b382a34789691bc3a7 | 78230ff4256b5cc7b99a5139968b41a3495f173e | /test/test_retryz.py | ac54a00fe91e212239e8e2edc9f5c00633a7bb80 | [
"Apache-2.0"
] | permissive | jealous/retryz | f1fbbf411708b2ddb8a79ea48a8c6aaff1b7f725 | b47b326e0734854aaa7b97e115cc5a5828052b00 | refs/heads/master | 2023-06-01T05:49:31.566554 | 2017-05-21T14:48:45 | 2017-05-21T15:14:26 | 48,358,949 | 1 | 2 | null | 2017-05-21T15:17:54 | 2015-12-21T08:03:37 | Python | UTF-8 | Python | false | false | 11,992 | py | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from unittest import TestCase
from hamcrest import assert_that, instance_of, equal_to, raises, \
greater_than, less_than, contains_string
from retryz import retry, RetryTimeoutError
def _return_callback(ret):
return 4 + ret < 7
def func_wait_callback(tried):
if tried <= 6:
ret = 0.01
else:
ret = 1000
return ret
class AnotherDemoClass(object):
def __init__(self):
self.a = 0
@classmethod
def class_wait_callback(cls, tried):
if tried <= 3:
ret = 0.01
else:
ret = 1000
return ret
@staticmethod
def static_wait_callback(tried):
if tried <= 4:
ret = 0.01
else:
ret = 1000
return ret
def other_method_wait_callback(self, tried):
self.a += 1
if tried <= 2:
ret = 0.01
else:
ret = 1000
return ret
class RetryDemo(object):
def __init__(self):
self._call_count = 0
@property
def call_count(self):
return self._call_count
@retry(on_error=ValueError)
def on_error(self):
self._call_count += 1
if self.call_count <= 3:
raise ValueError()
else:
return self.call_count
@retry(on_error=lambda e: isinstance(e, (ValueError, TypeError)))
def on_errors(self):
self._call_count += 1
if self.call_count == 1:
raise ValueError()
elif self.call_count == 2:
raise TypeError()
else:
raise AttributeError()
@retry(on_error=lambda e: not isinstance(e, TypeError))
def unless_error(self):
self._call_count += 1
if self.call_count <= 2:
raise ValueError()
else:
raise TypeError()
@retry(on_error=lambda e: not isinstance(e, (TypeError, AttributeError)))
def unless_errors(self):
self._call_count += 1
if self.call_count == 1:
raise ValueError()
elif self.call_count == 2:
raise TypeError()
else:
raise AttributeError()
def _error_callback(self, ex):
assert_that(ex, instance_of(TypeError))
return self.call_count != 4
@retry(on_error=_error_callback)
def error_callback(self):
self._call_count += 1
raise TypeError()
@retry(timeout=0.05, wait=lambda x: 0 if x < 5 else 100)
def timeout(self):
self._call_count += 1
return self.call_count
@retry(timeout=lambda: 0.05, wait=lambda x: 0 if x < 6 else 100)
def timeout_callback(self):
self._call_count += 1
return self.call_count
@retry(on_return=True)
def on_return(self):
self._call_count += 1
if self.call_count < 3:
ret = True
else:
ret = False
return ret
@retry(on_return=lambda x: x in (1, 2, 3, 4, 5))
def on_returns(self):
self._call_count += 1
return self.call_count
@retry(on_return=lambda x: x != 4)
def unless_return(self):
self._call_count += 1
return self.call_count
@retry(on_return=lambda x: x not in [3, 4])
def unless_returns(self):
self._call_count += 1
return self.call_count
@retry(on_return=_return_callback)
def return_callback(self):
self._call_count += 1
return self.call_count
@retry(on_return=False, wait=0.01, limit=10)
def practical_on_return(self):
self._call_count += 1
return self._call_count == 6
@retry(on_return=False, wait=0.01, limit=10)
def on_return_raise_error(self):
self._call_count += 1
if self._call_count == 3:
raise ValueError('error out')
return self._call_count == 6
@retry(limit=3)
def limit(self):
self._call_count += 1
return self.call_count
@retry(wait=0.1, timeout=0.3)
def wait(self):
self._call_count += 1
return self.call_count
def _wait_callback(self, tried):
if tried <= 2 or self.call_count <= 5:
ret = 0.01
else:
ret = 1000
return ret
@retry(wait=_wait_callback, timeout=0.1)
def wait_callback(self):
self._call_count += 1
return self.call_count
@retry(wait=AnotherDemoClass.class_wait_callback, timeout=0.1)
def class_wait_callback(self):
self._call_count += 1
return self.call_count
@retry(wait=AnotherDemoClass.static_wait_callback, timeout=0.1)
def static_wait_callback(self):
self._call_count += 1
return self.call_count
@retry(wait=func_wait_callback, timeout=0.1)
def func_wait_callback(self):
self._call_count += 1
return self.call_count
@retry(wait=AnotherDemoClass().other_method_wait_callback, timeout=0.1)
def other_method_wait_callback(self):
self._call_count += 1
return self.call_count
def _on_retry(self):
self._call_count += 1
@retry(on_retry=_on_retry, limit=3)
def on_retry(self):
""" doc string for on retry
:return: call count
"""
self._call_count += 1
return self.call_count
@retry(limit=lambda: 3)
def limit_callback(self):
self._call_count += 1
return self.call_count
@retry(on_error=ValueError)
def unexpected_error(self):
raise AttributeError('unexpected attribute error.')
@retry(on_error=ValueError, wait=15, timeout=60 * 60 * 24 * 7)
def same_function(self):
self._call_count += 1
return self._call_count
def call(self):
self._call_count += 1
return self._call_count
class RetryTest(TestCase):
def test_on_error(self):
demo = RetryDemo()
demo.on_error()
assert_that(demo.call_count, equal_to(4))
def test_on_errors_and_limit(self):
demo = RetryDemo()
assert_that(demo.on_errors, raises(AttributeError))
assert_that(demo.call_count, equal_to(3))
def test_unless_error(self):
demo = RetryDemo()
assert_that(demo.unless_error, raises(TypeError))
assert_that(demo.call_count, equal_to(3))
def test_unless_errors(self):
demo = RetryDemo()
assert_that(demo.unless_errors, raises(TypeError))
assert_that(demo.call_count, equal_to(2))
def test_error_callback(self):
demo = RetryDemo()
assert_that(demo.error_callback, raises(TypeError))
assert_that(demo.call_count, equal_to(4))
def test_timeout(self):
demo = RetryDemo()
assert_that(demo.timeout, raises(RetryTimeoutError))
assert_that(demo.call_count, equal_to(5))
def test_timeout_callback(self):
demo = RetryDemo()
assert_that(demo.timeout_callback, raises(RetryTimeoutError))
assert_that(demo.call_count, equal_to(6))
def test_on_return(self):
demo = RetryDemo()
assert_that(demo.on_return(), equal_to(False))
assert_that(demo.call_count, equal_to(3))
def test_on_returns(self):
demo = RetryDemo()
assert_that(demo.on_returns(), equal_to(6))
def test_unless_return(self):
demo = RetryDemo()
assert_that(demo.unless_return(), equal_to(4))
def test_unless_returns(self):
demo = RetryDemo()
assert_that(demo.unless_returns(), equal_to(3))
def test_return_callback(self):
demo = RetryDemo()
assert_that(demo.return_callback(), equal_to(3))
def test_limit(self):
demo = RetryDemo()
assert_that(demo.limit(), equal_to(3))
def test_wait(self):
demo = RetryDemo()
assert_that(demo.wait, raises(RetryTimeoutError))
assert_that(demo.call_count, greater_than(2))
assert_that(demo.call_count, less_than(6))
def test_wait_callback(self):
demo = RetryDemo()
assert_that(demo.wait_callback, raises(RetryTimeoutError))
assert_that(demo.call_count, equal_to(6))
def test_func_wait_callback(self):
demo = RetryDemo()
assert_that(demo.func_wait_callback, raises(RetryTimeoutError))
assert_that(demo.call_count, equal_to(7))
def test_class_wait_callback(self):
demo = RetryDemo()
assert_that(demo.class_wait_callback, raises(RetryTimeoutError))
assert_that(demo.call_count, equal_to(4))
def test_static_wait_callback(self):
demo = RetryDemo()
assert_that(demo.static_wait_callback, raises(RetryTimeoutError))
assert_that(demo.call_count, equal_to(5))
def test_other_method_wait_callback(self):
demo = RetryDemo()
assert_that(demo.other_method_wait_callback, raises(RetryTimeoutError))
assert_that(demo.call_count, equal_to(3))
def test_on_retry(self):
demo = RetryDemo()
assert_that(demo.on_retry(), equal_to(5))
def test_limit_callback(self):
demo = RetryDemo()
assert_that(demo.limit_callback(), equal_to(3))
def test_unexpected_error(self):
demo = RetryDemo()
assert_that(demo.unexpected_error,
raises(AttributeError, 'unexpected'))
def test_functional_limit(self):
demo = RetryDemo()
assert_that(retry(demo.call, limit=3)(), equal_to(3))
def test_functional_on_return(self):
demo = RetryDemo()
assert_that(retry(demo.call, on_return=lambda x: x < 5)(), equal_to(5))
def test_partial_function_on_return(self):
def lt(x, y):
return x < y
f = functools.partial(lt, y=5)
demo = RetryDemo()
assert_that(retry(demo.call, on_return=f)(), equal_to(5))
def test_function_wrap(self):
demo = RetryDemo()
assert_that(demo.on_retry.__doc__,
contains_string('doc string for on retry'))
def test_same_function_first_entry(self):
demo = RetryDemo()
assert_that(demo.same_function(), equal_to(1))
def test_same_function_second_entry(self):
demo = RetryDemo()
assert_that(demo.same_function(), equal_to(1))
def test_practical_on_return(self):
demo = RetryDemo()
demo.practical_on_return()
assert_that(demo.call_count, equal_to(6))
def test_on_return_raise_error(self):
def f():
demo = RetryDemo()
demo.on_return_raise_error()
assert_that(f, raises(ValueError))
def test_error_wait_type(self):
def f():
@retry(wait='string')
def g():
pass
g()
assert_that(f, raises(ValueError, 'should be a number or a callback'))
def test_retry_retry_type(self):
def f():
@retry(on_retry=123)
def g():
pass
g()
assert_that(f, raises(ValueError, 'should be a function'))
| [
"[email protected]"
] | |
c888771fa263c8e74bf9f393fcdbb8476f204704 | 36add5afc63ec09d63b8a877c29c17391938ee5c | /.history/process_tweet_20201113145341.py | c00da9bce7d529f27130fc4cb4ab2841dd6d41c6 | [] | no_license | E-STAT/sentiment_api | e84eb04a9f21c7368ca20bdb97436ffea9f65f25 | bd9ee0d78d9eac8b6448b96c2560611a64f7b79d | refs/heads/master | 2023-01-12T13:06:14.654883 | 2020-11-20T11:30:22 | 2020-11-20T11:30:22 | 314,534,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | import re
REPLACE_BY_SPACE = re.compile('[/(){}\[\]\|,;&_-]') #punctuation to replace (hyphen last so it is literal, not a range)
def preprocess_text(text):
"""
    Function to preprocess a tweet: removes links, mentions, hashtags and
    punctuation, expands common contractions, and drops very short words
Parameters
----------
text: str
a string to be preprocessed
Returns
-------
text: str
a preprocessed string
"""
    text = text.lower()                              # lowercase
    text = re.sub(r"http\S+", "", text)              # remove links
    text = re.sub(r"\@\S+", "", text)                # remove mentions
    text = re.sub(r"#\S+", "", text)                 # remove hashtags
    text = re.sub(r"won\'t", "would not", text)      # expand contractions
    text = re.sub(r"n\'t", " not", text)             # expand contractions
    text = REPLACE_BY_SPACE.sub(' ', text)           # replace punctuation with space
    text = [word.strip() for word in text.split()]   # strip space from words
    text = [word for word in text if len(word) > 2]  # drop words of 2 or fewer characters
    text = [word for word in text if word != 'amp']  # drop twitter "amp"
    text = ' '.join(text)
    return text
test = preprocess_text("Hello, this is Ernest @OwojoriErnest. #EndSars")
print(test) | [
"[email protected]"
] | |
b70eb58802e5dbe8a525a194f8c647b1713059b2 | f98f4aaeca3ac841905e0cd8547bbf41944fe690 | /编程语言/Python/Python编程从入门到实践/第一部分_基础知识/第11章_测试代码/11_1/test_cities.py | 8a129dc96c26bbe02c29536932c7670c18d25732 | [] | no_license | zhb339/book-learning | 64f433b1ee1f66f3120828352df3b533be4cf9de | 5273fc8d11b2d602484dbe95e55f1e931858382f | refs/heads/master | 2020-03-29T10:48:22.771146 | 2018-11-07T13:46:53 | 2018-11-07T13:46:53 | 149,823,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | import unittest
from city_functions import get_location
class CityTest(unittest.TestCase):
def test_city_country(self):
location = get_location("hangzhou", "china")
self.assertEqual(location, "hangzhou, china")
unittest.main()
| [
"[email protected]"
] | |
06c77303e95c817d8ebca414a1acfcf467f2abd7 | 2c6f77b281ee9c901a788b5617f26e73a3732762 | /chapter7/insertionSort.py | 130a3f6cf1f401844d03745fea0c27e898c779cd | [] | no_license | Boberkraft/Data-Structures-and-Algorithms-in-Python | 5db8ff814f4e954aca6701fabcc70900fe7012ff | 60a5ef54f9cffb4dcdd3fc494cfeeb662d570ae9 | refs/heads/master | 2021-01-20T06:05:29.552453 | 2018-10-28T19:06:55 | 2018-10-28T19:06:55 | 89,841,233 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from PositinalList import PositionalList
def insertion_sort(l):
if len(l) > 1:
marker = L.first()
while marker != l.last():
pivot = l.after(marker)
value = pivot.element()
if value > marker.element():
marker = pivot
else:
walk = marker
while walk != l.first() and l.before(walk).element() > value:
walk = l.before(walk)
l.delete(pivot)
l.add_before(walk, value) | [
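# Hedged usage sketch (not in the original file); assumes the PositionalList
# API from the same repo: add_last, first, element, and after (which is
# assumed to return None once past the last position).
if __name__ == '__main__':
    seq = PositionalList()
    for v in (35, 15, 10, 98, 23):
        seq.add_last(v)              # build an unsorted positional list
    insertion_sort(seq)
    walk = seq.first()
    while walk is not None:          # traverse and print the sorted values
        print(walk.element())
        walk = seq.after(walk)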
"[email protected]"
] | |
d71b31be0ea0908049b6484535473a11033bc7ca | c11123ce1e86f8306dcc3bf5d017dbfa8bb1d515 | /Easy/Pascal's Triangle.py | 2d0b01d3d2e9680b1cc10cbe895915d5b9fe90ab | [] | no_license | uathena1991/Leetcode | 7e606c68a51ed09e6e6a9fad327b24066e92d0c4 | e807ae43a0a253deaa6c9ed1c592fa3a14a6cab8 | refs/heads/master | 2021-05-01T15:21:25.568729 | 2019-10-13T14:33:30 | 2019-10-13T14:33:30 | 74,910,747 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = [[1]]
if numRows == 0:
return []
if numRows == 1:
return [[1]]
for n in range(1,numRows):
res.append([1]*(n+1))
for j in range(1,n):
res[n][j] = res[n-1][j] + res[n-1][j-1]
return res
a = Solution()
print a.generate(7) | [
"[email protected]"
] | |
5ae8b4f10739b6e6002264dbdfb90ab5fb784055 | 5d8bbb53d3ca8d532a1d977906a0fdf2aed6d46f | /ecoke/tests/test_views.py | e5c5f9a24e15e40c94f1b298ae200f8f7687d2b1 | [] | no_license | ErickMwazonga/e-Coke | da113d5cf2127b9947622008433057331c6a87f9 | d72f6f2d5f6b5bb8d1a50c2fa6d22cf9a462874d | refs/heads/master | 2021-01-20T07:16:00.763059 | 2018-03-20T12:25:43 | 2018-03-20T12:25:43 | 101,509,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,689 | py | # from django.test import Client, TestCase
# from django.urls import reverse
# from django.contrib.auth import get_user_model
# from django.utils import timezone
#
# from ecoke.models import Brand
#
#
# def create_user():
# User = get_user_model()
# user = User(username='john', email='[email protected]', is_active=True)
# user.set_password('letmein')
# user.save()
# return user
#
#
# class IndexTestCase(TestCase):
# def setUp(self):
# user = create_user()
#
# self.client = Client()
# self.client.force_login(user)
#
# def test_correct_template_used(self):
# url = reverse('ecoke:index')
# res = self.client.get(url)
#
# self.assertTemplateUsed(res, 'ecoke/index.html')
# self.assertIn('This is a e-Coke Application where you can collect data based on brands...', res.content)
#
#
# class BrandTestCase(TestCase):
# def setUp(self):
# user = create_user()
#
# self.client = Client()
# self.client.force_login(user)
#
# # def test_data_posted(self):
# # url = reverse('ecoke:brand_create')
# # data = {
# # 'collector_name': 'Chepe',
# # 'respondent_name': 'Chitalo',
# # 'respondent_city': 'Matano Mane',
# # 'favourite_drink': 'Fuze',
# # 'date_of_collection': timezone.now().date()
# # }
# # res = self.client.post(url, data=data)
# # self.assertEqual(Brand.objects.count(), 1)
# #
# #
# # def test_data_update(self):
# # data = {
# # 'collector_name': 'Chepe',
# # 'respondent_name': 'Chitalo',
# # 'respondent_city': 'Matano Mane',
# # 'favourite_drink': 'Fuze',
# # 'date_of_collection': timezone.now().date()
# # }
# # brand = Brand.objects.create(**data)
# # url = reverse('ecoke:brand_update', kwargs={'pk':brand.pk})
# # data['collector_name'] = 'Biro'
# # res = self.client.post(url, data=data)
# #
# # self.assertEqual(Brand.objects.first().collector_name, 'Biro')
#
# def test_data_delete(self):
# data = {
# 'collector_name': 'Chepe',
# 'respondent_name': 'Chitalo',
# 'respondent_city': 'Matano Mane',
# 'favourite_drink': 'Fuze',
# 'date_of_collection': timezone.now().date()
# }
# brand = Brand.objects.create(**data)
# self.assertEqual(Brand.objects.count(), 1)
#
# url = reverse('ecoke:brand_delete', kwargs={'pk':brand.pk})
# res = self.client.post(url)
#
# self.assertEqual(Brand.objects.count(), 0)
| [
"[email protected]"
] | |
1c2b29364823e4d003628dd1a69238ac0f8c2f65 | 6fd26735b9dfd1d3487c1edfebf9e1e595196168 | /2018/task05a.py | efc9e8f1ff974feab1583ece5da19fdb8705b276 | [
"BSD-3-Clause"
] | permissive | Kwpolska/adventofcode | bc3b1224b5272aa8f3a5c4bef1d8aebe04dcc677 | 8e55ef7b31a63a39cc2f08b3f28e15c2e4720303 | refs/heads/master | 2021-01-10T16:48:38.816447 | 2019-12-03T20:46:07 | 2019-12-03T20:46:07 | 47,507,587 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!/usr/bin/env python3
import string
with open("input/05.txt") as fh:
file_data = fh.read().strip()
def solve(data):
while True:
out = data[0]
lout = data[0].lower()
for c in data[1:]:
lc = c.lower()
if lout and lout[-1] == lc and out[-1] != c:
out = out[:-1]
lout = lout[:-1]
else:
out += c
lout += lc
if out == data:
break
data = out
return len(data), data
test_data = "dabAcCaCBAcCcaDA"
test_output = solve(test_data)
test_expected = (10, "dabCBAcaDA")
print(test_output, test_expected)
assert test_output == test_expected
print(solve(file_data))
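# Alternative sketch (not in the original): the same polymer reduction runs in
# a single O(n) pass with a stack, instead of repeated full scans.
def solve_stack(data):
    stack = []
    for c in data:
        # a unit reacts with the stack top when it is the same letter
        # in the opposite case
        if stack and stack[-1] != c and stack[-1].lower() == c.lower():
            stack.pop()
        else:
            stack.append(c)
    return len(stack), "".join(stack)

assert solve_stack(test_data) == test_expected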
| [
"[email protected]"
] | |
62ab1ea93818eed7b32b1dd371198993153edd61 | a752920841038f1f84df06779ff041d6c1100697 | /pypinyin/phrases_dict.pyi | bdcaf1677ba135f14cff63265e002d7990b17f57 | [
"MIT"
] | permissive | mozillazg/python-pinyin | 06e5eaa5326b642d50aacbe71b7117ac6024b353 | 6a306a6ec0148502ae4e689a229340555ecb6333 | refs/heads/master | 2023-08-31T14:13:44.512972 | 2023-05-14T12:18:47 | 2023-05-14T12:18:47 | 12,830,126 | 4,564 | 634 | MIT | 2023-09-09T03:46:41 | 2013-09-14T14:01:40 | Python | UTF-8 | Python | false | false | 94 | pyi | from typing import Dict, List, Text
phrases_dict = ... # type: Dict[Text, List[List[Text]]]
| [
"[email protected]"
] | |
0dd8e37f694e893ca41f0e481521e1f666d2443f | 89376db3a7febcbd9b87a96cfc9e4f3fcf23758d | /test/unit/backends/msbuild/test_syntax.py | fab57708a7d0feb25463863dd417f764c17adbac | [
"BSD-3-Clause"
] | permissive | zokrezyl/bfg9000 | f80a48d39202dbf9520ae4f0fa234556b720008f | 430dfa2bfd4863d30f4690ff22f7b62dc80387ba | refs/heads/master | 2022-10-02T18:10:36.577322 | 2020-05-29T20:53:56 | 2020-05-29T20:54:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,727 | py | from io import BytesIO
from lxml import etree
from lxml.builder import E
from . import *
from bfg9000.backends.msbuild.syntax import *
from bfg9000.file_types import SourceFile
from bfg9000.path import Path, Root
from bfg9000.safe_str import jbos, literal, safe_string
from bfg9000.tools.common import Command
class my_safe_str(safe_string):
pass
class TestTextify(TestCase):
def test_string(self):
self.assertEqual(textify('foo'), 'foo')
self.assertEqual(textify('foo', True), 'foo')
self.assertEqual(textify('foo bar'), 'foo bar')
self.assertEqual(textify('foo bar', True), '"foo bar"')
def test_literal(self):
self.assertEqual(textify(literal('foo bar')), 'foo bar')
self.assertEqual(textify(literal('foo bar'), True), 'foo bar')
def test_jbos(self):
j = jbos('foo', literal('='), 'bar baz')
self.assertEqual(textify(j), 'foo=bar baz')
self.assertEqual(textify(j, True), 'foo="bar baz"')
def test_path(self):
p1 = Path('foo')
p2 = Path('foo', Root.srcdir)
self.assertEqual(textify(p1), r'$(OutDir)\foo')
self.assertEqual(textify(p2), r'$(SourceDir)\foo')
self.assertEqual(textify(p1, builddir=BuildDir.intermediate),
r'$(IntDir)\foo')
self.assertEqual(textify(p2, builddir=BuildDir.intermediate),
r'$(SourceDir)\foo')
self.assertEqual(textify(p1, builddir=BuildDir.solution),
r'$(SolutionDir)\foo')
self.assertEqual(textify(p2, builddir=BuildDir.solution),
r'$(SourceDir)\foo')
def test_file(self):
class MockCreator:
def __init__(self, msbuild_output=False):
self.msbuild_output = msbuild_output
src = SourceFile(Path('foo'), 'c++')
self.assertEqual(textify(src), r'$(SolutionDir)\foo')
src.creator = MockCreator()
self.assertEqual(textify(src), r'$(IntDir)\foo')
src.creator = MockCreator(True)
self.assertEqual(textify(src), r'$(OutDir)\foo')
def test_invalid(self):
with self.assertRaises(TypeError):
textify(my_safe_str())
class ProjectTest(TestCase):
xmlns = 'http://schemas.microsoft.com/developer/msbuild/2003'
def xpath(self, node, query):
return node.xpath(query, namespaces={'x': self.xmlns})
def assertXPath(self, node, query, result):
self.assertEqual(self.xpath(node, query), result)
class TestVcxProject(ProjectTest):
def test_write(self):
proj = VcxProject(FakeEnv(), 'project', output_file=Path('output'),
files=[{'name': SourceFile(Path('src.cpp'), 'c++'),
'options': {}}])
out = BytesIO()
proj.write(out)
tree = etree.fromstring(out.getvalue())
project = self.xpath(tree, '/x:Project')[0]
self.assertXPath(project, './x:PropertyGroup/x:TargetPath/text()',
[r'$(OutDir)\output'])
self.assertXPath(project, './x:ItemGroup/x:ClCompile/@Include',
[r'$(SolutionDir)\src.cpp'])
def test_duplicate_basename(self):
proj = VcxProject(
FakeEnv(), 'project', output_file=Path('output'), files=[
{'name': SourceFile(Path('a/src.cpp'), 'c++'), 'options': {}},
{'name': SourceFile(Path('b/src.cpp'), 'c++'), 'options': {}},
])
out = BytesIO()
proj.write(out)
tree = etree.fromstring(out.getvalue())
self.assertXPath(tree, 'x:ItemGroup/x:ClCompile/@Include', [
r'$(SolutionDir)\a\src.cpp', r'$(SolutionDir)\b\src.cpp'
])
self.assertXPath(
tree, 'x:ItemGroup/x:ClCompile/x:ObjectFileName/text()',
[r'$(IntDir)\a\src.obj', r'$(IntDir)\b\src.obj']
)
def test_compile_options(self):
proj = VcxProject(FakeEnv(), 'project')
root = E.Element()
proj._write_compile_options(root, {})
self.assertXPath(root, './*', [])
root = E.Element()
proj._write_compile_options(root, {
'warnings': {'level': 'all', 'as_error': True}
})
self.assertXPath(root, './WarningLevel/text()', ['EnableAllWarnings'])
self.assertXPath(root, './TreatWarningAsError/text()', ['true'])
root = E.Element()
proj._write_compile_options(root, {'debug': 'pdb'})
self.assertXPath(root, './DebugInformationFormat/text()',
['ProgramDatabase'])
root = E.Element()
proj._write_compile_options(root, {'includes': ['foo', 'bar']})
self.assertXPath(root, './AdditionalIncludeDirectories/text()',
['foo;bar;%(AdditionalIncludeDirectories)'])
root = E.Element()
proj._write_compile_options(root, {'defines': ['foo', 'bar']})
self.assertXPath(root, './PreprocessorDefinitions/text()',
['foo;bar;%(PreprocessorDefinitions)'])
root = E.Element()
proj._write_compile_options(root, {'pch': {'create': 'foo'}})
self.assertXPath(root, './PrecompiledHeader/text()', ['Create'])
self.assertXPath(root, './PrecompiledHeaderFile/text()', ['foo'])
root = E.Element()
proj._write_compile_options(root, {'pch': {'use': 'foo'}})
self.assertXPath(root, './PrecompiledHeader/text()', ['Use'])
self.assertXPath(root, './PrecompiledHeaderFile/text()', ['foo'])
root = E.Element()
proj._write_compile_options(root, {'runtime': 'dynamic-debug'})
self.assertXPath(root, './RuntimeLibrary/text()',
['MultiThreadedDebugDLL'])
root = E.Element()
proj._write_compile_options(root, {'extra': ['foo', 'bar']})
self.assertXPath(root, './AdditionalOptions/text()',
['foo bar %(AdditionalOptions)'])
def test_link_options(self):
proj = VcxProject(FakeEnv(), 'project')
root = E.Element()
proj._write_link_options(root, {})
self.assertXPath(root, './OutputFile/text()', ['$(TargetPath)'])
root = E.Element()
proj._write_link_options(root, {'debug': True})
self.assertXPath(root, './GenerateDebugInformation/text()', ['true'])
root = E.Element()
proj._write_link_options(root, {'import_lib': 'foo'})
self.assertXPath(root, './ImportLibrary/text()', ['foo'])
root = E.Element()
proj._write_link_options(root, {'extra': ['foo', 'bar']})
self.assertXPath(root, './AdditionalOptions/text()',
['foo bar %(AdditionalOptions)'])
root = E.Element()
proj._write_link_options(root, {'libs': ['foo', 'bar']})
self.assertXPath(root, './AdditionalDependencies/text()',
['foo;bar;%(AdditionalDependencies)'])
class TestNoopProject(ProjectTest):
def test_write(self):
proj = NoopProject(FakeEnv(), 'project')
out = BytesIO()
proj.write(out)
tree = etree.fromstring(out.getvalue())
self.assertXPath(tree, '/x:Project/x:Target/@Name', ['Build'])
class TestCommandProject(ProjectTest):
def test_write(self):
proj = CommandProject(FakeEnv(), 'project', commands=[
CommandProject.task('Exec', Command='echo foo')
])
out = BytesIO()
proj.write(out)
tree = etree.fromstring(out.getvalue())
build = self.xpath(tree, '/x:Project/x:Target[@Name="Build"]')[0]
self.assertXPath(build, './x:MakeDir/@Directories', ['$(OutDir)'])
self.assertXPath(build, './x:Exec/@Command', ['echo foo'])
# Now try without makedir.
proj = CommandProject(FakeEnv(), 'project', commands=[
CommandProject.task('Exec', Command='echo foo')
], makedir=False)
out = BytesIO()
proj.write(out)
tree = etree.fromstring(out.getvalue())
build = self.xpath(tree, '/x:Project/x:Target[@Name="Build"]')[0]
self.assertXPath(build, './x:MakeDir', [])
self.assertXPath(build, './x:Exec/@Command', ['echo foo'])
def test_convert_attr(self):
self.assertEqual(CommandProject.convert_attr('foo'), 'foo')
self.assertEqual(CommandProject.convert_attr(['foo', 'bar']),
'foo;bar')
def test_convert_command(self):
self.assertEqual(CommandProject.convert_command(['foo', 'bar']),
'foo bar')
self.assertEqual(CommandProject.convert_command(
[Command(None, 'rule', 'var', 'command'), 'bar']
), 'command bar')
| [
"[email protected]"
] | |
0863e527af5d732af536a3f43a250d44e42f5214 | bc441bb06b8948288f110af63feda4e798f30225 | /ops_automation_sdk/model/topology/view_pb2.pyi | 574a1e0fb8757902dfdc5b3c1c4fdc934341eb51 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,314 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
RepeatedScalarFieldContainer as google___protobuf___internal___containers___RepeatedScalarFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from ops_automation_sdk.model.topology.area_pb2 import (
Area as ops_automation_sdk___model___topology___area_pb2___Area,
)
from ops_automation_sdk.model.topology.link_pb2 import (
Link as ops_automation_sdk___model___topology___link_pb2___Link,
)
from ops_automation_sdk.model.topology.node_pb2 import (
Node as ops_automation_sdk___model___topology___node_pb2___Node,
)
from ops_automation_sdk.model.topology.note_pb2 import (
Note as ops_automation_sdk___model___topology___note_pb2___Note,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class View(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class Diff(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
@property
def addNodes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___node_pb2___Node]: ...
@property
def removeNodes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___node_pb2___Node]: ...
@property
def addLinks(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___link_pb2___Link]: ...
@property
def removeLinks(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___link_pb2___Link]: ...
def __init__(self,
*,
addNodes : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___node_pb2___Node]] = None,
removeNodes : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___node_pb2___Node]] = None,
addLinks : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___link_pb2___Link]] = None,
removeLinks : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___link_pb2___Link]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> View.Diff: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> View.Diff: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"addLinks",b"addLinks",u"addNodes",b"addNodes",u"removeLinks",b"removeLinks",u"removeNodes",b"removeNodes"]) -> None: ...
id = ... # type: typing___Text
name = ... # type: typing___Text
creator = ... # type: typing___Text
modifier = ... # type: typing___Text
readAuthorizers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
writeAuthorizers = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[typing___Text]
version = ... # type: typing___Text
ctime = ... # type: builtin___int
mtime = ... # type: builtin___int
@property
def rootNode(self) -> ops_automation_sdk___model___topology___node_pb2___Node: ...
@property
def nodes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___node_pb2___Node]: ...
@property
def links(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___link_pb2___Link]: ...
@property
def areas(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___area_pb2___Area]: ...
@property
def notes(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[ops_automation_sdk___model___topology___note_pb2___Note]: ...
@property
def diff(self) -> View.Diff: ...
def __init__(self,
*,
id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
creator : typing___Optional[typing___Text] = None,
modifier : typing___Optional[typing___Text] = None,
readAuthorizers : typing___Optional[typing___Iterable[typing___Text]] = None,
writeAuthorizers : typing___Optional[typing___Iterable[typing___Text]] = None,
version : typing___Optional[typing___Text] = None,
ctime : typing___Optional[builtin___int] = None,
mtime : typing___Optional[builtin___int] = None,
rootNode : typing___Optional[ops_automation_sdk___model___topology___node_pb2___Node] = None,
nodes : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___node_pb2___Node]] = None,
links : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___link_pb2___Link]] = None,
areas : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___area_pb2___Area]] = None,
notes : typing___Optional[typing___Iterable[ops_automation_sdk___model___topology___note_pb2___Note]] = None,
diff : typing___Optional[View.Diff] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> View: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> View: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"diff",b"diff",u"rootNode",b"rootNode"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"areas",b"areas",u"creator",b"creator",u"ctime",b"ctime",u"diff",b"diff",u"id",b"id",u"links",b"links",u"modifier",b"modifier",u"mtime",b"mtime",u"name",b"name",u"nodes",b"nodes",u"notes",b"notes",u"readAuthorizers",b"readAuthorizers",u"rootNode",b"rootNode",u"version",b"version",u"writeAuthorizers",b"writeAuthorizers"]) -> None: ...
| [
"[email protected]"
] | |
a52188f6b0918565c35293d5307352a232165999 | daa8ed885ab8cac1d0c568bb01cb7c6426ae545a | /arithmetic.py | 64238ac84292597ced56bf9c3f77eb27f706d7f0 | [] | no_license | k8k/HBExercise02 | f20cacc9761ab9d57be54416be0bc1a63967681d | de01b9a5466c662d823c1fbf16ff110684538d54 | refs/heads/master | 2021-01-19T06:18:35.253982 | 2014-10-02T01:03:33 | 2014-10-02T01:03:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | def add(num1, num2):
return (num1 + num2)
def subtract(num1, num2):
return (num1 - num2)
def multiply(num1, num2):
return (num1 * num2)
def divide(num1, num2):
return (float(num1) / num2)
def square(num1):
return (num1 ** 2)
def cube(num1):
return (num1 ** 3)
def power(num1, num2):
    return (num1 ** num2)
def mod(num1, num2):
return (num1 % num2)
| [
"[email protected]"
] | |
a491e7f4fe918a165410005e4a2a8a193a1fdc41 | 6bc7062b2f99d0c54fd1bb74c1c312a2e3370e24 | /crowdfunding/projects/migrations/0011_auto_20200823_1422.py | c685f8e23363ce08211f7cb85847cbf765624d1e | [] | no_license | marinkoellen/drf-proj | f2d1f539efb877df69d285bd2fe6d5e789709933 | 874549d68ab80a774988c83706bb7934e035de42 | refs/heads/master | 2022-12-25T16:53:52.187704 | 2020-10-03T03:54:06 | 2020-10-03T03:54:06 | 289,620,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # Generated by Django 3.0.8 on 2020-08-23 06:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0010_auto_20200823_1421'),
]
operations = [
migrations.AlterField(
model_name='project',
name='date_created',
field=models.DateTimeField(auto_now_add=True),
),
]
| [
"[email protected]"
] | |
03b4d6ed65a5531775caf12fd19231937b5b77c8 | 5c0a253bf2fb83db01abc99097871c965f4cf565 | /spark/crm/PROC_A_RPT_SUN_INFO_DETAIL.py | 027630dff0344395b1cf1fef3a04b1f388a59356 | [] | no_license | airuibel/python-1 | 3b16553ede9d069ec56efbb12a89a4de6917a447 | 94f387e2d406fab2128bcfffce6146da720b2ccc | refs/heads/master | 2020-07-05T15:43:00.957221 | 2017-09-17T14:05:48 | 2017-09-17T14:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,493 | py | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_A_RPT_SUN_INFO_DETAIL').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
# prepare the dates used throughout the job
etl_date = sys.argv[1]
# ETL date
V_DT = etl_date
# previous day
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
# first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
# last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
# 10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
#----------------------------------------------business logic begins----------------------------------------------------------
# source tables
OCRM_F_CI_SUN_BASEINFO = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_SUN_BASEINFO/*')
OCRM_F_CI_SUN_BASEINFO.registerTempTable("OCRM_F_CI_SUN_BASEINFO")
OCRM_F_CI_SUN_BASE = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_SUN_BASE/*')
OCRM_F_CI_SUN_BASE.registerTempTable("OCRM_F_CI_SUN_BASE")
OCRM_F_CUST_ELECTRICITY_COST = sqlContext.read.parquet(hdfs+'/OCRM_F_CUST_ELECTRICITY_COST/*')
OCRM_F_CUST_ELECTRICITY_COST.registerTempTable("OCRM_F_CUST_ELECTRICITY_COST")
OCRM_F_CUST_WATER_COST = sqlContext.read.parquet(hdfs+'/OCRM_F_CUST_WATER_COST/*')
OCRM_F_CUST_WATER_COST.registerTempTable("OCRM_F_CUST_WATER_COST")
OCRM_F_CI_CUSTOMER_TYPE = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUSTOMER_TYPE/*')
OCRM_F_CI_CUSTOMER_TYPE.registerTempTable("OCRM_F_CI_CUSTOMER_TYPE")
OCRM_F_CI_PER_CUST_INFO = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_PER_CUST_INFO/*')
OCRM_F_CI_PER_CUST_INFO.registerTempTable("OCRM_F_CI_PER_CUST_INFO")
ACRM_F_CI_ASSET_BUSI_PROTO = sqlContext.read.parquet(hdfs+'/ACRM_F_CI_ASSET_BUSI_PROTO/*')
ACRM_F_CI_ASSET_BUSI_PROTO.registerTempTable("ACRM_F_CI_ASSET_BUSI_PROTO")
# target table:
# RPT_SUN_INFO_DETAIL is an insert/update table written by several PY scripts; this one is the first
# Process: delete every data file in the working directory, then copy yesterday's file from the BK directory into the working directory as today's data file
ret = os.system("hdfs dfs -rm -r /"+dbname+"/RPT_SUN_INFO_DETAIL/*.parquet")
ret = os.system("hdfs dfs -cp -f /"+dbname+"/RPT_SUN_INFO_DETAIL_BK/"+V_DT_LD+".parquet /"+dbname+"/RPT_SUN_INFO_DETAIL/"+V_DT+".parquet")
RPT_SUN_INFO_DETAIL = sqlContext.read.parquet(hdfs+'/RPT_SUN_INFO_DETAIL/*')
RPT_SUN_INFO_DETAIL.registerTempTable("RPT_SUN_INFO_DETAIL")
# task [21] 001-01::
V_STEP = V_STEP + 1
# MERGE operation
sql="""
SELECT CASE WHEN T.VILLAGE IS NULL THEN concat(T.TOWN,T.GROUPID)
WHEN T.TOWN IS NULL THEN concat(T.COUNTY,T.GROUPID)
ELSE concat(T.VILLAGE,T.GROUPID) END AS ADDR,
T.CUST_ID,
CASE WHEN T.ISHOUSEHOST = '1' THEN T.CUST_NAME ELSE T7.CUST_NAME END AS CUST_NAME,
T1.FAM_TOTASSET,
T1.FAM_DEBT ,
T2.SUM_PRICE AS ELEC,
T3.SUM_PRICE AS WATER,
T.FR_ID,
CONCAT(T4.VALUE,'县',T5.VALUE,'镇',T6.VALUE,'村',T.GROUPID,'组') ADDR_DESC
FROM OCRM_F_CI_SUN_BASEINFO T
LEFT JOIN OCRM_F_CI_SUN_BASE T1 ON T.CUST_ID = T1.CUST_ID AND T1.FR_ID = T.FR_ID
LEFT JOIN OCRM_F_CUST_ELECTRICITY_COST T2 ON T1.CUST_ID = T2.CUST_ID AND T2.FR_ID = T1.FR_ID
LEFT JOIN OCRM_F_CUST_WATER_COST T3 ON T1.CUST_ID = T3.CUST_ID AND T3.FR_ID = T1.FR_ID
LEFT JOIN OCRM_F_CI_CUSTOMER_TYPE T4 ON T.COUNTY = T4.ID AND T4.FR_ID = T.FR_ID
LEFT JOIN OCRM_F_CI_CUSTOMER_TYPE T5 ON T.TOWN = T5.ID AND T5.FR_ID = T.FR_ID
LEFT JOIN OCRM_F_CI_CUSTOMER_TYPE T6 ON T.VILLAGE = T6.ID AND T6.FR_ID = T.FR_ID
LEFT JOIN OCRM_F_CI_SUN_BASEINFO T7 ON T7.FR_ID = T.FR_ID AND T.COUNTY = T7.COUNTY
AND T.VILLAGE = T7.VILLAGE AND T.TOWN = T7.TOWN AND T.GROUPID = T7.GROUPID AND T.DOORID = T7.DOORID AND T7.ISHOUSEHOST = '1'
--WHERE T.FR_ID = V_FR_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_RPT_SUN_INFO_DETAIL_01 = sqlContext.sql(sql)
TMP_RPT_SUN_INFO_DETAIL_01.registerTempTable("TMP_RPT_SUN_INFO_DETAIL_01")
sql="""
select
src.ADDR --VARCHAR
,src.CUST_ID --VARCHAR
,src.CUST_NAME --VARCHAR
,src.FAM_TOTASSET --DECIMAL(22,2)
,src.FAM_DEBT --DECIMAL(22,2)
,dst.PRECREDIT --DECIMAL(22,2)
,dst.SIGNCREDIT --DECIMAL(22,2)
,dst.USECREDIT --DECIMAL(22,2)
,dst.FAM_CRE --DECIMAL(22,2)
,src.ELEC --DECIMAL(22,2)
,src.WATER --DECIMAL(22,2)
,src.FR_ID --VARCHAR
,src.ADDR_DESC --VARCHAR
from TMP_RPT_SUN_INFO_DETAIL_01 src
left join RPT_SUN_INFO_DETAIL dst on src.CUST_ID=dst.CUST_ID and src.FR_ID=dst.FR_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_RPT_SUN_INFO_DETAIL_INNER1 = sqlContext.sql(sql)
TMP_RPT_SUN_INFO_DETAIL_INNER1.registerTempTable("TMP_RPT_SUN_INFO_DETAIL_INNER1")
sql="""
select
src.ADDR --VARCHAR
,src.CUST_ID --VARCHAR
,src.CUST_NAME --VARCHAR
,src.FAM_TOTASSET --DECIMAL(22,2)
,src.FAM_DEBT --DECIMAL(22,2)
,dst.PRECREDIT --DECIMAL(22,2)
,dst.SIGNCREDIT --DECIMAL(22,2)
,dst.USECREDIT --DECIMAL(22,2)
,dst.FAM_CRE --DECIMAL(22,2)
,src.ELEC --DECIMAL(22,2)
,src.WATER --DECIMAL(22,2)
,src.FR_ID --VARCHAR
,src.ADDR_DESC --VARCHAR
from RPT_SUN_INFO_DETAIL src
left join TMP_RPT_SUN_INFO_DETAIL_INNER1 dst on src.CUST_ID=dst.CUST_ID and src.FR_ID=dst.FR_ID
where dst.CUST_ID is null
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_RPT_SUN_INFO_DETAIL_INNER2 = sqlContext.sql(sql)
TMP_RPT_SUN_INFO_DETAIL_INNER2.registerTempTable("TMP_RPT_SUN_INFO_DETAIL_INNER2")
RPT_SUN_INFO_DETAIL_UNION = TMP_RPT_SUN_INFO_DETAIL_INNER2.unionAll(TMP_RPT_SUN_INFO_DETAIL_INNER1)
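# The two left joins above emulate a SQL MERGE in Spark: INNER1 carries every
# source row enriched with the matching target columns (the refreshed rows),
# INNER2 keeps the target rows with no matching source, and their union
# becomes the new full snapshot of RPT_SUN_INFO_DETAIL.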
dfn="RPT_SUN_INFO_DETAIL/"+V_DT+".parquet"
RPT_SUN_INFO_DETAIL_UNION.cache()
nrows = RPT_SUN_INFO_DETAIL_UNION.count()
RPT_SUN_INFO_DETAIL_UNION.write.save(path=hdfs + '/' + dfn, mode='overwrite')
RPT_SUN_INFO_DETAIL_UNION.unpersist()
# copy today's data into BK
ret = os.system("hdfs dfs -rm -r /"+dbname+"/RPT_SUN_INFO_DETAIL_BK/"+V_DT+".parquet")
ret = os.system("hdfs dfs -cp -f /"+dbname+"/RPT_SUN_INFO_DETAIL/"+V_DT+".parquet /"+dbname+"/RPT_SUN_INFO_DETAIL_BK/"+V_DT+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds)
# task [21] 001-01::
V_STEP = V_STEP + 1
RPT_SUN_INFO_DETAIL = sqlContext.read.parquet(hdfs+'/RPT_SUN_INFO_DETAIL/*')
RPT_SUN_INFO_DETAIL.registerTempTable("RPT_SUN_INFO_DETAIL")
# update the household head's name (UPDATE operation)
sql="""
select
dst.ADDR --VARCHAR
,dst.CUST_ID --VARCHAR
,COALESCE(dst.CUST_NAME,src.HZ_NAME) AS CUST_NAME --VARCHAR
,dst.FAM_TOTASSET --DECIMAL(22,2)
,dst.FAM_DEBT --DECIMAL(22,2)
,dst.PRECREDIT --DECIMAL(22,2)
,dst.SIGNCREDIT --DECIMAL(22,2)
,dst.USECREDIT --DECIMAL(22,2)
,dst.FAM_CRE --DECIMAL(22,2)
,dst.ELEC --DECIMAL(22,2)
,dst.WATER --DECIMAL(22,2)
,dst.FR_ID --VARCHAR
,dst.ADDR_DESC --VARCHAR
from RPT_SUN_INFO_DETAIL dst
left join OCRM_F_CI_PER_CUST_INFO src on dst.CUST_ID=src.CUST_ID and dst.FR_ID=src.FR_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_RPT_SUN_INFO_DETAIL_02 = sqlContext.sql(sql)
dfn="RPT_SUN_INFO_DETAIL/"+V_DT+".parquet"
TMP_RPT_SUN_INFO_DETAIL_02.cache()
nrows = TMP_RPT_SUN_INFO_DETAIL_02.count()
TMP_RPT_SUN_INFO_DETAIL_02.write.save(path=hdfs + '/' + dfn, mode='overwrite')
TMP_RPT_SUN_INFO_DETAIL_02.unpersist()
# copy today's data into BK
ret = os.system("hdfs dfs -rm -r /"+dbname+"/RPT_SUN_INFO_DETAIL_BK/"+V_DT+".parquet")
ret = os.system("hdfs dfs -cp -f /"+dbname+"/RPT_SUN_INFO_DETAIL/"+V_DT+".parquet /"+dbname+"/RPT_SUN_INFO_DETAIL_BK/"+V_DT+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds)
# task [21] 001-01::
V_STEP = V_STEP + 1
RPT_SUN_INFO_DETAIL = sqlContext.read.parquet(hdfs+'/RPT_SUN_INFO_DETAIL/*')
RPT_SUN_INFO_DETAIL.registerTempTable("RPT_SUN_INFO_DETAIL")
# MERGE operation; refreshes granted credit (PRECREDIT) and available credit (USECREDIT)
sql="""
SELECT A.FR_ID,A.CUST_ID,
cast(SUM(CASE WHEN A.CONT_STS = '1000' AND A.PRODUCT_ID LIKE '3%' THEN COALESCE(A.CONT_AMT,0) END) as DECIMAL(22,2)) as CONT_AMT ,
cast(SUM(COALESCE(BAL,0)) as DECIMAL(22,2)) AS BAL
FROM ACRM_F_CI_ASSET_BUSI_PROTO A
JOIN RPT_SUN_INFO_DETAIL B ON B.FR_ID = A.FR_ID AND A.CUST_ID = B.CUST_ID
GROUP BY A.FR_ID,A.CUST_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_RPT_SUN_INFO_DETAIL_03 = sqlContext.sql(sql)
TMP_RPT_SUN_INFO_DETAIL_03.registerTempTable("TMP_RPT_SUN_INFO_DETAIL_03")
sql="""
select
src.ADDR --VARCHAR
,src.CUST_ID --VARCHAR
,src.CUST_NAME --VARCHAR
,src.FAM_TOTASSET --DECIMAL(22,2)
,src.FAM_DEBT --DECIMAL(22,2)
,cast(CASE WHEN dst.CUST_ID is null then src.PRECREDIT else dst.CONT_AMT end as DECIMAL(22,2)) as PRECREDIT --DECIMAL(22,2)
,src.SIGNCREDIT --DECIMAL(22,2)
,cast(CASE WHEN dst.CUST_ID is null then src.USECREDIT else (dst.CONT_AMT - dst.BAL) end as DECIMAL(22,2)) as USECREDIT --DECIMAL(22,2)
,src.FAM_CRE --DECIMAL(22,2)
,src.ELEC --DECIMAL(22,2)
,src.WATER --DECIMAL(22,2)
,src.FR_ID --VARCHAR
,src.ADDR_DESC --VARCHAR
from RPT_SUN_INFO_DETAIL src
left join TMP_RPT_SUN_INFO_DETAIL_03 dst on src.CUST_ID=dst.CUST_ID and src.FR_ID=dst.FR_ID
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_RPT_SUN_INFO_DETAIL_04 = sqlContext.sql(sql)
dfn="RPT_SUN_INFO_DETAIL/"+V_DT+".parquet"
TMP_RPT_SUN_INFO_DETAIL_04.cache()
nrows = TMP_RPT_SUN_INFO_DETAIL_04.count()
TMP_RPT_SUN_INFO_DETAIL_04.write.save(path=hdfs + '/' + dfn, mode='overwrite')
TMP_RPT_SUN_INFO_DETAIL_04.unpersist()
# copy today's data into BK
ret = os.system("hdfs dfs -rm -r /"+dbname+"/RPT_SUN_INFO_DETAIL_BK/"+V_DT+".parquet")
ret = os.system("hdfs dfs -cp -f /"+dbname+"/RPT_SUN_INFO_DETAIL/"+V_DT+".parquet /"+dbname+"/RPT_SUN_INFO_DETAIL_BK/"+V_DT+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds)
| [
"[email protected]"
] | |
6f2e0b21a9d3099d28f4f0e9656df7eb350b08b5 | 4086ded777dab91e3b88e376c9e86487ea4f670f | /src/Modules/Trinity.FFI/Trinity.FFI.Python/GraphEngine/Storage/cache_manager.py | 1367785a64745e5cd5f3eed7f0436c7d8e31563d | [
"MIT"
] | permissive | qdoop/GraphEngine | a618c5c7459036e24342288e5ae13a023e4c7228 | d83381c781edc4040824c1e31057789939530eff | refs/heads/master | 2020-03-07T03:28:56.175677 | 2018-03-30T00:35:52 | 2018-03-30T00:35:52 | 127,236,851 | 0 | 0 | MIT | 2018-03-29T04:30:59 | 2018-03-29T04:30:59 | null | UTF-8 | Python | false | false | 2,139 | py | from GraphEngine import GraphMachine
def filter_null_args(arg_list):
return filter(lambda x: x is not None, arg_list)
class CacheManager:
is_accessor = False
inst = None
module_id = -1
def load_cell(self, cell_id):
return self.inst.LoadCell(cell_id)
def save_cell(self, index=None, cell_id=None, write_ahead_log_options=None):
return self.inst.SaveCell(*filter_null_args((write_ahead_log_options, cell_id, index)))
def get_id(self, index):
return self.inst.CellGetId(index)
def get_field(self, index, field_name):
return self.inst.CellGetField(index, field_name)
def set_field(self, index, field_name, value):
return self.inst.CellSetField(index, field_name, value)
def append_field(self, index, field_name, content):
return self.inst.CellAppendField(index, field_name, content)
def remove_field(self, index, field_name):
return self.inst.CellRemoveField(index, field_name)
def delete(self, index):
return self.inst.Del(index)
def dispose(self):
return self.inst.Dispose()
@staticmethod
def remove_cell(cell_id):
GraphMachine.storage.RemoveCellFromStorage(cell_id)
GraphMachine.id_allocator.dealloc(cell_id)
class CellAccessorManager(CacheManager):
def __init__(self):
self.inst = GraphMachine.storage.CellAccessorManager()
self.module_id = self.inst.ModuleId
        self.is_accessor = True  # this manager hands out cell accessors
def use_cell(self, cell_id, options=None, cell_type=None):
return self.inst.UseCell(cell_id, *filter_null_args((options, cell_type)))
def __enter__(self):
# TODO
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# TODO
self.inst.Dispose()
del self.inst
class CellManager(CacheManager):
def __init__(self):
self.inst = GraphMachine.storage.CellManager()
self.module_id = self.inst.ModuleId
        self.is_accessor = False  # plain cell manager, not an accessor
def new_cell(self, cell_type, cell_id=None, content=None):
return self.inst.NewCell(*filter_null_args((cell_type, cell_id, content)))
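# Hedged usage sketch (not part of the original module). Requires a running
# GraphEngine instance behind GraphMachine; the cell id, type and field names
# below are hypothetical.
#
# with CellAccessorManager() as cam:
#     idx = cam.use_cell(42, cell_type='MyCell')
#     cam.set_field(idx, 'Name', 'alice')
#     cam.save_cell(index=idx)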
| [
"[email protected]"
] | |
1ee6d53f727f5e366a1cf7cf58dc8d0241e40160 | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /cherrypy/_cpwsgi.py | 2ff83942a96d2b4dc51c03d759b9c975d2127900 | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,916 | py | #Embedded file name: cherrypy\_cpwsgi.py
"""WSGI interface (see PEP 333 and 3333).
Note that WSGI environ keys and values are 'native strings'; that is,
whatever the type of "" is. For Python 2, that's a byte string; for Python 3,
it's a unicode string. But PEP 3333 says: "even if Python's str type is
actually Unicode "under the hood", the content of native strings must
still be translatable to bytes via the Latin-1 encoding!"
"""
import sys as _sys
import cherrypy as _cherrypy
from cherrypy._cpcompat import BytesIO
from cherrypy import _cperror
from cherrypy.lib import httputil
def downgrade_wsgi_ux_to_1x(environ):
"""Return a new environ dict for WSGI 1.x from the given WSGI u.x environ."""
env1x = {}
url_encoding = environ[u'wsgi.url_encoding']
for k, v in environ.items():
if k in (u'PATH_INFO', u'SCRIPT_NAME', u'QUERY_STRING'):
v = v.encode(url_encoding)
elif isinstance(v, unicode):
v = v.encode('ISO-8859-1')
env1x[k.encode('ISO-8859-1')] = v
return env1x
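# Hedged illustration (not part of the original source), under Python 2:
# downgrade_wsgi_ux_to_1x({u'wsgi.url_encoding': u'utf-8',
#                          u'PATH_INFO': u'/caf\u00e9'})
# returns {'wsgi.url_encoding': 'utf-8', 'PATH_INFO': '/caf\xc3\xa9'}:
# keys and ordinary unicode values are re-encoded as Latin-1, while the
# URL-ish keys are re-encoded with the advertised wsgi.url_encoding.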
class VirtualHost(object):
"""Select a different WSGI application based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different applications. For example::
root = Root()
RootApp = cherrypy.Application(root)
Domain2App = cherrypy.Application(root)
SecureApp = cherrypy.Application(Secure())
vhost = cherrypy._cpwsgi.VirtualHost(RootApp,
domains={'www.domain2.example': Domain2App,
'www.domain2.example:443': SecureApp,
})
cherrypy.tree.graft(vhost)
"""
default = None
use_x_forwarded_host = True
domains = {}
def __init__(self, default, domains = None, use_x_forwarded_host = True):
self.default = default
self.domains = domains or {}
self.use_x_forwarded_host = use_x_forwarded_host
def __call__(self, environ, start_response):
domain = environ.get('HTTP_HOST', '')
if self.use_x_forwarded_host:
domain = environ.get('HTTP_X_FORWARDED_HOST', domain)
nextapp = self.domains.get(domain)
if nextapp is None:
nextapp = self.default
return nextapp(environ, start_response)
class InternalRedirector(object):
"""WSGI middleware that handles raised cherrypy.InternalRedirect."""
def __init__(self, nextapp, recursive = False):
self.nextapp = nextapp
self.recursive = recursive
def __call__(self, environ, start_response):
redirections = []
while True:
environ = environ.copy()
try:
return self.nextapp(environ, start_response)
except _cherrypy.InternalRedirect as ir:
sn = environ.get('SCRIPT_NAME', '')
path = environ.get('PATH_INFO', '')
qs = environ.get('QUERY_STRING', '')
old_uri = sn + path
if qs:
old_uri += '?' + qs
redirections.append(old_uri)
if not self.recursive:
new_uri = sn + ir.path
if ir.query_string:
new_uri += '?' + ir.query_string
if new_uri in redirections:
ir.request.close()
raise RuntimeError('InternalRedirector visited the same URL twice: %r' % new_uri)
environ['REQUEST_METHOD'] = 'GET'
environ['PATH_INFO'] = ir.path
environ['QUERY_STRING'] = ir.query_string
environ['wsgi.input'] = BytesIO()
environ['CONTENT_LENGTH'] = '0'
environ['cherrypy.previous_request'] = ir.request
class ExceptionTrapper(object):
"""WSGI middleware that traps exceptions."""
def __init__(self, nextapp, throws = (KeyboardInterrupt, SystemExit)):
self.nextapp = nextapp
self.throws = throws
def __call__(self, environ, start_response):
return _TrappedResponse(self.nextapp, environ, start_response, self.throws)
class _TrappedResponse(object):
response = iter([])
def __init__(self, nextapp, environ, start_response, throws):
self.nextapp = nextapp
self.environ = environ
self.start_response = start_response
self.throws = throws
self.started_response = False
self.response = self.trap(self.nextapp, self.environ, self.start_response)
self.iter_response = iter(self.response)
def __iter__(self):
self.started_response = True
return self
def next(self):
return self.trap(self.iter_response.next)
def close(self):
if hasattr(self.response, 'close'):
self.response.close()
def trap(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except self.throws:
raise
except StopIteration:
raise
except:
tb = _cperror.format_exc()
_cherrypy.log(tb, severity=40)
if not _cherrypy.request.show_tracebacks:
tb = ''
s, h, b = _cperror.bare_error(tb)
if self.started_response:
self.iter_response = iter([])
else:
self.iter_response = iter(b)
try:
self.start_response(s, h, _sys.exc_info())
except:
_cherrypy.log(traceback=True, severity=40)
raise
if self.started_response:
return ''.join(b)
else:
return b
class AppResponse(object):
"""WSGI response iterable for CherryPy applications."""
def __init__(self, environ, start_response, cpapp):
if environ.get(u'wsgi.version') == (u'u', 0):
environ = downgrade_wsgi_ux_to_1x(environ)
self.environ = environ
self.cpapp = cpapp
try:
self.run()
except:
self.close()
raise
r = _cherrypy.serving.response
self.iter_response = iter(r.body)
self.write = start_response(r.output_status, r.header_list)
def __iter__(self):
return self
def next(self):
return self.iter_response.next()
def close(self):
"""Close and de-reference the current request and response. (Core)"""
self.cpapp.release_serving()
def run(self):
"""Create a Request object using environ."""
env = self.environ.get
local = httputil.Host('', int(env('SERVER_PORT', 80)), env('SERVER_NAME', ''))
remote = httputil.Host(env('REMOTE_ADDR', ''), int(env('REMOTE_PORT', -1) or -1), env('REMOTE_HOST', ''))
scheme = env('wsgi.url_scheme')
sproto = env('ACTUAL_SERVER_PROTOCOL', 'HTTP/1.1')
request, resp = self.cpapp.get_serving(local, remote, scheme, sproto)
request.login = env('LOGON_USER') or env('REMOTE_USER') or None
request.multithread = self.environ['wsgi.multithread']
request.multiprocess = self.environ['wsgi.multiprocess']
request.wsgi_environ = self.environ
request.prev = env('cherrypy.previous_request', None)
meth = self.environ['REQUEST_METHOD']
path = httputil.urljoin(self.environ.get('SCRIPT_NAME', ''), self.environ.get('PATH_INFO', ''))
qs = self.environ.get('QUERY_STRING', '')
rproto = self.environ.get('SERVER_PROTOCOL')
headers = self.translate_headers(self.environ)
rfile = self.environ['wsgi.input']
request.run(meth, path, qs, rproto, headers, rfile)
headerNames = {'HTTP_CGI_AUTHORIZATION': 'Authorization',
'CONTENT_LENGTH': 'Content-Length',
'CONTENT_TYPE': 'Content-Type',
'REMOTE_HOST': 'Remote-Host',
'REMOTE_ADDR': 'Remote-Addr'}
def translate_headers(self, environ):
"""Translate CGI-environ header names to HTTP header names."""
for cgiName in environ:
if cgiName in self.headerNames:
yield (self.headerNames[cgiName], environ[cgiName])
elif cgiName[:5] == 'HTTP_':
translatedHeader = cgiName[5:].replace('_', '-')
yield (translatedHeader, environ[cgiName])
class CPWSGIApp(object):
"""A WSGI application object for a CherryPy Application."""
pipeline = [('ExceptionTrapper', ExceptionTrapper), ('InternalRedirector', InternalRedirector)]
head = None
config = {}
response_class = AppResponse
def __init__(self, cpapp, pipeline = None):
self.cpapp = cpapp
self.pipeline = self.pipeline[:]
if pipeline:
self.pipeline.extend(pipeline)
self.config = self.config.copy()
def tail(self, environ, start_response):
"""WSGI application callable for the actual CherryPy application.
You probably shouldn't call this; call self.__call__ instead,
so that any WSGI middleware in self.pipeline can run first.
"""
return self.response_class(environ, start_response, self.cpapp)
def __call__(self, environ, start_response):
head = self.head
if head is None:
head = self.tail
for name, callable in self.pipeline[::-1]:
conf = self.config.get(name, {})
head = callable(head, **conf)
self.head = head
return head(environ, start_response)
def namespace_handler(self, k, v):
"""Config handler for the 'wsgi' namespace."""
if k == 'pipeline':
self.pipeline.extend(v)
elif k == 'response_class':
self.response_class = v
else:
name, arg = k.split('.', 1)
bucket = self.config.setdefault(name, {})
bucket[arg] = v
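# Pipeline sketch (GzipMiddleware is a hypothetical middleware class):
#   app = CPWSGIApp(cherrypy.Application(Root()))
#   app.namespace_handler('pipeline', [('Gzip', GzipMiddleware)])
#   app.namespace_handler('Gzip.compress_level', 9)
# On the first request, __call__ builds the chain tail-first, so the config
# stored under each middleware's name is passed to its constructor as kwargs.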
| [
"[email protected]"
] | |
53ab5d874fe9b4a2789f77ec49d2cfe1ddc0e0fd | f0d0ea29240c53b6ce1c4b06095b528ece02fdd7 | /utils/stdvalue.py | 93e1cb555fd015df9031244044dc821b516d2157 | [] | no_license | zhifuliu/dianjing | 477529ccd6159329e1bc121aeb2ff328ee499f4a | 7b3f6d58f5bc0738651d8d72c9a24df4ade0ed36 | refs/heads/master | 2020-03-21T09:10:28.343268 | 2017-03-24T03:06:24 | 2017-03-24T03:06:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | # -*- coding: utf-8 -*-
"""
Author: Wang Chao <[email protected]>
Filename: stdvalue
Date Created: 2016-09-30 18:30
Description:
"""
MAX_INT = 2 ** 31 - 1 | [
"[email protected]"
] | |
24a2a2e09bb18264bdfb641a5ed9b3275d98e05b | a9f56cfe2fafc7981ae4d37afd49d3049d2f98b9 | /t_sms/models.py | b4639b553775a4ebef0802490b6ce52b673e181b | [] | no_license | talhajubair100/django_allauth_language_currence_test | 78dc340e4f3392c0dd18cd2e666c02ae04eba7bc | 9530d548ea092145bc583b8a839a9bac891ef098 | refs/heads/main | 2023-02-21T20:25:42.332322 | 2021-01-24T14:10:13 | 2021-01-24T14:10:13 | 326,125,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | from django.db import models
import os
from twilio.rest import Client
# Create your models here.
class Detail(models.Model):
name = models.CharField(max_length=150)
phone = models.CharField(max_length=14)
details = models.CharField(max_length=200)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
account_sid = '****************'
auth_token = '******************'
client = Client(account_sid, auth_token)
message = client.messages.create(
body=f'Hi {self.name}! {self.details}',
from_='******',
to='+8801735700187'
)
print('your sms id',message.sid)
return super().save(*args, **kwargs)
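# Usage sketch (hypothetical data; the Twilio credentials above must be real):
#   Detail.objects.create(name='Ada', phone='+15551234567',
#                         details='Your order has shipped.')
# Creating the row runs save(), which sends the SMS before writing to the DB.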
| [
"[email protected]"
] | |
2569dc664dc758ba28cbbdbad398093631bd023b | ccfd5f8efc0cf6a9ec1867860e5a16d19beaee90 | /neobistime/events/migrations/0009_event_image.py | a338af06b377c6da5159e7c0fa0d4c60f2a9b68a | [] | no_license | magina671/neobistime | bf9bc759acf39367e3be42164c05575c1b8ed7e8 | 6dc3f8c8195997cfd4cabd5666e89d6f80a7b95c | refs/heads/master | 2022-12-26T21:37:16.171324 | 2020-10-01T18:37:31 | 2020-10-01T18:37:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | # Generated by Django 2.2.6 on 2020-08-05 14:28
from django.db import migrations
import easy_thumbnails.fields
class Migration(migrations.Migration):
dependencies = [
('events', '0008_auto_20200805_0623'),
]
operations = [
migrations.AddField(
model_name='event',
name='image',
field=easy_thumbnails.fields.ThumbnailerImageField(default='event_imgs/default.jpg', upload_to='event_imgs/%Y/%m/%d/'),
),
]
| [
"[email protected]"
] | |
f11f47b153e424f308df2de51c0fff6667d742b7 | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/214bd114-5cc5-11e4-af55-00155d01fe08.py | b6a94cc57e3436b403a340fcb5162b16fe7d3c98 | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | #!/usr/bin/python
################################################################################
# 214bd114-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# [email protected]
# [email protected]
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "214bd114-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKLM:\Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings\Zones\4', '1604')
# Output Lines
self.output = [r'HKLM:\Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings\Zones\4', ('1604=' + str(dword))]
if dword == 3:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings\Zones'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings\Zones\4'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings\Zones\4' -name '1604' -value 3 -Type DWord")
| [
"[email protected]"
] | |
bdb8f58b08216a4bd2cdb0f892102352d8abb24b | f902bfafd93fe96515f5b12fa8630b365cf71ce9 | /python/ht/inline/utils.py | d7802291d80c7175ed3b05e8c473baceebd95f5b | [] | no_license | hickb/Houdini-Toolbox | 69e999a924f387313382c254de2771d66c649f64 | 5309b2608f86b12aab8e90e754269ed2d2a44653 | refs/heads/master | 2022-06-18T23:08:45.481414 | 2020-05-06T19:25:02 | 2020-05-06T19:25:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,440 | py | """Utility functions to support custom inlinecpp functions."""
# =============================================================================
# IMPORTS
# =============================================================================
# Standard Library Imports
import ctypes
# Houdini Imports
import hou
# =============================================================================
# GLOBALS
# =============================================================================
# Tuple of all valid attribute data types.
_ALL_ATTRIB_DATA_TYPES = (
hou.attribData.Float,
hou.attribData.Int,
hou.attribData.String,
)
# Tuple of all valid attribute types.
_ALL_ATTRIB_TYPES = (
hou.attribType.Global,
hou.attribType.Point,
hou.attribType.Prim,
hou.attribType.Vertex,
)
# Mapping between hou.attribData and corresponding GA_StorageClass values.
_ATTRIB_STORAGE_MAP = {
hou.attribData.Int: 0,
hou.attribData.Float: 1,
hou.attribData.String: 2,
}
# Mapping between hou.attribTypes and corresponding GA_AttributeOwner values.
_ATTRIB_TYPE_MAP = {
hou.attribType.Vertex: 0,
hou.attribType.Point: 1,
hou.attribType.Prim: 2,
hou.attribType.Global: 3,
}
# Mapping between geometry types and corresponding GA_AttributeOwner values.
_GEOMETRY_ATTRIB_MAP = {hou.Vertex: 0, hou.Point: 1, hou.Prim: 2, hou.Geometry: 3}
# Mapping between hou.geometryTypes and corresponding GA_AttributeOwner values.
_GEOMETRY_TYPE_MAP = {
hou.geometryType.Vertices: 0,
hou.geometryType.Points: 1,
hou.geometryType.Primitives: 2,
}
# Mapping between group types and corresponding GA_AttributeOwner values.
_GROUP_ATTRIB_MAP = {hou.PointGroup: 1, hou.PrimGroup: 2}
# Mapping between group types and corresponding GA_GroupType values.
_GROUP_TYPE_MAP = {hou.PointGroup: 0, hou.PrimGroup: 1, hou.EdgeGroup: 2}
# =============================================================================
# FUNCTIONS
# =============================================================================
def build_c_double_array(values):
"""Convert a list of numbers to a ctypes double array.
:param values: A list of floats.
:type values: list(float)
:return: The values as ctypes compatible values.
:rtype: list(ctypes.c_double)
"""
arr = (ctypes.c_double * len(values))(*values)
return arr
def build_c_int_array(values):
"""Convert a list of numbers to a ctypes int array.
:param values: A list of ints.
:type values: list(int)
:return: The values as ctypes compatible values.
:rtype: list(ctypes.c_int)
"""
arr = (ctypes.c_int * len(values))(*values)
return arr
def build_c_string_array(values):
"""Convert a list of strings to a ctypes char * array.
:param values: A list of strings.
:type values: list(str)
:return: The values as ctypes compatible values.
:rtype: list(ctypes.c_char_p)
"""
arr = (ctypes.c_char_p * len(values))(*values)
return arr
def clean_string_values(values):
"""Process a string list, removing empty strings.
:param values: A list of strings to clean.
:type values: list(str)
:return: A clean tuple.
:rtype: tuple(str)
"""
return tuple([val for val in values if val])
def find_attrib(geometry, attrib_type, name):
"""Find an attribute with a given name and type on the geometry.
:param geometry: The geometry to find an attribute on.
:type geometry: hou.Geometry
:param attrib_type: The attribute type.
:type attrib_type: hou.attribType.
:param name: The attribute name.
:type name: str
:return: A found attribute, otherwise None.
:rtype: hou.Attrib or None
"""
if attrib_type == hou.attribType.Vertex:
return geometry.findVertexAttrib(name)
elif attrib_type == hou.attribType.Point:
return geometry.findPointAttrib(name)
elif attrib_type == hou.attribType.Prim:
return geometry.findPrimAttrib(name)
elif attrib_type == hou.attribType.Global:
return geometry.findGlobalAttrib(name)
else:
raise ValueError("Expected hou.attribType, got {}".format(type(attrib_type)))
def find_group(geometry, group_type, name):
"""Find a group with a given name and type.
group_type corresponds to the integer returned by _get_group_type()
:param geometry: The geometry to find the group in.
:type geometry: hou.Geometry
:param group_type: The group type.
:type group_type: int
:param name: The attribute name.
:type name: str
:return: A found group.
:rtype: hou.EdgeGroup or hou.PointGroup or hou.PrimGroup
"""
if group_type == 0:
return geometry.findPointGroup(name)
elif group_type == 1:
return geometry.findPrimGroup(name)
elif group_type == 2:
return geometry.findEdgeGroup(name)
else:
raise ValueError("Invalid group type {}".format(group_type))
def geo_details_match(geometry1, geometry2):
"""Test if two hou.Geometry objects point to the same detail.
:param geometry1: A geometry detail.
:type geometry1: hou.Geometry
:param geometry2: A geometry detail.
:type geometry2: hou.Geometry
:return: Whether or not the objects represent the same detail.
:rtype: bool
"""
# pylint: disable=protected-access
handle1 = geometry1._guDetailHandle()
handle2 = geometry2._guDetailHandle()
details_match = int(handle1._asVoidPointer()) == int(handle2._asVoidPointer())
handle1.destroy()
handle2.destroy()
return details_match
def get_attrib_owner(attribute_type):
"""Get an HDK compatible attribute owner value.
:param attribute_type: The type of attribute.
:type attribute_type: hou.attribType
:return: An HDK attribute owner value.
:rtype: int
"""
try:
return _ATTRIB_TYPE_MAP[attribute_type]
except KeyError:
raise ValueError("Invalid attribute type: {}".format(attribute_type))
def get_attrib_owner_from_geometry_entity_type(entity_type):
"""Get an HDK compatible attribute owner value from a geometry class.
The type can be of hou.Geometry, hou.Point, hou.Prim (or subclasses) or hou.Vertex.
:param entity_type: The entity to get a attribute owner for.
:type entity_type: hou.Vertex or hou.Point or hou.Prim or hou.Geometry
:return: An HDK attribute owner value.
:rtype: int
"""
# If the class is a base class in the map then just return it.
try:
return _GEOMETRY_ATTRIB_MAP[entity_type]
except KeyError:
pass
# If it is not in the map then it is most likely a subclass of hou.Prim,
# such as hou.Polygon, hou.Face, hou.Volume, etc. We will check the class
# against being a subclass of any of our valid types and if it is, return
# the owner of that class.
for key, value in _GEOMETRY_ATTRIB_MAP.items():
if issubclass(entity_type, key):
return value
# Something went wrong so raise an exception.
raise ValueError("Invalid entity type: {}".format(entity_type))
def get_attrib_owner_from_geometry_type(geometry_type):
"""Get an HDK compatible attribute owner value from a hou.geometryType.
:param geometry_type: The entity to get a attribute owner for.
:type geometry_type: hou.geometryType
:return: An HDK attribute owner value.
:rtype: int
"""
# If the class is a base class in the map then just return it.
try:
return _GEOMETRY_TYPE_MAP[geometry_type]
except KeyError:
# Something went wrong so raise an exception.
raise ValueError("Invalid geometry type: {}".format(geometry_type))
def get_attrib_storage(data_type):
"""Get an HDK compatible attribute storage class value.
:param data_type: The type of data to store.
:type data_type: hou.attribData
:return: An HDK attribute storage type.
:rtype: int
"""
try:
return _ATTRIB_STORAGE_MAP[data_type]
except KeyError:
raise ValueError("Invalid data type: {}".format(data_type))
def get_group_attrib_owner(group):
"""Get an HDK compatible group attribute type value.
:param group: The group to get the attribute owner for.
:type group: hou.PointGroup or hou.PrimGroup
:return: An HDK attribute owner value.
:rtype: int
"""
try:
return _GROUP_ATTRIB_MAP[type(group)]
except KeyError:
raise ValueError("Invalid group type")
def get_group_type(group):
"""Get an HDK compatible group type value.
:param group: The group to get the group type for.
:type group: hou.EdgeGroup or hou.PointGroup or hou.PrimGroup
:return: An HDK group type value.
:rtype: int
"""
try:
return _GROUP_TYPE_MAP[type(group)]
except KeyError:
raise ValueError("Invalid group type")
def get_nodes_from_paths(paths):
"""Convert a list of string paths to hou.Node objects.
:param paths: A list of paths.
:type paths: list(str)
:return: A tuple of hou.Node objects.
:rtype: tuple(hou.Node)
"""
return tuple([hou.node(path) for path in paths if path])
def get_points_from_list(geometry, point_list):
"""Convert a list of point numbers to hou.Point objects.
:param geometry: The geometry to get points for.
:type geometry: hou.Geometry
:param point_list: A list of point numbers.
:type point_list: list(int)
:return: Matching points on the geometry.
:rtype: tuple(hou.Point)
"""
# Return a empty tuple if the point list is empty.
if not point_list:
return ()
# Convert the list of integers to a space separated string.
point_str = " ".join([str(i) for i in point_list])
# Glob for the specified points.
return geometry.globPoints(point_str)
def get_prims_from_list(geometry, prim_list):
"""Convert a list of primitive numbers to hou.Prim objects.
:param geometry: The geometry to get prims for.
:type geometry: hou.Geometry
:param prim_list: A list of prim numbers.
:type prim_list: list(int)
:return: Matching prims on the geometry.
:rtype: tuple(hou.Prim)
"""
# Return a empty tuple if the prim list is empty.
if not prim_list:
return ()
# Convert the list of integers to a space separated string.
prim_str = " ".join([str(i) for i in prim_list])
# Glob for the specified prims.
return geometry.globPrims(prim_str)
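if __name__ == "__main__":
    # Minimal self-test sketch (assumes a Houdini session, since this module
    # imports `hou` at load time); only the pure-ctypes helpers are exercised.
    print(list(build_c_int_array([1, 2, 3])))      # [1, 2, 3]
    print(list(build_c_double_array([0.5, 1.5])))  # [0.5, 1.5]
    print(clean_string_values(["a", "", "b"]))     # ('a', 'b')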
| [
"[email protected]"
] | |
b8cec1ad804460eb9ff1f586909b5ef9f9b0f4bc | 813eb2e364262450d43263a524074098a8934bf1 | /gtdtst.py | 9d9fa734bab2f939323fd8d55ac1555b71016cfc | [] | no_license | cniekel/gtdtool | 67f0e9ab4622f5361959ed51e4369523f6cbccc3 | 38a23806efd697814c9ebc492d87f88403ace041 | refs/heads/master | 2016-09-06T16:15:31.820358 | 2008-12-11T21:43:46 | 2008-12-11T21:43:46 | 88,992 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,419 | py | from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Frame, PageTemplate, BaseDocTemplate
from reportlab.platypus import KeepTogether
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch,cm
from reportlab.lib.pagesizes import A4
PAGE_HEIGHT=defaultPageSize[1]; PAGE_WIDTH=defaultPageSize[0]
styles = getSampleStyleSheet()
class GTDPageTemplate(PageTemplate):
def __init__(self, id, pageSize=defaultPageSize):
self.pageWidth = pageSize[0]
self.pageHeight = pageSize[1]
border = 0.3*cm
miniwidth = (self.pageWidth / 2) - 2*border
miniheight = (self.pageHeight / 2) - 2*border
#self.f4 = Frame(border, border, miniwidth, miniheight, id='p4')
#self.f1 = Frame(self.pageWidth/2 + border, border, miniwidth, miniheight, id='p1')
#self.f2 = Frame(self.pageWidth/2 + border, self.pageHeight/2 + border, miniwidth, miniheight, id='p2')
#self.f3 = Frame(border, self.pageHeight/2 + border, miniwidth, miniheight, id='p3')
self.f4 = Frame(0, 0, miniwidth, miniheight, id='p4')
self.f1 = Frame(miniwidth, 0, miniwidth, miniheight, id='p1')
self.f2 = Frame(miniwidth, miniheight, miniwidth, miniheight, id='p2')
self.f3 = Frame(0, miniheight, miniwidth, miniheight, id='p3')
PageTemplate.__init__(self, id, [self.f1, self.f2, self.f3, self.f4])
class GTDDocTemplate(BaseDocTemplate):
def __init__(self, file, **kw):
BaseDocTemplate.__init__(self,file, **kw)
def myPages(canvas, doc):
canvas.saveState()
canvas.setFont('Times-Roman', 9)
        canvas.drawString(inch, 0.75*inch, 'Page %d %s' % (doc.page, 'test'))
def go():
    doc = GTDDocTemplate('phello.pdf', pagesize=A4)
doc.addPageTemplates(GTDPageTemplate('gtd', doc.pagesize))
Story=[]
style = styles['Normal']
style.leftIndent = 0.75*inch
style.firstLineIndent = 0
style.spaceAfter = 3
style.refresh()
style.listAttrs()
for i in range(8):
bogustext = ('This is <br>paragraph <font color="red"><i>number</i></font> %s' % i) * 20
p = KeepTogether([Paragraph('Yo %d'%i, style), Paragraph(bogustext, style)])
Story.append(p)
Story.append(Spacer(1, 0.2*inch))
doc.build(Story)#, onFirstPage=myPages, onLaterPages=myPages)
if __name__ == '__main__':
go()
| [
"="
] | = |
a02e2b2f451539a813e61106eaedd8268f16b09b | deb3c16ef887b6c496b8c920809d79b9f73aa2fe | /hosts/9cartoonme.py | bf89b8ff48e5e12ae354fae05d306d97a0c9da5c | [] | no_license | Yaser7440/cmdline_iptvplayer | 1ea35f4fd36c708176a43d402a49342c4cf723a5 | 4e287021d86cab8d6525262b647d144c6141d6b1 | refs/heads/master | 2021-01-24T10:49:29.278730 | 2016-09-21T09:24:26 | 2016-09-21T09:24:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,520 | py | # -*- coding: utf-8 -*-
###################################################
# LOCAL import
###################################################
from Plugins.Extensions.IPTVPlayer.dToolsSet.iptvplayerinit import TranslateTXT as _, SetIPTVPlayerLastHostError
from Plugins.Extensions.IPTVPlayer.components.ihost import CHostBase, CBaseHostClass, CDisplayListItem, RetHost, CUrlItem, ArticleContent
from Plugins.Extensions.IPTVPlayer.dToolsSet.iptvtools import printDBG, printExc, CSearchHistoryHelper, GetDefaultLang, remove_html_markup, GetLogoDir, GetCookieDir, byteify
from Plugins.Extensions.IPTVPlayer.libs.pCommon import common, CParsingHelper
import Plugins.Extensions.IPTVPlayer.libs.urlparser as urlparser
from Plugins.Extensions.IPTVPlayer.libs.youtube_dl.utils import clean_html
from Plugins.Extensions.IPTVPlayer.tools.iptvtypes import strwithmeta
###################################################
###################################################
# FOREIGN import
###################################################
from datetime import timedelta
import time
import re
import urllib
import urlparse
import unicodedata
import string
import base64
try: import json
except Exception: import simplejson as json
from Components.config import config, ConfigSelection, ConfigYesNo, ConfigText, getConfigListEntry
###################################################
###################################################
# E2 GUI COMMPONENTS
###################################################
from Plugins.Extensions.IPTVPlayer.components.asynccall import MainSessionWrapper
###################################################
###################################################
# Config options for HOST
###################################################
#config.plugins.iptvplayer.CartoonME_language = ConfigSelection(default = "en", choices = [("en", _("English")), ("es", _("Spanish"))])
def GetConfigList():
optionList = []
#optionList.append(getConfigListEntry(_("Language:"), config.plugins.iptvplayer.CartoonME_language))
return optionList
###################################################
def gettytul():
return 'http://9cartoon.me/'
class CartoonME(CBaseHostClass):
USER_AGENT = 'curl/7'
def __init__(self):
CBaseHostClass.__init__(self, {'history':'9cartoon.me', 'cookie':'CartoonME.cookie'})
self.defaultParams = {'use_cookie': True, 'load_cookie': True, 'save_cookie': True, 'cookiefile': self.COOKIE_FILE}
self.MAIN_URL = 'http://9cartoon.me/'
self.SEARCH_URL = self.MAIN_URL +'Search?s='
self.DEFAULT_ICON = "http://www.siwallpaperhd.com/wp-content/uploads/2015/07/spongebob_squarepants_face_wallpaper_hd_8_cartoon-724x453.png"
self.MAIN_CAT_TAB = [{'category':'list_types', 'title': _('Cartoon list'), 'url':self.MAIN_URL+'CartoonList', 'icon':self.DEFAULT_ICON},
{'category':'categories', 'title': _('Genres'), 'url':self.MAIN_URL, 'icon':self.DEFAULT_ICON},
{'category':'search', 'title': _('Search'), 'search_item':True, 'icon':self.DEFAULT_ICON},
{'category':'search_history', 'title': _('Search history'), 'icon':self.DEFAULT_ICON} ]
def getPage(self, baseUrl, params={}, post_data=None):
if params == {}: params = dict(self.defaultParams)
params['cloudflare_params'] = {'domain':'9cartoon.me', 'cookie_file':self.COOKIE_FILE, 'User-Agent':self.USER_AGENT, 'full_url_handle':self.getFullUrl}
return self.cm.getPageCFProtection(baseUrl, params, post_data)
def getIconUrl(self, url):
url = self.getFullUrl(url)
if url == '': return ''
cookieHeader = self.cm.getCookieHeader(self.COOKIE_FILE)
return strwithmeta(url, {'Cookie':cookieHeader, 'User-Agent':self.USER_AGENT})
def listCategories(self, cItem, nextCategory):
printDBG("CartoonME.listCategories")
sts, data = self.getPage(cItem['url'])
if not sts: return
tab = []
data = self.cm.ph.getDataBeetwenMarkers(data, 'genre right">', '</ul>', False)[1]
data = re.compile('''<a[^>]+?href=['"]([^'^"]+?)['"][^>]*?>([^<]+?)<''').findall(data)
for item in data:
title = self.cleanHtmlStr(item[1])
url = self.getFullUrl(item[0])
tab.append({'title':title, 'url':url})
cItem = dict(cItem)
cItem['category'] = nextCategory
self.listsTab(tab, cItem)
def listCartoonListTypes(self, cItem, nextCategory):
printDBG("CartoonME.listCartoonListTypes")
sts, data = self.getPage(cItem['url'])
if not sts: return
tab = []
data = self.cm.ph.getDataBeetwenMarkers(data, '<div class="list_search">', '</ul>', False)[1]
data = re.compile('''<a[^>]+?href=['"]([^'^"]+?)['"][^>]*?>([^<]+?)<''').findall(data)
for item in data:
title = self.cleanHtmlStr(item[1])
url = self.getFullUrl(item[0])
tab.append({'title':title, 'url':url})
cItem = dict(cItem)
cItem['category'] = nextCategory
self.listsTab(tab, cItem)
def listLetters(self, cItem, nextCategory):
printDBG("CartoonME.listLetters")
sts, data = self.getPage(cItem['url'])
if not sts: return
tab = []
data = self.cm.ph.rgetDataBeetwenMarkers(data, '<div class="list_search">', '<div class="anime_list_body">', False)[1]
data = re.compile('''<a[^>]+?href=['"]([^'^"]+?)['"][^>]*?>([^<]+?)<''').findall(data)
for item in data:
title = self.cleanHtmlStr(item[1])
url = self.getFullUrl(item[0])
tab.append({'title':title, 'url':url})
cItem = dict(cItem)
cItem['category'] = nextCategory
self.listsTab(tab, cItem)
def listItems1(self, cItem, nextCategory):
printDBG("CartoonME.listItems1")
page = cItem.get('page', 1)
url = cItem['url']
tmp = url.split('?')
url = tmp[0]
if len(tmp)>1: query = tmp[1]
else: query = ''
if not url.endswith('/'): url += '/'
if page > 1: url += '?page=%d&' % page
else: url += '?'
if query != '': url += query
sts, data = self.getPage(url)
if not sts: return
nextPage = self.cm.ph.getDataBeetwenMarkers(data, '<div class="pagination">', '<!-- end pager -->', False)[1]
if ('>%d<' % (page+1)) in nextPage: nextPage = True
else: nextPage = False
m1 = '<div class="anime_movies_items">'
data = self.cm.ph.getDataBeetwenMarkers(data, m1, '</section>', False)[1]
data = data.split(m1)
for item in data:
icon = self.getFullUrl( self.cm.ph.getSearchGroups(item, '''src=['"]([^"^']+?)['"]''')[0] )
url = self.getFullUrl( self.cm.ph.getSearchGroups(item, '''href=['"]([^'^"]+?)['"]''')[0] )
title = self.cleanHtmlStr( self.cm.ph.getDataBeetwenMarkers(item, '<p class="name">', '</p>', True)[1] )
if title == '': title = self.cleanHtmlStr( self.cm.ph.getSearchGroups(item, '''title=['"]([^'^"]+?)['"]''')[0] )
if url.startswith('http'):
params = dict(cItem)
params.update({'title':title, 'url':url, 'icon':icon})
if '/WATCH/' in url.upper():
self.addVideo(params)
else:
params['category'] = nextCategory
self.addDir(params)
if nextPage:
params = dict(cItem)
params.update({'title':_('Next page'), 'page':page+1})
self.addDir(params)
def listItems2(self, cItem, nextCategory):
printDBG("CartoonME.listItems2")
page = cItem.get('page', 1)
url = cItem['url']
tmp = url.split('?')
url = tmp[0]
if len(tmp)>1: query = tmp[1]
else: query = ''
if not url.endswith('/'): url += '/'
if page > 1: url += '?page=%d&' % page
else: url += '?'
if query != '': url += query
sts, data = self.getPage(url)
if not sts: return
nextPage = self.cm.ph.getDataBeetwenMarkers(data, '<div class="pagination">', '<!-- end pager -->', False)[1]
if ('>%d<' % (page+1)) in nextPage: nextPage = True
else: nextPage = False
data = self.cm.ph.getDataBeetwenMarkers(data, '<div class="anime_list_body">', '</ul>', False)[1]
data = self.cm.ph.getAllItemsBeetwenMarkers(data, '<li', '</li>')
for item in data:
icon = self.getFullUrl( self.cm.ph.getSearchGroups(item, '''src=['"]([^"^']+?)['"]''')[0] )
tmp = self.cm.ph.rgetDataBeetwenMarkers(item, '<a', '</a>')[1]
url = self.getFullUrl( self.cm.ph.getSearchGroups(tmp, '''href=['"]([^'^"]+?)['"]''')[0] )
title = self.cleanHtmlStr( tmp )
desc = self.cleanHtmlStr( self.cm.ph.getSearchGroups(item, '''title=[']([^']+?)[']''')[0] )
if url.startswith('http'):
params = dict(cItem)
params.update({'title':title, 'url':url, 'icon':icon, 'desc':desc})
if '/WATCH/' in url.upper():
self.addVideo(params)
else:
params['category'] = nextCategory
self.addDir(params)
if nextPage:
params = dict(cItem)
params.update({'title':_('Next page'), 'page':page+1})
self.addDir(params)
def listEpisodes(self, cItem):
printDBG("CartoonME.listEpisodes")
sts, data = self.getPage(cItem['url'])
if not sts: return
data = self.cm.ph.getDataBeetwenMarkers(data, '<div class="anime_info_body">', '</ul>', False)[1]
data = data.split('<ul id="episode_related">')
if len(data) < 2: return
icon = self.getFullUrl( self.cm.ph.getSearchGroups(data[0], '''src=['"]([^"^']+?)['"]''')[0] )
if icon == '': icon = cItem.get('icon', '')
title = self.cleanHtmlStr( self.cm.ph.getDataBeetwenMarkers(data[0], '<h1', '</h1>', True)[1] )
if title == '': title = cItem['title']
desc = self.cleanHtmlStr( data[0].split('</h1>')[-1] )
data = self.cm.ph.getAllItemsBeetwenMarkers(data[1], '<li', '</li>')
for item in data:
eTitle = self.cleanHtmlStr(self.cleanHtmlStr(item))
videoUrl = self.getFullUrl( self.cm.ph.getSearchGroups(item, '''href=['"]([^"^']+?)['"]''', 1, True)[0] )
if not videoUrl.startswith('http'): continue
params = dict(cItem)
params.update({'title':title + ' ' + eTitle, 'url':videoUrl, 'icon':icon, 'desc':desc})
self.addVideo(params)
def getLinksForVideo(self, cItem):
printDBG("CartoonME.getLinksForVideo [%s]" % cItem)
urlTab = []
if '9cartoon.me' not in cItem['url']:
return [{'name':'', 'url':cItem['url'], 'need_resolve':1}]
sts, data = self.getPage(cItem['url'])
if not sts: return []
data = self.cm.ph.getDataBeetwenMarkers(data, '#player_load', 'success', False)[1]
url = self.cm.ph.getSearchGroups(data, '''url: ['"]([^'^"]+?)['"]''')[0]
post_data = self.cm.ph.getSearchGroups(data, '''data: ['"]([^"^']+?)['"]''')[0]
sts, data = self.getPage(url, {'raw_post_data':True, 'header':{'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8', 'X-Requested-With':'XMLHttpRequest'}}, post_data)
if not sts: return []
printDBG(data)
tmp = self.cm.ph.getDataBeetwenMarkers(data, 'Download', '<script ', False)[1]
tmp = re.compile('''<a[^>]+?href=['"](http[^"^']+?)['"][^>]*?>([^<]+?)<''').findall(tmp)
for item in tmp:
urlTab.append({'name':item[1], 'url':item[0], 'need_resolve':1})
if 0 == len(urlTab):
tmp = self.cm.ph.getDataBeetwenMarkers(data, 'player', '</script>', False)[1]
if 'docid=' in tmp:
docid = self.cm.ph.getSearchGroups(tmp, '''['"]([a-zA-Z0-9_-]{28})['"]''')[0]
if docid != '':
url = 'https://video.google.com/get_player?docid=%s&authuser=&eurl=%s' % (docid, urllib.quote(cItem['url']))
urlTab.append({'name':'video.google.com', 'url':url, 'need_resolve':1})
return urlTab
def getVideoLinks(self, videoUrl):
printDBG("CartoonME.getVideoLinks [%s]" % videoUrl)
urlTab = []
if '/vload/' in videoUrl or 'redirector.googlevideo.com' in videoUrl:
header = {'Referer':videoUrl, 'User-Agent':self.USER_AGENT, 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language':'pl,en-US;q=0.7,en;q=0.3', 'Accept-Encoding':'gzip, deflate'}
params= {'return_data':False, 'use_cookie': True, 'save_cookie': True, 'load_cookie': True, 'cookiefile': self.COOKIE_FILE, 'header':header}
try:
sts, response = self.cm.getPage(videoUrl, params)
url = response.geturl()
response.close()
urlTab.append({'name':'', 'url':url, 'need_resolve':0})
except Exception:
printExc()
elif videoUrl.startswith('http'):
urlTab = self.up.getVideoLinkExt(videoUrl)
return urlTab
def listSearchResult(self, cItem, searchPattern, searchType):
printDBG("CartoonME.listSearchResult cItem[%s], searchPattern[%s] searchType[%s]" % (cItem, searchPattern, searchType))
page = cItem.get('page', 1)
cItem = dict(cItem)
cItem['url'] = self.SEARCH_URL + urllib.quote_plus(searchPattern)
self.listItems1(cItem, 'list_episodes')
if 0 == len(self.currList):
self.listEpisodes(cItem)
def handleService(self, index, refresh = 0, searchPattern = '', searchType = ''):
printDBG('handleService start')
CBaseHostClass.handleService(self, index, refresh, searchPattern, searchType)
name = self.currItem.get("name", '')
category = self.currItem.get("category", '')
mode = self.currItem.get("mode", '')
printDBG( "handleService: |||||||||||||||||||||||||||||||||||| name[%s], category[%s] " % (name, category) )
self.currList = []
#MAIN MENU
if name == None:
self.listsTab(self.MAIN_CAT_TAB, {'name':'category'})
elif category == 'categories':
self.listCategories(self.currItem, 'list_items_1')
if category == 'list_items_1':
self.listItems1(self.currItem, 'list_episodes')
elif category == 'list_types':
self.listCartoonListTypes(self.currItem, 'list_letters')
elif category == 'list_letters':
self.listLetters(self.currItem, 'list_items_2')
if category == 'list_items_2':
self.listItems2(self.currItem, 'list_episodes')
if category == 'list_episodes':
self.listEpisodes(self.currItem)
#SEARCH
elif category in ["search", "search_next_page"]:
cItem = dict(self.currItem)
cItem.update({'search_item':False, 'name':'category'})
self.listSearchResult(cItem, searchPattern, searchType)
#HISTORIA SEARCH
elif category == "search_history":
self.listsHistory({'name':'history', 'category': 'search'}, 'desc', _("Type: "))
else:
printExc()
CBaseHostClass.endHandleService(self, index, refresh)
class IPTVHost(CHostBase):
def __init__(self):
# for now we must disable favourites due to problem with links extraction for types other than movie
CHostBase.__init__(self, CartoonME(), True, favouriteTypes=[]) #, [CDisplayListItem.TYPE_VIDEO, CDisplayListItem.TYPE_AUDIO])
def getLinksForVideo(self, Index = 0, selItem = None):
retCode = RetHost.ERROR
retlist = []
if not self.isValidIndex(Index): return RetHost(retCode, value=retlist)
urlList = self.host.getLinksForVideo(self.host.currList[Index])
for item in urlList:
retlist.append(CUrlItem(item["name"], item["url"], item['need_resolve']))
return RetHost(RetHost.OK, value = retlist)
# end getLinksForVideo
def getResolvedURL(self, url):
# resolve url to get direct url to video file
retlist = []
urlList = self.host.getVideoLinks(url)
for item in urlList:
need_resolve = 0
retlist.append(CUrlItem(item["name"], item["url"], need_resolve))
return RetHost(RetHost.OK, value = retlist)
#def getArticleContent(self, Index = 0):
# retCode = RetHost.ERROR
# retlist = []
# if not self.isValidIndex(Index): return RetHost(retCode, value=retlist)
#
# hList = self.host.getArticleContent(self.host.currList[Index])
# for item in hList:
# title = item.get('title', '')
# text = item.get('text', '')
# images = item.get("images", [])
# othersInfo = item.get('other_info', '')
# retlist.append( ArticleContent(title = title, text = text, images = images, richDescParams = othersInfo) )
# return RetHost(RetHost.OK, value = retlist)
def converItem(self, cItem):
hostList = []
searchTypesOptions = [] # ustawione alfabetycznie
#searchTypesOptions.append((_("Movies"), "movie"))
#searchTypesOptions.append((_("TV Shows"), "tv_shows"))
hostLinks = []
type = CDisplayListItem.TYPE_UNKNOWN
possibleTypesOfSearch = None
if 'category' == cItem['type']:
if cItem.get('search_item', False):
type = CDisplayListItem.TYPE_SEARCH
possibleTypesOfSearch = searchTypesOptions
else:
type = CDisplayListItem.TYPE_CATEGORY
elif cItem['type'] == 'video':
type = CDisplayListItem.TYPE_VIDEO
elif 'more' == cItem['type']:
type = CDisplayListItem.TYPE_MORE
elif 'audio' == cItem['type']:
type = CDisplayListItem.TYPE_AUDIO
if type in [CDisplayListItem.TYPE_AUDIO, CDisplayListItem.TYPE_VIDEO]:
url = cItem.get('url', '')
if '' != url:
hostLinks.append(CUrlItem("Link", url, 1))
title = cItem.get('title', '')
description = cItem.get('desc', '')
icon = self.host.getIconUrl( cItem.get('icon', '') )
return CDisplayListItem(name = title,
description = description,
type = type,
urlItems = hostLinks,
urlSeparateRequest = 1,
iconimage = icon,
possibleTypesOfSearch = possibleTypesOfSearch)
# end converItem
| [
"[email protected]"
] | |
d96e3355e5e62406b5e4338cbe35b7eaa929bbac | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/48/usersdata/102/16157/submittedfiles/estatistica.py | 09d01593ac5da824c14d1683c4b56411be6a6d17 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | # -*- coding: utf-8 -*-
from __future__ import division
def media(lista):
soma = 0
for i in range(0,len(lista),1):
soma = soma + lista[i]
media = soma/len(lista)
return media
#Based on the function above, write a function that computes the standard deviation of a list
a=[]
b=[]
n=int(input('enter the number of elements: '))
for i in range(0,n,1):
    a.append(float(input('enter an element of the first list: ')))
for i in range(0,n,1):
    b.append(float(input('enter an element of the second list: ')))
media_a=media(a)
media_b=media(b)
def desviopadrao(lista):
    soma=0
    for i in range(0,len(lista),1):
        soma=soma+(lista[i]-media(lista))**2
    s=((1/(len(lista)-1))*soma)**(1/2)
    return s
s_a=desviopadrao(a)
s_b=desviopadrao(b)
print('%.2f' %media_a)
print('%.2f' %s_a)
print('%.2f' %media_b)
print('%.2f' %s_b)
#Finally, write the main program that reads the input and calls the functions created above. | [
"[email protected]"
] | |
13cb23ca4016737d8fd059ed257d80f4cb853250 | 6be845bf70a8efaf390da28c811c52b35bf9e475 | /windows/Resources/Python/Core/Lib/multiprocessing/forking.py | 877cae88752b18f508c4d539767e20190edbd9dd | [] | no_license | kyeremalprime/ms | 228194910bf2ed314d0492bc423cc687144bb459 | 47eea098ec735b2173ff0d4e5c493cb8f04e705d | refs/heads/master | 2020-12-30T15:54:17.843982 | 2017-05-14T07:32:01 | 2017-05-14T07:32:01 | 91,180,709 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 12,561 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: forking.py
import os
import sys
import signal
from multiprocessing import util, process
__all__ = [
'Popen', 'assert_spawning', 'exit', 'duplicate', 'close', 'ForkingPickler']
def assert_spawning(self):
if not Popen.thread_is_spawning():
raise RuntimeError('%s objects should only be shared between processes through inheritance' % type(self).__name__)
from pickle import Pickler
class ForkingPickler(Pickler):
dispatch = Pickler.dispatch.copy()
@classmethod
def register(cls, type, reduce):
def dispatcher(self, obj):
rv = reduce(obj)
self.save_reduce(obj=obj, *rv)
cls.dispatch[type] = dispatcher
def _reduce_method(m):
if m.im_self is None:
return (getattr, (m.im_class, m.im_func.func_name))
else:
return (
getattr, (m.im_self, m.im_func.func_name))
return
ForkingPickler.register(type(ForkingPickler.save), _reduce_method)
def _reduce_method_descriptor(m):
return (
getattr, (m.__objclass__, m.__name__))
ForkingPickler.register(type(list.append), _reduce_method_descriptor)
ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)
try:
from functools import partial
except ImportError:
pass
else:
def _reduce_partial(p):
return (_rebuild_partial, (p.func, p.args, p.keywords or {}))
def _rebuild_partial(func, args, keywords):
return partial(func, *args, **keywords)
ForkingPickler.register(partial, _reduce_partial)
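    # Sketch: with this reducer registered, partial objects survive pickling
    # into spawned children, e.g.
    #   p = partial(int, base=2)
    #   _rebuild_partial(*_reduce_partial(p)[1])('101')  ->  5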
if sys.platform != 'win32':
import time
exit = os._exit
duplicate = os.dup
close = os.close
class Popen(object):
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self.pid = os.fork()
if self.pid == 0:
if 'random' in sys.modules:
import random
random.seed()
code = process_obj._bootstrap()
sys.stdout.flush()
sys.stderr.flush()
os._exit(code)
return
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
try:
pid, sts = os.waitpid(self.pid, flag)
except os.error:
return
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if timeout is None:
return self.poll(0)
else:
deadline = time.time() + timeout
delay = 0.0005
while 1:
res = self.poll()
if res is not None:
break
remaining = deadline - time.time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, 0.05)
time.sleep(delay)
return res
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError as e:
if self.wait(timeout=0.1) is None:
raise
return
@staticmethod
def thread_is_spawning():
return False
else:
import thread
import msvcrt
import _subprocess
import time
from _multiprocessing import win32, Connection, PipeConnection
from .util import Finalize
from pickle import load, HIGHEST_PROTOCOL
def dump(obj, file, protocol=None):
ForkingPickler(file, protocol).dump(obj)
TERMINATE = 65536
WINEXE = sys.platform == 'win32' and getattr(sys, 'frozen', False)
WINSERVICE = sys.executable.lower().endswith('pythonservice.exe')
exit = win32.ExitProcess
close = win32.CloseHandle
if WINSERVICE:
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def set_executable(exe):
global _python_exe
_python_exe = exe
def duplicate(handle, target_process=None, inheritable=False):
if target_process is None:
target_process = _subprocess.GetCurrentProcess()
return _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(), handle, target_process, 0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS).Detach()
class Popen(object):
"""
Start a subprocess to run the code of a process object
"""
_tls = thread._local()
def __init__(self, process_obj):
rfd, wfd = os.pipe()
rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
os.close(rfd)
cmd = get_command_line() + [rhandle]
cmd = ' '.join(('"%s"' % x for x in cmd))
hp, ht, pid, tid = _subprocess.CreateProcess(_python_exe, cmd, None, None, 1, 0, None, None, None)
ht.Close()
close(rhandle)
self.pid = pid
self.returncode = None
self._handle = hp
prep_data = get_preparation_data(process_obj._name)
to_child = os.fdopen(wfd, 'wb')
Popen._tls.process_handle = int(hp)
try:
dump(prep_data, to_child, HIGHEST_PROTOCOL)
dump(process_obj, to_child, HIGHEST_PROTOCOL)
finally:
del Popen._tls.process_handle
to_child.close()
return
@staticmethod
def thread_is_spawning():
return getattr(Popen._tls, 'process_handle', None) is not None
@staticmethod
def duplicate_for_child(handle):
return duplicate(handle, Popen._tls.process_handle)
def wait(self, timeout=None):
if self.returncode is None:
if timeout is None:
msecs = _subprocess.INFINITE
else:
msecs = max(0, int(timeout * 1000 + 0.5))
res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
if res == _subprocess.WAIT_OBJECT_0:
code = _subprocess.GetExitCodeProcess(self._handle)
if code == TERMINATE:
code = -signal.SIGTERM
self.returncode = code
return self.returncode
def poll(self):
return self.wait(timeout=0)
def terminate(self):
if self.returncode is None:
try:
_subprocess.TerminateProcess(int(self._handle), TERMINATE)
except WindowsError:
if self.wait(timeout=0.1) is None:
raise
return
def is_forking(argv):
"""
Return whether commandline indicates we are forking
"""
if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
return True
else:
return False
def freeze_support():
"""
Run code for process object if this in not the main process
"""
if is_forking(sys.argv):
main()
sys.exit()
def get_command_line():
"""
Returns prefix of command line used for spawning a child process
"""
if process.current_process()._identity == () and is_forking(sys.argv):
raise RuntimeError('\n Attempt to start a new process before the current process\n has finished its bootstrapping phase.\n\n This probably means that you are on Windows and you have\n forgotten to use the proper idiom in the main module:\n\n if __name__ == \'__main__\':\n freeze_support()\n ...\n\n The "freeze_support()" line can be omitted if the program\n is not going to be frozen to produce a Windows executable.')
if getattr(sys, 'frozen', False):
return [sys.executable, '--multiprocessing-fork']
else:
prog = 'from multiprocessing.forking import main; main()'
return [
_python_exe, '-c', prog, '--multiprocessing-fork']
def main():
"""
Run code specifed by data received over pipe
"""
handle = int(sys.argv[-1])
fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
from_parent = os.fdopen(fd, 'rb')
process.current_process()._inheriting = True
preparation_data = load(from_parent)
prepare(preparation_data)
self = load(from_parent)
process.current_process()._inheriting = False
from_parent.close()
exitcode = self._bootstrap()
exit(exitcode)
def get_preparation_data(name):
"""
Return info about parent needed by child to unpickle process object
"""
from .util import _logger, _log_to_stderr
d = dict(name=name, sys_path=sys.path, sys_argv=sys.argv, log_to_stderr=_log_to_stderr, orig_dir=process.ORIGINAL_DIR, authkey=process.current_process().authkey)
if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()
if not WINEXE and not WINSERVICE:
main_path = getattr(sys.modules['__main__'], '__file__', None)
if not main_path and sys.argv[0] not in ('', '-c'):
main_path = sys.argv[0]
if main_path is not None:
if not os.path.isabs(main_path) and process.ORIGINAL_DIR is not None:
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['main_path'] = os.path.normpath(main_path)
return d
def reduce_connection(conn):
if not Popen.thread_is_spawning():
raise RuntimeError('By default %s objects can only be shared between processes\nusing inheritance' % type(conn).__name__)
return (
type(conn),
(Popen.duplicate_for_child(conn.fileno()),
conn.readable, conn.writable))
ForkingPickler.register(Connection, reduce_connection)
ForkingPickler.register(PipeConnection, reduce_connection)
old_main_modules = []
def prepare(data):
"""
Try to get current process ready to unpickle process object
"""
old_main_modules.append(sys.modules['__main__'])
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process()._authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'main_path' in data:
main_path = data['main_path']
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
main_name = os.path.basename(os.path.dirname(main_path))
if main_name != 'ipython':
import imp
if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [
os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [
os.path.dirname(main_path)]
file, path_name, etc = imp.find_module(main_name, dirs)
try:
main_module = imp.load_module('__parents_main__', file, path_name, etc)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
for obj in main_module.__dict__.values():
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except Exception:
pass
return | [
"[email protected]"
] | |
dfd560b021f274cf620269da02b8991453c27075 | cf6a50732d708a3a3db0f297b73cb6f449a00b44 | /Practice15_Dict_Comprehensions/Prac_15_3_4lists.py | 08394fddeab789aa6f4927a312461f0c8229a29e | [] | no_license | subash319/PythonDepth | 9fe3920f4b0a25be02a9abbeeb60976853ab812e | 0de840b7776009e8e4362d059af14afaac6a8879 | refs/heads/master | 2022-11-16T03:03:56.874422 | 2020-07-17T01:19:39 | 2020-07-17T01:19:39 | 266,921,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | #
# names = ['Ted', 'Sam', 'Jim', 'Rob', 'Anu']
# maths = [98,67,54,88,95]
# physics = [88,64,78,99,78]
# chemistry = [78,67,45,79,87]
# These 4 lists contain the names and marks of students in 3 subjects.
# Write a dictionary comprehension to create the following dictionary from the above 4 lists.
# { 'Ted': [98, 88, 78],
# 'Sam': [67, 64, 67],
# 'Jim': [54, 78, 45],
# 'Rob': [88, 99, 79],
# 'Anu': [95, 78, 87] }
names = ['Ted', 'Sam', 'Jim', 'Rob', 'Anu']
maths = [98, 67, 54, 88, 95]
physics = [88, 64, 78, 99, 78]
chemistry = [78, 67, 45, 79, 87]
dict_list = {name : [math_marks, physics_marks, chemistry_marks] for name, math_marks, physics_marks, chemistry_marks in
zip(names, maths, physics, chemistry)}
print(dict_list) | [
"[email protected]"
] | |
e8671cf5ba3ec2529608a9c484cbb899180c4e45 | 439c87c48c6c2c812d1faca73cbf1b632e9403dc | /DAYS/Day23/Frequency_can_become_same.py | e3d4dbf245a9234ce1844db16bb8f4940835985b | [
"MIT"
] | permissive | saubhagyav/100_Days_Code_Challenge | 14ca8db68e09c7ac7741f164fea8b62cb36bf2c0 | bde41126b9342eacc488c79d01dc4b76a3651c93 | refs/heads/main | 2023-08-05T03:12:18.918079 | 2021-09-12T12:20:41 | 2021-09-12T12:20:41 | 389,375,066 | 2 | 2 | null | 2021-07-25T15:06:18 | 2021-07-25T15:06:17 | null | UTF-8 | Python | false | false | 416 | py | from collections import Counter
def Check_Frequency(Test_string):
Test_dict = Counter(Test_string)
Result = list(set(Test_dict.values()))
if len(Result) > 2:
return False
elif len(Result) == 2 and Result[1]-Result[0] > 1:
return False
else:
return True
Test_string = "xxxyyzz"
if Check_Frequency(Test_string):
print("Yes")
else:
print("No")
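# A few checks of the corrected logic (expected results worked out by hand):
#   "xxxyyzz"   -> True  (drop one 'x' so every count becomes 2)
#   "aabbcc"    -> True  (counts already equal)
#   "aabbbccc"  -> False (no single removal equalises the counts)
#   "aaab"      -> True  (drop the lone 'b')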
| [
"[email protected]"
] | |
960ac25a67fbf72976ab75e6b8dbf05281553012 | 758bf41e46a3093f4923af603f1f7f8063408b9c | /website/testFromRemoteRepo/_bsch3398/museum/python/django/contrib/gis/gdal/__init__.py | 7c3a03b42b7bc94fdbb03cfa9c6a54e206c549f2 | [
"BSD-3-Clause"
] | permissive | mpetyx/mpetyx.com | 4033d97b21c9227a6ba505980fd0c1b57254e8fb | d50c379b4fe09e0135656573f7049225fc90ae36 | refs/heads/master | 2021-01-10T19:50:15.488371 | 2014-01-22T09:04:14 | 2014-01-22T09:04:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,022 | py | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existant file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.datasource import DataSource
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, gdal_release_date, GEOJSON, GDAL_VERSION
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
from django.contrib.gis.gdal.geometries import OGRGeometry
HAS_GDAL = True
except:
HAS_GDAL, GEOJSON = False, False
try:
from django.contrib.gis.gdal.envelope import Envelope
except ImportError:
# No ctypes, but don't raise an exception.
pass
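# Usage sketch (requires GDAL; only valid when HAS_GDAL is True):
#   from django.contrib.gis.gdal import SpatialReference, CoordTransform
#   wgs84 = SpatialReference('WGS84')
#   mercator = SpatialReference(3857)   # EPSG code
#   ct = CoordTransform(wgs84, mercator)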
| [
"[email protected]"
] | |
ff60e6952245ffd8f266b0eaf56ff16ceb153997 | d99ac626d62c663704444a9cce7e7fc793a9e75e | /crypto_implementations/virgil-crypto-c/wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_round5.py | afe300c8a71a0d27ca50895ef9ae32e159be90fd | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Experiment5X/CryptoFunctionDetection | 3ab32d5573a249d24db1faf772721bc80b8d905d | dac700193e7e84963943593e36844b173211a8a1 | refs/heads/master | 2023-04-19T09:12:35.828268 | 2021-05-13T22:39:27 | 2021-05-13T22:39:27 | 355,299,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,073 | py | # Copyright (C) 2015-2020 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <[email protected]>
from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_impl import vscf_impl_t
from ._vscf_error import vscf_error_t
from ._vscf_raw_public_key import vscf_raw_public_key_t
from ._vscf_raw_private_key import vscf_raw_private_key_t
from virgil_crypto_lib.common._c_bridge import vsc_buffer_t
from virgil_crypto_lib.common._c_bridge import vsc_data_t
class vscf_round5_t(Structure):
pass
class VscfRound5(object):
"""Provide post-quantum encryption based on the round5 implementation.
For algorithm details check https://github.com/round5/code"""
# Defines whether a public key can be imported or not.
CAN_IMPORT_PUBLIC_KEY = True
# Define whether a public key can be exported or not.
CAN_EXPORT_PUBLIC_KEY = True
# Define whether a private key can be imported or not.
CAN_IMPORT_PRIVATE_KEY = True
# Define whether a private key can be exported or not.
CAN_EXPORT_PRIVATE_KEY = True
def __init__(self):
"""Create underlying C context."""
self._ll = LowLevelLibs()
self._lib = self._ll.foundation
def vscf_round5_new(self):
vscf_round5_new = self._lib.vscf_round5_new
vscf_round5_new.argtypes = []
vscf_round5_new.restype = POINTER(vscf_round5_t)
return vscf_round5_new()
def vscf_round5_delete(self, ctx):
vscf_round5_delete = self._lib.vscf_round5_delete
vscf_round5_delete.argtypes = [POINTER(vscf_round5_t)]
vscf_round5_delete.restype = None
return vscf_round5_delete(ctx)
def vscf_round5_use_random(self, ctx, random):
vscf_round5_use_random = self._lib.vscf_round5_use_random
vscf_round5_use_random.argtypes = [POINTER(vscf_round5_t), POINTER(vscf_impl_t)]
vscf_round5_use_random.restype = None
return vscf_round5_use_random(ctx, random)
def vscf_round5_generate_ephemeral_key(self, ctx, key, error):
"""Generate ephemeral private key of the same type.
Note, this operation might be slow."""
vscf_round5_generate_ephemeral_key = self._lib.vscf_round5_generate_ephemeral_key
vscf_round5_generate_ephemeral_key.argtypes = [POINTER(vscf_round5_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)]
vscf_round5_generate_ephemeral_key.restype = POINTER(vscf_impl_t)
return vscf_round5_generate_ephemeral_key(ctx, key, error)
def vscf_round5_import_public_key(self, ctx, raw_key, error):
"""Import public key from the raw binary format.
Return public key that is adopted and optimized to be used
with this particular algorithm.
Binary format must be defined in the key specification.
For instance, RSA public key must be imported from the format defined in
RFC 3447 Appendix A.1.1."""
vscf_round5_import_public_key = self._lib.vscf_round5_import_public_key
vscf_round5_import_public_key.argtypes = [POINTER(vscf_round5_t), POINTER(vscf_raw_public_key_t), POINTER(vscf_error_t)]
vscf_round5_import_public_key.restype = POINTER(vscf_impl_t)
return vscf_round5_import_public_key(ctx, raw_key, error)
def vscf_round5_export_public_key(self, ctx, public_key, error):
"""Export public key to the raw binary format.
Binary format must be defined in the key specification.
For instance, RSA public key must be exported in format defined in
RFC 3447 Appendix A.1.1."""
vscf_round5_export_public_key = self._lib.vscf_round5_export_public_key
vscf_round5_export_public_key.argtypes = [POINTER(vscf_round5_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)]
vscf_round5_export_public_key.restype = POINTER(vscf_raw_public_key_t)
return vscf_round5_export_public_key(ctx, public_key, error)
def vscf_round5_import_private_key(self, ctx, raw_key, error):
"""Import private key from the raw binary format.
Return private key that is adopted and optimized to be used
with this particular algorithm.
Binary format must be defined in the key specification.
For instance, RSA private key must be imported from the format defined in
RFC 3447 Appendix A.1.2."""
vscf_round5_import_private_key = self._lib.vscf_round5_import_private_key
vscf_round5_import_private_key.argtypes = [POINTER(vscf_round5_t), POINTER(vscf_raw_private_key_t), POINTER(vscf_error_t)]
vscf_round5_import_private_key.restype = POINTER(vscf_impl_t)
return vscf_round5_import_private_key(ctx, raw_key, error)
def vscf_round5_export_private_key(self, ctx, private_key, error):
"""Export private key in the raw binary format.
Binary format must be defined in the key specification.
For instance, RSA private key must be exported in format defined in
RFC 3447 Appendix A.1.2."""
vscf_round5_export_private_key = self._lib.vscf_round5_export_private_key
vscf_round5_export_private_key.argtypes = [POINTER(vscf_round5_t), POINTER(vscf_impl_t), POINTER(vscf_error_t)]
vscf_round5_export_private_key.restype = POINTER(vscf_raw_private_key_t)
return vscf_round5_export_private_key(ctx, private_key, error)
def vscf_round5_kem_shared_key_len(self, ctx, key):
"""Return length in bytes required to hold encapsulated shared key."""
vscf_round5_kem_shared_key_len = self._lib.vscf_round5_kem_shared_key_len
vscf_round5_kem_shared_key_len.argtypes = [POINTER(vscf_round5_t), POINTER(vscf_impl_t)]
vscf_round5_kem_shared_key_len.restype = c_size_t
return vscf_round5_kem_shared_key_len(ctx, key)
def vscf_round5_kem_encapsulated_key_len(self, ctx, public_key):
"""Return length in bytes required to hold encapsulated key."""
vscf_round5_kem_encapsulated_key_len = self._lib.vscf_round5_kem_encapsulated_key_len
vscf_round5_kem_encapsulated_key_len.argtypes = [POINTER(vscf_round5_t), POINTER(vscf_impl_t)]
vscf_round5_kem_encapsulated_key_len.restype = c_size_t
return vscf_round5_kem_encapsulated_key_len(ctx, public_key)
def vscf_round5_kem_encapsulate(self, ctx, public_key, shared_key, encapsulated_key):
"""Generate a shared key and a key encapsulated message."""
vscf_round5_kem_encapsulate = self._lib.vscf_round5_kem_encapsulate
vscf_round5_kem_encapsulate.argtypes = [POINTER(vscf_round5_t), POINTER(vscf_impl_t), POINTER(vsc_buffer_t), POINTER(vsc_buffer_t)]
vscf_round5_kem_encapsulate.restype = c_int
return vscf_round5_kem_encapsulate(ctx, public_key, shared_key, encapsulated_key)
def vscf_round5_kem_decapsulate(self, ctx, encapsulated_key, private_key, shared_key):
"""Decapsulate the shared key."""
vscf_round5_kem_decapsulate = self._lib.vscf_round5_kem_decapsulate
vscf_round5_kem_decapsulate.argtypes = [POINTER(vscf_round5_t), vsc_data_t, POINTER(vscf_impl_t), POINTER(vsc_buffer_t)]
vscf_round5_kem_decapsulate.restype = c_int
return vscf_round5_kem_decapsulate(ctx, encapsulated_key, private_key, shared_key)
def vscf_round5_setup_defaults(self, ctx):
"""Setup predefined values to the uninitialized class dependencies."""
vscf_round5_setup_defaults = self._lib.vscf_round5_setup_defaults
vscf_round5_setup_defaults.argtypes = [POINTER(vscf_round5_t)]
vscf_round5_setup_defaults.restype = c_int
return vscf_round5_setup_defaults(ctx)
def vscf_round5_generate_key(self, ctx, alg_id, error):
"""Generate new private key.
Note, this operation might be slow."""
vscf_round5_generate_key = self._lib.vscf_round5_generate_key
vscf_round5_generate_key.argtypes = [POINTER(vscf_round5_t), c_int, POINTER(vscf_error_t)]
vscf_round5_generate_key.restype = POINTER(vscf_impl_t)
return vscf_round5_generate_key(ctx, alg_id, error)
def vscf_round5_shallow_copy(self, ctx):
vscf_round5_shallow_copy = self._lib.vscf_round5_shallow_copy
vscf_round5_shallow_copy.argtypes = [POINTER(vscf_round5_t)]
vscf_round5_shallow_copy.restype = POINTER(vscf_round5_t)
return vscf_round5_shallow_copy(ctx)
def vscf_round5_impl(self, ctx):
vscf_round5_impl = self._lib.vscf_round5_impl
vscf_round5_impl.argtypes = [POINTER(vscf_round5_t)]
vscf_round5_impl.restype = POINTER(vscf_impl_t)
return vscf_round5_impl(ctx)
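# Typical KEM flow built on the bindings above (illustrative sketch; key
# generation/import details are elided):
#     lib = VscfRound5()
#     ctx = lib.vscf_round5_new()
#     lib.vscf_round5_setup_defaults(ctx)
#     # sender:   lib.vscf_round5_kem_encapsulate(ctx, public_key, shared, encapsulated)
#     # receiver: lib.vscf_round5_kem_decapsulate(ctx, encapsulated_data, private_key, shared)
#     lib.vscf_round5_delete(ctx)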
| [
"[email protected]"
] | |
703d10b745c8000288c553adcd42e3990d554b8b | 1b1a30bfa44ad30fa6fb4ac2e6254d9ff2bf9d46 | /keytree/tests/test_write.py | de9d42f3b86e230c671961a2444f2286f439f14f | [] | no_license | imclab/keytree | bff25eee2c8e82aa95062ddbfdda32b0b3a0759b | db7dc932af92d4367bdb934632327c6d9963c2cb | refs/heads/master | 2020-12-13T19:14:31.044506 | 2012-12-28T23:20:06 | 2012-12-28T23:20:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,858 | py |
from unittest import TestCase
from xml.etree import ElementTree as etree
from keytree import element
KML = """<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
</Document>
</kml>
"""
class ElementWriterTestCase(TestCase):
def setUp(self):
self.doc = etree.fromstring(KML)
def test_element(self):
f = {
'id': '1',
'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)},
'properties': {
'title': 'one',
'description': 'Point one' } }
elem = element(self.doc, f)
self.failUnlessEqual(
elem.tag, '{http://www.opengis.net/kml/2.2}Placemark' )
self.failUnlessEqual(elem.attrib['id'], '1')
self.failUnlessEqual(
elem.find('{http://www.opengis.net/kml/2.2}name').text,
'one' )
self.failUnlessEqual(
elem.find('{http://www.opengis.net/kml/2.2}Snippet').text,
'Point one' )
self.failUnlessEqual(
elem.find('{http://www.opengis.net/kml/2.2}Point').find(
'{http://www.opengis.net/kml/2.2}coordinates').text,
'0.000000,0.000000,0.0' )
def test_element_kw(self):
f = {
'id': '1',
'geometry': {'type': 'Point', 'coordinates': (0.0, 0.0)},
'properties': {} }
elem = element(self.doc, f, name='one', snippet='Point one')
self.failUnlessEqual(
elem.tag, '{http://www.opengis.net/kml/2.2}Placemark' )
self.failUnlessEqual(elem.attrib['id'], '1')
self.failUnlessEqual(
elem.find('{http://www.opengis.net/kml/2.2}name').text,
'one' )
self.failUnlessEqual(
elem.find('{http://www.opengis.net/kml/2.2}Snippet').text,
'Point one' )
| [
"[email protected]"
] | |
514a7cb7000a793365eac04a7428ab887b3fab7f | adb759899204e61042225fabb64f6c1a55dac8ce | /14500~14599/14501.py | ffaf69b3c4a49b459ac1f40535dc459b3e8d71b2 | [] | no_license | geneeol/baekjoon-online-judge | 21cdffc7067481b29b18c09c9152135efc82c40d | 2b359aa3f1c90f178d0c86ce71a0580b18adad6f | refs/heads/master | 2023-03-28T23:25:12.219487 | 2021-04-01T09:19:06 | 2021-04-01T09:19:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,439 | py | # 문제
# Baekjoon, who works as a consultant, is planning to quit his job.
# In order to resign on day N+1 counted from today, he wants to do as many consultations as possible during the remaining N days.
# He asked his secretary to book as many consultations as possible, and the secretary booked one consultation with a different person for each day.
# Each consultation consists of the time Ti it takes to complete and the payment Pi received for doing it.
# For N = 7, consider the following consultation schedule:
#      Day1  Day2  Day3  Day4  Day5  Day6  Day7
# Ti     3     5     1     1     2     4     2
# Pi    10    20    10    20    15    40   200
# The consultation booked on day 1 takes 3 days in total and pays 10.
# The consultation booked on day 5 takes 2 days in total and pays 15.
# Because a consultation may take longer than one day, not all consultations can be done.
# For example, doing the day-1 consultation makes the consultations on days 2 and 3 impossible.
# Doing the day-2 consultation rules out the consultations booked on days 3, 4, 5 and 6.
# Also, since he is not at the company on day N+1, the consultations on days 6 and 7 cannot be done.
# The maximum profit before resigning comes from doing the consultations on days 1, 4 and 5, for a profit of 10+20+15=45.
# Write a program that computes the maximum profit Baekjoon can earn when consultations are chosen appropriately.
# Input
# The first line contains N (1 ≤ N ≤ 15).
# Each of the next N lines contains Ti and Pi separated by a space, in order from day 1 to day N. (1 ≤ Ti ≤ 5, 1 ≤ Pi ≤ 1,000)
# Output
# Print on the first line the maximum profit Baekjoon can earn.
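# Approach: backward DP where max_pay[i] is the best profit achievable from
# day i (0-indexed) to the end. If the day-i consultation still finishes
# before resignation (i + Ti <= n), then
#     max_pay[i] = max(Pi + max_pay[i + Ti], max_pay[i + 1])
# otherwise the day must be skipped and max_pay[i] falls back to max_pay[i + 1].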
n = int(input())
max_pay = [0] * n
table = [list(map(int, input().split())) for _ in range(n)]
for i in range(n - 1, -1, -1):
current_day, current_pay = table[i][0], table[i][1]
if current_day > n - i:
if i != n - 1:
max_pay[i] = max_pay[i + 1]
continue
if i == n - 1:
max_pay[i] = current_pay
elif i + current_day == n:
max_pay[i] = max(current_pay, max_pay[i + 1])
else:
max_pay[i] = max(current_pay + max_pay[i + current_day], max_pay[i + 1])
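# With the N = 7 schedule from the statement, the print below outputs 45.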
print(max_pay[0]) | [
"[email protected]"
] | |
db3bddc10e70ca78c44d08749d8653382c4a955e | 6ede8ffb96aabbe03feb7e739b645878abd7bc6d | /tensorflow/python/ops/bincount.py | 68950eaf5965860b96ae92d42a91ab25d8f35841 | [
"Apache-2.0"
] | permissive | AzureMentor/tensorflow | fea99d031494642b414e15e4e1f9b34cf0353a76 | 9f8fc1e945817428be0555a985d79073d713bce0 | refs/heads/master | 2021-08-22T04:43:04.739158 | 2020-05-20T02:24:52 | 2020-05-20T02:24:52 | 184,843,536 | 2 | 0 | Apache-2.0 | 2020-05-20T02:24:53 | 2019-05-04T02:29:27 | C++ | UTF-8 | Python | false | false | 11,553 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.sparse.bincount ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_count_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util.tf_export import tf_export
@tf_export("sparse.bincount")
def sparse_bincount(values,
weights=None,
axis=0,
minlength=None,
maxlength=None,
binary_output=False,
name=None):
"""Count the number of times an integer value appears in a tensor.
This op takes an N-dimensional `Tensor`, `RaggedTensor`, or `SparseTensor`,
and returns an N-dimensional int64 SparseTensor where element
`[i0...i[axis], j]` contains the number of times the value `j` appears in
slice `[i0...i[axis], :]` of the input tensor. Currently, only N=0 and
N=-1 are supported.
Args:
values: A Tensor, RaggedTensor, or SparseTensor whose values should be
counted. These tensors must have a rank of 1 or 2.
weights: A 1-dimensional Tensor of weights. If specified, the input array is
weighted by the weight array, i.e. if a value `n` is found at position
`i`, `out[n]` will be increased by `weight[i]` instead of 1.
axis: The axis to slice over. Axes at and below `axis` will be flattened
before bin counting. Currently, only `0`, and `-1` are supported. If None,
all axes will be flattened (identical to passing `0`).
minlength: If given, skips `values` that are less than `minlength`, and
ensures that the output has a `dense_shape` of at least `minlength` in the
inner dimension.
maxlength: If given, skips `values` that are greater than or equal to
`maxlength`, and ensures that the output has a `dense_shape` of at most
`maxlength` in the inner dimension.
binary_output: If True, this op will output 1 instead of the number of times
a token appears (equivalent to one_hot + reduce_any instead of one_hot +
reduce_add). Defaults to False.
name: A name for this op.
Returns:
A SparseTensor with `output.shape = values.shape[:axis] + [N]`, where `N` is
* `maxlength` (if set);
* `minlength` (if set, and `minlength > reduce_max(values)`);
* `0` (if `values` is empty);
* `reduce_max(values) + 1` otherwise.
Examples:
**Bin-counting every item in individual batches**
This example takes an input (which could be a Tensor, RaggedTensor, or
SparseTensor) and returns a SparseTensor where the value of (i,j) is the
number of times value j appears in batch i.
>>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64)
>>> output = tf.sparse.bincount(data, axis=-1)
>>> print(output)
SparseTensor(indices=tf.Tensor(
[[ 0 10]
[ 0 20]
[ 0 30]
[ 1 11]
[ 1 101]
[ 1 10001]], shape=(6, 2), dtype=int64),
values=tf.Tensor([1 2 1 2 1 1], shape=(6,), dtype=int64),
dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64))
**Bin-counting with defined output shape**
This example takes an input (which could be a Tensor, RaggedTensor, or
SparseTensor) and returns a SparseTensor where the value of (i,j) is the
number of times value j appears in batch i. However, all values of j
above 'maxlength' are ignored. The dense_shape of the output sparse tensor
is set to 'minlength'. Note that, while the input is identical to the
example above, the value '10001' in batch item 2 is dropped, and the
dense shape is [2, 500] instead of [2,10002] or [2, 102].
>>> minlength = maxlength = 500
>>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64)
>>> output = tf.sparse.bincount(
... data, axis=-1, minlength=minlength, maxlength=maxlength)
>>> print(output)
SparseTensor(indices=tf.Tensor(
[[ 0 10]
[ 0 20]
[ 0 30]
[ 1 11]
[ 1 101]], shape=(5, 2), dtype=int64),
values=tf.Tensor([1 2 1 2 1], shape=(5,), dtype=int64),
dense_shape=tf.Tensor([ 2 500], shape=(2,), dtype=int64))
**Binary bin-counting**
This example takes an input (which could be a Tensor, RaggedTensor, or
SparseTensor) and returns a SparseTensor where (i,j) is 1 if the value j
appears in batch i at least once and is 0 otherwise. Note that, even though
some values (like 20 in batch 1 and 11 in batch 2) appear more than once,
the 'values' tensor is all 1s.
>>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64)
>>> output = tf.sparse.bincount(data, binary_output=True, axis=-1)
>>> print(output)
SparseTensor(indices=tf.Tensor(
[[ 0 10]
[ 0 20]
[ 0 30]
[ 1 11]
[ 1 101]
[ 1 10001]], shape=(6, 2), dtype=int64),
values=tf.Tensor([1 1 1 1 1 1], shape=(6,), dtype=int64),
dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64))
**Weighted bin-counting**
This example takes two inputs - a values tensor and a weights tensor. These
tensors must be identically shaped, and have the same row splits or indices
in the case of RaggedTensors or SparseTensors. When performing a weighted
count, the op will output a SparseTensor where the value of (i, j) is the
sum of the values in the weight tensor's batch i in the locations where
the values tensor has the value j. In this case, the output dtype is the
same as the dtype of the weights tensor.
>>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64)
>>> weights = [[2, 0.25, 15, 0.5], [2, 17, 3, 0.9]]
>>> output = tf.sparse.bincount(data, weights=weights, axis=-1)
>>> print(output)
SparseTensor(indices=tf.Tensor(
[[ 0 10]
[ 0 20]
[ 0 30]
[ 1 11]
[ 1 101]
[ 1 10001]], shape=(6, 2), dtype=int64),
values=tf.Tensor([2. 0.75 15. 5. 17. 0.9], shape=(6,), dtype=float32),
dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64))
"""
with ops.name_scope(name, "count", [values, weights]):
if not isinstance(values, sparse_tensor.SparseTensor):
values = ragged_tensor.convert_to_tensor_or_ragged_tensor(
values, name="values")
if weights is not None:
if not isinstance(weights, sparse_tensor.SparseTensor):
weights = ragged_tensor.convert_to_tensor_or_ragged_tensor(
weights, name="weights")
if weights is not None and binary_output:
raise ValueError("binary_output and weights are mutually exclusive.")
if axis is None:
axis = 0
if axis not in [0, -1]:
raise ValueError("Unsupported axis value %s. Only 0 and -1 are currently "
"supported." % axis)
minlength_value = minlength if minlength is not None else -1
maxlength_value = maxlength if maxlength is not None else -1
if axis == 0:
if isinstance(values, sparse_tensor.SparseTensor):
if weights is not None:
weights = validate_sparse_weights(values, weights)
values = values.values
elif isinstance(values, ragged_tensor.RaggedTensor):
if weights is not None:
weights = validate_ragged_weights(values, weights)
values = values.values
else:
if weights is not None:
weights = array_ops.reshape(weights, [-1])
values = array_ops.reshape(values, [-1])
if isinstance(values, sparse_tensor.SparseTensor):
weights = validate_sparse_weights(values, weights)
c_ind, c_val, c_shape = gen_count_ops.sparse_count_sparse_output(
values.indices,
values.values,
values.dense_shape,
weights,
minlength=minlength_value,
maxlength=maxlength_value,
binary_output=binary_output)
elif isinstance(values, ragged_tensor.RaggedTensor):
weights = validate_ragged_weights(values, weights)
c_ind, c_val, c_shape = gen_count_ops.ragged_count_sparse_output(
values.row_splits,
values.values,
weights,
minlength=minlength_value,
maxlength=maxlength_value,
binary_output=binary_output)
else:
weights = validate_dense_weights(values, weights)
c_ind, c_val, c_shape = gen_count_ops.dense_count_sparse_output(
values,
weights=weights,
minlength=minlength_value,
maxlength=maxlength_value,
binary_output=binary_output)
return sparse_tensor.SparseTensor(c_ind, c_val, c_shape)
def validate_dense_weights(values, weights):
"""Validates the passed weight tensor or creates an empty one."""
if weights is None:
return array_ops.constant([], dtype=values.dtype)
if not isinstance(weights, ops.Tensor):
raise ValueError(
"`weights` must be a tf.Tensor if `values` is a tf.Tensor.")
return weights
def validate_sparse_weights(values, weights):
"""Validates the passed weight tensor or creates an empty one."""
if weights is None:
return array_ops.constant([], dtype=values.values.dtype)
if not isinstance(weights, sparse_tensor.SparseTensor):
raise ValueError(
"`weights` must be a SparseTensor if `values` is a SparseTensor.")
checks = []
if weights.dense_shape is not values.dense_shape:
checks.append(
check_ops.assert_equal(
weights.dense_shape,
values.dense_shape,
message="'weights' and 'values' must have the same dense shape."))
if weights.indices is not values.indices:
checks.append(
check_ops.assert_equal(
weights.indices,
values.indices,
message="'weights' and 'values' must have the same indices.")
)
if checks:
with ops.control_dependencies(checks):
weights = array_ops.identity(weights.values)
else:
weights = weights.values
return weights
def validate_ragged_weights(values, weights):
"""Validates the passed weight tensor or creates an empty one."""
if weights is None:
return array_ops.constant([], dtype=values.values.dtype)
if not isinstance(weights, ragged_tensor.RaggedTensor):
raise ValueError(
"`weights` must be a RaggedTensor if `values` is a RaggedTensor.")
checks = []
if weights.row_splits is not values.row_splits:
checks.append(
check_ops.assert_equal(
weights.row_splits,
values.row_splits,
message="'weights' and 'values' must have the same row splits."))
if checks:
with ops.control_dependencies(checks):
weights = array_ops.identity(weights.values)
else:
weights = weights.values
return weights
| [
"[email protected]"
] | |
d6cd28ae361ec931ad2dfaf604fbe17836bc2e04 | 233928d206e13e068cf8cb5ff7888c9a2d84ad61 | /elice/lecture1/test.py | 58d44bdea1bbe1bcade0113355c215364068ddf0 | [] | no_license | Jinwoongma/Algorithm | 7f6daa2d3c2c361059c09fb4fe287b1cce4863e2 | 78803f4572f1416451a9f4f31f53b7d653f74d4a | refs/heads/master | 2022-10-07T22:53:20.333329 | 2020-06-07T13:27:47 | 2020-06-07T13:27:47 | 237,114,107 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | num = 10
def plus():
    global num  # required: without it, `num += 1` makes num local and print(num) raises UnboundLocalError
    print(num)
    num += 1
plus() | [
"[email protected]"
] | |
eae1209cde503300621b2e4cd63576ae312a868e | 699a43917ce75b2026a450f67d85731a0f719e01 | /using_python/322_coin_change/coin_change.py | 453bc631d8c72e4e6d1d021cb3b8598456640723 | [] | no_license | wusanshou2017/Leetcode | 96ab81ae38d6e04739c071acfc0a5f46a1c9620b | c4b85ca0e23700b84e4a8a3a426ab634dba0fa88 | refs/heads/master | 2021-11-16T01:18:27.886085 | 2021-10-14T09:54:47 | 2021-10-14T09:54:47 | 107,402,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | class Solution:
def coinChange(self, coins: [int], amount: int) -> int:
        dp = [0] + [float("inf")] * amount
        for coin in coins:
            for i in range(coin, amount + 1):
                dp[i] = min(dp[i], dp[i - coin] + 1)
        return dp[amount] if dp[amount] != float("inf") else -1
so = Solution()
# demo call (arguments assumed for illustration): 11 = 5 + 5 + 1 -> 3 coins
print(so.coinChange([1, 2, 5], 11)) | [
"[email protected]"
] | |
6828a17057599731cc595b26995b3930f8a78e82 | 7a15271c7cddd199f43555469a67d26ce0f60836 | /uncertainty_baselines/models/vit_mimo.py | d218ad8262546522d746a03e843d3e3409b5167a | [
"Apache-2.0"
] | permissive | google/uncertainty-baselines | b2c339d918bf3949ee066f9eafa6b51232a2ac3d | f5f6f50f82bd441339c9d9efbef3f09e72c5fef6 | refs/heads/main | 2023-09-02T13:59:26.355288 | 2023-08-14T16:35:22 | 2023-08-14T16:36:11 | 280,026,201 | 1,235 | 198 | Apache-2.0 | 2023-09-11T22:21:48 | 2020-07-16T01:54:32 | Python | UTF-8 | Python | false | false | 3,957 | py | # coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MIMO Vision Transformer model."""
from typing import Any, Optional, Tuple
import flax.linen as nn
import jax
import jax.numpy as jnp
from uncertainty_baselines.models import vit
Array = Any
PRNGKey = Any
Shape = Tuple[int]
Dtype = Any
class VisionTransformerMIMO(nn.Module):
"""MIMO Vision Transformer model."""
num_classes: int
patches: Any
transformer: Any
hidden_size: int
ensemble_size: int
representation_size: Optional[int] = None
classifier: str = 'token'
fix_base_model: bool = False
@nn.compact
def __call__(self, inputs, *, train):
"""Function of shapes [B*R,h,w,c*E] -> [E*B*R,num_classes]."""
out = {}
x = inputs
# We can merge s2d+emb into a single conv; it's the same.
x = nn.Conv(
features=self.hidden_size,
kernel_size=self.patches.size,
strides=self.patches.size,
padding='VALID',
name='embedding')(
x)
# Here, x is a grid of embeddings.
# TODO(dusenberrymw): Switch to self.sow(.).
out['stem'] = x
# Transformer.
n, h, w, c = x.shape
x = jnp.reshape(x, [n, h * w, c])
# If we want to add a class token, add it here.
if self.classifier == 'token':
cls = self.param('cls', nn.initializers.zeros, (1, 1, c))
cls = jnp.tile(cls, [n, 1, 1])
x = jnp.concatenate([cls, x], axis=1)
x = vit.Encoder(name='Transformer', **self.transformer)(x, train=train)
out['transformed'] = x
if self.classifier == 'token':
x = x[:, 0]
elif self.classifier == 'gap':
x = jnp.mean(x, axis=list(range(1, x.ndim - 1))) # (1,) or (1,2)
else:
raise ValueError(f'Invalid classifier={self.classifier}')
out['head_input'] = x
if self.representation_size is not None:
x = nn.Dense(features=self.representation_size, name='pre_logits')(x)
out['pre_logits'] = x
x = nn.tanh(x)
else:
x = vit.IdentityLayer(name='pre_logits')(x)
out['pre_logits'] = x
# TODO(markcollier): Fix base model without using stop_gradient.
if self.fix_base_model:
x = jax.lax.stop_gradient(x)
# Shape: (batch_size, num_classes * ensemble_size).
x = nn.Dense(self.num_classes * self.ensemble_size,
name='head',
kernel_init=nn.initializers.zeros)(x)
# Shape: (batch_size * ensemble_size, num_classes).
x = jnp.concatenate(jnp.split(x, self.ensemble_size, axis=-1))
out['logits'] = x
return x, out
def vision_transformer_mimo(num_classes: int,
patches: Any,
transformer: Any,
hidden_size: int,
ensemble_size: int,
representation_size: Optional[int] = None,
classifier: str = 'token',
fix_base_model: bool = False):
"""Builds a BatchEnsemble Vision Transformer (ViT) model."""
# TODO(dusenberrymw): Add API docs once the config dict in VisionTransformerBE
# is cleaned up.
return VisionTransformerMIMO(
num_classes=num_classes,
patches=patches,
transformer=transformer,
hidden_size=hidden_size,
ensemble_size=ensemble_size,
representation_size=representation_size,
classifier=classifier,
fix_base_model=fix_base_model)
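# Construction sketch (illustrative; the exact `patches`/`transformer` config
# schema is an assumption based on upstream ViT configs, not this file):
#     import ml_collections
#     patches = ml_collections.ConfigDict({"size": (16, 16)})
#     transformer = dict(num_layers=12, mlp_dim=3072, num_heads=12,
#                        dropout_rate=0.1, attention_dropout_rate=0.1)
#     model = vision_transformer_mimo(
#         num_classes=1000, patches=patches, transformer=transformer,
#         hidden_size=768, ensemble_size=2)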
| [
"[email protected]"
] | |
b7f7076f5afb4e3428632e25750722c00706b99e | 4777728e147ef6a7d3af53ac89ad7e7527fa7b54 | /scripts/match_districts.py | 64a10b50e30424cad54373cf920d8a713cd27253 | [
"MIT"
] | permissive | meilinger/firecares | d8da365e355aec7c0f9f75da25fca655432d0254 | fbcde3c1fb89a07b2b28d9039b49dca53b9b991b | refs/heads/master | 2021-01-18T19:38:45.330532 | 2016-11-01T20:51:25 | 2016-11-01T20:51:38 | 41,003,852 | 0 | 0 | null | 2015-08-19T00:12:20 | 2015-08-19T00:12:20 | null | UTF-8 | Python | false | false | 1,319 | py | import glob
import os
import sys
sys.path.append(os.pardir)
import django
django.setup()  # must run before importing models, or AppRegistryNotReady is raised
from firecares.firestation.management.commands.match_districts import Command
from firecares.firestation.models import FireDepartment, FireStation
from django.contrib.gis.geos import GeometryCollection as GC
files = glob.glob(sys.argv[1]+'*districts*.geojson')
parsed_files = [(n.split('-')[1].upper(), n.split('-')[2], n) for n in files]
for state, name, path in parsed_files:
department = None
try:
department = FireDepartment.priority_departments.get(state=state, name__icontains=name.replace('_', ' '))
except FireDepartment.DoesNotExist:
if name == 'los_angeles_city':
department = FireDepartment.objects.get(id=87256)
c = Command()
c.handle(geojson_file=path, queryset=department.firestation_set.all())
geometry_collection = GC([n for n in department.firestation_set.all().values_list('district', flat=True) if n])
map(geometry_collection.append, [n for n in department.firestation_set.all().values_list('geom', flat=True) if n])
    with open(os.path.join(sys.argv[1], 'processed', 'us-{0}-{1}-districts_processed.geojson'.format(state.lower(), name, department.name.replace(' ', '_').lower())), 'w') as output:
output.write(geometry_collection.json)
| [
"[email protected]"
] | |
fdc77083dbf90bdb66b304f6e10d6aff7b96f4ab | d703487f4c27b92310ad1e6674da8b48e2095bc8 | /common/logic/fish_array_3.py | b86373ab6e1bf7feef2f27aa2a5f5e9eace2c0b3 | [] | no_license | rolllyroman/fish_ | 0aaee7544650886c73eaf06d5bb420b409628d29 | 7b535f8a0bc875b96922121a29636aab4a20f803 | refs/heads/master | 2020-03-28T00:51:06.565231 | 2018-09-21T08:50:36 | 2018-09-21T08:50:36 | 147,454,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,711 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Author: $Author$
Date: $Date$
Revision: $Revision$
Description:
    Fish formation 3: a ring formation with evenly spaced fish
"""
from common.data.scene import WIDTH, HEIGHT, CENTER_X, CENTER_Y
from common.data.fish_init_areas import FishInitArea
from common.arith.point_math import Point
from common.data.fish_levels import FISH_LEVELS_DATA
from common.data.pickfish_consts import TOLERATE_LAG_SECS
import fish_array
from common.gameobject import GameObject
import random
import math
from common.pb_utils import pbAppendRoute
class FishArrayInit(GameObject):
def __init__(self, initArea, fishLevels, space, speed, counts):
self.initArea = initArea
self.fishLevels = fishLevels
self.space = space
self.speed = speed
self.counts = counts
class FishArray(fish_array.FishArray):
def __init__(self, fishMgr):
super(FishArray, self).__init__(fishMgr)
        # base movement speed
self.speed = 54
        # spacing between fish from ring to ring
self.space = -20
self.init_areas = [
            # ring entering from the left
FishArrayInit(FishInitArea(Point(0, CENTER_Y), Point(0, CENTER_Y), Point(WIDTH, CENTER_Y), Point(WIDTH, CENTER_Y)), [[3], [7], [12], [16]], self.space, self.speed, [48, 30, 12, 1]), \
            # ring entering from the right
FishArrayInit(FishInitArea(Point(WIDTH, CENTER_Y), Point(WIDTH, CENTER_Y), Point(0, CENTER_Y), Point(0, CENTER_Y)), [[3], [7], [12], [17]], self.space, self.speed, [48, 30, 12, 1]), \
]
def genFishs(self):
self.genFishDatas = []
centerNLevelNRadius = []
for area in self.init_areas:
centerP, direct, endP = area.initArea.getPointNDirect()
count = len(area.fishLevels)
levelNedges = []
for i in xrange(count):
space = self.space
level = random.choice(area.fishLevels[i])
levelData = FISH_LEVELS_DATA[level]
if i == count - 1:
centerP = centerP + (-direct) * (levelData.width/2.0)
levelNedges.append((level, centerP))
else:
width = levelData.width
if i == count - 2:
width = width/4.0
space = self.space * 2
elif i == 0:
space = self.space
else:
space = self.space / 2
centerP = centerP + (-direct) * (width/2.0)
levelNedges.append((level, centerP))
centerP = centerP + (-direct) * (width/2.0 + space)
levelNRadius = []
for level, edgeP in levelNedges:
levelNRadius.append((level, centerP.getDist(edgeP)))
centerNLevelNRadius.append((centerP, levelNRadius))
longestDuration = 0
for idx, area in enumerate(self.init_areas):
centerP, levelNRadius = centerNLevelNRadius[idx]
initP, direct, endP = area.initArea.getPointNDirect()
            # get the initial rotation angle
rad = direct.toRadian()
initRot = math.degrees(rad)
for i in xrange(len(levelNRadius)):
level, radius = levelNRadius[i]
deltaAngle = (math.pi*2)/area.counts[i]
levelData = FISH_LEVELS_DATA[level]
for i in xrange(area.counts[i]):
offsetDir = direct.rotateSelfByRadian(deltaAngle*i).normalize()
startP = centerP + (offsetDir*radius)
curEndP = Point(endP.x, startP.y) + direct * (levelData.width/2.0)
duration = curEndP.getDist(startP)/area.speed
                    # optimization: spawn every fish half a body length outside the screen
realStartP = Point(initP.x, startP.y) + (-direct) * (levelData.width/2.0)
realDuration = curEndP.getDist(realStartP)/area.speed
if duration > longestDuration:
longestDuration = duration
self.genFishDatas.append(fish_array.FishInitData(0, level, levelData.order, initRot, \
realStartP.x, realStartP.y, realDuration, levelData.getMulti(), levelData.getPickedRate(), 0, \
pbAppendRoute([], 0, area.speed, realDuration + TOLERATE_LAG_SECS), \
fish_array.FISH_ARRAY_APPEAR_TICK + (duration - realDuration)*1000))
self.duration = longestDuration + TOLERATE_LAG_SECS
super(FishArray, self).genFishs()
| [
"[email protected]"
] | |
b1e07b55cc88e5fa72d84564acadc485d25057d2 | 16c8fdf291430475f40d578b0d64552eb64046e9 | /colour/models/rgb/transfer_functions/nikon_nlog.py | 21e0c13e1081c32f15d60dac95893527f306835d | [
"BSD-3-Clause"
] | permissive | nodefeet/colour | 4c1bfed87ce173ff878bdf288fd9828bb68022e3 | 319dd5b1c45aef6983eff1830f918c1e593fb530 | refs/heads/develop | 2022-02-19T17:39:36.657993 | 2022-02-15T08:38:26 | 2022-02-15T08:38:26 | 460,456,444 | 0 | 0 | BSD-3-Clause | 2022-02-17T13:53:37 | 2022-02-17T13:53:36 | null | UTF-8 | Python | false | false | 5,529 | py | """
Nikon N-Log Log Encoding
========================
Defines the *Nikon N-Log* log encoding:
- :func:`colour.models.log_encoding_NLog`
- :func:`colour.models.log_decoding_NLog`
References
----------
- :cite:`Nikon2018` : Nikon. (2018). N-Log Specification Document - Version
1.0.0 (pp. 1-5). Retrieved September 9, 2019, from
http://download.nikonimglib.com/archive3/hDCmK00m9JDI03RPruD74xpoU905/\
N-Log_Specification_(En)01.pdf
"""
from __future__ import annotations
import numpy as np
from colour.algebra import spow
from colour.hints import (
Boolean,
FloatingOrArrayLike,
FloatingOrNDArray,
Integer,
)
from colour.models.rgb.transfer_functions import full_to_legal, legal_to_full
from colour.utilities import Structure, as_float, from_range_1, to_domain_1
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - http://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"NLOG_CONSTANTS",
"log_encoding_NLog",
"log_decoding_NLog",
]
NLOG_CONSTANTS: Structure = Structure(
cut1=0.328,
cut2=(452 / 1023),
a=(650 / 1023),
b=0.0075,
c=(150 / 1023),
d=(619 / 1023),
)
"""*Nikon N-Log* colourspace constants."""
def log_encoding_NLog(
in_r: FloatingOrArrayLike,
bit_depth: Integer = 10,
out_normalised_code_value: Boolean = True,
in_reflection: Boolean = True,
constants: Structure = NLOG_CONSTANTS,
) -> FloatingOrNDArray:
"""
Define the *Nikon N-Log* log encoding curve / opto-electronic transfer
function.
Parameters
----------
in_r
        Linear reflection data :math:`in`.
bit_depth
Bit depth used for conversion.
out_normalised_code_value
Whether the non-linear *Nikon N-Log* data :math:`out` is encoded as
normalised code values.
in_reflection
        Whether the light level :math:`in` to a camera is reflection.
constants
*Nikon N-Log* constants.
Returns
-------
:class:`numpy.floating` or :class:`numpy.ndarray`
Non-linear data :math:`out`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``in_r`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``out_r`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Nikon2018`
Examples
--------
>>> log_encoding_NLog(0.18) # doctest: +ELLIPSIS
0.3636677...
"""
in_r = to_domain_1(in_r)
if not in_reflection:
in_r = in_r * 0.9
cut1 = constants.cut1
a = constants.a
b = constants.b
c = constants.c
d = constants.d
out_r = np.where(
in_r < cut1,
a * spow(in_r + b, 1 / 3),
c * np.log(in_r) + d,
)
out_r_cv = (
out_r if out_normalised_code_value else legal_to_full(out_r, bit_depth)
)
return as_float(from_range_1(out_r_cv))
def log_decoding_NLog(
out_r: FloatingOrArrayLike,
bit_depth: Integer = 10,
in_normalised_code_value: Boolean = True,
out_reflection: Boolean = True,
constants: Structure = NLOG_CONSTANTS,
) -> FloatingOrNDArray:
"""
Define the *Nikon N-Log* log decoding curve / electro-optical transfer
function.
Parameters
----------
out_r
Non-linear data :math:`out`.
bit_depth
Bit depth used for conversion.
in_normalised_code_value
Whether the non-linear *Nikon N-Log* data :math:`out` is encoded as
normalised code values.
out_reflection
        Whether the light level :math:`in` to a camera is reflection.
constants
*Nikon N-Log* constants.
Returns
-------
:class:`numpy.floating` or :class:`numpy.ndarray`
        Linear reflection data :math:`in`.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``out_r`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
+------------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``in_r`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
References
----------
:cite:`Nikon2018`
Examples
--------
>>> log_decoding_NLog(0.36366777011713869) # doctest: +ELLIPSIS
0.1799999...
"""
out_r = to_domain_1(out_r)
out_r = (
out_r if in_normalised_code_value else full_to_legal(out_r, bit_depth)
)
cut2 = constants.cut2
a = constants.a
b = constants.b
c = constants.c
d = constants.d
in_r = np.where(
out_r < cut2,
spow(out_r / a, 3) - b,
np.exp((out_r - d) / c),
)
if not out_reflection:
in_r = in_r / 0.9
return as_float(from_range_1(in_r))
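if __name__ == "__main__":
    # Added round-trip sanity check (not part of the original module):
    # encoding then decoding should recover the input reflection value.
    encoded = log_encoding_NLog(0.18)
    print(encoded)                     # ~0.3636677..., matching the doctest
    print(log_decoding_NLog(encoded))  # ~0.18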
| [
"[email protected]"
] | |
7e93265e3e2ddc5409ba10e0b89e8c4bba613615 | 09a2d0231caf5231875270ca85dba3bf201d83c7 | /linotak/mentions/migrations/0004_incoming_received.py | 1d50965cd7e957d46d12464362c62a542df6d4ea | [] | no_license | pdc/linotak | 898c2a014a2f2beed25127efc4b69db637c1a537 | 0075ea457f764cbb67acecb584e927bf58d2e7a8 | refs/heads/develop | 2023-03-09T19:39:59.013308 | 2022-12-19T19:58:49 | 2022-12-19T19:58:49 | 148,982,795 | 0 | 0 | null | 2023-02-15T20:20:01 | 2018-09-16T09:54:31 | Python | UTF-8 | Python | false | false | 488 | py | # Generated by Django 3.0.8 on 2020-08-30 21:57
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
("mentions", "0003_post_i18n"),
]
operations = [
migrations.AddField(
model_name="incoming",
name="received",
field=models.DateTimeField(
default=django.utils.timezone.now, verbose_name="received"
),
),
]
| [
"[email protected]"
] | |
8e1afacf0cc3e334b5b5e95f56349940f2f1db5d | 1eb50735e3861cde4bca8f4feab5afc730003078 | /future/tqdm进度条.py | eda322f687b82450a99c954568d587570ce0ebd1 | [] | no_license | chinaylssly/fluent-python | 442e6458215e3c5a74c4d34d020b714da108f81d | 126c1d3e7853628c4a2c0e6ff475362b7d7fe33a | refs/heads/master | 2020-04-17T13:58:03.534184 | 2019-02-01T14:40:42 | 2019-02-01T14:40:42 | 166,637,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | # -*- coding: utf-8 -*-
from time import sleep
from tqdm import tqdm
for i in tqdm(range(10)):
sleep(1)
| [
"[email protected]"
] | |
575b7b48abf22f076b84e4ef15b11103ba8859b5 | 8cb8bfd2dae516612251039e0632173ea1ea4c8a | /modules/analyzes/door/doorsize/controller.py | 9126c599a165028ae6febf61087f048a2c4fb0a5 | [] | no_license | nyzsirt/lift-prod | 563cc70700d26a5812a1bce0bd9795998dce6e99 | 9a5f28e49ad5e80e422a5d5efee77a2d0247aa2b | refs/heads/master | 2020-04-22T01:05:42.262876 | 2019-02-09T13:31:15 | 2019-02-09T13:31:15 | 170,003,361 | 1 | 0 | null | 2019-02-10T17:11:50 | 2019-02-10T17:11:50 | null | UTF-8 | Python | false | false | 721 | py | from abstracts.abstract_resource_controller import AbstractResourceController
from modules.analyzes.door.doorsize.models import DoorSize
from modules.helper2 import helper
class ControllerDoorSize(AbstractResourceController):
def __init__(self):
self.helper = helper()
self.abstract = super(ControllerDoorSize, self)
self.main_model = DoorSize
self.default_kwargs = dict()
def get(self, get_args):
return self.abstract.get(get_args)
def create(self, data):
return self.abstract.create(data)
def update(self, mongoid, data):
return self.abstract.update(mongoid, data)
def delete(self, mongoid):
return self.abstract.delete(mongoid)
| [
"[email protected]"
] | |
6edd0fa849079349ad68374f58ae03751169c5a0 | c4ab9448e4df33cc55db85dbf37afb4982edcab4 | /isiscb/zotero/migrations/0003_auto_20160218_1614.py | 33af73f3ff24f0fde46cfe84682b49e4b6333db4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | upconsulting/IsisCB | 659c1293b3606fb797611e138deb234b41c617f5 | 6c20899bf9193cc4cc6b4a2efea24ae86cbc51eb | refs/heads/master | 2023-09-01T17:23:14.538601 | 2023-03-19T21:05:29 | 2023-03-19T21:05:29 | 40,304,109 | 6 | 2 | MIT | 2023-07-30T15:04:05 | 2015-08-06T12:40:51 | Python | UTF-8 | Python | false | false | 476 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-18 16:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zotero', '0002_auto_20160216_1622'),
]
operations = [
migrations.AlterField(
model_name='instanceresolutionevent',
name='to_instance_id',
field=models.CharField(max_length=1000),
),
]
| [
"[email protected]"
] | |
e16cb457cd2bdec0206f1fb45a2bc62bd0834ae8 | 29d09c634ffdd8cab13631d62bc6e3ad00df49bf | /Algorithm/baekjoon/13023_ABCDE.py | 668731e67b568111b79707b4d426bc57551db84d | [] | no_license | kim-taewoo/TIL_PUBLIC | f1d32c3b4f46344c1c99f02e95cc6d2a888a0374 | ae86b542f8b1805b5dd103576d6538e3b1f5b9f4 | refs/heads/master | 2021-09-12T04:22:52.219301 | 2021-08-28T16:14:11 | 2021-08-28T16:14:11 | 237,408,159 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | found = False
import sys
input = lambda: sys.stdin.readline()
def dfs(v, cnt, origin):
global found
if found:
return
if cnt == 5:
found = True
return
if cnt > chk_max[origin]:
chk_max[origin] = cnt
for i in board[v]:
if not chk[i]:
if chk_max[i]:
if cnt + chk_max[i] < 5: continue
chk[i] = True
dfs(i, cnt + 1, origin)
chk[i] = False
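# Sample run (mirrors the problem's first sample): for the input
#   5 4
#   0 1
#   1 2
#   2 3
#   3 4
# the friend chain 0-1-2-3-4 has five distinct people, so the program
# prints 1; it prints 0 when no such chain exists.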
n,m = map(int, input().split())
board = [set() for _ in range(n)]
for i in range(m):
a, b = map(int, input().split())
board[a].add(b)
board[b].add(a)
chk_max = [0 for _ in range(n)]
chk = [False for _ in range(n)]
for i in range(n):
if found:
break
chk[i] = True
dfs(i, 1, i)
chk[i] = False
if found:
print(1)
else:
print(0) | [
"[email protected]"
] | |
d4cbaf36b608ec4f75a9244c59fad4ff17838dba | 620d21623a300821e2a195eed5434bac67fb4dca | /abb_experimental_ws/build/abb_irb2400_moveit_config/catkin_generated/pkg.develspace.context.pc.py | bad71f63564b6113dd733b4f2835133b33daf66a | [] | no_license | yazici/Robarch-dev | bb63c04bd2e62386e7c1215bf5b116ccd763c455 | 9f8db4c418db3fc80454200cb6625cc2b2151382 | refs/heads/master | 2020-05-21T02:32:06.765535 | 2018-09-07T15:50:26 | 2018-09-07T15:50:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "abb_irb2400_moveit_config"
PROJECT_SPACE_DIR = "/home/jrv/Research/RoboticArcitecture/abb_experimental_ws/devel/.private/abb_irb2400_moveit_config"
PROJECT_VERSION = "1.2.1"
| [
"[email protected]"
] | |
5601aff2121e0430b64554af7112f5143e683c7f | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnsurveyor.py | 0f79817f723f9553af007d8121d2638228c52f5a | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 570 | py | ii = [('CookGHP3.py', 1), ('RogePAV2.py', 1), ('CoolWHM2.py', 1), ('RogePAV.py', 1), ('RennJIT.py', 2), ('MartHSI2.py', 1), ('MarrFDI3.py', 1), ('PeckJNG.py', 5), ('FitzRNS3.py', 4), ('ClarGE2.py', 2), ('AinsWRR3.py', 1), ('CoolWHM.py', 1), ('ClarGE.py', 3), ('LyelCPG.py', 3), ('CrocDNL.py', 1), ('WadeJEB.py', 20), ('BachARE.py', 5), ('MereHHB3.py', 3), ('MereHHB.py', 1), ('FitzRNS4.py', 2), ('CoolWHM3.py', 5), ('FitzRNS.py', 11), ('FerrSDO.py', 1), ('MereHHB2.py', 4), ('JacoWHI.py', 1), ('ClarGE3.py', 6), ('FitzRNS2.py', 7), ('EvarJSP.py', 1), ('LyelCPG3.py', 2)] | [
"[email protected]"
] | |
137ce9f638c8cbef7f90ef43a198d1e2bbff6f71 | b1152e66088975211a7b3ae61f68c69630644f4a | /MIX_Graph_Multi_Agent/Attention/attention.py | c49680b7c0cdad91350c6e900a3da5d1bf04167d | [] | no_license | lxjlu/H2G-MAAC | ef09a6f838816ff3b47b71781204dcb1fe11a30b | 144542e098932d61b744bf337b3c6637b358a3b7 | refs/heads/main | 2023-06-21T22:26:22.018345 | 2021-07-17T10:38:41 | 2021-07-17T10:38:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | import tensorflow as tf
def scaled_dot_product_attention(q, k, v, mask):
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1) # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)
return output, attention_weights
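# In formula form: Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V, where
# the optional mask adds -1e9 to masked logits so they vanish after softmax.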
# ## Multi-head Attention
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads):
super(MultiHeadAttention, self).__init__()
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model)
self.wk = tf.keras.layers.Dense(d_model)
self.wv = tf.keras.layers.Dense(d_model)
self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, x, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, v, k, q, mask=None):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = scaled_dot_product_attention(
q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention,
perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
def main():
temp_mha = MultiHeadAttention(d_model=256, num_heads=4)
    q = tf.random.uniform((64, 1, 60))  # (batch_size, target_seq_len, feature_dim)
    k = tf.random.uniform((64, 10, 60))  # (batch_size, source_seq_len, feature_dim)
out, attn = temp_mha(k, k=k, q=q, mask=None)
print(out.shape, attn.shape)
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
7f457300765fa9938e6a03720e657ddbc14780bf | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2017_01_18_predict_bulk_other_regressors/transform.py | 184cc8d04f8835c28a879cbb63722d039d7d7ed8 | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | import functions as rr
import numpy as np
# from sklearn.decomposition import PCA
from constants import const
import time
import h5py
def transform(ns, set_id, pca):
st = time.time()
C = const()
n_corr = C['cmax']
f_red = h5py.File("spatial_reduced_L%s.hdf5" % C['H'], 'a')
f_stats = h5py.File("spatial_L%s.hdf5" % C['H'], 'r')
ff = f_stats.get('ff_%s' % set_id)[...]
ff = ff.reshape(ns, n_corr*C['vmax']**3)
ff_red = pca.transform(ff)
f_red.create_dataset('reduced_%s' % set_id,
data=ff_red,
dtype='float64')
f_red.close()
f_stats.close()
"""calculate the error incurred in the PCA representation"""
ff_ = pca.inverse_transform(ff_red)
err = np.sqrt(np.sum((ff-ff_)**2))/ff.size
msg = "PCA representation error for %s: %s" % (set_id, err)
rr.WP(msg, C['wrt_file'])
timeE = np.round(time.time()-st, 2)
msg = "transform to low dimensional space, %s: %s s" % (set_id, timeE)
rr.WP(msg, C['wrt_file'])
if __name__ == '__main__':
ns = 10
set_id = 'random'
    # transform() also needs a PCA instance already fitted on the calibration
    # data (assumed to come from an earlier step of this pipeline):
    # transform(ns, set_id, pca)
| [
"[email protected]"
] | |
350f64bc1b133699e73ca47c6c2a94e3069fb404 | 43ede7b8fb546c00804c0ef94501f6e48ba170d6 | /Cursos Python/Python 3 - João Batista/Coleções.py | 85ca78819113eed3dccd3d7d8939d46dd30dbba8 | [] | no_license | bopopescu/Python-13 | db407d17252473e78e705e563cfee4dbd316c6b9 | c8bef500f2d3e4a63d850f96dfa219eff2ecebda | refs/heads/master | 2022-11-22T16:24:08.490879 | 2020-06-11T14:22:24 | 2020-06-11T14:22:24 | 281,830,055 | 0 | 0 | null | 2020-07-23T02:26:31 | 2020-07-23T02:26:30 | null | UTF-8 | Python | false | false | 527 | py | # # Listas
# lista = ['eduardo - 0', 'atila - 1', 'lebiste - 2']
#
# # print(lista)
# #
# # for i in lista:
# # print(i)
#
# # print(lista[0])
#
# lista.append('Erickson - 3')
# # ['eduardo - 0', 'atila - 1', 'lebiste - 2', 'Erickson - 3']
# lista.insert(1, 'Lucas')
#
# print(dir(lista))
# # # Tuples
# tupla = (10, 20, 30)
#
# for loop in tupla:
# if loop == 30:
# print('show', loop)
dicionario = {'numeros': [num for num in range(10)], 'segunda chave': 'segundo valor '}
print(dicionario.get('numeros'))
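# dict.get() avoids a KeyError and can supply a default for missing keys:
# print(dicionario.get('missing key', 'default value'))  # -> 'default value'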
| [
"[email protected]"
] | |
b8a111842e89df90bce5aefed525fe7b800362c2 | b25c4a44e5d33d4d2acf98219b0fbac324bad6c7 | /Lv0__16_to_30/29.시저 암호.py | bc49a0dc8c9ad2fa45459fa03b81bf3f1b5989ab | [] | no_license | itwebMJ/algorithmStudy | 409d46bfa4d0b63d7137a0c64dd09c640dc99460 | 9881e07d276742d243bcd1a4929f726d5ec293ff | refs/heads/main | 2023-07-12T18:08:45.157261 | 2021-08-29T12:45:45 | 2021-08-29T12:45:45 | 375,876,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | '''
Caesar cipher
An encryption scheme that shifts every letter of a sentence a fixed distance to another letter is called a Caesar cipher.
For example, shifting "AB" by 1 gives "BC", and shifting it by 3 gives "DE".
Shifting "z" by 1 gives "a". Given a string s and a distance n,
complete the function solution that builds the ciphertext obtained by shifting s by n.
'''
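# Example (added for illustration): solution("a B z", 4) returns "e F d".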
def solution(s, n):
answer = ''
up = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
low = up.lower()
s_list = []
for i in s:
s_list.append(i)
for j in range(len(s_list)):
if s_list[j] in up:
idx = up.index(s_list[j])
answer += up[int((idx + n) % 26)]
elif s_list[j] in low:
idx = low.index(s_list[j])
answer += low[int((idx + n) % 26)]
elif s_list[j] == ' ':
answer += ' '
return answer | [
"[email protected]"
] | |
df40bafe036762bbfa75bb8eeaedc532d98f0d1e | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Vips/Combine.py | b29ecf7dc26299d9ce933c539cc90375d83cc41b | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 12,838 | py | # encoding: utf-8
# module gi.repository.Vips
# from /usr/lib64/girepository-1.0/Vips-8.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gobject as __gobject
class Combine(__gobject.GEnum):
# no doc
def as_integer_ratio(self): # real signature unknown; restored from __doc__
"""
Return integer ratio.
Return a pair of integers, whose ratio is exactly equal to the original int
and with a positive denominator.
>>> (10).as_integer_ratio()
(10, 1)
>>> (-10).as_integer_ratio()
(-10, 1)
>>> (0).as_integer_ratio()
(0, 1)
"""
pass
def bit_length(self): # real signature unknown; restored from __doc__
"""
Number of bits necessary to represent self in binary.
>>> bin(37)
'0b100101'
>>> (37).bit_length()
6
"""
pass
def conjugate(self, *args, **kwargs): # real signature unknown
""" Returns self, the complex conjugate of any int. """
pass
def from_bytes(self, *args, **kwargs): # real signature unknown
"""
Return the integer represented by the given array of bytes.
bytes
Holds the array of bytes to convert. The argument must either
support the buffer protocol or be an iterable object producing bytes.
Bytes and bytearray are examples of built-in objects that support the
buffer protocol.
byteorder
The byte order used to represent the integer. If byteorder is 'big',
the most significant byte is at the beginning of the byte array. If
byteorder is 'little', the most significant byte is at the end of the
byte array. To request the native byte order of the host system, use
`sys.byteorder' as the byte order value.
signed
Indicates whether two's complement is used to represent the integer.
"""
pass
def to_bytes(self, *args, **kwargs): # real signature unknown
"""
Return an array of bytes representing an integer.
length
Length of bytes object to use. An OverflowError is raised if the
integer is not representable with the given number of bytes.
byteorder
The byte order used to represent the integer. If byteorder is 'big',
the most significant byte is at the beginning of the byte array. If
byteorder is 'little', the most significant byte is at the end of the
byte array. To request the native byte order of the host system, use
`sys.byteorder' as the byte order value.
signed
Determines whether two's complement is used to represent the integer.
If signed is False and a negative integer is given, an OverflowError
is raised.
"""
pass
def __abs__(self, *args, **kwargs): # real signature unknown
""" abs(self) """
pass
def __add__(self, *args, **kwargs): # real signature unknown
""" Return self+value. """
pass
def __and__(self, *args, **kwargs): # real signature unknown
""" Return self&value. """
pass
def __bool__(self, *args, **kwargs): # real signature unknown
""" self != 0 """
pass
def __ceil__(self, *args, **kwargs): # real signature unknown
""" Ceiling of an Integral returns itself. """
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __divmod__(self, *args, **kwargs): # real signature unknown
""" Return divmod(self, value). """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __float__(self, *args, **kwargs): # real signature unknown
""" float(self) """
pass
def __floordiv__(self, *args, **kwargs): # real signature unknown
""" Return self//value. """
pass
def __floor__(self, *args, **kwargs): # real signature unknown
""" Flooring an Integral returns itself. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __index__(self, *args, **kwargs): # real signature unknown
""" Return self converted to an integer, if self is suitable for use as an index into a list. """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __int__(self, *args, **kwargs): # real signature unknown
""" int(self) """
pass
def __invert__(self, *args, **kwargs): # real signature unknown
""" ~self """
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lshift__(self, *args, **kwargs): # real signature unknown
""" Return self<<value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
def __mod__(self, *args, **kwargs): # real signature unknown
""" Return self%value. """
pass
def __mul__(self, *args, **kwargs): # real signature unknown
""" Return self*value. """
pass
def __neg__(self, *args, **kwargs): # real signature unknown
""" -self """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __or__(self, *args, **kwargs): # real signature unknown
""" Return self|value. """
pass
def __pos__(self, *args, **kwargs): # real signature unknown
""" +self """
pass
def __pow__(self, *args, **kwargs): # real signature unknown
""" Return pow(self, value, mod). """
pass
def __radd__(self, *args, **kwargs): # real signature unknown
""" Return value+self. """
pass
def __rand__(self, *args, **kwargs): # real signature unknown
""" Return value&self. """
pass
def __rdivmod__(self, *args, **kwargs): # real signature unknown
""" Return divmod(value, self). """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __rfloordiv__(self, *args, **kwargs): # real signature unknown
""" Return value//self. """
pass
def __rlshift__(self, *args, **kwargs): # real signature unknown
""" Return value<<self. """
pass
def __rmod__(self, *args, **kwargs): # real signature unknown
""" Return value%self. """
pass
def __rmul__(self, *args, **kwargs): # real signature unknown
""" Return value*self. """
pass
def __ror__(self, *args, **kwargs): # real signature unknown
""" Return value|self. """
pass
def __round__(self, *args, **kwargs): # real signature unknown
"""
Rounding an Integral returns itself.
Rounding with an ndigits argument also returns an integer.
"""
pass
def __rpow__(self, *args, **kwargs): # real signature unknown
""" Return pow(value, self, mod). """
pass
def __rrshift__(self, *args, **kwargs): # real signature unknown
""" Return value>>self. """
pass
def __rshift__(self, *args, **kwargs): # real signature unknown
""" Return self>>value. """
pass
def __rsub__(self, *args, **kwargs): # real signature unknown
""" Return value-self. """
pass
def __rtruediv__(self, *args, **kwargs): # real signature unknown
""" Return value/self. """
pass
def __rxor__(self, *args, **kwargs): # real signature unknown
""" Return value^self. """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Returns size in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __sub__(self, *args, **kwargs): # real signature unknown
""" Return self-value. """
pass
def __truediv__(self, *args, **kwargs): # real signature unknown
""" Return self/value. """
pass
def __trunc__(self, *args, **kwargs): # real signature unknown
""" Truncating an Integral returns itself. """
pass
def __xor__(self, *args, **kwargs): # real signature unknown
""" Return self^value. """
pass
denominator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the denominator of a rational number in lowest terms"""
imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the imaginary part of a complex number"""
numerator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the numerator of a rational number in lowest terms"""
real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the real part of a complex number"""
value_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
value_nick = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
LAST = 3
MAX = 0
MIN = 2
SUM = 1
__class__ = type
__dict__ = None # (!) real value is "mappingproxy({'__module__': 'gi.repository.Vips', '__dict__': <attribute '__dict__' of 'Combine' objects>, '__doc__': None, '__gtype__': <GType VipsCombine (94317410467168)>, '__enum_values__': {0: <enum VIPS_COMBINE_MAX of type Vips.Combine>, 1: <enum VIPS_COMBINE_SUM of type Vips.Combine>, 2: <enum VIPS_COMBINE_MIN of type Vips.Combine>, 3: <enum VIPS_COMBINE_LAST of type Vips.Combine>}, '__info__': gi.EnumInfo(Combine), 'MAX': <enum VIPS_COMBINE_MAX of type Vips.Combine>, 'SUM': <enum VIPS_COMBINE_SUM of type Vips.Combine>, 'MIN': <enum VIPS_COMBINE_MIN of type Vips.Combine>, 'LAST': <enum VIPS_COMBINE_LAST of type Vips.Combine>})"
__enum_values__ = {
0: 0,
1: 1,
2: 2,
3: 3,
}
__gtype__ = None # (!) real value is '<GType VipsCombine (94317410467168)>'
__info__ = gi.EnumInfo(Combine)
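    # Usage sketch (illustrative only; not part of the generated stub). With
    # the GObject-introspected libvips bindings available, this enum is
    # reached as gi.repository.Vips.Combine:
    #
    #   from gi.repository import Vips
    #   mode = Vips.Combine.MAX  # keep the largest value when merging
    #
    # Whether a given operation (e.g. a compass convolution) accepts a
    # combine argument is an assumption about typical libvips use.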
| [
"[email protected]"
] | |
e8d44fcc8ab38010d57edd595269bffa26a9b608 | 6c791df92e63f8f1ba579c0aa326a7fdc9cfbf19 | /Recheck/Test_Grad_Descent.py | f1b2aac7d945649530f9e92d50d4d83273d3c1a4 | [] | no_license | krm9c/BDHeterogeneity | 5261ca874d2923da3d1320ac1b02e23b23e8d64a | eabb9bcedbe0cf580c6391eeaf455a675803970a | refs/heads/master | 2020-03-22T10:22:59.038734 | 2019-02-12T22:43:15 | 2019-02-12T22:43:15 | 139,898,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,040 | py | # The test file
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ' '
import Class_Recheck as NN_class
import tensorflow as tf
import numpy as np
import traceback
import random
###################################################################################
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
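    # Yields successive (inputs, targets) minibatches of size `batchsize`,
    # shuffling sample indices first when requested. Note that the training
    # loop below draws random batches by hand instead; its call to this
    # helper is left commented out.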
assert inputs.shape[0] == targets.shape[0]
if shuffle:
indices = np.arange(inputs.shape[0])
np.random.shuffle(indices)
for start_idx in range(0, inputs.shape[0] - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
###################################################################################
def return_dict(placeholder, List, model, batch_x, batch_y, lr):
S ={}
for i, element in enumerate(List):
S[placeholder[i]] = element
S[model.Deep['FL_layer_10']] = batch_x
S[model.classifier['Target'] ] = batch_y
S[model.classifier["learning_rate"]] = lr
return S
####################################################################################
def sample_Z(X, m, n, kappa):
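    # Corrupts each sample with zero-mean, unit-variance Gaussian noise.
    # `kappa` is accepted but unused in the current form; the commented
    # return below is the earlier uniform-noise variant.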
# return(X+np.random.uniform(-2, 2, size=[m, n]))
return ((X+np.random.normal(0,1, size=[m, n])))
####################################################################################
def Analyse_custom_Optimizer_GDR_old(X_train, y_train, X_test, y_test, kappa):
import gc
# Lets start with creating a model and then train batch wise.
model = NN_class.learners()
depth = []
depth.append(X_train.shape[1])
L = [100 for i in xrange(1)]
depth.extend(L)
lr = 0.001
model = model.init_NN_custom(classes, lr, depth, tf.nn.relu)
try:
t = xrange(Train_Glob_Iterations)
from tqdm import tqdm
for i in tqdm(t):
########### mini-batch learning update
batch_number = 0
#for batch in iterate_minibatches(X_train, y_train, Train_batch_size, shuffle=True):
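            # Each outer step runs 100 SGD updates, each on a batch of 64
            # samples drawn uniformly at random from the training set
            # (random.sample below replaces the commented minibatch iterator).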
for k in xrange(100):
x_batch =[]
y_batch =[]
arr = random.sample(range(0, len(X_train)), 64)
for idx in arr:
x_batch.append(X_train[idx])
y_batch.append(y_train[idx])
batch_xs = np.asarray(x_batch)
batch_ys = np.asarray(y_batch)
model.sess.run([model.Trainer["Weight_op"]],\
feed_dict={model.Deep['FL_layer_10']: batch_xs, model.classifier['Target']: \
batch_ys, model.classifier["learning_rate"]:lr})
            if i % 20 == 0:
                print("Step", i)
X_test_perturbed = sample_Z(X_test, X_test.shape[0], X_test.shape[1], kappa=0)
print( "Accuracies", model.sess.run([model.Evaluation['accuracy']], \
feed_dict={model.Deep['FL_layer_10']: X_test_perturbed, model.classifier['Target']:\
y_test, model.classifier["learning_rate"]:lr}), model.sess.run([ model.Evaluation['accuracy']],\
feed_dict={model.Deep['FL_layer_10']: X_train, model.classifier['Target']:\
y_train}) )
# batch_number = batch_number + 1;
# batch_xs, batch_ys = batch
            # batch_xs_pert = sample_Z(batch_xs, batch_xs.shape[0], batch_xs.shape[1], kappa=1)
# model.sess.run([model.Trainer["Weight_op"]],\
# feed_dict={model.Deep['FL_layer_10']: batch_xs, model.classifier['Target']: \
# batch_ys, model.classifier["learning_rate"]:lr})
# if j % 1 == 0:
# print(model.sess.run([model.Evaluation['accuracy'] ],\
# feed_dict={model.Deep['FL_layer_10']: X_test, model.classifier['Target']: \
# y_test, model.classifier["learning_rate"]:lr}) )
except Exception as e:
print("I found an exception", e)
traceback.print_exc()
tf.reset_default_graph()
del model
gc.collect()
return 0
#######################################################################################################
################################ Parameters and function call##########################################
#######################################################################################################
# Setup the parameters and call the functions
Train_batch_size = 64
Train_Glob_Iterations = 501
Train_noise_Iterations = 1
from tqdm import tqdm
from tensorflow.examples.tutorials.mnist import input_data
classes = 4
mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)
X_train = mnist.train.images
X_test = mnist.test.images
y_train = mnist.train.labels
y_test = mnist.test.labels
iterat_kappa = 1
Kappa_s = np.random.uniform(0, 1, size=[iterat_kappa])
Analyse_custom_Optimizer_GDR_old(X_train, y_train, X_test, y_test, Kappa_s[0]) | [
"[email protected]"
] | |
d8f8e08efb7e0e9134c5ee0f78f8b256b26822b1 | eae3d77ac72c168cee7701462f1fc45d7d4dcd91 | /2115_벌꿀채취.py | a196922576c19447850463993fc206b2387b3b9a | [] | no_license | ByeongjunCho/Algorithm-TIL | ed2f018d50bd2483bd1175ff9bf7e91913c14766 | ad79125a1498915fe97c1d57ee6860b06c410958 | refs/heads/master | 2022-07-19T15:12:23.689319 | 2020-05-18T08:37:09 | 2020-05-18T08:37:09 | 256,399,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,769 | py | # SWEA 2115. [Mock SW Competency Test] Honey Harvesting
def makemaxmap():
for i in range(N):
for j in range(N-M+1):
makeMaxSubset(i, j, 0, 0, 0)
# i: row, j: column, cnt: number of cells considered so far
# sum: amount of honey in the current subset
# powSum: profit of the current subset (sum of squared amounts)
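# Pruned depth-first enumeration of the 2^M subsets of the M consecutive
# cells in row i starting at column j; branches whose honey total exceeds
# the capacity C are cut off early.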
def makeMaxSubset(i, j, cnt, sum, powSum):
    if sum > C: # prune: the subset total may not exceed the target amount C
return
if cnt == M:
if maxMap[i][j-M] < powSum:
maxMap[i][j - M] = powSum
return
    # take this cell
makeMaxSubset(i, j+1, cnt+1, sum+arr[i][j], powSum+(arr[i][j]**2))
    # skip this cell
makeMaxSubset(i, j + 1, cnt + 1, sum, powSum)
def getMaxBenefit():
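    # Try every ordered pair of non-overlapping M-cell windows: worker A's
    # window is fixed first, then worker B's is placed either further right
    # in the same row or anywhere in a later row.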
    max = 0 # best combined profit after placing both workers
temp = 0
    # 1. Fix worker A's window
for i in range(N):
for j in range(N-M+1):
            # 2. Place worker B's window
            # 2.1 In the same row, to the right of A
for j2 in range(j+M, N-M+1):
temp = maxMap[i][j] + maxMap[i][j2]
if max < temp:
max = temp
            # 2.2 In any later row (next row through the last), at any column
for i2 in range(i+1, N):
for j2 in range(N-M+1):
temp = maxMap[i][j] + maxMap[i2][j2]
if max < temp:
max = temp
return max
T = int(input())
for tc in range(1, T+1):
N, M, C = map(int, input().split())
arr = [list(map(int, input().split())) for _ in range(N)]
maxMap = [[0 for _ in range(N)] for _ in range(N)]
    # 1. For every window start, record the best subset profit over M consecutive cells
makemaxmap()
    # 2. Print the maximum combined profit
print("#{} {}".format(tc, getMaxBenefit())) | [
"[email protected]"
] | |
8579ba110a3dfce9e5b310a76a8b8b051ef5e561 | a8544cedbec52f929e91e617a5f484d987352525 | /src/tests/src/technique/transitive/test_transitive_calculator.py | 3947a0e6a65c93529e95fe334898ef0a291bc475 | [] | no_license | thearod5/Tracer | 75df513ee2500bffc32c29139983990867239369 | 67ee3d7296fb4c788c111387b335ab9804815655 | refs/heads/master | 2023-05-31T13:53:34.640103 | 2021-06-18T01:00:10 | 2021-06-18T01:00:10 | 349,507,401 | 1 | 0 | null | 2021-06-18T01:00:10 | 2021-03-19T17:41:37 | Python | UTF-8 | Python | false | false | 3,429 | py | import numpy as np
from api.extension.cache import Cache
from api.technique.definitions.direct.calculator import DirectTechniqueData
from api.technique.definitions.transitive.calculator import (
TransitiveTechniqueCalculator,
TransitiveTechniqueData,
append_direct_component_matrices,
perform_transitive_aggregation,
perform_transitive_aggregation_on_component_techniques,
)
from api.technique.variationpoints.aggregation.aggregation_method import (
AggregationMethod,
)
from tests.res.test_technique_helper import SimilarityMatrixMock, TestTechniqueHelper
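# These cases exercise the transitive trace-link technique end to end:
# running the calculation pipeline, building the technique data, and
# aggregating direct component similarity matrices into a transitive matrix.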
class TestIntermediateCalculationPipeline(TestTechniqueHelper):
matrices = [
np.array([[0, 1, 0]]),
np.array([[0], [1], [0]]),
np.array([[0, 1], [0, 0]]),
]
"""
IntermediatePipeline
"""
def test_transitive_pipeline(self):
counter_func, counter_dict = self.create_counter_func(
self.get_transitive_definition().get_name()
)
pipeline_funcs = [counter_func, counter_func]
pipeline = TransitiveTechniqueCalculator(
self.get_transitive_definition(), pipeline_funcs
)
pipeline.run_pipeline_on_dataset(self.dataset)
self.assertEqual(len(pipeline_funcs), counter_dict["value"])
"""
IndirectTechniqueCalculator
"""
def test_transitive_technique_calculator_use_case(self):
calculator = TransitiveTechniqueCalculator(self.get_transitive_definition())
technique_data = calculator.calculate_technique_data(self.dataset)
matrix = technique_data.similarity_matrix
self.assertEqual((1, 3), matrix.shape)
"""
calculate_technique_data
"""
def test_calculate_technique_data(self):
original = Cache.CACHE_ON
Cache.CACHE_ON = False
def counter_func(data: DirectTechniqueData):
data.similarity_matrix = SimilarityMatrixMock()
pipeline_funcs = [counter_func]
calculator = TransitiveTechniqueCalculator(
self.get_transitive_definition(), pipeline_funcs
)
technique_data = calculator.calculate_technique_data(self.dataset)
self.assertEqual(self.dataset.name, technique_data.dataset.name)
self.assertEqual(
self.get_transitive_definition().get_name(),
technique_data.technique.get_name(),
)
self.assertIsNotNone(technique_data.similarity_matrix)
Cache.CACHE_ON = original
"""
perform_transitive_aggregation
"""
def test_perform_transitive_aggregation(self):
data = TransitiveTechniqueData(
self.dataset, self.get_traced_transitive_definition()
)
append_direct_component_matrices(data)
perform_transitive_aggregation(data)
self.assertEqual((1, 3), data.similarity_matrix.shape)
self.assertEqual(1, data.similarity_matrix[0][0])
self.assertEqual(0, data.similarity_matrix[0][1])
self.assertEqual(1, data.similarity_matrix[0][2])
"""
perform_transitive_aggregation_on_matrices
"""
def test_perform_transitive_aggregation_on_matrices(self):
result = perform_transitive_aggregation_on_component_techniques(
self.matrices, AggregationMethod.MAX
)
self.assertEqual((1, 2), result.shape)
self.assertEqual(1, result[0][1])
self.assertEqual(1, result.sum(axis=1).sum())
| [
"[email protected]"
] | |
04e7c82d6bad587dcfbeabc467b229e6e694a43f | e121dcc5d23e225891420e730549b9cc7ebe8e88 | /python/lib/direct/tkwidgets/Slider.py | 71247acf6b23c510cf7a8708e9c291e631990509 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | PlumpMath/panda3d-3 | 4f4cf7627eddae9b7f30795e0a0657b01fdf670d | 5c0be0e1cd46b422d28d5b81ffb1e8b28c3ac914 | refs/heads/master | 2021-01-25T06:55:36.209044 | 2014-09-29T14:24:53 | 2014-09-29T14:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,536 | py | """
Slider Class: Velocity style controller for floating point values with
a label, entry (validated), and min/max slider
"""
__all__ = ['Slider', 'SliderWidget', 'rgbPanel']
from panda3d.direct.showbase.TkGlobal import *
from Tkinter import *
from Valuator import Valuator, rgbPanel, VALUATOR_MINI, VALUATOR_FULL
from panda3d.direct.task import Task
import math, sys, string
import operator, Pmw
from panda3d.pandac import ClockObject
class Slider(Valuator):
"""
Valuator widget which includes an min/max slider and an entry for setting
floating point values in a range
"""
def __init__(self, parent = None, **kw):
INITOPT = Pmw.INITOPT
optiondefs = (
('min', 0.0, self.setMin),
('max', 100.0, self.setMax),
('style', VALUATOR_MINI, INITOPT),
)
self.defineoptions(kw, optiondefs)
Valuator.__init__(self, parent)
# Can not enter None for min or max, update propertyDict to reflect
self.propertyDict['min']['fNone'] = 0
self.propertyDict['min']['help'] = 'Minimum allowable value.'
self.propertyDict['max']['fNone'] = 0
self.propertyDict['max']['help'] = 'Maximum allowable value.'
self.initialiseoptions(Slider)
def createValuator(self):
self._valuator = self.createcomponent(
'valuator',
(('slider', 'valuator'),),
None,
SliderWidget,
(self.interior(),),
style = self['style'],
command = self.setEntry,
value = self['value'])
#self._valuator._widget.bind('<Double-ButtonPress-1>', self.mouseReset)
# Add popup bindings to slider widget
try:
self._valuator._arrowBtn.bind(
'<ButtonPress-3>', self._popupValuatorMenu)
except AttributeError:
pass
self._valuator._minLabel.bind(
'<ButtonPress-3>', self._popupValuatorMenu)
self._valuator._maxLabel.bind(
'<ButtonPress-3>', self._popupValuatorMenu)
def packValuator(self):
if self['style'] == VALUATOR_FULL:
if self._label:
self._label.grid(row = 0, column = 0, sticky = EW)
self._entry.grid(row = 0, column = 1, sticky = EW)
self._valuator.grid(row = 1, columnspan = 2,
padx = 2, pady = 2, sticky = 'ew')
self.interior().columnconfigure(0, weight = 1)
else:
if self._label:
self._label.grid(row=0, column=0, sticky = EW)
self._entry.grid(row=0, column=1, sticky = EW)
self._valuator.grid(row=0, column=2, padx = 2, pady = 2)
self.interior().columnconfigure(0, weight = 1)
def setMin(self):
if self['min'] is not None:
self._valuator['min'] = self['min']
def setMax(self):
if self['max'] is not None:
self._valuator['max'] = self['max']
# Based on Pmw ComboBox code.
class SliderWidget(Pmw.MegaWidget):
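    """
    Slider megawidget: a min label, the slider proper, and a max label.
    In VALUATOR_MINI style the slider is a Canvas with a triangular marker,
    posted as a popup from an arrow button; in VALUATOR_FULL style it is a
    plain horizontal Tkinter Scale.
    """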
def __init__(self, parent = None, **kw):
# Define the megawidget options.
INITOPT = Pmw.INITOPT
optiondefs = (
# Appearance
('style', VALUATOR_MINI, INITOPT),
('relief', RAISED, self.setRelief),
('borderwidth', 2, self.setBorderwidth),
('background', 'grey75', self.setBackground),
('fliparrow', 0, INITOPT),
# Behavior
# Bounds
('min', 0.0, self.setMin),
('max', 100.0, self.setMax),
# Initial value of slider, use self.set to change value
('value', 0.0, INITOPT),
('numDigits', 2, self.setNumDigits),
# Command to execute on slider updates
('command', None, None),
# Extra data to be passed to command function
('commandData', [], None),
# Callback's to execute during mouse interaction
('preCallback', None, None),
('postCallback', None, None),
# Extra data to be passed to callback function, needs to be a list
('callbackData', [], None),
)
self.defineoptions(kw, optiondefs)
# Initialise the base class (after defining the options).
Pmw.MegaWidget.__init__(self, parent)
# Create the components.
interior = self.interior()
# Current value
self.value = self['value']
self.formatString = '%2f'
self.increment = 0.01
# Interaction flags
self._isPosted = 0
self._fUnpost = 0
self._fUpdate = 0
self._firstPress = 1
        self._fPressInside = 0
# Slider dimensions
width = 100
self.xPad = xPad = 10
sliderWidth = width + 2 * xPad
height = 20
self.left = left = -(width/2.0)
self.right = right = (width/2.0)
top = -5
bottom = top + height
def createSlider(parent):
# Create the slider inside the dropdown window.
# Min label
self._minLabel = Label(parent, text = self['min'], width = 8,
anchor = W)
self._minLabel.pack(side = LEFT)
# Slider widget
if self['style'] == VALUATOR_FULL:
# Use a scale slider
self._widgetVar = DoubleVar()
self._widgetVar.set(self['value'])
self._widget = self.createcomponent(
'slider', (), None,
Scale, (interior,),
variable = self._widgetVar,
from_ = self['min'], to = self['max'],
resolution = 0.0,
width = 10,
orient = 'horizontal',
showvalue = 0,
length = sliderWidth,
relief = FLAT, bd = 2,
highlightthickness = 0)
else:
# Use a canvas slider
self._widget = self.createcomponent(
'slider', (), None,
Canvas, (parent,),
width = sliderWidth,
height = height,
bd = 2,
highlightthickness = 0,
scrollregion = (left - xPad, top, right + xPad, bottom))
# Interaction marker
xShift = 1
# Shadow arrow
self._marker = self._widget.create_polygon(-7 + xShift, 12,
7 + xShift, 12,
xShift, 0,
fill = 'black',
tags = ('marker',))
# Arrow
self._widget.create_polygon(-6.0, 10,
6.0, 10,
0, 0,
fill = 'grey85',
outline = 'black',
tags = ('marker',))
# The indicator
self._widget.create_line(left, 0,
right, 0,
width = 2,
tags = ('line',))
self._widget.pack(side = LEFT, expand=1, fill=X)
# Max label
self._maxLabel = Label(parent, text = self['max'], width = 8,
anchor = W)
self._maxLabel.pack(side = LEFT)
# Create slider
if self['style'] == VALUATOR_MINI:
# Create the arrow button to invoke slider
self._arrowBtn = self.createcomponent(
'arrowbutton',
(), None,
Canvas, (interior,), borderwidth = 0,
relief = FLAT, width = 14, height = 14,
scrollregion = (-7, -7, 7, 7))
self._arrowBtn.pack(expand = 1, fill = BOTH)
self._arrowBtn.create_polygon(-5, -5, 5, -5, 0, 5,
fill = 'grey50',
tags = 'arrow')
self._arrowBtn.create_line(-5, 5, 5, 5,
fill = 'grey50',
tags = 'arrow')
# Create the dropdown window.
self._popup = self.createcomponent(
'popup',
(), None,
Toplevel, (interior,),
relief = RAISED, borderwidth = 2)
self._popup.withdraw()
self._popup.overrideredirect(1)
# Create popup slider
createSlider(self._popup)
# Bind events to the arrow button.
self._arrowBtn.bind('<1>', self._postSlider)
self._arrowBtn.bind('<Enter>', self.highlightWidget)
self._arrowBtn.bind('<Leave>', self.restoreWidget)
# Need to unpost the popup if the arrow Button is unmapped (eg:
# its toplevel window is withdrawn) while the popup slider is
# displayed.
self._arrowBtn.bind('<Unmap>', self._unpostSlider)
# Bind events to the dropdown window.
self._popup.bind('<Escape>', self._unpostSlider)
self._popup.bind('<ButtonRelease-1>', self._widgetBtnRelease)
self._popup.bind('<ButtonPress-1>', self._widgetBtnPress)
self._popup.bind('<Motion>', self._widgetMove)
self._widget.bind('<Left>', self._decrementValue)
self._widget.bind('<Right>', self._incrementValue)
self._widget.bind('<Shift-Left>', self._bigDecrementValue)
self._widget.bind('<Shift-Right>', self._bigIncrementValue)
self._widget.bind('<Home>', self._goToMin)
self._widget.bind('<End>', self._goToMax)
else:
createSlider(interior)
self._widget['command'] = self._firstScaleCommand
self._widget.bind('<ButtonRelease-1>', self._scaleBtnRelease)
self._widget.bind('<ButtonPress-1>', self._scaleBtnPress)
# Check keywords and initialise options.
self.initialiseoptions(SliderWidget)
# Adjust relief
if not kw.has_key('relief'):
if self['style'] == VALUATOR_FULL:
self['relief'] = FLAT
self.updateIndicator(self['value'])
def destroy(self):
if (self['style'] == VALUATOR_MINI) and self._isPosted:
Pmw.popgrab(self._popup)
Pmw.MegaWidget.destroy(self)
#======================================================================
# Public methods
def set(self, value, fCommand = 1):
"""
self.set(value, fCommand = 1)
Set slider to new value, execute command if fCommand == 1
"""
# Send command if any
if fCommand and (self['command'] != None):
apply(self['command'], [value] + self['commandData'])
# Record value
self.value = value
def get(self):
"""
self.get()
Get current slider value
"""
return self.value
def updateIndicator(self, value):
if self['style'] == VALUATOR_MINI:
# Get current marker position
percentX = (value - self['min'])/float(self['max'] - self['min'])
newX = percentX * (self.right - self.left) + self.left
markerX = self._getMarkerX()
dx = newX - markerX
self._widget.move('marker', dx, 0)
else:
# Update scale's variable, which update scale without
# Calling scale's command
self._widgetVar.set(value)
#======================================================================
# Private methods for slider.
def _postSlider(self, event = None):
self._isPosted = 1
self._fUpdate = 0
# Make sure that the arrow is displayed sunken.
self.interior()['relief'] = SUNKEN
self.update_idletasks()
# Position popup so that marker is immediately below center of
# Arrow button
# Find screen space position of bottom/center of arrow button
x = (self._arrowBtn.winfo_rootx() + self._arrowBtn.winfo_width()/2.0 -
self.interior()['bd'])
# string.atoi(self.interior()['bd']))
y = self._arrowBtn.winfo_rooty() + self._arrowBtn.winfo_height()
# Popup border width
bd = self._popup['bd']
# bd = string.atoi(self._popup['bd'])
# Get width of label
minW = self._minLabel.winfo_width()
# Width of canvas to adjust for
cw = (self._getMarkerX() - self.left) + self.xPad
popupOffset = bd + minW + cw
ch = self._widget.winfo_height()
sh = self.winfo_screenheight()
# Compensate if too close to edge of screen
if y + ch > sh and y > sh / 2:
y = self._arrowBtn.winfo_rooty() - ch
# Popup window
Pmw.setgeometryanddeiconify(self._popup, '+%d+%d' % (x-popupOffset, y))
# Grab the popup, so that all events are delivered to it, and
# set focus to the slider, to make keyboard navigation
# easier.
Pmw.pushgrab(self._popup, 1, self._unpostSlider)
self._widget.focus_set()
# Ignore the first release of the mouse button after posting the
# dropdown slider, unless the mouse enters the dropdown slider.
self._fUpdate = 0
self._fUnpost = 0
self._firstPress = 1
        self._fPressInside = 0
def _updateValue(self, event):
mouseX = self._widget.canvasx(
event.x_root - self._widget.winfo_rootx())
if mouseX < self.left:
mouseX = self.left
if mouseX > self.right:
mouseX = self.right
# Update value
sf = (mouseX - self.left)/(self.right - self.left)
newVal = sf * (self['max'] - self['min']) + self['min']
self.set(newVal)
def _widgetBtnPress(self, event):
# Check behavior for this button press
widget = self._popup
xPos = event.x_root - widget.winfo_rootx()
yPos = event.y_root - widget.winfo_rooty()
fInside = ((xPos > 0) and (xPos < widget.winfo_width()) and
(yPos > 0) and (yPos < widget.winfo_height()))
# Set flags based upon result
if fInside:
self._fPressInside = 1
self._fUpdate = 1
if self['preCallback']:
apply(self['preCallback'], self['callbackData'])
self._updateValue(event)
else:
self._fPressInside = 0
self._fUpdate = 0
def _widgetMove(self, event):
if self._firstPress and not self._fUpdate:
canvasY = self._widget.canvasy(
event.y_root - self._widget.winfo_rooty())
if canvasY > 0:
self._fUpdate = 1
if self['preCallback']:
apply(self['preCallback'], self['callbackData'])
self._unpostOnNextRelease()
elif self._fUpdate:
self._updateValue(event)
def _scaleBtnPress(self, event):
if self['preCallback']:
apply(self['preCallback'], self['callbackData'])
def _scaleBtnRelease(self, event):
# Do post callback if any
if self['postCallback']:
apply(self['postCallback'], self['callbackData'])
def _widgetBtnRelease(self, event):
# Do post callback if any
if self._fUpdate and self['postCallback']:
apply(self['postCallback'], self['callbackData'])
if (self._fUnpost or
(not (self._firstPress or self._fPressInside))):
self._unpostSlider()
# Otherwise, continue
self._fUpdate = 0
self._firstPress = 0
self._fPressInside = 0
def _unpostOnNextRelease(self, event = None):
self._fUnpost = 1
def _unpostSlider(self, event=None):
if not self._isPosted:
# It is possible to get events on an unposted popup. For
# example, by repeatedly pressing the space key to post
# and unpost the popup. The <space> event may be
# delivered to the popup window even though
# Pmw.popgrab() has set the focus away from the
# popup window. (Bug in Tk?)
return
# Restore the focus before withdrawing the window, since
# otherwise the window manager may take the focus away so we
# can't redirect it. Also, return the grab to the next active
# window in the stack, if any.
Pmw.popgrab(self._popup)
self._popup.withdraw()
self._isPosted = 0
# Raise up arrow button
self.interior()['relief'] = RAISED
def _incrementValue(self, event):
self.set(self.value + self.increment)
def _bigIncrementValue(self, event):
self.set(self.value + self.increment * 10.0)
def _decrementValue(self, event):
self.set(self.value - self.increment)
def _bigDecrementValue(self, event):
self.set(self.value - self.increment * 10.0)
def _goToMin(self, event):
self.set(self['min'])
def _goToMax(self, event):
self.set(self['max'])
def _firstScaleCommand(self, val):
""" Hack to avoid calling command on instantiation of Scale """
self._widget['command'] = self._scaleCommand
def _scaleCommand(self, val):
self.set(string.atof(val))
# Methods to modify floater characteristics
def setMin(self):
self._minLabel['text'] = self.formatString % self['min']
if self['style'] == VALUATOR_FULL:
self._widget['from_'] = self['min']
self.updateIndicator(self.value)
def setMax(self):
self._maxLabel['text'] = self.formatString % self['max']
if self['style'] == VALUATOR_FULL:
self._widget['to'] = self['max']
self.updateIndicator(self.value)
def setNumDigits(self):
self.formatString = '%0.' + ('%d' % self['numDigits']) + 'f'
self._minLabel['text'] = self.formatString % self['min']
self._maxLabel['text'] = self.formatString % self['max']
self.updateIndicator(self.value)
self.increment = pow(10, -self['numDigits'])
def _getMarkerX(self):
# Get marker triangle coordinates
c = self._widget.coords(self._marker)
# Marker postion defined as X position of third vertex
return c[4]
def setRelief(self):
self.interior()['relief'] = self['relief']
def setBorderwidth(self):
self.interior()['borderwidth'] = self['borderwidth']
def setBackground(self):
self._widget['background'] = self['background']
def highlightWidget(self, event):
self._arrowBtn.itemconfigure('arrow', fill = 'black')
def restoreWidget(self, event):
self._arrowBtn.itemconfigure('arrow', fill = 'grey50')
| [
"[email protected]"
] | |
75c48e2a11389ce75a618495581d70c80b79da56 | 09e32f424b4f1e54709b7bc483023c273c28b559 | /w4/lucyflowers/lucyflowers.py | eb2fcb9e72ab8e75a0375018f68a92fae82f3361 | [] | no_license | lenin2ud/hackerrank | 6d4283e4d86ad0778ad19f85f5cb8d4d7d98bff6 | 7996542d87c32c6fffdc0d005dea92b57a18cd26 | refs/heads/master | 2021-03-27T04:30:32.732800 | 2014-06-13T00:10:30 | 2014-06-13T00:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,311 | py | # https://www.hackerrank.com/contests/w4/challenges/lucy-and-flowers
MOD = 10 ** 9 + 9
MAX_N = 5000
dp = [1] * (MAX_N + 1)
inv = [0] * (MAX_N + 1)
mem = [0] * (MAX_N + 1)
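# dp[i] ends up holding the i-th Catalan number mod MOD (the number of
# distinct binary search trees on i keys); inv[i] caches modular inverses so
# binomial coefficients can be updated incrementally; mem memoizes answers.
# solve(n) returns sum over i of C(n, i) * dp[i]: choose which i flowers
# survive, then count the BSTs they can form.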
def extended_gcd(aa, bb):
lastremainder, remainder = abs(aa), abs(bb)
x, lastx, y, lasty = 0, 1, 1, 0
while remainder:
lastremainder, (quotient, remainder) = remainder, divmod(lastremainder, remainder)
x, lastx = lastx - quotient*x, x
y, lasty = lasty - quotient*y, y
return lastremainder, lastx * (-1 if aa < 0 else 1), lasty * (-1 if bb < 0 else 1)
def modinv(a, m):
g, x, y = extended_gcd(a, m)
if g != 1:
raise ValueError
return x % m
def precompute(n):
for i in range(1, n + 1):
r = 0
for j in range(i):
r += (dp[j] * dp[i - j - 1]) % MOD
dp[i] = r % MOD
for i in range(1, MAX_N + 1):
inv[i] = modinv(i, MOD)
def solve(n):
if mem[n] != 0:
return mem[n]
result = 0
comb = 1
for i in range(1, n + 1):
comb = (comb * (n - i + 1) * inv[i]) % MOD
result = (result + dp[i] * comb) % MOD
mem[n] = result
return result
if __name__ == '__main__':
precompute(MAX_N)
T = int(raw_input())
for t in range(T):
n = int(raw_input())
print solve(n)
| [
"[email protected]"
] | |
3289fda5e1f37200848fba7f873c3f84f00ef0cf | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/automation/v20170515preview/get_source_control.py | bd8cc3a9b818a03ca55f93a52a7bcfd1acd42b85 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,646 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSourceControlResult',
'AwaitableGetSourceControlResult',
'get_source_control',
]
@pulumi.output_type
class GetSourceControlResult:
"""
Definition of the source control.
"""
def __init__(__self__, auto_sync=None, branch=None, creation_time=None, description=None, folder_path=None, id=None, last_modified_time=None, name=None, publish_runbook=None, repo_url=None, source_type=None, type=None):
if auto_sync and not isinstance(auto_sync, bool):
raise TypeError("Expected argument 'auto_sync' to be a bool")
pulumi.set(__self__, "auto_sync", auto_sync)
if branch and not isinstance(branch, str):
raise TypeError("Expected argument 'branch' to be a str")
pulumi.set(__self__, "branch", branch)
if creation_time and not isinstance(creation_time, str):
raise TypeError("Expected argument 'creation_time' to be a str")
pulumi.set(__self__, "creation_time", creation_time)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if folder_path and not isinstance(folder_path, str):
raise TypeError("Expected argument 'folder_path' to be a str")
pulumi.set(__self__, "folder_path", folder_path)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_modified_time and not isinstance(last_modified_time, str):
raise TypeError("Expected argument 'last_modified_time' to be a str")
pulumi.set(__self__, "last_modified_time", last_modified_time)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if publish_runbook and not isinstance(publish_runbook, bool):
raise TypeError("Expected argument 'publish_runbook' to be a bool")
pulumi.set(__self__, "publish_runbook", publish_runbook)
if repo_url and not isinstance(repo_url, str):
raise TypeError("Expected argument 'repo_url' to be a str")
pulumi.set(__self__, "repo_url", repo_url)
if source_type and not isinstance(source_type, str):
raise TypeError("Expected argument 'source_type' to be a str")
pulumi.set(__self__, "source_type", source_type)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="autoSync")
def auto_sync(self) -> Optional[bool]:
"""
The auto sync of the source control. Default is false.
"""
return pulumi.get(self, "auto_sync")
@property
@pulumi.getter
def branch(self) -> Optional[str]:
"""
The repo branch of the source control. Include branch as empty string for VsoTfvc.
"""
return pulumi.get(self, "branch")
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> Optional[str]:
"""
The creation time.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="folderPath")
def folder_path(self) -> Optional[str]:
"""
The folder path of the source control.
"""
return pulumi.get(self, "folder_path")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource Id for the resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModifiedTime")
def last_modified_time(self) -> Optional[str]:
"""
The last modified time.
"""
return pulumi.get(self, "last_modified_time")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publishRunbook")
def publish_runbook(self) -> Optional[bool]:
"""
The auto publish of the source control. Default is true.
"""
return pulumi.get(self, "publish_runbook")
@property
@pulumi.getter(name="repoUrl")
def repo_url(self) -> Optional[str]:
"""
The repo url of the source control.
"""
return pulumi.get(self, "repo_url")
@property
@pulumi.getter(name="sourceType")
def source_type(self) -> Optional[str]:
"""
The source type. Must be one of VsoGit, VsoTfvc, GitHub.
"""
return pulumi.get(self, "source_type")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetSourceControlResult(GetSourceControlResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSourceControlResult(
auto_sync=self.auto_sync,
branch=self.branch,
creation_time=self.creation_time,
description=self.description,
folder_path=self.folder_path,
id=self.id,
last_modified_time=self.last_modified_time,
name=self.name,
publish_runbook=self.publish_runbook,
repo_url=self.repo_url,
source_type=self.source_type,
type=self.type)
def get_source_control(automation_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
source_control_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSourceControlResult:
"""
Definition of the source control.
:param str automation_account_name: The name of the automation account.
:param str resource_group_name: Name of an Azure Resource group.
:param str source_control_name: The name of source control.
"""
__args__ = dict()
__args__['automationAccountName'] = automation_account_name
__args__['resourceGroupName'] = resource_group_name
__args__['sourceControlName'] = source_control_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:automation/v20170515preview:getSourceControl', __args__, opts=opts, typ=GetSourceControlResult).value
return AwaitableGetSourceControlResult(
auto_sync=__ret__.auto_sync,
branch=__ret__.branch,
creation_time=__ret__.creation_time,
description=__ret__.description,
folder_path=__ret__.folder_path,
id=__ret__.id,
last_modified_time=__ret__.last_modified_time,
name=__ret__.name,
publish_runbook=__ret__.publish_runbook,
repo_url=__ret__.repo_url,
source_type=__ret__.source_type,
type=__ret__.type)
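# Usage sketch (resource names below are placeholders, not taken from any
# real deployment):
#
#   sc = get_source_control(
#       automation_account_name="myAutomationAccount",
#       resource_group_name="myResourceGroup",
#       source_control_name="mySourceControl")
#   # sc.repo_url, sc.branch, sc.source_type, etc. are then available.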
| [
"[email protected]"
] | |
fd8475f2fa48aed730dfe0df90c3253f83661d75 | 5d61565651b7ba5fa8fade3313a5e82fca8b6686 | /goodstype/forms.py | bab317474e9e54bac606219efd097ee9de0e8a3c | [] | no_license | lonelyxmas/ISMS | d597b00072bfa77907875f575b866fbb1fb53295 | 08c5e2f3518fc639cf1a1f2869f4b2f3ae58e306 | refs/heads/master | 2023-08-14T12:02:59.001215 | 2021-03-22T03:34:58 | 2021-03-22T03:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,013 | py | from django.forms import ModelForm
from django import forms
from django.forms import widgets as Fwidgets
from .models import *
class GoosTypeModelForm(ModelForm):
class Meta:
model = goodstype
fields = '__all__'
widgets = {
'FID': Fwidgets.Input(attrs={'type': 'hidden'}),
'FGoodsTypeID': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FGoodsType': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FDeviationType': Fwidgets.Select(attrs={'lay-verify': 'required'}),
'FPositiveDeviation': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FNegativeDeviation': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FDesc': Fwidgets.Input(attrs={'class': 'layui-input', 'autocomplete': 'off'})
}
class SubTypeModelForm(ModelForm):
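    # FPID (the parent goods-type selector) is declared with an empty choice
    # list and rendered disabled; presumably the view fills in its choices at
    # runtime, which is why required=False is set here.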
FPID = forms.ChoiceField(widget=forms.Select(attrs={'lay-verify': 'required', 'disabled': 'disabled'}), required=False)
class Meta:
model = goodstype
fields = '__all__'
widgets = {
'FID': Fwidgets.Input(attrs={'type': 'hidden'}),
'FGoodsTypeID': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FGoodsType': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FDeviationType': Fwidgets.Select(attrs={'lay-verify': 'required'}),
'FPositiveDeviation': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FNegativeDeviation': Fwidgets.Input(attrs={'class': 'layui-input', 'lay-verify': 'required', 'autocomplete': 'off'}),
'FDesc': Fwidgets.Input(attrs={'class': 'layui-input', 'autocomplete': 'off'})
}
| [
"[email protected]"
] |