max_stars_repo_path (string, length 4-286) | max_stars_repo_name (string, length 5-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.03M) | content_cleaned (string, length 6-1.03M) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, length 0-556k) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5)
---|---|---|---|---|---|---|---|---|---|---|
Houdini/Handlers/Play/Item.py | amrmashriqi/Houdini | 0 | 6631951 | from beaker.cache import cache_region as Cache, region_invalidate as Invalidate
from Houdini.Handlers import Handlers, XT
from Houdini.Handlers.Play.Moderation import cheatBan
from Houdini.Data.Penguin import Inventory
cardStarterDeckId = 821
fireBoosterDeckId = 8006
waterBoosterDeckId = 8010
boosterDecks = {
cardStarterDeckId: [1, 6, 9, 14, 17, 20, 22, 23, 26, 73, 89, 81],
fireBoosterDeckId: [3, 18, 216, 222, 229, 303, 304, 314, 319, 250, 352],
waterBoosterDeckId: [202, 204, 305, 15, 13, 312, 218, 220, 29, 90]
}
@Handlers.Handle(XT.BuyInventory)
def handleBuyInventory(self, data):
if data.ItemId not in self.server.items:
return self.sendError(402)
elif data.ItemId in self.inventory:
return self.sendError(400)
if self.server.items.isBait(data.ItemId):
return cheatBan(self, self.user.ID, comment="Added bait item")
if self.server.items.isTourGuide(data.ItemId):
self.receiveSystemPostcard(126)
if data.ItemId in boosterDecks:
self.addCards(*boosterDecks[data.ItemId])
itemCost = self.server.items.getCost(data.ItemId)
if self.user.Coins < itemCost:
return self.sendError(401)
self.addItem(data.ItemId, itemCost)
Invalidate(getPinString, 'houdini', 'pins', self.user.ID)
Invalidate(getAwardsString, 'houdini', 'awards', self.user.ID)
@Handlers.Handle(XT.GetInventory)
@Handlers.Throttle(-1)
def handleGetInventory(self, data):
self.sendXt("gi", "%".join(map(str, self.inventory)))
@Cache('houdini', 'pins')
def getPinString(self, penguinId):
def getString(pinId):
isMember = int(self.server.items[pinId].Member)
timestamp = self.server.pins.getUnixTimestamp(pinId)
return "|".join(map(str, [pinId, timestamp, isMember]))
if penguinId in self.server.players:
pinsArray = [getString(itemId) for itemId in self.server.players[penguinId].inventory
if self.server.items.isItemPin(itemId)]
else:
pinsArray = [getString(itemId) for itemId, in self.session.query(Inventory.ItemID)
.filter_by(PenguinID=penguinId) if self.server.items.isItemPin(itemId)]
return "%".join(pinsArray)
@Cache('houdini', 'awards')
def getAwardsString(self, penguinId):
if penguinId in self.server.players:
awardsArray = [str(itemId) for itemId in self.server.players[penguinId].inventory
if self.server.items.isItemAward(itemId)]
else:
awardsArray = [str(itemId) for itemId, in self.session.query(Inventory.ItemID)
.filter_by(PenguinID=penguinId) if self.server.items.isItemAward(itemId)]
return "|".join(awardsArray)
@Handlers.Handle(XT.GetPlayerPins)
@Handlers.Throttle()
def handleGetPlayerPins(self, data):
self.sendXt("qpp", getPinString(self, data.PlayerId))
@Handlers.Handle(XT.GetPlayerAwards)
@Handlers.Throttle()
def handleGetPlayerAwards(self, data):
self.sendXt("qpa", data.PlayerId, getAwardsString(self, data.PlayerId)) | from beaker.cache import cache_region as Cache, region_invalidate as Invalidate
from Houdini.Handlers import Handlers, XT
from Houdini.Handlers.Play.Moderation import cheatBan
from Houdini.Data.Penguin import Inventory
cardStarterDeckId = 821
fireBoosterDeckId = 8006
waterBoosterDeckId = 8010
boosterDecks = {
cardStarterDeckId: [1, 6, 9, 14, 17, 20, 22, 23, 26, 73, 89, 81],
fireBoosterDeckId: [3, 18, 216, 222, 229, 303, 304, 314, 319, 250, 352],
waterBoosterDeckId: [202, 204, 305, 15, 13, 312, 218, 220, 29, 90]
}
@Handlers.Handle(XT.BuyInventory)
def handleBuyInventory(self, data):
if data.ItemId not in self.server.items:
return self.sendError(402)
elif data.ItemId in self.inventory:
return self.sendError(400)
if self.server.items.isBait(data.ItemId):
return cheatBan(self, self.user.ID, comment="Added bait item")
if self.server.items.isTourGuide(data.ItemId):
self.receiveSystemPostcard(126)
if data.ItemId in boosterDecks:
self.addCards(*boosterDecks[data.ItemId])
itemCost = self.server.items.getCost(data.ItemId)
if self.user.Coins < itemCost:
return self.sendError(401)
self.addItem(data.ItemId, itemCost)
Invalidate(getPinString, 'houdini', 'pins', self.user.ID)
Invalidate(getAwardsString, 'houdini', 'awards', self.user.ID)
@Handlers.Handle(XT.GetInventory)
@Handlers.Throttle(-1)
def handleGetInventory(self, data):
self.sendXt("gi", "%".join(map(str, self.inventory)))
@Cache('houdini', 'pins')
def getPinString(self, penguinId):
def getString(pinId):
isMember = int(self.server.items[pinId].Member)
timestamp = self.server.pins.getUnixTimestamp(pinId)
return "|".join(map(str, [pinId, timestamp, isMember]))
if penguinId in self.server.players:
pinsArray = [getString(itemId) for itemId in self.server.players[penguinId].inventory
if self.server.items.isItemPin(itemId)]
else:
pinsArray = [getString(itemId) for itemId, in self.session.query(Inventory.ItemID)
.filter_by(PenguinID=penguinId) if self.server.items.isItemPin(itemId)]
return "%".join(pinsArray)
@Cache('houdini', 'awards')
def getAwardsString(self, penguinId):
if penguinId in self.server.players:
awardsArray = [str(itemId) for itemId in self.server.players[penguinId].inventory
if self.server.items.isItemAward(itemId)]
else:
awardsArray = [str(itemId) for itemId, in self.session.query(Inventory.ItemID)
.filter_by(PenguinID=penguinId) if self.server.items.isItemAward(itemId)]
return "|".join(awardsArray)
@Handlers.Handle(XT.GetPlayerPins)
@Handlers.Throttle()
def handleGetPlayerPins(self, data):
self.sendXt("qpp", getPinString(self, data.PlayerId))
@Handlers.Handle(XT.GetPlayerAwards)
@Handlers.Throttle()
def handleGetPlayerAwards(self, data):
self.sendXt("qpa", data.PlayerId, getAwardsString(self, data.PlayerId)) | none | 1 | 2.224509 | 2 |
|
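The handler row above caches each player's pin and award strings with Beaker's `cache_region` decorator and calls `region_invalidate` after a purchase so the next lookup is recomputed. Below is a minimal sketch of that cache-then-invalidate pattern outside the Houdini server; the `'houdini'`/`'pins'` region and key names are taken from the row above, while the in-memory region configuration and the `load_pin_string` function are illustrative assumptions.

```python
from beaker.cache import cache_regions, cache_region, region_invalidate

# Assumed region configuration; the real server configures this elsewhere.
cache_regions.update({"houdini": {"type": "memory", "expire": 300}})

@cache_region("houdini", "pins")
def load_pin_string(penguin_id):
    # Stand-in for the expensive inventory lookup done by getPinString above.
    print("computing for", penguin_id)
    return "501|1200000000|1"

load_pin_string(101)   # computes and caches
load_pin_string(101)   # served from the "houdini" region, no recompute

# After the inventory changes, drop the cached entry, mirroring
# Invalidate(getPinString, 'houdini', 'pins', self.user.ID) in the handler.
region_invalidate(load_pin_string, "houdini", "pins", 101)
load_pin_string(101)   # recomputed on the next call
```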
ED/TP1/ED - 5/model/Dimension.py | lengors/ua-repository | 0 | 6631952 | <reponame>lengors/ua-repository<gh_stars>0
class Dimension:
@classmethod
def get(cls, argument):
if not hasattr(cls, 'REGISTERED'):
setattr(cls, 'REGISTERED', dict())
registered = cls.REGISTERED
value = registered.get(argument, None)
if value is None:
registered[argument] = value = cls.make(argument)
return value
@classmethod
def length(cls):
if not hasattr(cls, 'REGISTERED'):
return 0
return len(cls.REGISTERED) | class Dimension:
@classmethod
def get(cls, argument):
if not hasattr(cls, 'REGISTERED'):
setattr(cls, 'REGISTERED', dict())
registered = cls.REGISTERED
value = registered.get(argument, None)
if value is None:
registered[argument] = value = cls.make(argument)
return value
@classmethod
def length(cls):
if not hasattr(cls, 'REGISTERED'):
return 0
return len(cls.REGISTERED) | none | 1 | 2.975839 | 3 |
|
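`Dimension.get()` in the row above is a small memoizing factory: the first request for a given argument calls `cls.make()` and stores the result in a per-class `REGISTERED` dict, so later requests return the same object. The repository row does not show a `make()` implementation, so the `Length` subclass below is a hypothetical example of how the class is meant to be used.

```python
class Dimension:
    # Trimmed copy of the class from the row above, kept here so the sketch runs standalone.
    @classmethod
    def get(cls, argument):
        if not hasattr(cls, 'REGISTERED'):
            setattr(cls, 'REGISTERED', dict())
        registered = cls.REGISTERED
        value = registered.get(argument, None)
        if value is None:
            registered[argument] = value = cls.make(argument)
        return value

class Length(Dimension):
    # Hypothetical concrete dimension supplying the make() hook that get() expects.
    def __init__(self, name):
        self.name = name

    @classmethod
    def make(cls, argument):
        return cls(argument)

metre = Length.get("metre")
assert metre is Length.get("metre")   # the second call returns the cached instance
```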
scripts/utils.py | jaeminsung/ml_stock_trading | 0 | 6631953 | <reponame>jaeminsung/ml_stock_trading
import os
import pandas as pd
def write_csv_file(data_df, stock, filename, include_header=False):
curDir = os.path.dirname(__file__)
stock_data_dir = 'data/{}'.format(stock)
filepath = os.path.join(curDir, os.pardir, stock_data_dir)
if not os.path.exists(filepath):
os.mkdir(filepath)
path_to_write = os.path.join(filepath, filename)
data_df.to_csv(path_to_write, index=False, header=include_header)
def read_stock_data(stock):
curDir = os.path.dirname(__file__)
stock_data_dir = 'data/{0}/{0}_data.csv'.format(stock)
csv_filepath = os.path.join(curDir, os.pardir, stock_data_dir)
return pd.read_csv(csv_filepath)
def include_n_days_before(data_df, num_days, start_date, end_date):
# NEED TO HANDLE ERROR IF START_DATE >= END_DATE
str_start_date = start_date.strftime('%Y-%m-%d')
str_end_date = end_date.strftime('%Y-%m-%d')
data_df = data_df[data_df['Date'] <= str_end_date]
ind = data_df[data_df['Date'] >= str_start_date].index.tolist()[0]
return data_df[ind-num_days+1:len(data_df)]
| import os
import pandas as pd
def write_csv_file(data_df, stock, filename, include_header=False):
curDir = os.path.dirname(__file__)
stock_data_dir = 'data/{}'.format(stock)
filepath = os.path.join(curDir, os.pardir, stock_data_dir)
if not os.path.exists(filepath):
os.mkdir(filepath)
path_to_write = os.path.join(filepath, filename)
data_df.to_csv(path_to_write, index=False, header=include_header)
def read_stock_data(stock):
curDir = os.path.dirname(__file__)
stock_data_dir = 'data/{0}/{0}_data.csv'.format(stock)
csv_filepath = os.path.join(curDir, os.pardir, stock_data_dir)
return pd.read_csv(csv_filepath)
def include_n_days_before(data_df, num_days, start_date, end_date):
# NEED TO HANDLE ERROR IF START_DATE >= END_DATE
str_start_date = start_date.strftime('%Y-%m-%d')
str_end_date = end_date.strftime('%Y-%m-%d')
data_df = data_df[data_df['Date'] <= str_end_date]
ind = data_df[data_df['Date'] >= str_start_date].index.tolist()[0]
return data_df[ind-num_days+1:len(data_df)] | en | 0.550162 | # NEED TO HANDLE ERROR IF START_DATE >= END_DATE | 2.955957 | 3 |
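A short usage sketch for `include_n_days_before` from the row above. The frame below is synthetic data standing in for a real `data/<stock>/<stock>_data.csv`, and the function is assumed to be defined or imported from the module shown; note that the returned window starts `num_days - 1` rows before `start_date` and runs through `end_date`.

```python
import datetime
import pandas as pd

# Synthetic daily prices standing in for data/AAPL/AAPL_data.csv (illustrative only).
df = pd.DataFrame({
    "Date": pd.date_range("2021-01-01", periods=10).strftime("%Y-%m-%d"),
    "Close": range(10),
})

window = include_n_days_before(
    df,
    num_days=2,
    start_date=datetime.date(2021, 1, 5),
    end_date=datetime.date(2021, 1, 8),
)
print(window["Date"].tolist())
# ['2021-01-04', '2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08']
```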
aicup-python/model/vec2_double.py | arijitgupta42/RAIC-2019 | 0 | 6631954 | <filename>aicup-python/model/vec2_double.py
class Vec2Double:
def __init__(self, x, y):
self.x = x
self.y = y
@staticmethod
def read_from(stream):
x = stream.read_double()
y = stream.read_double()
return Vec2Double(x, y)
def write_to(self, stream):
stream.write_double(self.x)
stream.write_double(self.y)
def __repr__(self):
return "Vec2Double(" + \
repr(self.x) + "," + \
repr(self.y) + \
")"
| <filename>aicup-python/model/vec2_double.py
class Vec2Double:
def __init__(self, x, y):
self.x = x
self.y = y
@staticmethod
def read_from(stream):
x = stream.read_double()
y = stream.read_double()
return Vec2Double(x, y)
def write_to(self, stream):
stream.write_double(self.x)
stream.write_double(self.y)
def __repr__(self):
return "Vec2Double(" + \
repr(self.x) + "," + \
repr(self.y) + \
")"
| none | 1 | 2.809334 | 3 |
|
model/attngan_modules.py | Hhhhhhhhhhao/I2T2I | 0 | 6631955 | <reponame>Hhhhhhhhhhao/I2T2I
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
import torch.nn.functional as F
from base import BaseModel
from model.global_attention_modules import GlobalAttentionGeneral as ATT_NET
n_gpu = torch.cuda.device_count()
device = torch.device('cuda:0' if n_gpu > 0 else 'cpu')
class GLU(nn.Module):
def __init__(self):
super(GLU, self).__init__()
def forward(self, x):
nc = x.size(1)
assert nc % 2 == 0, 'channels dont divide 2!'
nc = int(nc/2)
return x[:, :nc] * F.sigmoid(x[:, nc:])
def conv1x1(in_planes, out_planes, bias=True):
"1x1 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=bias)
def conv3x3(in_planes, out_planes):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=True)
# Upscale the spatial size by a factor of 2
def upBlock(in_planes, out_planes):
block = nn.Sequential(
nn.Upsample(scale_factor=2, mode='nearest'),
conv3x3(in_planes, out_planes * 2),
nn.BatchNorm2d(out_planes * 2),
GLU())
return block
# Keep the spatial size
def Block3x3_relu(in_planes, out_planes):
block = nn.Sequential(
conv3x3(in_planes, out_planes * 2),
nn.BatchNorm2d(out_planes * 2),
GLU())
return block
class ResBlock(nn.Module):
def __init__(self, channel_num):
super(ResBlock, self).__init__()
self.block = nn.Sequential(
conv3x3(channel_num, channel_num * 2),
nn.BatchNorm2d(channel_num * 2),
GLU(),
conv3x3(channel_num, channel_num),
nn.BatchNorm2d(channel_num))
def forward(self, x):
residual = x
out = self.block(x)
out += residual
return out
# ############## G networks ###################
class CAEmbedding(BaseModel):
def __init__(self, text_dim, embed_dim):
super(CAEmbedding, self).__init__()
self.text_dim = text_dim
self.embed_dim = embed_dim
self.linear = nn.Linear(self.text_dim, self.embed_dim*2, bias=True)
self.relu = nn.LeakyReLU(0.2, inplace=True)
def encode(self, text_embedding):
x = self.relu(self.linear(text_embedding))
mean = x[:, :self.embed_dim]
log_var = x[:, self.embed_dim:]
return mean, log_var
def reparametrize(self, mean, log_var):
std = log_var.mul(0.5).exp_()
if torch.cuda.is_available():
eps = torch.cuda.FloatTensor(std.size()).normal_()
else:
eps = torch.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mean)
def forward(self, text_embedding):
mean, log_var = self.encode(text_embedding)
# mean = mean.to(device)
# log_var = log_var.to(device)
c_code = self.reparametrize(mean, log_var)
return c_code, mean, log_var
class INIT_STAGE_G(nn.Module):
def __init__(self, nz, ngf, ncf):
super(INIT_STAGE_G, self).__init__()
self.gf_dim = ngf
self.in_dim = nz + ncf # cfg.TEXT.EMBEDDING_DIM
self.define_module()
def define_module(self):
nz, ngf = self.in_dim, self.gf_dim
self.fc = nn.Sequential(
nn.Linear(nz, ngf * 4 * 4 * 2, bias=True),
nn.BatchNorm1d(ngf * 4 * 4 * 2),
GLU())
self.upsample1 = upBlock(ngf, ngf // 2)
self.upsample2 = upBlock(ngf // 2, ngf // 4)
self.upsample3 = upBlock(ngf // 4, ngf // 8)
self.upsample4 = upBlock(ngf // 8, ngf // 16)
def forward(self, z_code, c_code):
"""
:param z_code: batch x cfg.GAN.Z_DIM
:param c_code: batch x cfg.TEXT.EMBEDDING_DIM
:return: batch x ngf/16 x 64 x 64
"""
c_z_code = torch.cat((c_code, z_code), 1)
# state size ngf x 4 x 4
out_code = self.fc(c_z_code)
out_code = out_code.view(-1, self.gf_dim, 4, 4)
# state size ngf/3 x 8 x 8
out_code = self.upsample1(out_code)
# state size ngf/4 x 16 x 16
out_code = self.upsample2(out_code)
# state size ngf/8 x 32 x 32
out_code32 = self.upsample3(out_code)
# state size ngf/16 x 64 x 64
out_code64 = self.upsample4(out_code32)
return out_code64
class NEXT_STAGE_G(nn.Module):
def __init__(self, ngf, nef, ncf):
super(NEXT_STAGE_G, self).__init__()
self.gf_dim = ngf
self.ef_dim = nef
self.cf_dim = ncf
self.num_residual = 2
self.define_module()
def _make_layer(self, block, channel_num):
layers = []
for i in range(self.num_residual):
layers.append(block(channel_num))
return nn.Sequential(*layers)
def define_module(self):
ngf = self.gf_dim
self.att = ATT_NET(ngf, self.ef_dim)
self.residual = self._make_layer(ResBlock, ngf * 2)
self.upsample = upBlock(ngf * 2, ngf)
def forward(self, h_code, c_code, word_embs, mask):
"""
h_code1(query): batch x idf x ih x iw (queryL=ihxiw)
word_embs(context): batch x cdf x sourceL (sourceL=seq_len)
c_code1: batch x idf x queryL
att1: batch x sourceL x queryL
"""
self.att.applyMask(mask)
c_code, att = self.att(h_code, word_embs)
h_c_code = torch.cat((h_code, c_code), 1)
out_code = self.residual(h_c_code)
# state size ngf/2 x 2in_size x 2in_size
out_code = self.upsample(out_code)
return out_code, att
class GET_IMAGE_G(nn.Module):
def __init__(self, ngf):
super(GET_IMAGE_G, self).__init__()
self.gf_dim = ngf
self.img = nn.Sequential(
conv3x3(ngf, 3),
nn.Tanh()
)
def forward(self, h_code):
out_img = self.img(h_code)
return out_img
class G_NET(nn.Module):
def __init__(self, opt):
super(G_NET, self).__init__()
ngf = opt.ngf
nef = opt.text_embedding_dim
ncf = opt.condition_dim
nz = opt.noise_dim
self.ca_net = CAEmbedding(nef, ncf)
self.ca_net.to(device)
self.opt = opt
if opt.branch_num > 0:
self.h_net1 = INIT_STAGE_G(opt.noise_dim, ngf * 16, ncf)
self.img_net1 = GET_IMAGE_G(ngf)
# gf x 64 x 64
if opt.branch_num > 1:
self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf)
self.img_net2 = GET_IMAGE_G(ngf)
if opt.branch_num > 2:
self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf)
self.img_net3 = GET_IMAGE_G(ngf)
def forward(self, z_code, sent_emb, word_embs, mask):
"""
:param z_code: batch x cfg.GAN.Z_DIM
:param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM
:param word_embs: batch x cdf x seq_len
:param mask: batch x seq_len
:return:
"""
fake_imgs = []
att_maps = []
c_code, mu, logvar = self.ca_net(sent_emb)
if torch.cuda.is_available():
c_code = c_code.cuda()
z_code = z_code.cuda()
if self.opt.branch_num > 0:
h_code1 = self.h_net1(z_code, c_code)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
if self.opt.branch_num > 1:
h_code2, att1 = \
self.h_net2(h_code1, c_code, word_embs, mask)
fake_img2 = self.img_net2(h_code2)
fake_imgs.append(fake_img2)
if att1 is not None:
att_maps.append(att1)
if self.opt.branch_num > 2:
h_code3, att2 = \
self.h_net3(h_code2, c_code, word_embs, mask)
fake_img3 = self.img_net3(h_code3)
fake_imgs.append(fake_img3)
if att2 is not None:
att_maps.append(att2)
return fake_imgs, att_maps, mu, logvar
class G_DCGAN(nn.Module):
def __init__(self, opt):
super(G_DCGAN, self).__init__()
ngf = opt.ngf
nef = opt.text_embedding_dim
ncf = opt.condition_dim
self.ca_net = CAEmbedding(nef, ncf)
self.opt = opt
# 16gf x 64 x 64 --> gf x 64 x 64 --> 3 x 64 x 64
if opt.branch_num > 0:
self.h_net1 = INIT_STAGE_G(opt.noise_dim, ngf * 16, ncf)
# gf x 64 x 64
if opt.branch_num > 1:
self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf)
if opt.branch_num > 2:
self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf)
self.img_net = GET_IMAGE_G(ngf)
def forward(self, z_code, sent_emb, word_embs, mask):
"""
:param z_code: batch x cfg.GAN.Z_DIM
:param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM
:param word_embs: batch x cdf x seq_len
:param mask: batch x seq_len
:return:
"""
att_maps = []
c_code, mu, logvar = self.ca_net(sent_emb)
if self.opt.branch_num > 0:
h_code = self.h_net1(z_code, c_code)
if self.opt.branch_num > 1:
h_code, att1 = self.h_net2(h_code, c_code, word_embs, mask)
if att1 is not None:
att_maps.append(att1)
if self.opt.branch_num > 2:
h_code, att2 = self.h_net3(h_code, c_code, word_embs, mask)
if att2 is not None:
att_maps.append(att2)
fake_imgs = self.img_net(h_code)
return [fake_imgs], att_maps, mu, logvar
# ############## D networks ##########################
def Block3x3_leakRelu(in_planes, out_planes):
block = nn.Sequential(
conv3x3(in_planes, out_planes),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.2, inplace=True)
)
return block
# Downscale the spatial size by a factor of 2
def downBlock(in_planes, out_planes):
block = nn.Sequential(
nn.Conv2d(in_planes, out_planes, 4, 2, 1, bias=False),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.2, inplace=True)
)
return block
# Downscale the spatial size by a factor of 16
def encode_image_by_16times(ndf):
encode_img = nn.Sequential(
# --> state size. ndf x in_size/2 x in_size/2
nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# --> state size 2ndf x x in_size/4 x in_size/4
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# --> state size 4ndf x in_size/8 x in_size/8
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# --> state size 8ndf x in_size/16 x in_size/16
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True)
)
return encode_img
class D_GET_LOGITS(nn.Module):
def __init__(self, ndf, nef, bcondition=False):
super(D_GET_LOGITS, self).__init__()
self.df_dim = ndf
self.ef_dim = nef
self.bcondition = bcondition
if self.bcondition:
self.jointConv = Block3x3_leakRelu(ndf * 8 + nef, ndf * 8)
self.outlogits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4))
def forward(self, h_code, c_code=None):
if self.bcondition and c_code is not None:
# conditioning output
c_code = c_code.view(-1, self.ef_dim, 1, 1)
c_code = c_code.repeat(1, 1, 4, 4)
# state size (ngf+egf) x 4 x 4
h_c_code = torch.cat((h_code, c_code), 1)
# state size ngf x in_size x in_size
h_c_code = self.jointConv(h_c_code)
else:
h_c_code = h_code
output = self.outlogits(h_c_code)
return output.view(-1)
# For 64 x 64 images
class D_NET64(nn.Module):
def __init__(self, opt, b_jcu=True):
super(D_NET64, self).__init__()
ndf = opt.ndf
nef = opt.text_embedding_dim
self.img_code_s16 = encode_image_by_16times(ndf)
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
def forward(self, x_var):
x_code4 = self.img_code_s16(x_var) # 4 x 4 x 8df
return x_code4
# For 128 x 128 images
class D_NET128(nn.Module):
def __init__(self, opt, b_jcu=True):
super(D_NET128, self).__init__()
ndf = opt.ndf
nef = opt.text_embedding_dim
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s32_1 = Block3x3_leakRelu(ndf * 16, ndf * 8)
#
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
def forward(self, x_var):
x_code8 = self.img_code_s16(x_var) # 8 x 8 x 8df
x_code4 = self.img_code_s32(x_code8) # 4 x 4 x 16df
x_code4 = self.img_code_s32_1(x_code4) # 4 x 4 x 8df
return x_code4
# For 256 x 256 images
class D_NET256(nn.Module):
def __init__(self, opt, b_jcu=True):
super(D_NET256, self).__init__()
ndf = opt.ndf
nef = opt.text_embedding_dim
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s64 = downBlock(ndf * 16, ndf * 32)
self.img_code_s64_1 = Block3x3_leakRelu(ndf * 32, ndf * 16)
self.img_code_s64_2 = Block3x3_leakRelu(ndf * 16, ndf * 8)
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
def forward(self, x_var):
x_code16 = self.img_code_s16(x_var)
x_code8 = self.img_code_s32(x_code16)
x_code4 = self.img_code_s64(x_code8)
x_code4 = self.img_code_s64_1(x_code4)
x_code4 = self.img_code_s64_2(x_code4)
return x_code4 | import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
import torch.nn.functional as F
from base import BaseModel
from model.global_attention_modules import GlobalAttentionGeneral as ATT_NET
n_gpu = torch.cuda.device_count()
device = torch.device('cuda:0' if n_gpu > 0 else 'cpu')
class GLU(nn.Module):
def __init__(self):
super(GLU, self).__init__()
def forward(self, x):
nc = x.size(1)
assert nc % 2 == 0, 'channels dont divide 2!'
nc = int(nc/2)
return x[:, :nc] * F.sigmoid(x[:, nc:])
def conv1x1(in_planes, out_planes, bias=True):
"1x1 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=bias)
def conv3x3(in_planes, out_planes):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=True)
# Upscale the spatial size by a factor of 2
def upBlock(in_planes, out_planes):
block = nn.Sequential(
nn.Upsample(scale_factor=2, mode='nearest'),
conv3x3(in_planes, out_planes * 2),
nn.BatchNorm2d(out_planes * 2),
GLU())
return block
# Keep the spatial size
def Block3x3_relu(in_planes, out_planes):
block = nn.Sequential(
conv3x3(in_planes, out_planes * 2),
nn.BatchNorm2d(out_planes * 2),
GLU())
return block
class ResBlock(nn.Module):
def __init__(self, channel_num):
super(ResBlock, self).__init__()
self.block = nn.Sequential(
conv3x3(channel_num, channel_num * 2),
nn.BatchNorm2d(channel_num * 2),
GLU(),
conv3x3(channel_num, channel_num),
nn.BatchNorm2d(channel_num))
def forward(self, x):
residual = x
out = self.block(x)
out += residual
return out
# ############## G networks ###################
class CAEmbedding(BaseModel):
def __init__(self, text_dim, embed_dim):
super(CAEmbedding, self).__init__()
self.text_dim = text_dim
self.embed_dim = embed_dim
self.linear = nn.Linear(self.text_dim, self.embed_dim*2, bias=True)
self.relu = nn.LeakyReLU(0.2, inplace=True)
def encode(self, text_embedding):
x = self.relu(self.linear(text_embedding))
mean = x[:, :self.embed_dim]
log_var = x[:, self.embed_dim:]
return mean, log_var
def reparametrize(self, mean, log_var):
std = log_var.mul(0.5).exp_()
if torch.cuda.is_available():
eps = torch.cuda.FloatTensor(std.size()).normal_()
else:
eps = torch.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mean)
def forward(self, text_embedding):
mean, log_var = self.encode(text_embedding)
# mean = mean.to(device)
# log_var = log_var.to(device)
c_code = self.reparametrize(mean, log_var)
return c_code, mean, log_var
class INIT_STAGE_G(nn.Module):
def __init__(self, nz, ngf, ncf):
super(INIT_STAGE_G, self).__init__()
self.gf_dim = ngf
self.in_dim = nz + ncf # cfg.TEXT.EMBEDDING_DIM
self.define_module()
def define_module(self):
nz, ngf = self.in_dim, self.gf_dim
self.fc = nn.Sequential(
nn.Linear(nz, ngf * 4 * 4 * 2, bias=True),
nn.BatchNorm1d(ngf * 4 * 4 * 2),
GLU())
self.upsample1 = upBlock(ngf, ngf // 2)
self.upsample2 = upBlock(ngf // 2, ngf // 4)
self.upsample3 = upBlock(ngf // 4, ngf // 8)
self.upsample4 = upBlock(ngf // 8, ngf // 16)
def forward(self, z_code, c_code):
"""
:param z_code: batch x cfg.GAN.Z_DIM
:param c_code: batch x cfg.TEXT.EMBEDDING_DIM
:return: batch x ngf/16 x 64 x 64
"""
c_z_code = torch.cat((c_code, z_code), 1)
# state size ngf x 4 x 4
out_code = self.fc(c_z_code)
out_code = out_code.view(-1, self.gf_dim, 4, 4)
# state size ngf/3 x 8 x 8
out_code = self.upsample1(out_code)
# state size ngf/4 x 16 x 16
out_code = self.upsample2(out_code)
# state size ngf/8 x 32 x 32
out_code32 = self.upsample3(out_code)
# state size ngf/16 x 64 x 64
out_code64 = self.upsample4(out_code32)
return out_code64
class NEXT_STAGE_G(nn.Module):
def __init__(self, ngf, nef, ncf):
super(NEXT_STAGE_G, self).__init__()
self.gf_dim = ngf
self.ef_dim = nef
self.cf_dim = ncf
self.num_residual = 2
self.define_module()
def _make_layer(self, block, channel_num):
layers = []
for i in range(self.num_residual):
layers.append(block(channel_num))
return nn.Sequential(*layers)
def define_module(self):
ngf = self.gf_dim
self.att = ATT_NET(ngf, self.ef_dim)
self.residual = self._make_layer(ResBlock, ngf * 2)
self.upsample = upBlock(ngf * 2, ngf)
def forward(self, h_code, c_code, word_embs, mask):
"""
h_code1(query): batch x idf x ih x iw (queryL=ihxiw)
word_embs(context): batch x cdf x sourceL (sourceL=seq_len)
c_code1: batch x idf x queryL
att1: batch x sourceL x queryL
"""
self.att.applyMask(mask)
c_code, att = self.att(h_code, word_embs)
h_c_code = torch.cat((h_code, c_code), 1)
out_code = self.residual(h_c_code)
# state size ngf/2 x 2in_size x 2in_size
out_code = self.upsample(out_code)
return out_code, att
class GET_IMAGE_G(nn.Module):
def __init__(self, ngf):
super(GET_IMAGE_G, self).__init__()
self.gf_dim = ngf
self.img = nn.Sequential(
conv3x3(ngf, 3),
nn.Tanh()
)
def forward(self, h_code):
out_img = self.img(h_code)
return out_img
class G_NET(nn.Module):
def __init__(self, opt):
super(G_NET, self).__init__()
ngf = opt.ngf
nef = opt.text_embedding_dim
ncf = opt.condition_dim
nz = opt.noise_dim
self.ca_net = CAEmbedding(nef, ncf)
self.ca_net.to(device)
self.opt = opt
if opt.branch_num > 0:
self.h_net1 = INIT_STAGE_G(opt.noise_dim, ngf * 16, ncf)
self.img_net1 = GET_IMAGE_G(ngf)
# gf x 64 x 64
if opt.branch_num > 1:
self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf)
self.img_net2 = GET_IMAGE_G(ngf)
if opt.branch_num > 2:
self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf)
self.img_net3 = GET_IMAGE_G(ngf)
def forward(self, z_code, sent_emb, word_embs, mask):
"""
:param z_code: batch x cfg.GAN.Z_DIM
:param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM
:param word_embs: batch x cdf x seq_len
:param mask: batch x seq_len
:return:
"""
fake_imgs = []
att_maps = []
c_code, mu, logvar = self.ca_net(sent_emb)
if torch.cuda.is_available():
c_code = c_code.cuda()
z_code = z_code.cuda()
if self.opt.branch_num > 0:
h_code1 = self.h_net1(z_code, c_code)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
if self.opt.branch_num > 1:
h_code2, att1 = \
self.h_net2(h_code1, c_code, word_embs, mask)
fake_img2 = self.img_net2(h_code2)
fake_imgs.append(fake_img2)
if att1 is not None:
att_maps.append(att1)
if self.opt.branch_num > 2:
h_code3, att2 = \
self.h_net3(h_code2, c_code, word_embs, mask)
fake_img3 = self.img_net3(h_code3)
fake_imgs.append(fake_img3)
if att2 is not None:
att_maps.append(att2)
return fake_imgs, att_maps, mu, logvar
class G_DCGAN(nn.Module):
def __init__(self, opt):
super(G_DCGAN, self).__init__()
ngf = opt.ngf
nef = opt.text_embedding_dim
ncf = opt.condition_dim
self.ca_net = CAEmbedding(nef, ncf)
self.opt = opt
# 16gf x 64 x 64 --> gf x 64 x 64 --> 3 x 64 x 64
if opt.branch_num > 0:
self.h_net1 = INIT_STAGE_G(opt.noise_dim, ngf * 16, ncf)
# gf x 64 x 64
if opt.branch_num > 1:
self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf)
if opt.branch_num > 2:
self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf)
self.img_net = GET_IMAGE_G(ngf)
def forward(self, z_code, sent_emb, word_embs, mask):
"""
:param z_code: batch x cfg.GAN.Z_DIM
:param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM
:param word_embs: batch x cdf x seq_len
:param mask: batch x seq_len
:return:
"""
att_maps = []
c_code, mu, logvar = self.ca_net(sent_emb)
if self.opt.branch_num > 0:
h_code = self.h_net1(z_code, c_code)
if self.opt.branch_num > 1:
h_code, att1 = self.h_net2(h_code, c_code, word_embs, mask)
if att1 is not None:
att_maps.append(att1)
if self.opt.branch_num > 2:
h_code, att2 = self.h_net3(h_code, c_code, word_embs, mask)
if att2 is not None:
att_maps.append(att2)
fake_imgs = self.img_net(h_code)
return [fake_imgs], att_maps, mu, logvar
# ############## D networks ##########################
def Block3x3_leakRelu(in_planes, out_planes):
block = nn.Sequential(
conv3x3(in_planes, out_planes),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.2, inplace=True)
)
return block
# Downscale the spatial size by a factor of 2
def downBlock(in_planes, out_planes):
block = nn.Sequential(
nn.Conv2d(in_planes, out_planes, 4, 2, 1, bias=False),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.2, inplace=True)
)
return block
# Downscale the spatial size by a factor of 16
def encode_image_by_16times(ndf):
encode_img = nn.Sequential(
# --> state size. ndf x in_size/2 x in_size/2
nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# --> state size 2ndf x x in_size/4 x in_size/4
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# --> state size 4ndf x in_size/8 x in_size/8
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# --> state size 8ndf x in_size/16 x in_size/16
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True)
)
return encode_img
class D_GET_LOGITS(nn.Module):
def __init__(self, ndf, nef, bcondition=False):
super(D_GET_LOGITS, self).__init__()
self.df_dim = ndf
self.ef_dim = nef
self.bcondition = bcondition
if self.bcondition:
self.jointConv = Block3x3_leakRelu(ndf * 8 + nef, ndf * 8)
self.outlogits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4))
def forward(self, h_code, c_code=None):
if self.bcondition and c_code is not None:
# conditioning output
c_code = c_code.view(-1, self.ef_dim, 1, 1)
c_code = c_code.repeat(1, 1, 4, 4)
# state size (ngf+egf) x 4 x 4
h_c_code = torch.cat((h_code, c_code), 1)
# state size ngf x in_size x in_size
h_c_code = self.jointConv(h_c_code)
else:
h_c_code = h_code
output = self.outlogits(h_c_code)
return output.view(-1)
# For 64 x 64 images
class D_NET64(nn.Module):
def __init__(self, opt, b_jcu=True):
super(D_NET64, self).__init__()
ndf = opt.ndf
nef = opt.text_embedding_dim
self.img_code_s16 = encode_image_by_16times(ndf)
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
def forward(self, x_var):
x_code4 = self.img_code_s16(x_var) # 4 x 4 x 8df
return x_code4
# For 128 x 128 images
class D_NET128(nn.Module):
def __init__(self, opt, b_jcu=True):
super(D_NET128, self).__init__()
ndf = opt.ndf
nef = opt.text_embedding_dim
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s32_1 = Block3x3_leakRelu(ndf * 16, ndf * 8)
#
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
def forward(self, x_var):
x_code8 = self.img_code_s16(x_var) # 8 x 8 x 8df
x_code4 = self.img_code_s32(x_code8) # 4 x 4 x 16df
x_code4 = self.img_code_s32_1(x_code4) # 4 x 4 x 8df
return x_code4
# For 256 x 256 images
class D_NET256(nn.Module):
def __init__(self, opt, b_jcu=True):
super(D_NET256, self).__init__()
ndf = opt.ndf
nef = opt.text_embedding_dim
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s64 = downBlock(ndf * 16, ndf * 32)
self.img_code_s64_1 = Block3x3_leakRelu(ndf * 32, ndf * 16)
self.img_code_s64_2 = Block3x3_leakRelu(ndf * 16, ndf * 8)
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
def forward(self, x_var):
x_code16 = self.img_code_s16(x_var)
x_code8 = self.img_code_s32(x_code16)
x_code4 = self.img_code_s64(x_code8)
x_code4 = self.img_code_s64_1(x_code4)
x_code4 = self.img_code_s64_2(x_code4)
return x_code4 | en | 0.398733 | # Upsale the spatial size by a factor of 2 # Keep the spatial size # ############## G networks ################### # mean = mean.to(device) # log_var = log_var.to(device) # cfg.TEXT.EMBEDDING_DIM :param z_code: batch x cfg.GAN.Z_DIM :param c_code: batch x cfg.TEXT.EMBEDDING_DIM :return: batch x ngf/16 x 64 x 64 # state size ngf x 4 x 4 # state size ngf/3 x 8 x 8 # state size ngf/4 x 16 x 16 # state size ngf/8 x 32 x 32 # state size ngf/16 x 64 x 64 h_code1(query): batch x idf x ih x iw (queryL=ihxiw) word_embs(context): batch x cdf x sourceL (sourceL=seq_len) c_code1: batch x idf x queryL att1: batch x sourceL x queryL # state size ngf/2 x 2in_size x 2in_size # gf x 64 x 64 :param z_code: batch x cfg.GAN.Z_DIM :param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM :param word_embs: batch x cdf x seq_len :param mask: batch x seq_len :return: # 16gf x 64 x 64 --> gf x 64 x 64 --> 3 x 64 x 64 # gf x 64 x 64 :param z_code: batch x cfg.GAN.Z_DIM :param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM :param word_embs: batch x cdf x seq_len :param mask: batch x seq_len :return: # ############## D networks ########################## # Downsale the spatial size by a factor of 2 # Downsale the spatial size by a factor of 16 # --> state size. ndf x in_size/2 x in_size/2 # --> state size 2ndf x x in_size/4 x in_size/4 # --> state size 4ndf x in_size/8 x in_size/8 # --> state size 8ndf x in_size/16 x in_size/16 # conditioning output # state size (ngf+egf) x 4 x 4 # state size ngf x in_size x in_size # For 64 x 64 images # 4 x 4 x 8df # For 128 x 128 images # # 8 x 8 x 8df # 4 x 4 x 16df # 4 x 4 x 8df # For 256 x 256 images | 2.482288 | 2 |
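`CAEmbedding.reparametrize` in the row above is the standard VAE reparameterization trick: it turns the `(mean, log_var)` pair produced from the sentence embedding into a differentiable sample `c_code`. A self-contained sketch of the same computation, with illustrative tensor sizes (not taken from the repository's config) and the KL term that conditioning-augmentation losses typically pair with it:

```python
import torch

batch, embed_dim = 4, 100            # illustrative sizes
mean = torch.randn(batch, embed_dim, requires_grad=True)
log_var = torch.randn(batch, embed_dim, requires_grad=True)

std = (0.5 * log_var).exp()          # sigma = exp(log_var / 2), matches log_var.mul(0.5).exp_()
eps = torch.randn_like(std)          # eps ~ N(0, I), resampled on every forward pass
c_code = eps * std + mean            # differentiable w.r.t. mean and log_var

# KL(N(mean, sigma^2) || N(0, 1)), the regularizer usually applied to mu/logvar
kl = -0.5 * torch.mean(torch.sum(1 + log_var - mean.pow(2) - log_var.exp(), dim=1))

loss = c_code.pow(2).mean() + kl     # illustrative loss using the sample plus the KL term
loss.backward()                      # gradients reach mean and log_var through both paths
```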
Backup/20190721144404/sublime_lib/st3/sublime_lib/flags.py | altundasbatu/SublimeTextSettings | 0 | 6631956 | """
Python enumerations for use with Sublime API methods.
In addition to the standard behavior,
these enumerations' constructors accept the name of an enumerated value as a string:
.. code-block:: python
>>> PointClass(sublime.DIALOG_YES)
<DialogResult.YES: 1>
>>> PointClass("YES")
<DialogResult.YES: 1>
Descendants of :class:`IntFlag` accept zero or more arguments:
.. code-block:: python
>>> PointClass("WORD_START", "WORD_END")
<PointClass.WORD_END|WORD_START: 3>
>>> PointClass()
<PointClass.0: 0>
.. versionchanged:: 1.2
Constructors accept member names
and `IntFlag` constructors accept multiple arguments.
"""
import sublime
from .vendor.python.enum import IntEnum, IntFlag
from inspect import cleandoc
from ._util.enum import ExtensibleConstructorMeta, construct_union, construct_with_alternatives
__all__ = [
'DialogResult', 'PointClass', 'FindOption', 'RegionOption',
'PopupOption', 'PhantomLayout', 'OpenFileOption', 'QuickPanelOption'
]
def autodoc(prefix=None):
if prefix is None:
prefix_str = ''
else:
prefix_str = prefix + '_'
def decorator(enum):
enum.__doc__ = cleandoc(enum.__doc__) + '\n\n' + '\n'.join([
cleandoc("""
.. py:attribute:: {name}
:annotation: = sublime.{pre}{name}
""").format(name=item.name, pre=prefix_str) for item in enum
])
return enum
return decorator
construct_from_name = construct_with_alternatives(
lambda cls, value: cls.__members__.get(value, None)
)
@autodoc('DIALOG')
@construct_from_name
class DialogResult(IntEnum):
"""
An :class:`~enum.IntEnum` for use with :func:`sublime.yes_no_cancel_dialog`.
"""
CANCEL = sublime.DIALOG_CANCEL
YES = sublime.DIALOG_YES
NO = sublime.DIALOG_NO
@autodoc('CLASS')
@construct_union
@construct_from_name
class PointClass(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with several methods of :class:`sublime.View`:
- :meth:`~sublime.View.classify`
- :meth:`~sublime.View.find_by_class`
- :meth:`~sublime.View.expand_by_class`
"""
WORD_START = sublime.CLASS_WORD_START
WORD_END = sublime.CLASS_WORD_END
PUNCTUATION_START = sublime.CLASS_PUNCTUATION_START
PUNCTUATION_END = sublime.CLASS_PUNCTUATION_END
SUB_WORD_START = sublime.CLASS_SUB_WORD_START
SUB_WORD_END = sublime.CLASS_SUB_WORD_END
LINE_START = sublime.CLASS_LINE_START
LINE_END = sublime.CLASS_LINE_END
EMPTY_LINE = sublime.CLASS_EMPTY_LINE
@autodoc()
@construct_union
@construct_from_name
class FindOption(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with several methods of :class:`sublime.View`:
- :meth:`~sublime.View.find`
- :meth:`~sublime.View.find_all`
"""
LITERAL = sublime.LITERAL
IGNORECASE = sublime.IGNORECASE
@autodoc()
@construct_union
@construct_from_name
class RegionOption(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with :meth:`sublime.View.add_regions`.
"""
DRAW_EMPTY = sublime.DRAW_EMPTY
HIDE_ON_MINIMAP = sublime.HIDE_ON_MINIMAP
DRAW_EMPTY_AS_OVERWRITE = sublime.DRAW_EMPTY_AS_OVERWRITE
DRAW_NO_FILL = sublime.DRAW_NO_FILL
DRAW_NO_OUTLINE = sublime.DRAW_NO_OUTLINE
DRAW_SOLID_UNDERLINE = sublime.DRAW_SOLID_UNDERLINE
DRAW_STIPPLED_UNDERLINE = sublime.DRAW_STIPPLED_UNDERLINE
DRAW_SQUIGGLY_UNDERLINE = sublime.DRAW_SQUIGGLY_UNDERLINE
PERSISTENT = sublime.PERSISTENT
HIDDEN = sublime.HIDDEN
@autodoc()
@construct_union
@construct_from_name
class PopupOption(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with :meth:`sublime.View.show_popup`.
"""
COOPERATE_WITH_AUTO_COMPLETE = sublime.COOPERATE_WITH_AUTO_COMPLETE
HIDE_ON_MOUSE_MOVE = sublime.HIDE_ON_MOUSE_MOVE
HIDE_ON_MOUSE_MOVE_AWAY = sublime.HIDE_ON_MOUSE_MOVE_AWAY
@autodoc('LAYOUT')
@construct_union
@construct_from_name
class PhantomLayout(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with :class:`sublime.Phantom`.
"""
INLINE = sublime.LAYOUT_INLINE
BELOW = sublime.LAYOUT_BELOW
BLOCK = sublime.LAYOUT_BLOCK
@autodoc()
@construct_union
@construct_from_name
class OpenFileOption(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with :meth:`sublime.Window.open_file`.
"""
ENCODED_POSITION = sublime.ENCODED_POSITION
TRANSIENT = sublime.TRANSIENT
@autodoc()
@construct_union
@construct_from_name
class QuickPanelOption(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with :meth:`sublime.Window.show_quick_panel`.
"""
MONOSPACE_FONT = sublime.MONOSPACE_FONT
KEEP_OPEN_ON_FOCUS_LOST = sublime.KEEP_OPEN_ON_FOCUS_LOST
| """
Python enumerations for use with Sublime API methods.
In addition to the standard behavior,
these enumerations' constructors accept the name of an enumerated value as a string:
.. code-block:: python
>>> PointClass(sublime.DIALOG_YES)
<DialogResult.YES: 1>
>>> PointClass("YES")
<DialogResult.YES: 1>
Descendants of :class:`IntFlag` accept zero or more arguments:
.. code-block:: python
>>> PointClass("WORD_START", "WORD_END")
<PointClass.WORD_END|WORD_START: 3>
>>> PointClass()
<PointClass.0: 0>
.. versionchanged:: 1.2
Constructors accept member names
and `IntFlag` constructors accept multiple arguments.
"""
import sublime
from .vendor.python.enum import IntEnum, IntFlag
from inspect import cleandoc
from ._util.enum import ExtensibleConstructorMeta, construct_union, construct_with_alternatives
__all__ = [
'DialogResult', 'PointClass', 'FindOption', 'RegionOption',
'PopupOption', 'PhantomLayout', 'OpenFileOption', 'QuickPanelOption'
]
def autodoc(prefix=None):
if prefix is None:
prefix_str = ''
else:
prefix_str = prefix + '_'
def decorator(enum):
enum.__doc__ = cleandoc(enum.__doc__) + '\n\n' + '\n'.join([
cleandoc("""
.. py:attribute:: {name}
:annotation: = sublime.{pre}{name}
""").format(name=item.name, pre=prefix_str) for item in enum
])
return enum
return decorator
construct_from_name = construct_with_alternatives(
lambda cls, value: cls.__members__.get(value, None)
)
@autodoc('DIALOG')
@construct_from_name
class DialogResult(IntEnum):
"""
An :class:`~enum.IntEnum` for use with :func:`sublime.yes_no_cancel_dialog`.
"""
CANCEL = sublime.DIALOG_CANCEL
YES = sublime.DIALOG_YES
NO = sublime.DIALOG_NO
@autodoc('CLASS')
@construct_union
@construct_from_name
class PointClass(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with several methods of :class:`sublime.View`:
- :meth:`~sublime.View.classify`
- :meth:`~sublime.View.find_by_class`
- :meth:`~sublime.View.expand_by_class`
"""
WORD_START = sublime.CLASS_WORD_START
WORD_END = sublime.CLASS_WORD_END
PUNCTUATION_START = sublime.CLASS_PUNCTUATION_START
PUNCTUATION_END = sublime.CLASS_PUNCTUATION_END
SUB_WORD_START = sublime.CLASS_SUB_WORD_START
SUB_WORD_END = sublime.CLASS_SUB_WORD_END
LINE_START = sublime.CLASS_LINE_START
LINE_END = sublime.CLASS_LINE_END
EMPTY_LINE = sublime.CLASS_EMPTY_LINE
@autodoc()
@construct_union
@construct_from_name
class FindOption(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with several methods of :class:`sublime.View`:
- :meth:`~sublime.View.find`
- :meth:`~sublime.View.find_all`
"""
LITERAL = sublime.LITERAL
IGNORECASE = sublime.IGNORECASE
@autodoc()
@construct_union
@construct_from_name
class RegionOption(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with :meth:`sublime.View.add_regions`.
"""
DRAW_EMPTY = sublime.DRAW_EMPTY
HIDE_ON_MINIMAP = sublime.HIDE_ON_MINIMAP
DRAW_EMPTY_AS_OVERWRITE = sublime.DRAW_EMPTY_AS_OVERWRITE
DRAW_NO_FILL = sublime.DRAW_NO_FILL
DRAW_NO_OUTLINE = sublime.DRAW_NO_OUTLINE
DRAW_SOLID_UNDERLINE = sublime.DRAW_SOLID_UNDERLINE
DRAW_STIPPLED_UNDERLINE = sublime.DRAW_STIPPLED_UNDERLINE
DRAW_SQUIGGLY_UNDERLINE = sublime.DRAW_SQUIGGLY_UNDERLINE
PERSISTENT = sublime.PERSISTENT
HIDDEN = sublime.HIDDEN
@autodoc()
@construct_union
@construct_from_name
class PopupOption(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with :meth:`sublime.View.show_popup`.
"""
COOPERATE_WITH_AUTO_COMPLETE = sublime.COOPERATE_WITH_AUTO_COMPLETE
HIDE_ON_MOUSE_MOVE = sublime.HIDE_ON_MOUSE_MOVE
HIDE_ON_MOUSE_MOVE_AWAY = sublime.HIDE_ON_MOUSE_MOVE_AWAY
@autodoc('LAYOUT')
@construct_union
@construct_from_name
class PhantomLayout(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with :class:`sublime.Phantom`.
"""
INLINE = sublime.LAYOUT_INLINE
BELOW = sublime.LAYOUT_BELOW
BLOCK = sublime.LAYOUT_BLOCK
@autodoc()
@construct_union
@construct_from_name
class OpenFileOption(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with :meth:`sublime.Window.open_file`.
"""
ENCODED_POSITION = sublime.ENCODED_POSITION
TRANSIENT = sublime.TRANSIENT
@autodoc()
@construct_union
@construct_from_name
class QuickPanelOption(IntFlag, metaclass=ExtensibleConstructorMeta):
"""
An :class:`~enum.IntFlag` for use with :meth:`sublime.Window.show_quick_panel`.
"""
MONOSPACE_FONT = sublime.MONOSPACE_FONT
KEEP_OPEN_ON_FOCUS_LOST = sublime.KEEP_OPEN_ON_FOCUS_LOST
| en | 0.465168 | Python enumerations for use with Sublime API methods. In addition to the standard behavior, these enumerations' constructors accept the name of an enumerated value as a string: .. code-block:: python >>> PointClass(sublime.DIALOG_YES) <DialogResult.YES: 1> >>> PointClass("YES") <DialogResult.YES: 1> Descendants of :class:`IntFlag` accept zero or more arguments: .. code-block:: python >>> PointClass("WORD_START", "WORD_END") <PointClass.WORD_END|WORD_START: 3> >>> PointClass() <PointClass.0: 0> .. versionchanged:: 1.2 Constructors accept member names and `IntFlag` constructors accept multiple arguments. .. py:attribute:: {name} :annotation: = sublime.{pre}{name} An :class:`~enum.IntEnum` for use with :func:`sublime.yes_no_cancel_dialog`. An :class:`~enum.IntFlag` for use with several methods of :class:`sublime.View`: - :meth:`~sublime.View.classify` - :meth:`~sublime.View.find_by_class` - :meth:`~sublime.View.expand_by_class` An :class:`~enum.IntFlag` for use with several methods of :class:`sublime.View`: - :meth:`~sublime.View.find` - :meth:`~sublime.View.find_all` An :class:`~enum.IntFlag` for use with :meth:`sublime.View.add_regions`. An :class:`~enum.IntFlag` for use with :meth:`sublime.View.show_popup`. An :class:`~enum.IntFlag` for use with :class:`sublime.Phantom`. An :class:`~enum.IntFlag` for use with :meth:`sublime.Window.open_file`. An :class:`~enum.IntFlag` for use with :meth:`sublime.Window.show_quick_panel`. | 3.102609 | 3 |
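The module in the row above wraps `sublime` flag constants in `IntEnum`/`IntFlag` subclasses whose constructors also accept member names, and, for flags, several of them at once. The sketch below reproduces that constructor behaviour with only the standard library; it is independent of sublime_lib's `ExtensibleConstructorMeta`/`construct_with_alternatives` helpers, so the `NamedFlag`/`Find` names are illustrative.

```python
from enum import IntFlag
from functools import reduce
import operator

class NamedFlag(IntFlag):
    """IntFlag whose constructor also accepts a member name as a string."""
    @classmethod
    def _missing_(cls, value):
        if isinstance(value, str) and value in cls.__members__:
            return cls.__members__[value]
        return super()._missing_(value)

    @classmethod
    def union(cls, *values):
        # Zero or more names/ints OR-ed together, like PointClass("WORD_START", "WORD_END").
        return reduce(operator.or_, (cls(v) for v in values), cls(0))

class Find(NamedFlag):
    LITERAL = 1
    IGNORECASE = 2

assert Find("LITERAL") is Find.LITERAL
assert Find.union("LITERAL", "IGNORECASE") == Find.LITERAL | Find.IGNORECASE
assert Find.union() == Find(0)
```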
osbuild/util/linux.py | jelly/osbuild | 0 | 6631957 | <reponame>jelly/osbuild
"""Linux API Access
This module provides access to linux system-calls and other APIs, in particular
those not provided by the python standard library. The idea is to provide
universal wrappers with broad access to linux APIs. Convenience helpers and
higher-level abstractions are beyond the scope of this module.
In some cases it is overly complex to provide universal access to a specific
API. Hence, the API might be restricted to a reduced subset of its
functionality, just to make sure we can actually implement the wrappers in a
reasonable manner.
"""
import array
import ctypes
import ctypes.util
import fcntl
import os
import platform
import threading
__all__ = [
"ioctl_get_immutable",
"ioctl_toggle_immutable",
]
# NOTE: These are wrong on at least ALPHA and SPARC. They use different
# ioctl number setups. We should fix this, but this is really awkward
# in standard python.
# Our tests will catch this, so we will not accidentally run into this
# on those architectures.
FS_IOC_GETFLAGS = 0x80086601
FS_IOC_SETFLAGS = 0x40086602
FS_IMMUTABLE_FL = 0x00000010
if platform.machine() == "ppc64le":
BLK_IOC_FLSBUF = 0x20001261
else:
BLK_IOC_FLSBUF = 0x00001261
def ioctl_get_immutable(fd: int):
"""Query FS_IMMUTABLE_FL
This queries the `FS_IMMUTABLE_FL` flag on a specified file.
Arguments
---------
fd
File-descriptor to operate on.
Returns
-------
bool
Whether the `FS_IMMUTABLE_FL` flag is set or not.
Raises
------
OSError
If the underlying ioctl fails, a matching `OSError` will be raised.
"""
if not isinstance(fd, int) or fd < 0:
raise ValueError()
flags = array.array('L', [0])
fcntl.ioctl(fd, FS_IOC_GETFLAGS, flags, True)
return bool(flags[0] & FS_IMMUTABLE_FL)
def ioctl_toggle_immutable(fd: int, set_to: bool):
"""Toggle FS_IMMUTABLE_FL
This toggles the `FS_IMMUTABLE_FL` flag on a specified file. It can both set
and clear the flag.
Arguments
---------
fd
File-descriptor to operate on.
set_to
Whether to set the `FS_IMMUTABLE_FL` flag or not.
Raises
------
OSError
If the underlying ioctl fails, a matching `OSError` will be raised.
"""
if not isinstance(fd, int) or fd < 0:
raise ValueError()
flags = array.array('L', [0])
fcntl.ioctl(fd, FS_IOC_GETFLAGS, flags, True)
if set_to:
flags[0] |= FS_IMMUTABLE_FL
else:
flags[0] &= ~FS_IMMUTABLE_FL
fcntl.ioctl(fd, FS_IOC_SETFLAGS, flags, False)
def ioctl_blockdev_flushbuf(fd: int):
"""Flush the block device buffer cache
NB: This function needs the `CAP_SYS_ADMIN` capability.
Arguments
---------
fd
File-descriptor of a block device to operate on.
Raises
------
OSError
If the underlying ioctl fails, a matching `OSError`
will be raised.
"""
if not isinstance(fd, int) or fd < 0:
raise ValueError(f"Invalid file descriptor: '{fd}'")
fcntl.ioctl(fd, BLK_IOC_FLSBUF, 0)
class LibCap:
"""Wrapper for libcap (capabilities commands and library) project"""
cap_value_t = ctypes.c_int
_lock = threading.Lock()
_inst = None
def __init__(self, lib: ctypes.CDLL) -> None:
self.lib = lib
# process-wide bounding set
get_bound = lib.cap_get_bound
get_bound.argtypes = (self.cap_value_t,)
get_bound.restype = ctypes.c_int
get_bound.errcheck = self._check_result
self._get_bound = get_bound
from_name = lib.cap_from_name
from_name.argtypes = (ctypes.c_char_p, ctypes.POINTER(self.cap_value_t),)
from_name.restype = ctypes.c_int
from_name.errcheck = self._check_result
self._from_name = from_name
to_name = lib.cap_to_name
to_name.argtypes = (ctypes.c_int,)
to_name.restype = ctypes.POINTER(ctypes.c_char)
to_name.errcheck = self._check_result
self._to_name = to_name
free = lib.cap_free
free.argtypes = (ctypes.c_void_p,)
free.restype = ctypes.c_int
free.errcheck = self._check_result
self._free = free
@staticmethod
def _check_result(result, func, args):
if result is None or (isinstance(result, int) and result == -1):
err = ctypes.get_errno()
msg = f"{func.__name__}{args} -> {result}: error ({err}): {os.strerror(err)}"
raise OSError(err, msg)
return result
@staticmethod
def make():
path = ctypes.util.find_library("cap")
if not path:
return None
try:
lib = ctypes.CDLL(path, use_errno=True)
except (OSError, ImportError):
return None
return LibCap(lib)
@staticmethod
def last_cap() -> int:
"""Return the int value of the highest valid capability"""
try:
with open("/proc/sys/kernel/cap_last_cap", "rb") as f:
data = f.read()
return int(data)
except FileNotFoundError:
return 0
@classmethod
def get_default(cls) -> "LibCap":
"""Return a singleton instance of the library"""
with cls._lock:
if cls._inst is None:
cls._inst = cls.make()
return cls._inst
def get_bound(self, capability: int) -> bool:
"""Return the current value of the capability in the thread's bounding set"""
# cap = self.cap_value_t(capability)
return self._get_bound(capability) == 1
def to_name(self, value: int) -> str:
"""Translate from the capability's integer value to the its symbolic name"""
raw = self._to_name(value)
val = ctypes.cast(raw, ctypes.c_char_p).value
res = str(val, encoding="utf-8")
self._free(raw)
return res.upper()
def from_name(self, value: str) -> int:
"""Translate from the symbolic name to its integer value"""
cap = self.cap_value_t()
self._from_name(value.encode("utf-8"), ctypes.pointer(cap))
return int(cap.value)
def cap_is_supported(capability: str = "CAP_CHOWN") -> bool:
"""Return whether a given capability is supported by the system"""
lib = LibCap.get_default()
if not lib:
return False
try:
value = lib.from_name(capability)
lib.get_bound(value)
return True
except OSError:
return False
def cap_bound_set() -> set:
"""Return the calling thread's capability bounding set
If capabilities are not supported this function will return the empty set.
"""
lib = LibCap.get_default()
if not lib:
return set()
res = set(
lib.to_name(cap)
for cap in range(lib.last_cap() + 1)
if lib.get_bound(cap)
)
return res
def cap_mask_to_set(mask: int) -> set:
lib = LibCap.get_default()
if not lib:
return set()
def bits(n):
count = 0
while n:
if n & 1:
yield count
count += 1
n >>= 1
res = {
lib.to_name(cap) for cap in bits(mask)
}
return res
| """Linux API Access
This module provides access to linux system-calls and other APIs, in particular
those not provided by the python standard library. The idea is to provide
universal wrappers with broad access to linux APIs. Convenience helpers and
higher-level abstractions are beyond the scope of this module.
In some cases it is overly complex to provide universal access to a specific
API. Hence, the API might be restricted to a reduced subset of its
functionality, just to make sure we can actually implement the wrappers in a
reasonable manner.
"""
import array
import ctypes
import ctypes.util
import fcntl
import os
import platform
import threading
__all__ = [
"ioctl_get_immutable",
"ioctl_toggle_immutable",
]
# NOTE: These are wrong on at least ALPHA and SPARC. They use different
# ioctl number setups. We should fix this, but this is really awkward
# in standard python.
# Our tests will catch this, so we will not accidentally run into this
# on those architectures.
FS_IOC_GETFLAGS = 0x80086601
FS_IOC_SETFLAGS = 0x40086602
FS_IMMUTABLE_FL = 0x00000010
if platform.machine() == "ppc64le":
BLK_IOC_FLSBUF = 0x20001261
else:
BLK_IOC_FLSBUF = 0x00001261
def ioctl_get_immutable(fd: int):
"""Query FS_IMMUTABLE_FL
This queries the `FS_IMMUTABLE_FL` flag on a specified file.
Arguments
---------
fd
File-descriptor to operate on.
Returns
-------
bool
Whether the `FS_IMMUTABLE_FL` flag is set or not.
Raises
------
OSError
If the underlying ioctl fails, a matching `OSError` will be raised.
"""
if not isinstance(fd, int) or fd < 0:
raise ValueError()
flags = array.array('L', [0])
fcntl.ioctl(fd, FS_IOC_GETFLAGS, flags, True)
return bool(flags[0] & FS_IMMUTABLE_FL)
def ioctl_toggle_immutable(fd: int, set_to: bool):
"""Toggle FS_IMMUTABLE_FL
This toggles the `FS_IMMUTABLE_FL` flag on a specified file. It can both set
and clear the flag.
Arguments
---------
fd
File-descriptor to operate on.
set_to
Whether to set the `FS_IMMUTABLE_FL` flag or not.
Raises
------
OSError
If the underlying ioctl fails, a matching `OSError` will be raised.
"""
if not isinstance(fd, int) or fd < 0:
raise ValueError()
flags = array.array('L', [0])
fcntl.ioctl(fd, FS_IOC_GETFLAGS, flags, True)
if set_to:
flags[0] |= FS_IMMUTABLE_FL
else:
flags[0] &= ~FS_IMMUTABLE_FL
fcntl.ioctl(fd, FS_IOC_SETFLAGS, flags, False)
def ioctl_blockdev_flushbuf(fd: int):
"""Flush the block device buffer cache
NB: This function needs the `CAP_SYS_ADMIN` capability.
Arguments
---------
fd
File-descriptor of a block device to operate on.
Raises
------
OSError
If the underlying ioctl fails, a matching `OSError`
will be raised.
"""
if not isinstance(fd, int) or fd < 0:
raise ValueError(f"Invalid file descriptor: '{fd}'")
fcntl.ioctl(fd, BLK_IOC_FLSBUF, 0)
class LibCap:
"""Wrapper for libcap (capabilities commands and library) project"""
cap_value_t = ctypes.c_int
_lock = threading.Lock()
_inst = None
def __init__(self, lib: ctypes.CDLL) -> None:
self.lib = lib
# process-wide bounding set
get_bound = lib.cap_get_bound
get_bound.argtypes = (self.cap_value_t,)
get_bound.restype = ctypes.c_int
get_bound.errcheck = self._check_result
self._get_bound = get_bound
from_name = lib.cap_from_name
from_name.argtypes = (ctypes.c_char_p, ctypes.POINTER(self.cap_value_t),)
from_name.restype = ctypes.c_int
from_name.errcheck = self._check_result
self._from_name = from_name
to_name = lib.cap_to_name
to_name.argtypes = (ctypes.c_int,)
to_name.restype = ctypes.POINTER(ctypes.c_char)
to_name.errcheck = self._check_result
self._to_name = to_name
free = lib.cap_free
free.argtypes = (ctypes.c_void_p,)
free.restype = ctypes.c_int
free.errcheck = self._check_result
self._free = free
@staticmethod
def _check_result(result, func, args):
if result is None or (isinstance(result, int) and result == -1):
err = ctypes.get_errno()
msg = f"{func.__name__}{args} -> {result}: error ({err}): {os.strerror(err)}"
raise OSError(err, msg)
return result
@staticmethod
def make():
path = ctypes.util.find_library("cap")
if not path:
return None
try:
lib = ctypes.CDLL(path, use_errno=True)
except (OSError, ImportError):
return None
return LibCap(lib)
@staticmethod
def last_cap() -> int:
"""Return the int value of the highest valid capability"""
try:
with open("/proc/sys/kernel/cap_last_cap", "rb") as f:
data = f.read()
return int(data)
except FileNotFoundError:
return 0
@classmethod
def get_default(cls) -> "LibCap":
"""Return a singleton instance of the library"""
with cls._lock:
if cls._inst is None:
cls._inst = cls.make()
return cls._inst
def get_bound(self, capability: int) -> bool:
"""Return the current value of the capability in the thread's bounding set"""
# cap = self.cap_value_t(capability)
return self._get_bound(capability) == 1
def to_name(self, value: int) -> str:
"""Translate from the capability's integer value to the its symbolic name"""
raw = self._to_name(value)
val = ctypes.cast(raw, ctypes.c_char_p).value
res = str(val, encoding="utf-8")
self._free(raw)
return res.upper()
def from_name(self, value: str) -> int:
"""Translate from the symbolic name to its integer value"""
cap = self.cap_value_t()
self._from_name(value.encode("utf-8"), ctypes.pointer(cap))
return int(cap.value)
def cap_is_supported(capability: str = "CAP_CHOWN") -> bool:
"""Return whether a given capability is supported by the system"""
lib = LibCap.get_default()
if not lib:
return False
try:
value = lib.from_name(capability)
lib.get_bound(value)
return True
except OSError:
return False
def cap_bound_set() -> set:
"""Return the calling thread's capability bounding set
If capabilities are not supported this function will return the empty set.
"""
lib = LibCap.get_default()
if not lib:
return set()
res = set(
lib.to_name(cap)
for cap in range(lib.last_cap() + 1)
if lib.get_bound(cap)
)
return res
def cap_mask_to_set(mask: int) -> set:
lib = LibCap.get_default()
if not lib:
return set()
def bits(n):
count = 0
while n:
if n & 1:
yield count
count += 1
n >>= 1
res = {
lib.to_name(cap) for cap in bits(mask)
}
return res | en | 0.796434 | Linux API Access This module provides access to linux system-calls and other APIs, in particular those not provided by the python standard library. The idea is to provide universal wrappers with broad access to linux APIs. Convenience helpers and higher-level abstractions are beyond the scope of this module. In some cases it is overly complex to provide universal access to a specific API. Hence, the API might be restricted to a reduced subset of its functionality, just to make sure we can actually implement the wrappers in a reasonable manner. # NOTE: These are wrong on at least ALPHA and SPARC. They use different # ioctl number setups. We should fix this, but this is really awkward # in standard python. # Our tests will catch this, so we will not accidentally run into this # on those architectures. Query FS_IMMUTABLE_FL This queries the `FS_IMMUTABLE_FL` flag on a specified file. Arguments --------- fd File-descriptor to operate on. Returns ------- bool Whether the `FS_IMMUTABLE_FL` flag is set or not. Raises ------ OSError If the underlying ioctl fails, a matching `OSError` will be raised. Toggle FS_IMMUTABLE_FL This toggles the `FS_IMMUTABLE_FL` flag on a specified file. It can both set and clear the flag. Arguments --------- fd File-descriptor to operate on. set_to Whether to set the `FS_IMMUTABLE_FL` flag or not. Raises ------ OSError If the underlying ioctl fails, a matching `OSError` will be raised. Flush the block device buffer cache NB: This function needs the `CAP_SYS_ADMIN` capability. Arguments --------- fd File-descriptor of a block device to operate on. Raises ------ OSError If the underlying ioctl fails, a matching `OSError` will be raised. Wrapper for libcap (capabilities commands and library) project # process-wide bounding set Return the int value of the highest valid capability Return a singleton instance of the library Return the current value of the capability in the thread's bounding set # cap = self.cap_value_t(capability) Translate from the capability's integer value to the its symbolic name Translate from the symbolic name to its integer value Return whether a given capability is supported by the system Return the calling thread's capability bounding set If capabilities are not supported this function will return the empty set. | 2.317634 | 2 |
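A minimal usage sketch for the capability helpers above; the module name `linuxapi` is an assumption (the record does not show where these functions live), and libcap must be installed for `LibCap.make()` to succeed.

import linuxapi  # hypothetical import path for the module above

if linuxapi.cap_is_supported("CAP_SYS_ADMIN"):
    # cap_bound_set() yields symbolic names, e.g. {"CAP_CHOWN", "CAP_KILL", ...}
    print("bounding set:", sorted(linuxapi.cap_bound_set()))
else:
    print("libcap unavailable or capability unknown")
# cap_mask_to_set() decodes a raw bit mask; bit 0 is CAP_CHOWN on Linux
print(linuxapi.cap_mask_to_set(0b101))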
src/main/python/client/gui.py | mwintersperger-tgm/sew5-simple-user-database-mwintersperger-tgm | 0 | 6631958 | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import requests
class App(QWidget):
def __init__(self):
self.usernameValue = None
self.emailValue = None
self.photoValue = None
super().__init__()
self.title = 'Simple User Database'
self.left = 100
self.top = 100
self.width = 800
self.height = 600
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.label = QLabel('',self)
self.createTable()
self.username = QLineEdit(self)
self.username.textChanged.connect(self.usernameEntered)
self.usernameLabel = QLabel()
self.usernameLabel.setText("Username:")
self.email = QLineEdit(self)
self.email.textChanged.connect(self.emailEntered)
self.emailLabel = QLabel()
self.emailLabel.setText("Email:")
self.photo = QLineEdit(self)
self.photo.textChanged.connect(self.photoEntered)
self.photoLabel = QLabel()
self.photoLabel.setText("Photo:")
self.addUserButton = QPushButton('Add User', self)
self.addUserButton.clicked.connect(self.addUser)
# Add box layout, add table to box layout and add box layout to widget
self.layout = QVBoxLayout()
self.layout.addWidget(self.label)
self.layout.addWidget(self.tableWidget)
self.layout.addWidget(self.usernameLabel)
self.layout.addWidget(self.username)
self.layout.addWidget(self.emailLabel)
self.layout.addWidget(self.email)
self.layout.addWidget(self.photoLabel)
self.layout.addWidget(self.photo)
self.layout.addWidget(self.addUserButton)
self.setLayout(self.layout)
# Show widget
self.show()
def createTable(self):
r = requests.get('http://localhost:5000/users')
self.USERS = r.json()["users"]
# Create table
self.tableWidget = QTableWidget()
self.tableWidget.setRowCount(5)
self.tableWidget.setColumnCount(len(self.USERS))
self.tableWidget.setEditTriggers(QTableWidget.NoEditTriggers )
for i in range(0,len(self.USERS)):
self.tableWidget.setItem(0,i, QTableWidgetItem(self.USERS[i]["username"]))
self.tableWidget.setItem(1,i, QTableWidgetItem(self.USERS[i]["email"]))
self.tableWidget.setItem(2,i, QTableWidgetItem(self.USERS[i]["photo"]))
self.tableWidget.setItem(3,i, QTableWidgetItem("Delete User"))
self.tableWidget.setItem(4,i, QTableWidgetItem("Update User"))
self.tableWidget.itemDoubleClicked.connect(self.pushedTableButton)
def addUser(self):
allEntered = True
if self.usernameValue is None:
allEntered = False
if self.emailValue is None:
allEntered = False
if self.photoValue is None:
allEntered = False
if allEntered:
r = requests.post('http://localhost:5000/users', json={"username": self.usernameValue, "email": self.emailValue, "photo": self.photoValue})
self.label.setText(r.json()["message"])
r = requests.get('http://localhost:5000/users')
self.USERS = r.json()["users"]
self.tableWidget.setColumnCount(len(self.USERS))
for i in range(0,len(self.USERS)):
self.tableWidget.setItem(0,i, QTableWidgetItem(self.USERS[i]["username"]))
self.tableWidget.setItem(1,i, QTableWidgetItem(self.USERS[i]["email"]))
self.tableWidget.setItem(2,i, QTableWidgetItem(self.USERS[i]["photo"]))
self.tableWidget.setItem(3,i, QTableWidgetItem("Delete User"))
self.tableWidget.setItem(4,i, QTableWidgetItem("Update User"))
self.usernameValue = None
self.emailValue = None
self.photoValue = None
else:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Error")
msg.setInformativeText('empty textfields')
msg.setWindowTitle("Error")
msg.exec_()
def usernameEntered(self,text):
self.usernameValue = text
def emailEntered(self,text):
self.emailValue = text
def photoEntered(self,text):
self.photoValue = text
def pushedTableButton(self,clicked):
if clicked.row() == 3:
r = requests.delete('http://localhost:5000/users/%s' % self.USERS[clicked.column()]['id'])
self.label.setText(r.json()["message"])
r = requests.get('http://localhost:5000/users')
self.USERS = r.json()["users"]
self.tableWidget.setColumnCount(len(self.USERS))
for i in range(0,len(self.USERS)):
self.tableWidget.setItem(0,i, QTableWidgetItem(self.USERS[i]["username"]))
self.tableWidget.setItem(1,i, QTableWidgetItem(self.USERS[i]["email"]))
self.tableWidget.setItem(2,i, QTableWidgetItem(self.USERS[i]["photo"]))
self.tableWidget.setItem(3,i, QTableWidgetItem("Delete User"))
self.tableWidget.setItem(4,i, QTableWidgetItem("Update User"))
if clicked.row() == 4:
allEntered = True
if self.usernameValue is None:
allEntered = False
if self.emailValue is None:
allEntered = False
if self.photoValue is None:
allEntered = False
if allEntered:
r = requests.put('http://localhost:5000/users/%s' % self.USERS[clicked.column()]['id'], json={"username": self.usernameValue, "email": self.emailValue, "photo": self.photoValue})
self.label.setText(r.json()["message"])
r = requests.get('http://localhost:5000/users')
self.USERS = r.json()["users"]
self.tableWidget.setColumnCount(len(self.USERS))
for i in range(0,len(self.USERS)):
self.tableWidget.setItem(0,i, QTableWidgetItem(self.USERS[i]["username"]))
self.tableWidget.setItem(1,i, QTableWidgetItem(self.USERS[i]["email"]))
self.tableWidget.setItem(2,i, QTableWidgetItem(self.USERS[i]["photo"]))
self.tableWidget.setItem(3,i, QTableWidgetItem("Delete User"))
self.tableWidget.setItem(4,i, QTableWidgetItem("Update User"))
self.usernameValue = None
self.emailValue = None
self.photoValue = None
else:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Error")
msg.setInformativeText('empty textfields')
msg.setWindowTitle("Error")
msg.exec_()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
| import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import requests
class App(QWidget):
def __init__(self):
self.usernameValue = None
self.emailValue = None
self.photoValue = None
super().__init__()
self.title = 'Simple User Database'
self.left = 100
self.top = 100
self.width = 800
self.height = 600
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.label = QLabel('',self)
self.createTable()
self.username = QLineEdit(self)
self.username.textChanged.connect(self.usernameEntered)
self.usernameLabel = QLabel()
self.usernameLabel.setText("Username:")
self.email = QLineEdit(self)
self.email.textChanged.connect(self.emailEntered)
self.emailLabel = QLabel()
self.emailLabel.setText("Email:")
self.photo = QLineEdit(self)
self.photo.textChanged.connect(self.photoEntered)
self.photoLabel = QLabel()
self.photoLabel.setText("Photo:")
self.addUserButton = QPushButton('Add User', self)
self.addUserButton.clicked.connect(self.addUser)
# Add box layout, add table to box layout and add box layout to widget
self.layout = QVBoxLayout()
self.layout.addWidget(self.label)
self.layout.addWidget(self.tableWidget)
self.layout.addWidget(self.usernameLabel)
self.layout.addWidget(self.username)
self.layout.addWidget(self.emailLabel)
self.layout.addWidget(self.email)
self.layout.addWidget(self.photoLabel)
self.layout.addWidget(self.photo)
self.layout.addWidget(self.addUserButton)
self.setLayout(self.layout)
# Show widget
self.show()
def createTable(self):
r = requests.get('http://localhost:5000/users')
self.USERS = r.json()["users"]
# Create table
self.tableWidget = QTableWidget()
self.tableWidget.setRowCount(5)
self.tableWidget.setColumnCount(len(self.USERS))
self.tableWidget.setEditTriggers(QTableWidget.NoEditTriggers )
for i in range(0,len(self.USERS)):
self.tableWidget.setItem(0,i, QTableWidgetItem(self.USERS[i]["username"]))
self.tableWidget.setItem(1,i, QTableWidgetItem(self.USERS[i]["email"]))
self.tableWidget.setItem(2,i, QTableWidgetItem(self.USERS[i]["photo"]))
self.tableWidget.setItem(3,i, QTableWidgetItem("Delete User"))
self.tableWidget.setItem(4,i, QTableWidgetItem("Update User"))
self.tableWidget.itemDoubleClicked.connect(self.pushedTableButton)
def addUser(self):
allEntered = True
if self.usernameValue is None:
allEntered = False
if self.emailValue is None:
allEntered = False
if self.photoValue is None:
allEntered = False
if allEntered:
r = requests.post('http://localhost:5000/users', json={"username": self.usernameValue, "email": self.emailValue, "photo": self.photoValue})
self.label.setText(r.json()["message"])
r = requests.get('http://localhost:5000/users')
self.USERS = r.json()["users"]
self.tableWidget.setColumnCount(len(self.USERS))
for i in range(0,len(self.USERS)):
self.tableWidget.setItem(0,i, QTableWidgetItem(self.USERS[i]["username"]))
self.tableWidget.setItem(1,i, QTableWidgetItem(self.USERS[i]["email"]))
self.tableWidget.setItem(2,i, QTableWidgetItem(self.USERS[i]["photo"]))
self.tableWidget.setItem(3,i, QTableWidgetItem("Delete User"))
self.tableWidget.setItem(4,i, QTableWidgetItem("Update User"))
self.usernameValue = None
self.emailValue = None
self.photoValue = None
else:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Error")
msg.setInformativeText('empty textfields')
msg.setWindowTitle("Error")
msg.exec_()
def usernameEntered(self,text):
self.usernameValue = text
def emailEntered(self,text):
self.emailValue = text
def photoEntered(self,text):
self.photoValue = text
def pushedTableButton(self,clicked):
if clicked.row() == 3:
r = requests.delete('http://localhost:5000/users/%s' % self.USERS[clicked.column()]['id'])
self.label.setText(r.json()["message"])
r = requests.get('http://localhost:5000/users')
self.USERS = r.json()["users"]
self.tableWidget.setColumnCount(len(self.USERS))
for i in range(0,len(self.USERS)):
self.tableWidget.setItem(0,i, QTableWidgetItem(self.USERS[i]["username"]))
self.tableWidget.setItem(1,i, QTableWidgetItem(self.USERS[i]["email"]))
self.tableWidget.setItem(2,i, QTableWidgetItem(self.USERS[i]["photo"]))
self.tableWidget.setItem(3,i, QTableWidgetItem("Delete User"))
self.tableWidget.setItem(4,i, QTableWidgetItem("Update User"))
if clicked.row() == 4:
allEntered = True
if self.usernameValue is None:
allEntered = False
if self.emailValue is None:
allEntered = False
if self.photoValue is None:
allEntered = False
if allEntered:
r = requests.put('http://localhost:5000/users/%s' % self.USERS[clicked.column()]['id'], json={"username": self.usernameValue, "email": self.emailValue, "photo": self.photoValue})
self.label.setText(r.json()["message"])
r = requests.get('http://localhost:5000/users')
self.USERS = r.json()["users"]
self.tableWidget.setColumnCount(len(self.USERS))
for i in range(0,len(self.USERS)):
self.tableWidget.setItem(0,i, QTableWidgetItem(self.USERS[i]["username"]))
self.tableWidget.setItem(1,i, QTableWidgetItem(self.USERS[i]["email"]))
self.tableWidget.setItem(2,i, QTableWidgetItem(self.USERS[i]["photo"]))
self.tableWidget.setItem(3,i, QTableWidgetItem("Delete User"))
self.tableWidget.setItem(4,i, QTableWidgetItem("Update User"))
self.usernameValue = None
self.emailValue = None
self.photoValue = None
else:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText("Error")
msg.setInformativeText('empty textfields')
msg.setWindowTitle("Error")
msg.exec_()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
| en | 0.454963 | # Add box layout, add table to box layout and add box layout to widget # Show widget # Create table | 2.683288 | 3 |
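The four identical table-refresh loops in the GUI above could be collapsed into one helper; a sketch (a method intended to be added to the `App` class, endpoint and column layout unchanged):

import requests
from PyQt5.QtWidgets import QTableWidgetItem

def refreshTable(self):
    # Re-fetch the user list and repopulate the five fixed rows; meant to be
    # bound to the App class so addUser/pushedTableButton can simply call it.
    r = requests.get('http://localhost:5000/users')
    self.USERS = r.json()["users"]
    self.tableWidget.setColumnCount(len(self.USERS))
    for i, user in enumerate(self.USERS):
        self.tableWidget.setItem(0, i, QTableWidgetItem(user["username"]))
        self.tableWidget.setItem(1, i, QTableWidgetItem(user["email"]))
        self.tableWidget.setItem(2, i, QTableWidgetItem(user["photo"]))
        self.tableWidget.setItem(3, i, QTableWidgetItem("Delete User"))
        self.tableWidget.setItem(4, i, QTableWidgetItem("Update User"))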
whisky/pipeline.py | underscorenygren/slick | 1 | 6631959 | from whisky import items
class WhiskyItemPipeline(object):
"""fills whisky items fields from name by default"""
def process_item(self, the_item, spider):
if isinstance(the_item, items.WhiskyItem):
the_item = items.fill_item_from_name(the_item)
return the_item
| from whisky import items
class WhiskyItemPipeline(object):
"""fills whisky items fields from name by default"""
def process_item(self, the_item, spider):
if isinstance(the_item, items.WhiskyItem):
the_item = items.fill_item_from_name(the_item)
return the_item
| en | 0.579308 | fills whisky items fields from name by default | 2.973101 | 3 |
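For context, a Scrapy item pipeline such as the one above only runs when enabled in the project settings; a minimal sketch, assuming the module path `whisky.pipeline`:

# settings.py of the Scrapy project (sketch); the dotted path assumes the
# pipeline module shown above is importable as whisky.pipeline.
ITEM_PIPELINES = {
    "whisky.pipeline.WhiskyItemPipeline": 300,  # lower value = runs earlier
}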
models/kitti/frustum/__init__.py | Masterchef365/pvcnn | 477 | 6631960 | from models.kitti.frustum.frustum_net import FrustumPointNet, FrustumPointNet2, FrustumPVCNNE
| from models.kitti.frustum.frustum_net import FrustumPointNet, FrustumPointNet2, FrustumPVCNNE
| none | 1 | 1.110027 | 1 |
|
src/pattern_manager/__init__.py | SysDesignSrl/pattern_manager | 0 | 6631961 | #!/usr/bin/env python
from pattern_manager.xform import XForm
from pattern_manager.util import handle_input_1d, matrix_to_tf, publish_markers, broadcast_transforms
from pattern_manager.plugin import Plugin, PluginLoader
| #!/usr/bin/env python
from pattern_manager.xform import XForm
from pattern_manager.util import handle_input_1d, matrix_to_tf, publish_markers, broadcast_transforms
from pattern_manager.plugin import Plugin, PluginLoader
| ru | 0.26433 | #!/usr/bin/env python | 1.037628 | 1 |
xos/tools/openstack-healthcheck.py | mary-grace/xos | 66 | 6631962 | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#! /usr/bin/env python
"""
Check the status of libvirt, openstack-nova-compute, and
quantum-openvswitch-agent. If these services are enabled and have failed,
then restart them.
"""
from __future__ import print_function
import os
import sys
import subprocess
import time
def get_systemd_status(service):
p = subprocess.Popen(
["/bin/systemctl", "is-active", service],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(out, err) = p.communicate()
    out = out.decode("utf-8", "replace").strip()  # communicate() returns bytes on Python 3
return out
libvirt_enabled = os.system("systemctl -q is-enabled libvirtd.service") == 0
nova_compute_enabled = (
os.system("systemctl -q is-enabled openstack-nova-compute.service") == 0
)
openvswitch_agent_enabled = (
os.system("systemctl -q is-enabled quantum-openvswitch-agent.service") == 0
)
print("enabled:")
print(" libvirtd=", libvirt_enabled)
print(" openstack-nova-compute=", nova_compute_enabled)
print(" quantum-openvswitch-agent=", openvswitch_agent_enabled)
if (
(not libvirt_enabled)
or (not nova_compute_enabled)
or (not openvswitch_agent_enabled)
):
print("services are not enabled. exiting")
sys.exit(0)
libvirt_status = get_systemd_status("libvirtd.service")
nova_compute_status = get_systemd_status("openstack-nova-compute.service")
openvswitch_agent_status = get_systemd_status("quantum-openvswitch-agent.service")
print("status:")
print(" libvirtd=", libvirt_status)
print(" openstack-nova-compute=", nova_compute_status)
print(" quantum-openvswitch-agent=", openvswitch_agent_status)
if (
(libvirt_status == "failed")
or (nova_compute_status == "failed")
or (openvswitch_agent_status == "failed")
):
print("services have failed. doing the big restart")
os.system("systemctl stop openstack-nova-compute.service")
os.system("systemctl stop quantum-openvswitch-agent.service")
os.system("systemctl stop libvirtd.service")
time.sleep(5)
os.system("systemctl start libvirtd.service")
time.sleep(5)
os.system("systemctl start quantum-openvswitch-agent.service")
time.sleep(5)
os.system("systemctl start openstack-nova-compute.service")
print("done")
| # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#! /usr/bin/env python
"""
Check the status of libvirt, openstack-nova-compute, and
quantum-openvswitch-agent. If these services are enabled and have failed,
then restart them.
"""
from __future__ import print_function
import os
import sys
import subprocess
import time
def get_systemd_status(service):
p = subprocess.Popen(
["/bin/systemctl", "is-active", service],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
(out, err) = p.communicate()
    out = out.decode("utf-8", "replace").strip()  # communicate() returns bytes on Python 3
return out
libvirt_enabled = os.system("systemctl -q is-enabled libvirtd.service") == 0
nova_compute_enabled = (
os.system("systemctl -q is-enabled openstack-nova-compute.service") == 0
)
openvswitch_agent_enabled = (
os.system("systemctl -q is-enabled quantum-openvswitch-agent.service") == 0
)
print("enabled:")
print(" libvirtd=", libvirt_enabled)
print(" openstack-nova-compute=", nova_compute_enabled)
print(" quantum-openvswitch-agent=", openvswitch_agent_enabled)
if (
(not libvirt_enabled)
or (not nova_compute_enabled)
or (not openvswitch_agent_enabled)
):
print("services are not enabled. exiting")
sys.exit(0)
libvirt_status = get_systemd_status("libvirtd.service")
nova_compute_status = get_systemd_status("openstack-nova-compute.service")
openvswitch_agent_status = get_systemd_status("quantum-openvswitch-agent.service")
print("status:")
print(" libvirtd=", libvirt_status)
print(" openstack-nova-compute=", nova_compute_status)
print(" quantum-openvswitch-agent=", openvswitch_agent_status)
if (
(libvirt_status == "failed")
or (nova_compute_status == "failed")
or (openvswitch_agent_status == "failed")
):
print("services have failed. doing the big restart")
os.system("systemctl stop openstack-nova-compute.service")
os.system("systemctl stop quantum-openvswitch-agent.service")
os.system("systemctl stop libvirtd.service")
time.sleep(5)
os.system("systemctl start libvirtd.service")
time.sleep(5)
os.system("systemctl start quantum-openvswitch-agent.service")
time.sleep(5)
os.system("systemctl start openstack-nova-compute.service")
print("done")
| en | 0.83161 | # Copyright 2017-present Open Networking Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #! /usr/bin/env python Check the status of libvirt, openstack-nova-compute, and quantum-openvswitch-agent. If these services are enabled and have failed, then restart them. | 1.985449 | 2 |
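The stop/start sequence in the script above can also be expressed as data; a sketch of the same ordering driven by a list (service names taken from the script, the helper itself is new):

import os
import time

SERVICES = [
    "libvirtd.service",
    "quantum-openvswitch-agent.service",
    "openstack-nova-compute.service",
]

def big_restart(services=SERVICES, delay=5):
    # Stop in reverse dependency order, then start in forward order,
    # mirroring the sequence hard-coded in the script above.
    for name in reversed(services):
        os.system("systemctl stop %s" % name)
    time.sleep(delay)
    for name in services:
        os.system("systemctl start %s" % name)
        time.sleep(delay)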
repos/system_upgrade/el7toel8/actors/firewalldfactsactor/actor.py | Jakuje/leapp-repository | 0 | 6631963 | <gh_stars>0
from leapp.actors import Actor
from leapp.models import FirewalldFacts
from leapp.tags import FactsPhaseTag, IPUWorkflowTag
from leapp.libraries.actor import private
import os
import xml.etree.ElementTree as ElementTree
class FirewalldFactsActor(Actor):
"""
Provide data about firewalld
After collecting data, a message with relevant data will be produced.
"""
name = 'firewalld_facts_actor'
consumes = ()
produces = (FirewalldFacts,)
tags = (FactsPhaseTag, IPUWorkflowTag)
def process(self):
facts = FirewalldFacts()
try:
tree = ElementTree.parse('/etc/firewalld/lockdown-whitelist.xml')
root = tree.getroot()
facts.firewall_config_command = private.getLockdownFirewallConfigCommand(root)
except IOError:
pass
try:
tree = ElementTree.parse('/etc/firewalld/direct.xml')
root = tree.getroot()
facts.ebtablesTablesInUse = private.getEbtablesTablesInUse(root)
except IOError:
pass
ipsetTypesInUse = set()
directory = '/etc/firewalld/ipsets'
try:
for file in os.listdir(directory):
if not file.endswith('.xml'):
continue
try:
tree = ElementTree.parse(os.path.join(directory, file))
root = tree.getroot()
ipsetTypesInUse |= set(private.getIpsetTypesInUse(root))
except IOError:
pass
facts.ipsetTypesInUse = list(ipsetTypesInUse)
except OSError:
pass
self.produce(facts)
| from leapp.actors import Actor
from leapp.models import FirewalldFacts
from leapp.tags import FactsPhaseTag, IPUWorkflowTag
from leapp.libraries.actor import private
import os
import xml.etree.ElementTree as ElementTree
class FirewalldFactsActor(Actor):
"""
Provide data about firewalld
After collecting data, a message with relevant data will be produced.
"""
name = 'firewalld_facts_actor'
consumes = ()
produces = (FirewalldFacts,)
tags = (FactsPhaseTag, IPUWorkflowTag)
def process(self):
facts = FirewalldFacts()
try:
tree = ElementTree.parse('/etc/firewalld/lockdown-whitelist.xml')
root = tree.getroot()
facts.firewall_config_command = private.getLockdownFirewallConfigCommand(root)
except IOError:
pass
try:
tree = ElementTree.parse('/etc/firewalld/direct.xml')
root = tree.getroot()
facts.ebtablesTablesInUse = private.getEbtablesTablesInUse(root)
except IOError:
pass
ipsetTypesInUse = set()
directory = '/etc/firewalld/ipsets'
try:
for file in os.listdir(directory):
if not file.endswith('.xml'):
continue
try:
tree = ElementTree.parse(os.path.join(directory, file))
root = tree.getroot()
ipsetTypesInUse |= set(private.getIpsetTypesInUse(root))
except IOError:
pass
facts.ipsetTypesInUse = list(ipsetTypesInUse)
except OSError:
pass
self.produce(facts) | en | 0.786997 | Provide data about firewalld After collecting data, a message with relevant data will be produced. | 2.410572 | 2 |
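The actor above delegates XML parsing to a `private` library that is not shown in this record; purely as an illustration, the ipset-type helper could look roughly like this (firewalld keeps one ipset per file with the type on the root element):

def get_ipset_types_in_use(root):
    # Illustrative only -- the real logic lives in the actor's private library.
    # A firewalld ipset file looks like: <ipset type="hash:ip"> ... </ipset>
    if root.tag == 'ipset' and 'type' in root.attrib:
        return [root.attrib['type']]
    return []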
tests/test_geocoding.py | hasan-haider/conrad | 244 | 6631964 | # -*- coding: utf-8 -*-
from conrad.utils import get_address
NYC_ADDRESS = {
"amenity": "New York City Hall",
"house_number": "260",
"road": "Broadway",
"neighbourhood": "Civic Center",
"suburb": "Manhattan",
"city": "Manhattan Community Board 1",
"county": "New York County",
"state": "New York",
"postcode": "10000",
"country": "United States of America",
"country_code": "us",
"latitude": 40.7127281,
"longitude": -74.0060152,
}
def test_bad_place():
place = "doesnotexist"
address = get_address(place)
assert address is None
def test_good_place():
place = "New York"
address = get_address(place)
assert address == NYC_ADDRESS
| # -*- coding: utf-8 -*-
from conrad.utils import get_address
NYC_ADDRESS = {
"amenity": "New York City Hall",
"house_number": "260",
"road": "Broadway",
"neighbourhood": "Civic Center",
"suburb": "Manhattan",
"city": "Manhattan Community Board 1",
"county": "New York County",
"state": "New York",
"postcode": "10000",
"country": "United States of America",
"country_code": "us",
"latitude": 40.7127281,
"longitude": -74.0060152,
}
def test_bad_place():
place = "doesnotexist"
address = get_address(place)
assert address is None
def test_good_place():
place = "New York"
address = get_address(place)
assert address == NYC_ADDRESS
| en | 0.769321 | # -*- coding: utf-8 -*- | 3.537081 | 4 |
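`get_address` is imported from `conrad.utils` and not shown here; a hedged sketch of a Nominatim-backed implementation that would return dictionaries shaped like `NYC_ADDRESS` (the real conrad code may differ):

from geopy.geocoders import Nominatim  # assumption: geopy-based implementation

def get_address(place):
    geolocator = Nominatim(user_agent="conrad-example")  # user_agent is arbitrary
    location = geolocator.geocode(place, addressdetails=True)
    if location is None:
        return None
    address = dict(location.raw.get("address", {}))
    address["latitude"] = location.latitude
    address["longitude"] = location.longitude
    return address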
frontend/auth_providers/ustc.py | zzh1996/hackergame | 0 | 6631965 |
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.error import URLError
from xml.etree import ElementTree
import logging
from django.contrib import messages
from django.shortcuts import redirect
from django.urls import path
from .base import BaseLoginView
logger = logging.getLogger(__name__)
class LoginView(BaseLoginView):
provider = 'ustc'
group = 'ustc'
service: str
ticket: str
sno: str
def get(self, request):
self.service = request.build_absolute_uri('http://home.ustc.edu.cn/~zzh1996/cas/cas_crypto.html')
self.ticket = request.GET.get('ticket')
if not self.ticket:
return redirect('https://passport.ustc.edu.cn/login?' +
urlencode({'service': self.service}))
if self.check_ticket():
self.login(sno=self.sno)
return redirect('hub')
def check_ticket(self):
try:
with urlopen(
'https://passport.ustc.edu.cn/serviceValidate?' +
urlencode({'service': self.service, 'ticket': self.ticket}), timeout=15
) as req:
tree = ElementTree.fromstring(req.read())[0]
except URLError as e:
messages.error(self.request, '连接统一身份认证平台出错')
logging.exception(e)
return False
cas = '{http://www.yale.edu/tp/cas}'
if tree.tag != cas + 'authenticationSuccess':
messages.error(self.request, '登录失败')
return False
self.identity = tree.find('attributes').find(cas + 'gid').text.strip()
self.sno = tree.find(cas + 'user').text.strip()
return True
urlpatterns = [
path('ustc/login/', LoginView.as_view()),
]
| from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.error import URLError
from xml.etree import ElementTree
import logging
from django.contrib import messages
from django.shortcuts import redirect
from django.urls import path
from .base import BaseLoginView
logger = logging.getLogger(__name__)
class LoginView(BaseLoginView):
provider = 'ustc'
group = 'ustc'
service: str
ticket: str
sno: str
def get(self, request):
self.service = request.build_absolute_uri('http://home.ustc.edu.cn/~zzh1996/cas/cas_crypto.html')
self.ticket = request.GET.get('ticket')
if not self.ticket:
return redirect('https://passport.ustc.edu.cn/login?' +
urlencode({'service': self.service}))
if self.check_ticket():
self.login(sno=self.sno)
return redirect('hub')
def check_ticket(self):
try:
with urlopen(
'https://passport.ustc.edu.cn/serviceValidate?' +
urlencode({'service': self.service, 'ticket': self.ticket}), timeout=15
) as req:
tree = ElementTree.fromstring(req.read())[0]
except URLError as e:
messages.error(self.request, '连接统一身份认证平台出错')
logging.exception(e)
return False
cas = '{http://www.yale.edu/tp/cas}'
if tree.tag != cas + 'authenticationSuccess':
messages.error(self.request, '登录失败')
return False
self.identity = tree.find('attributes').find(cas + 'gid').text.strip()
self.sno = tree.find(cas + 'user').text.strip()
return True
urlpatterns = [
path('ustc/login/', LoginView.as_view()),
] | none | 1 | 2.324174 | 2 |
|
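`check_ticket` above parses a CAS 2.0 `serviceValidate` response; the placeholder document below is constructed to match that parsing logic (it is not captured from the real USTC service) and exercises the same lookups:

from xml.etree import ElementTree

SAMPLE = """<cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">
  <cas:authenticationSuccess>
    <cas:user> SA00000000 </cas:user>
    <attributes><cas:gid> 1234567890 </cas:gid></attributes>
  </cas:authenticationSuccess>
</cas:serviceResponse>"""

tree = ElementTree.fromstring(SAMPLE)[0]
cas = '{http://www.yale.edu/tp/cas}'
assert tree.tag == cas + 'authenticationSuccess'
print(tree.find(cas + 'user').text.strip())                    # -> SA00000000
print(tree.find('attributes').find(cas + 'gid').text.strip())  # -> 1234567890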
scripts/svm_classifier_2d.py | vipavlovic/pyprobml | 4,895 | 6631966 |
# SVM for binary classification in 2d
# Code is based on
# https://github.com/ageron/handson-ml2/blob/master/05_support_vector_machines.ipynb
import superimport
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from sklearn.svm import SVC
from sklearn import datasets
from sklearn.datasets import make_moons
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
X, y = make_moons(n_samples=100, noise=0.15, random_state=42)
def plot_dataset(X, y, axes):
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bs")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "g^")
plt.axis(axes)
plt.grid(True, which='both')
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"$x_2$", fontsize=20, rotation=0)
def plot_predictions(clf, axes):
x0s = np.linspace(axes[0], axes[1], 100)
x1s = np.linspace(axes[2], axes[3], 100)
x0, x1 = np.meshgrid(x0s, x1s)
X = np.c_[x0.ravel(), x1.ravel()]
y_pred = clf.predict(X).reshape(x0.shape)
y_decision = clf.decision_function(X).reshape(x0.shape)
plt.contourf(x0, x1, y_pred, cmap=plt.cm.brg, alpha=0.2)
plt.contourf(x0, x1, y_decision, cmap=plt.cm.brg, alpha=0.1)
###
# Linear SVC with poly degree features
polynomial_svm_clf = Pipeline([
("poly_features", PolynomialFeatures(degree=3)),
("scaler", StandardScaler()),
("svm_clf", LinearSVC(C=10, loss="hinge", random_state=42))
])
polynomial_svm_clf.fit(X, y)
plot_predictions(polynomial_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.savefig("svm_classifier_moons_polynomial_preproc.pdf")
plt.show()
###
# RBF kernel with different hparams
gamma1, gamma2 = 0.1, 5
C1, C2 = 0.001, 1000
hyperparams = (gamma1, C1), (gamma1, C2), (gamma2, C1), (gamma2, C2)
svm_clfs = []
for gamma, C in hyperparams:
rbf_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="rbf", gamma=gamma, C=C))
])
rbf_kernel_svm_clf.fit(X, y)
svm_clfs.append(rbf_kernel_svm_clf)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10.5, 7), sharex=True, sharey=True)
for i, svm_clf in enumerate(svm_clfs):
plt.sca(axes[i // 2, i % 2])
plot_predictions(svm_clf, [-1.5, 2.45, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.45, -1, 1.5])
gamma, C = hyperparams[i]
plt.title(r"$\gamma = {}, C = {}$".format(gamma, C), fontsize=16)
if i in (0, 1):
plt.xlabel("")
if i in (1, 3):
plt.ylabel("")
pml.savefig("svm_classifier_moons_rbf.pdf")
plt.show()
 |
# SVM for binary classification in 2d
# Code is based on
# https://github.com/ageron/handson-ml2/blob/master/05_support_vector_machines.ipynb
import superimport
import numpy as np
import matplotlib.pyplot as plt
import pyprobml_utils as pml
from sklearn.svm import SVC
from sklearn import datasets
from sklearn.datasets import make_moons
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
X, y = make_moons(n_samples=100, noise=0.15, random_state=42)
def plot_dataset(X, y, axes):
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bs")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "g^")
plt.axis(axes)
plt.grid(True, which='both')
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"$x_2$", fontsize=20, rotation=0)
def plot_predictions(clf, axes):
x0s = np.linspace(axes[0], axes[1], 100)
x1s = np.linspace(axes[2], axes[3], 100)
x0, x1 = np.meshgrid(x0s, x1s)
X = np.c_[x0.ravel(), x1.ravel()]
y_pred = clf.predict(X).reshape(x0.shape)
y_decision = clf.decision_function(X).reshape(x0.shape)
plt.contourf(x0, x1, y_pred, cmap=plt.cm.brg, alpha=0.2)
plt.contourf(x0, x1, y_decision, cmap=plt.cm.brg, alpha=0.1)
###
# Linear SVC with poly degree features
polynomial_svm_clf = Pipeline([
("poly_features", PolynomialFeatures(degree=3)),
("scaler", StandardScaler()),
("svm_clf", LinearSVC(C=10, loss="hinge", random_state=42))
])
polynomial_svm_clf.fit(X, y)
plot_predictions(polynomial_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.savefig("svm_classifier_moons_polynomial_preproc.pdf")
plt.show()
###
# RBF kernel with different hparams
gamma1, gamma2 = 0.1, 5
C1, C2 = 0.001, 1000
hyperparams = (gamma1, C1), (gamma1, C2), (gamma2, C1), (gamma2, C2)
svm_clfs = []
for gamma, C in hyperparams:
rbf_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="rbf", gamma=gamma, C=C))
])
rbf_kernel_svm_clf.fit(X, y)
svm_clfs.append(rbf_kernel_svm_clf)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10.5, 7), sharex=True, sharey=True)
for i, svm_clf in enumerate(svm_clfs):
plt.sca(axes[i // 2, i % 2])
plot_predictions(svm_clf, [-1.5, 2.45, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.45, -1, 1.5])
gamma, C = hyperparams[i]
plt.title(r"$\gamma = {}, C = {}$".format(gamma, C), fontsize=16)
if i in (0, 1):
plt.xlabel("")
if i in (1, 3):
plt.ylabel("")
pml.savefig("svm_classifier_moons_rbf.pdf")
plt.show()
| en | 0.82471 | # SVM for binary classification in 2d # Code is based on # https://github.com/ageron/handson-ml2/blob/master/05_support_vector_machines.ipynb ### # Linear SVC with poly degree features ### # RBF kernel with different hparams | 2.928514 | 3 |
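Instead of the hand-picked 2x2 grid of (gamma, C) values above, the same pipeline can be tuned with scikit-learn's grid search; a brief sketch, assuming the variables from the script above are still in scope:

from sklearn.model_selection import GridSearchCV

param_grid = {"svm_clf__gamma": [0.1, 5], "svm_clf__C": [0.001, 1000]}
search = GridSearchCV(rbf_kernel_svm_clf, param_grid, cv=5)
search.fit(X, y)
print(search.best_params_, search.best_score_)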
kyleandemily/rsvp/translate_guestlist.py | ehayne/KyleAndEmily | 0 | 6631967 |
import json
DB = []
PERSON_COUNT = 0
def split_name(guest_info):
return guest_info.split()
def add_guest(guest_info, invitation_count):
global PERSON_COUNT, DB
PERSON_COUNT += 1
first_name, last_name = split_name(guest_info)
person_fields = {
'invitation': invitation_count,
'first_name': first_name,
'last_name': last_name,
'updated': "2015-01-09T23:51:37.745Z"
}
person_document = {
'pk': PERSON_COUNT,
'model': 'rsvp.person',
'fields': person_fields
}
DB.append(person_document)
with open('./guestlist.csv','r') as fh:
data = fh.read().splitlines()[1:-4]
invitation_count = 0
for line in data:
invitation_count += 1
invitation_name, guest1, guest2, child1, child2, child3, child4, _, invitation_out_of_town, invitation_plus_one = line.split(',')
add_guest(guest1, invitation_count)
if guest2:
add_guest(guest2, invitation_count)
if child1:
add_guest(child1, invitation_count)
if child2:
add_guest(child2, invitation_count)
if child3:
add_guest(child3, invitation_count)
if child4:
add_guest(child4, invitation_count)
invitation_fields = {
'name': invitation_name,
'helloGoodbyeInvite': invitation_out_of_town == 'Y',
'plusOne': invitation_plus_one == 'Y',
'updated': "2015-01-09T23:51:37.745Z"
}
invitation_document = {
'pk': invitation_count,
'model': 'rsvp.invitation',
'fields': invitation_fields
}
DB.append(invitation_document)
with open('./fixtures/guestlist.json', 'w') as json_fh:
json_fh.write(json.dumps(DB, indent=4, sort_keys=True)) | import json
DB = []
PERSON_COUNT = 0
def split_name(guest_info):
return guest_info.split()
def add_guest(guest_info, invitation_count):
global PERSON_COUNT, DB
PERSON_COUNT += 1
first_name, last_name = split_name(guest_info)
person_fields = {
'invitation': invitation_count,
'first_name': first_name,
'last_name': last_name,
'updated': "2015-01-09T23:51:37.745Z"
}
person_document = {
'pk': PERSON_COUNT,
'model': 'rsvp.person',
'fields': person_fields
}
DB.append(person_document)
with open('./guestlist.csv','r') as fh:
data = fh.read().splitlines()[1:-4]
invitation_count = 0
for line in data:
invitation_count += 1
invitation_name, guest1, guest2, child1, child2, child3, child4, _, invitation_out_of_town, invitation_plus_one = line.split(',')
add_guest(guest1, invitation_count)
if guest2:
add_guest(guest2, invitation_count)
if child1:
add_guest(child1, invitation_count)
if child2:
add_guest(child2, invitation_count)
if child3:
add_guest(child3, invitation_count)
if child4:
add_guest(child4, invitation_count)
invitation_fields = {
'name': invitation_name,
'helloGoodbyeInvite': invitation_out_of_town == 'Y',
'plusOne': invitation_plus_one == 'Y',
'updated': "2015-01-09T23:51:37.745Z"
}
invitation_document = {
'pk': invitation_count,
'model': 'rsvp.invitation',
'fields': invitation_fields
}
DB.append(invitation_document)
with open('./fixtures/guestlist.json', 'w') as json_fh:
json_fh.write(json.dumps(DB, indent=4, sort_keys=True)) | none | 1 | 2.852741 | 3 |
|
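The `line.split(',')` unpacking above expects ten columns per row; a made-up example of a matching guestlist.csv layout (the real column names are not shown in this record):

# Hypothetical header plus one data row matching the ten-way unpacking above;
# empty child columns are simply skipped by the "if childN:" checks.
EXAMPLE_CSV = (
    "Invitation,Guest1,Guest2,Child1,Child2,Child3,Child4,Notes,OutOfTown,PlusOne\n"
    "The Smiths,John Smith,Jane Smith,,,,,,Y,N\n"
)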
tests/settings.py | augustomen/django-celery | 0 | 6631968 |
# Django settings for testproj project.
import os
import sys
import warnings
warnings.filterwarnings(
'error', r'DateTimeField received a naive datetime',
RuntimeWarning, r'django\.db\.models\.fields')
# import source code dir
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here)
sys.path.insert(0, os.path.join(here, os.pardir))
import djcelery # noqa
djcelery.setup_loader()
NO_NOSE = os.environ.get('DJCELERY_NO_NOSE', False)
SITE_ID = 300
DEBUG = True
ROOT_URLCONF = 'tests.urls'
SECRET_KEY = '<KEY>'
ADMINS = (
# ('<NAME>', '<EMAIL>'),
)
AUTOCOMMIT = True
if not NO_NOSE:
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
COVERAGE_EXCLUDE_MODULES = (
'djcelery',
'djcelery.tests.*',
'djcelery.management.*',
'djcelery.contrib.*',
)
NOSE_ARGS = [
os.path.join(here, os.pardir, 'djcelery', 'tests'),
os.environ.get('NOSE_VERBOSE') and '--verbose' or '',
'--cover3-package=djcelery',
'--cover3-branch',
'--cover3-exclude=%s' % ','.join(COVERAGE_EXCLUDE_MODULES),
]
BROKER_URL = 'amqp://'
TT_HOST = 'localhost'
TT_PORT = 1978
CELERY_DEFAULT_EXCHANGE = 'testcelery'
CELERY_DEFAULT_ROUTING_KEY = 'testcelery'
CELERY_DEFAULT_QUEUE = 'testcelery'
CELERY_QUEUES = {'testcelery': {'binding_key': 'testcelery'}}
CELERY_ACCEPT_CONTENT = ['pickle', 'json']
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
MANAGERS = ADMINS
DATABASES = {
'default': {
'NAME': 'djcelery-test-db',
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'PORT': '',
},
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'dummy': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
]
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'djcelery',
'someapp',
'someappwotask',
)
if not NO_NOSE:
INSTALLED_APPS = INSTALLED_APPS + ('django_nose', )
CELERY_SEND_TASK_ERROR_EMAILS = False
USE_TZ = True
TIME_ZONE = 'UTC'
MIDDLEWARE = MIDDLEWARE_CLASSES = []
| # Django settings for testproj project.
import os
import sys
import warnings
warnings.filterwarnings(
'error', r'DateTimeField received a naive datetime',
RuntimeWarning, r'django\.db\.models\.fields')
# import source code dir
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here)
sys.path.insert(0, os.path.join(here, os.pardir))
import djcelery # noqa
djcelery.setup_loader()
NO_NOSE = os.environ.get('DJCELERY_NO_NOSE', False)
SITE_ID = 300
DEBUG = True
ROOT_URLCONF = 'tests.urls'
SECRET_KEY = '<KEY>'
ADMINS = (
# ('<NAME>', '<EMAIL>'),
)
AUTOCOMMIT = True
if not NO_NOSE:
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
COVERAGE_EXCLUDE_MODULES = (
'djcelery',
'djcelery.tests.*',
'djcelery.management.*',
'djcelery.contrib.*',
)
NOSE_ARGS = [
os.path.join(here, os.pardir, 'djcelery', 'tests'),
os.environ.get('NOSE_VERBOSE') and '--verbose' or '',
'--cover3-package=djcelery',
'--cover3-branch',
'--cover3-exclude=%s' % ','.join(COVERAGE_EXCLUDE_MODULES),
]
BROKER_URL = 'amqp://'
TT_HOST = 'localhost'
TT_PORT = 1978
CELERY_DEFAULT_EXCHANGE = 'testcelery'
CELERY_DEFAULT_ROUTING_KEY = 'testcelery'
CELERY_DEFAULT_QUEUE = 'testcelery'
CELERY_QUEUES = {'testcelery': {'binding_key': 'testcelery'}}
CELERY_ACCEPT_CONTENT = ['pickle', 'json']
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
MANAGERS = ADMINS
DATABASES = {
'default': {
'NAME': 'djcelery-test-db',
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'PORT': '',
},
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'dummy': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
]
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'djcelery',
'someapp',
'someappwotask',
)
if not NO_NOSE:
INSTALLED_APPS = INSTALLED_APPS + ('django_nose', )
CELERY_SEND_TASK_ERROR_EMAILS = False
USE_TZ = True
TIME_ZONE = 'UTC'
MIDDLEWARE = MIDDLEWARE_CLASSES = [] | en | 0.370075 | # Django settings for testproj project. # import source code dir # noqa # ('<NAME>', '<EMAIL>'), | 1.787024 | 2 |
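A minimal sketch of loading the settings module above from a standalone script; the dotted path `tests.settings` is assumed from the file location and `ROOT_URLCONF`:

import os
import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
django.setup()

from django.conf import settings
print(settings.CELERY_DEFAULT_QUEUE, settings.BROKER_URL)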
src/unet/model_predict.py | mschoema/Type-Design-Project | 1 | 6631969 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import argparse
from unet import UNet
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
parser = argparse.ArgumentParser(description='Prediction for unseen data')
parser.add_argument('--model_dir', dest='model_dir', required=True,
help='directory that saves the model checkpoints')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=16, help='number of examples in batch')
parser.add_argument('--source_obj', dest='source_obj', type=str, required=True, help='the source images for inference')
parser.add_argument('--save_dir', default='save_dir', type=str, required=True, help='path to save inferred images')
args = parser.parse_args()
def main(_):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
model = UNet(batch_size=args.batch_size)
model.register_session(sess)
model.build_model(is_training=False)
model.infer(model_dir=args.model_dir, source_obj=args.source_obj, save_dir=args.save_dir)
if __name__ == '__main__':
tf.app.run()
| # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import argparse
from unet import UNet
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
parser = argparse.ArgumentParser(description='Prediction for unseen data')
parser.add_argument('--model_dir', dest='model_dir', required=True,
help='directory that saves the model checkpoints')
parser.add_argument('--batch_size', dest='batch_size', type=int, default=16, help='number of examples in batch')
parser.add_argument('--source_obj', dest='source_obj', type=str, required=True, help='the source images for inference')
parser.add_argument('--save_dir', default='save_dir', type=str, required=True, help='path to save inferred images')
args = parser.parse_args()
def main(_):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
model = UNet(batch_size=args.batch_size)
model.register_session(sess)
model.build_model(is_training=False)
model.infer(model_dir=args.model_dir, source_obj=args.source_obj, save_dir=args.save_dir)
if __name__ == '__main__':
tf.app.run() | en | 0.769321 | # -*- coding: utf-8 -*- | 2.077402 | 2 |
backend/repositories/migrations/0006_repository_hook_id.py | kevenleone/github-commits | 2 | 6631970 | # Generated by Django 2.2.7 on 2019-12-01 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('repositories', '0005_auto_20191124_1640'),
]
operations = [
migrations.AddField(
model_name='repository',
name='hook_id',
field=models.IntegerField(default=0),
),
]
| # Generated by Django 2.2.7 on 2019-12-01 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('repositories', '0005_auto_20191124_1640'),
]
operations = [
migrations.AddField(
model_name='repository',
name='hook_id',
field=models.IntegerField(default=0),
),
]
| en | 0.827194 | # Generated by Django 2.2.7 on 2019-12-01 18:58 | 1.438356 | 1 |
Birnn_Transformer/ncc/criterions/type_prediction/type_prediction_cross_entropy.py | code-backdoor/code-backdoor | 71 | 6631971 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from ncc.criterions import NccCriterion, register_criterion
from ncc.utils.logging import metrics
@register_criterion('type_predicition_cross_entropy')
class TypePredictionCrossEntropyCriterion(NccCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample['net_input'])
loss, sample_size = self.compute_loss(model, net_output, sample, reduce=reduce)
# sample_size = 100 # TODO sample['target'].size(0) if self.sentence_avg else sample['ntokens']
logging_output = {
'loss': loss.data,
# 'ntokens': sample['ntokens'],
# 'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
logits = net_output[0]
target = model.get_targets(sample, net_output)#.view(-1)
no_type_id = self.task.target_dictionary.index('O')
loss = F.cross_entropy(logits.transpose(1, 2), target, ignore_index=no_type_id) # , reduction='sum'
# loss2 = F.cross_entropy(logits.data.transpose(1, 2), target.data, ignore_index=no_type_id)
# print('loss1: ', loss.item())
sample_size = torch.sum(target.ne(no_type_id))
# if sample_size == 0:
# sample_size += 1
# print('sample_size: {}; loss1: {}; loss2: {}'.format(sample_size, loss.item(), loss2.item()))
# print('target: ', target)
return loss, sample_size
# exit()
#
# lprobs = model.get_normalized_probs(net_output, log_probs=True)
# lprobs = lprobs.view(-1, lprobs.size(-1))
# target = model.get_targets(sample, net_output).view(-1)
# # loss = F.nll_loss(
# # lprobs,
# # target,
# # ignore_index=self.task.target_dictionary.index('O'),#self.padding_idx,
# # reduction='sum' if reduce else 'none',
# # )
# no_type_id = self.task.target_dictionary.index('O')
# ignore_any_loss = False
# if ignore_any_loss:
# any_id = self.task.target_dictionary.index('$any$')
# labels_ignore_any = target.clone()
# labels_ignore_any[labels_ignore_any == any_id] = no_type_id
# loss = F.nll_loss(
# lprobs,
# labels_ignore_any,
# ignore_index=no_type_id, # self.padding_idx,
# reduction='sum' if reduce else 'none',
# )
# sample_size = torch.sum(labels_ignore_any.ne(no_type_id))
# else:
# loss = F.nll_loss(
# lprobs,
# target,
# ignore_index=no_type_id, # self.padding_idx,
# reduction='sum' if reduce else 'none',
# )
# sample_size = torch.sum(target.ne(no_type_id))
#
#
# return loss, sample_size
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
# ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
metrics.log_scalar('loss', loss_sum / sample_size, sample_size, round=3) # / math.log(2)
# if sample_size != ntokens:
# metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3)
# metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))
# else:
# metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from ncc.criterions import NccCriterion, register_criterion
from ncc.utils.logging import metrics
@register_criterion('type_predicition_cross_entropy')
class TypePredictionCrossEntropyCriterion(NccCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample['net_input'])
loss, sample_size = self.compute_loss(model, net_output, sample, reduce=reduce)
# sample_size = 100 # TODO sample['target'].size(0) if self.sentence_avg else sample['ntokens']
logging_output = {
'loss': loss.data,
# 'ntokens': sample['ntokens'],
# 'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
logits = net_output[0]
target = model.get_targets(sample, net_output)#.view(-1)
no_type_id = self.task.target_dictionary.index('O')
loss = F.cross_entropy(logits.transpose(1, 2), target, ignore_index=no_type_id) # , reduction='sum'
# loss2 = F.cross_entropy(logits.data.transpose(1, 2), target.data, ignore_index=no_type_id)
# print('loss1: ', loss.item())
sample_size = torch.sum(target.ne(no_type_id))
# if sample_size == 0:
# sample_size += 1
# print('sample_size: {}; loss1: {}; loss2: {}'.format(sample_size, loss.item(), loss2.item()))
# print('target: ', target)
return loss, sample_size
# exit()
#
# lprobs = model.get_normalized_probs(net_output, log_probs=True)
# lprobs = lprobs.view(-1, lprobs.size(-1))
# target = model.get_targets(sample, net_output).view(-1)
# # loss = F.nll_loss(
# # lprobs,
# # target,
# # ignore_index=self.task.target_dictionary.index('O'),#self.padding_idx,
# # reduction='sum' if reduce else 'none',
# # )
# no_type_id = self.task.target_dictionary.index('O')
# ignore_any_loss = False
# if ignore_any_loss:
# any_id = self.task.target_dictionary.index('$any$')
# labels_ignore_any = target.clone()
# labels_ignore_any[labels_ignore_any == any_id] = no_type_id
# loss = F.nll_loss(
# lprobs,
# labels_ignore_any,
# ignore_index=no_type_id, # self.padding_idx,
# reduction='sum' if reduce else 'none',
# )
# sample_size = torch.sum(labels_ignore_any.ne(no_type_id))
# else:
# loss = F.nll_loss(
# lprobs,
# target,
# ignore_index=no_type_id, # self.padding_idx,
# reduction='sum' if reduce else 'none',
# )
# sample_size = torch.sum(target.ne(no_type_id))
#
#
# return loss, sample_size
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
# ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
metrics.log_scalar('loss', loss_sum / sample_size, sample_size, round=3) # / math.log(2)
# if sample_size != ntokens:
# metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3)
# metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))
# else:
# metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| en | 0.400486 | # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training # sample_size = 100 # TODO sample['target'].size(0) if self.sentence_avg else sample['ntokens'] # 'ntokens': sample['ntokens'], # 'nsentences': sample['target'].size(0), #.view(-1) # , reduction='sum' # loss2 = F.cross_entropy(logits.data.transpose(1, 2), target.data, ignore_index=no_type_id) # print('loss1: ', loss.item()) # if sample_size == 0: # sample_size += 1 # print('sample_size: {}; loss1: {}; loss2: {}'.format(sample_size, loss.item(), loss2.item())) # print('target: ', target) # exit() # # lprobs = model.get_normalized_probs(net_output, log_probs=True) # lprobs = lprobs.view(-1, lprobs.size(-1)) # target = model.get_targets(sample, net_output).view(-1) # # loss = F.nll_loss( # # lprobs, # # target, # # ignore_index=self.task.target_dictionary.index('O'),#self.padding_idx, # # reduction='sum' if reduce else 'none', # # ) # no_type_id = self.task.target_dictionary.index('O') # ignore_any_loss = False # if ignore_any_loss: # any_id = self.task.target_dictionary.index('$any$') # labels_ignore_any = target.clone() # labels_ignore_any[labels_ignore_any == any_id] = no_type_id # loss = F.nll_loss( # lprobs, # labels_ignore_any, # ignore_index=no_type_id, # self.padding_idx, # reduction='sum' if reduce else 'none', # ) # sample_size = torch.sum(labels_ignore_any.ne(no_type_id)) # else: # loss = F.nll_loss( # lprobs, # target, # ignore_index=no_type_id, # self.padding_idx, # reduction='sum' if reduce else 'none', # ) # sample_size = torch.sum(target.ne(no_type_id)) # # # return loss, sample_size Aggregate logging outputs from data parallel training. # ntokens = sum(log.get('ntokens', 0) for log in logging_outputs) # / math.log(2) # if sample_size != ntokens: # metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3) # metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg)) # else: # metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg)) Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. | 2.574391 | 3 |
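A toy illustration of the masking in `compute_loss` above: targets equal to the `'O'` (no-type) index are excluded from the cross-entropy, and `sample_size` counts only the labelled positions (indices below are made up):

import torch
import torch.nn.functional as F

no_type_id = 0                            # pretend index 0 is the ignored 'O' label
logits = torch.randn(2, 5, 7)             # (batch, time, num_types)
target = torch.tensor([[0, 3, 0, 2, 0],
                       [1, 0, 0, 0, 4]])  # (batch, time)
loss = F.cross_entropy(logits.transpose(1, 2), target, ignore_index=no_type_id)
sample_size = torch.sum(target.ne(no_type_id))
print(loss.item(), int(sample_size))      # mean over the 4 labelled positions, 4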
tests/integration/paddle/test_tail_and_tune.py | jina-ai/finetuner | 270 | 6631972 | import paddle.nn as nn
import pytest
from finetuner import fit
@pytest.fixture
def embed_model():
return nn.Sequential(
nn.Flatten(),
nn.Linear(in_features=128, out_features=256),
nn.ReLU(),
nn.Linear(in_features=256, out_features=128),
nn.ReLU(),
nn.Linear(in_features=128, out_features=64),
nn.ReLU(),
nn.Linear(in_features=64, out_features=32),
)
def test_tail_and_tune(embed_model, create_easy_data_session):
data, _ = create_easy_data_session(10, 128, 1000)
model = fit(
model=embed_model,
train_data=data,
epochs=5,
to_embedding_model=True,
input_size=(128,),
output_dim=16,
layer_name='linear_4',
)
assert model
assert model != embed_model
| import paddle.nn as nn
import pytest
from finetuner import fit
@pytest.fixture
def embed_model():
return nn.Sequential(
nn.Flatten(),
nn.Linear(in_features=128, out_features=256),
nn.ReLU(),
nn.Linear(in_features=256, out_features=128),
nn.ReLU(),
nn.Linear(in_features=128, out_features=64),
nn.ReLU(),
nn.Linear(in_features=64, out_features=32),
)
def test_tail_and_tune(embed_model, create_easy_data_session):
data, _ = create_easy_data_session(10, 128, 1000)
model = fit(
model=embed_model,
train_data=data,
epochs=5,
to_embedding_model=True,
input_size=(128,),
output_dim=16,
layer_name='linear_4',
)
assert model
assert model != embed_model
| none | 1 | 2.44483 | 2 |
|
chat/insert.py | nazmul-pro/py-channels | 0 | 6631973 |
from django.http import HttpResponse
import jwt, json
from testapp.models import AppUser, AppUserInfo, AppUserToken
def users(request):
users_json = """
[
{
"name": "Sumon",
"email": "<EMAIL>",
"phone": "555",
"password": "<PASSWORD>"
},
{
"name": "shadhin",
"email": "<EMAIL>",
"phone": "555",
"password": "<PASSWORD>"
},
{
"name": "hussain",
"email": "<EMAIL>",
"phone": "555",
"password": "<PASSWORD>"
}
]
"""
users_py = json.loads(users_json)
for u in users_py:
user = AppUser(name=u['name'], email=u['email'], phone=u['phone'], password=u['password'])
# print(user.id)
user.save()
info = AppUserInfo(app_user=user, permissions="later", avatar="later")
info.save()
payload = {
'app_user': user.id,
'name': user.name,
'email': user.email
}
jwt_token = jwt.encode(payload, "SECRET_KEY").decode('utf-8')
print(len(jwt_token))
user_token = AppUserToken(app_user=user, jwt=jwt_token)
user_token.save()
return HttpResponse('done')
| from django.http import HttpResponse
import jwt, json
from testapp.models import AppUser, AppUserInfo, AppUserToken
def users(request):
users_json = """
[
{
"name": "Sumon",
"email": "<EMAIL>",
"phone": "555",
"password": "<PASSWORD>"
},
{
"name": "shadhin",
"email": "<EMAIL>",
"phone": "555",
"password": "<PASSWORD>"
},
{
"name": "hussain",
"email": "<EMAIL>",
"phone": "555",
"password": "<PASSWORD>"
}
]
"""
users_py = json.loads(users_json)
for u in users_py:
user = AppUser(name=u['name'], email=u['email'], phone=u['phone'], password=u['password'])
# print(user.id)
user.save()
info = AppUserInfo(app_user=user, permissions="later", avatar="later")
info.save()
payload = {
'app_user': user.id,
'name': user.name,
'email': user.email
}
jwt_token = jwt.encode(payload, "SECRET_KEY").decode('utf-8')
print(len(jwt_token))
user_token = AppUserToken(app_user=user, jwt=jwt_token)
user_token.save()
return HttpResponse('done') | en | 0.359902 | [ { "name": "Sumon", "email": "<EMAIL>", "phone": "555", "password": "<PASSWORD>" }, { "name": "shadhin", "email": "<EMAIL>", "phone": "555", "password": "<PASSWORD>" }, { "name": "hussain", "email": "<EMAIL>", "phone": "555", "password": "<PASSWORD>" } ] # print(user.id) | 2.604843 | 3 |
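The JWT round trip in the view above can be sketched independently of the Django models; note that `jwt.encode` returns `str` in PyJWT >= 2.0, so the `.decode('utf-8')` call above assumes an older PyJWT release:

import jwt

payload = {"app_user": 1, "name": "example", "email": "<EMAIL>"}
token = jwt.encode(payload, "SECRET_KEY", algorithm="HS256")
decoded = jwt.decode(token, "SECRET_KEY", algorithms=["HS256"])
assert decoded["app_user"] == 1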
tensorflow/python/autograph/utils/testing.py | yage99/tensorflow | 4 | 6631974 | <gh_stars>1-10
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
import unittest
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AutoGraphTestCase(test.TestCase):
"""Tests specialized for AutoGraph, which run as tf.functions.
These tests use a staged programming-like approach: most of the test code runs
as-is inside a tf.function, but the assertions are lifted outside the
function, and run with the corresponding function values instead.
For example, the test:
def test_foo(self):
baz = bar();
self.assertEqual(baz, value)
is equivalent to writing:
def test_foo(self):
@tf.function
def test_fn():
baz = bar();
return baz, value
baz_actual, value_actual = test_fn()
self.assertEqual(baz_actual, value_actual)
"""
def __new__(cls, *args):
obj = super().__new__(cls)
for name in cls.__dict__:
if not name.startswith(unittest.TestLoader.testMethodPrefix):
continue
m = getattr(obj, name)
if callable(m):
wrapper = obj._run_as_tf_function(m)
setattr(obj, name, types.MethodType(wrapper, obj))
return obj
def _run_as_tf_function(self, fn):
def wrapper(self):
@def_function.function(autograph=False) # Testing autograph itself.
def fn_wrapper():
self.assertions = []
fn()
targets = [args for _, args in self.assertions]
return targets
actuals = self.evaluate(fn_wrapper())
for (_, args), value in zip(self.assertions, actuals):
args[:] = value
return wrapper
def variable(self, name, value, dtype):
with ops.init_scope():
if name not in self.variables:
self.variables[name] = variables.Variable(value, dtype=dtype)
self.evaluate(self.variables[name].initializer)
return self.variables[name]
def setUp(self):
super().setUp()
self.variables = {}
def tearDown(self):
for fn, args in self.assertions:
fn(*args)
super().tearDown()
def assertEqual(self, *args):
self.assertions.append((super().assertEqual, list(args)))
| # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
import unittest
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AutoGraphTestCase(test.TestCase):
"""Tests specialized for AutoGraph, which run as tf.functions.
These tests use a staged programming-like approach: most of the test code runs
as-is inside a tf.function, but the assertions are lifted outside the
function, and run with the corresponding function values instead.
For example, the test:
def test_foo(self):
baz = bar();
self.assertEqual(baz, value)
is equivalent to writing:
def test_foo(self):
@tf.function
def test_fn():
baz = bar();
return baz, value
baz_actual, value_actual = test_fn()
self.assertEqual(baz_actual, value_actual)
"""
def __new__(cls, *args):
obj = super().__new__(cls)
for name in cls.__dict__:
if not name.startswith(unittest.TestLoader.testMethodPrefix):
continue
m = getattr(obj, name)
if callable(m):
wrapper = obj._run_as_tf_function(m)
setattr(obj, name, types.MethodType(wrapper, obj))
return obj
def _run_as_tf_function(self, fn):
def wrapper(self):
@def_function.function(autograph=False) # Testing autograph itself.
def fn_wrapper():
self.assertions = []
fn()
targets = [args for _, args in self.assertions]
return targets
actuals = self.evaluate(fn_wrapper())
for (_, args), value in zip(self.assertions, actuals):
args[:] = value
return wrapper
def variable(self, name, value, dtype):
with ops.init_scope():
if name not in self.variables:
self.variables[name] = variables.Variable(value, dtype=dtype)
self.evaluate(self.variables[name].initializer)
return self.variables[name]
def setUp(self):
super().setUp()
self.variables = {}
def tearDown(self):
for fn, args in self.assertions:
fn(*args)
super().tearDown()
def assertEqual(self, *args):
self.assertions.append((super().assertEqual, list(args))) | en | 0.781901 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Testing utilities. Tests specialized for AutoGraph, which run as tf.functions. These tests use a staged programming-like approach: most of the test code runs as-is inside a tf.function, but the assertions are lifted outside the function, and run with the corresponding function values instead. For example, the test: def test_foo(self): baz = bar(); self.assertEqual(baz, value) is equivalent to writing: def test_foo(self): @tf.function def test_fn(): baz = bar(); return baz, value baz_actual, value_actual = test_fn() self.assertEqual(baz_actual, value_actual) # Testing autograph itself. | 2.7433 | 3 |
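Based on the docstring of `AutoGraphTestCase` above, a test written against it might look like the sketch below (assuming the internal `tensorflow.python.autograph.utils.testing` module is importable in the installed TensorFlow build):

import tensorflow as tf
from tensorflow.python.autograph.utils import testing

class SimpleTest(testing.AutoGraphTestCase):

    def test_constant_addition(self):
        x = tf.constant(1) + tf.constant(2)
        # The assertion is recorded inside the tf.function and checked in tearDown.
        self.assertEqual(x, 3)

if __name__ == "__main__":
    tf.test.main()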
codes/server.py | Kevinmuahahaha/posty | 0 | 6631975 | <filename>codes/server.py
from mod.modlog import debug
from mod.interaction_routine import interaction_routine
import threading
import sys
import socket
HOST_IP="127.0.0.1"
HOST_PORT=23869
soc = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
soc.bind((HOST_IP,HOST_PORT))
soc.listen(5)
debug("Server running at %s:%s"%(HOST_IP, HOST_PORT))
debug("Ready for incoming requests.")
while True:
client_sock, client_addr = soc.accept()
debug("Connection From: " + str(client_addr[0]) + ":" + str(client_addr[1]))
thread_interaction = threading.Thread(target=interaction_routine,args=(client_sock,))
thread_interaction.start()
| <filename>codes/server.py
from mod.modlog import debug
from mod.interaction_routine import interaction_routine
import threading
import sys
import socket
HOST_IP="127.0.0.1"
HOST_PORT=23869
soc = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
soc.bind((HOST_IP,HOST_PORT))
soc.listen(5)
debug("Server running at %s:%s"%(HOST_IP, HOST_PORT))
debug("Ready for incoming requests.")
while True:
client_sock, client_addr = soc.accept()
debug("Connection From: " + str(client_addr[0]) + ":" + str(client_addr[1]))
thread_interaction = threading.Thread(target=interaction_routine,args=(client_sock,))
thread_interaction.start()
| none | 1 | 2.539958 | 3 |
|
google/cloud/dlp_v2/services/dlp_service/client.py | anukaal/python-dlp | 32 | 6631976 | <reponame>anukaal/python-dlp
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.dlp_v2.services.dlp_service import pagers
from google.cloud.dlp_v2.types import dlp
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import DlpServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import DlpServiceGrpcTransport
from .transports.grpc_asyncio import DlpServiceGrpcAsyncIOTransport
class DlpServiceClientMeta(type):
"""Metaclass for the DlpService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[DlpServiceTransport]]
_transport_registry["grpc"] = DlpServiceGrpcTransport
_transport_registry["grpc_asyncio"] = DlpServiceGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[DlpServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DlpServiceClient(metaclass=DlpServiceClientMeta):
"""The Cloud Data Loss Prevention (DLP) API is a service that
allows clients to detect the presence of Personally Identifiable
Information (PII) and other privacy-sensitive data in user-
supplied, unstructured data streams, like text blocks or images.
The service also includes methods for sensitive data redaction
and scheduling of data scans on Google Cloud Platform based data
sets.
To learn more about concepts and find how-to guides see
https://cloud.google.com/dlp/docs/.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dlp.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DlpServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DlpServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> DlpServiceTransport:
"""Returns the transport used by the client instance.
Returns:
DlpServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def deidentify_template_path(organization: str, deidentify_template: str,) -> str:
"""Returns a fully-qualified deidentify_template string."""
return "organizations/{organization}/deidentifyTemplates/{deidentify_template}".format(
organization=organization, deidentify_template=deidentify_template,
)
@staticmethod
def parse_deidentify_template_path(path: str) -> Dict[str, str]:
"""Parses a deidentify_template path into its component segments."""
m = re.match(
r"^organizations/(?P<organization>.+?)/deidentifyTemplates/(?P<deidentify_template>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def dlp_content_path(project: str,) -> str:
"""Returns a fully-qualified dlp_content string."""
return "projects/{project}/dlpContent".format(project=project,)
@staticmethod
def parse_dlp_content_path(path: str) -> Dict[str, str]:
"""Parses a dlp_content path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/dlpContent$", path)
return m.groupdict() if m else {}
@staticmethod
def dlp_job_path(project: str, dlp_job: str,) -> str:
"""Returns a fully-qualified dlp_job string."""
return "projects/{project}/dlpJobs/{dlp_job}".format(
project=project, dlp_job=dlp_job,
)
@staticmethod
def parse_dlp_job_path(path: str) -> Dict[str, str]:
"""Parses a dlp_job path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/dlpJobs/(?P<dlp_job>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def finding_path(project: str, location: str, finding: str,) -> str:
"""Returns a fully-qualified finding string."""
return "projects/{project}/locations/{location}/findings/{finding}".format(
project=project, location=location, finding=finding,
)
@staticmethod
def parse_finding_path(path: str) -> Dict[str, str]:
"""Parses a finding path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/findings/(?P<finding>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def inspect_template_path(organization: str, inspect_template: str,) -> str:
"""Returns a fully-qualified inspect_template string."""
return "organizations/{organization}/inspectTemplates/{inspect_template}".format(
organization=organization, inspect_template=inspect_template,
)
@staticmethod
def parse_inspect_template_path(path: str) -> Dict[str, str]:
"""Parses a inspect_template path into its component segments."""
m = re.match(
r"^organizations/(?P<organization>.+?)/inspectTemplates/(?P<inspect_template>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def job_trigger_path(project: str, job_trigger: str,) -> str:
"""Returns a fully-qualified job_trigger string."""
return "projects/{project}/jobTriggers/{job_trigger}".format(
project=project, job_trigger=job_trigger,
)
@staticmethod
def parse_job_trigger_path(path: str) -> Dict[str, str]:
"""Parses a job_trigger path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/jobTriggers/(?P<job_trigger>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def stored_info_type_path(organization: str, stored_info_type: str,) -> str:
"""Returns a fully-qualified stored_info_type string."""
return "organizations/{organization}/storedInfoTypes/{stored_info_type}".format(
organization=organization, stored_info_type=stored_info_type,
)
@staticmethod
def parse_stored_info_type_path(path: str) -> Dict[str, str]:
"""Parses a stored_info_type path into its component segments."""
m = re.match(
r"^organizations/(?P<organization>.+?)/storedInfoTypes/(?P<stored_info_type>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, DlpServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the dlp service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, DlpServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, DlpServiceTransport):
# transport is a DlpServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def inspect_content(
self,
request: Union[dlp.InspectContentRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.InspectContentResponse:
r"""Finds potentially sensitive info in content.
This method has limits on input size, processing time,
and output size.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
        For how-to guides, see
https://cloud.google.com/dlp/docs/inspecting-images and
https://cloud.google.com/dlp/docs/inspecting-text,
Args:
request (Union[google.cloud.dlp_v2.types.InspectContentRequest, dict]):
The request object. Request to search for potentially
sensitive info in a ContentItem.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.InspectContentResponse:
Results of inspecting an item.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.InspectContentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.InspectContentRequest):
request = dlp.InspectContentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.inspect_content]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
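    # Hedged usage sketch for inspect_content, kept as comments so the generated
    # module above stays intact; <PROJECT_ID> is a placeholder.
    #
    #   from google.cloud import dlp_v2
    #   client = dlp_v2.DlpServiceClient()
    #   response = client.inspect_content(
    #       request={
    #           "parent": "projects/<PROJECT_ID>",
    #           "item": {"value": "My email is <EMAIL>"},
    #       }
    #   )
    #   for finding in response.result.findings:
    #       print(finding.info_type.name, finding.likelihood)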
def redact_image(
self,
request: Union[dlp.RedactImageRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.RedactImageResponse:
r"""Redacts potentially sensitive info from an image.
This method has limits on input size, processing time,
and output size. See
https://cloud.google.com/dlp/docs/redacting-sensitive-
data-images to learn more.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Args:
request (Union[google.cloud.dlp_v2.types.RedactImageRequest, dict]):
The request object. Request to search for potentially
sensitive info in an image and redact it by covering it
with a colored rectangle.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.RedactImageResponse:
Results of redacting an image.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.RedactImageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.RedactImageRequest):
request = dlp.RedactImageRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.redact_image]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def deidentify_content(
self,
request: Union[dlp.DeidentifyContentRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DeidentifyContentResponse:
r"""De-identifies potentially sensitive info from a
ContentItem. This method has limits on input size and
output size. See
https://cloud.google.com/dlp/docs/deidentify-sensitive-
data to learn more.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Args:
request (Union[google.cloud.dlp_v2.types.DeidentifyContentRequest, dict]):
The request object. Request to de-identify a list of
items.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DeidentifyContentResponse:
Results of de-identifying a
ContentItem.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeidentifyContentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeidentifyContentRequest):
request = dlp.DeidentifyContentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.deidentify_content]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
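    # Hedged usage sketch for deidentify_content (comments only; <PROJECT_ID> is a
    # placeholder and the transformation config is just one possible choice).
    #
    #   response = client.deidentify_content(
    #       request={
    #           "parent": "projects/<PROJECT_ID>",
    #           "deidentify_config": {
    #               "info_type_transformations": {
    #                   "transformations": [
    #                       {"primitive_transformation": {"replace_with_info_type_config": {}}}
    #                   ]
    #               }
    #           },
    #           "item": {"value": "My email is <EMAIL>"},
    #       }
    #   )
    #   print(response.item.value)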
def reidentify_content(
self,
request: Union[dlp.ReidentifyContentRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.ReidentifyContentResponse:
r"""Re-identifies content that has been de-identified. See
https://cloud.google.com/dlp/docs/pseudonymization#re-identification_in_free_text_code_example
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ReidentifyContentRequest, dict]):
The request object. Request to re-identify an item.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.ReidentifyContentResponse:
                Results of re-identifying an item.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ReidentifyContentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ReidentifyContentRequest):
request = dlp.ReidentifyContentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.reidentify_content]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_info_types(
self,
request: Union[dlp.ListInfoTypesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.ListInfoTypesResponse:
r"""Returns a list of the sensitive information types
that the DLP API supports. See
https://cloud.google.com/dlp/docs/infotypes-reference to
learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListInfoTypesRequest, dict]):
The request object. Request for the list of infoTypes.
parent (str):
The parent resource name.
The format of this value is as follows:
::
locations/<var>LOCATION_ID</var>
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.ListInfoTypesResponse:
Response to the ListInfoTypes
request.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListInfoTypesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListInfoTypesRequest):
request = dlp.ListInfoTypesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_info_types]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
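    # Hedged usage sketch for list_info_types (comments only): the parent argument
    # is optional, so the built-in infoType catalog can be listed directly.
    #
    #   for info_type in client.list_info_types().info_types:
    #       print(info_type.name, info_type.display_name)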
def create_inspect_template(
self,
request: Union[dlp.CreateInspectTemplateRequest, dict] = None,
*,
parent: str = None,
inspect_template: dlp.InspectTemplate = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.InspectTemplate:
r"""Creates an InspectTemplate for re-using frequently
used configuration for inspecting content, images, and
storage. See https://cloud.google.com/dlp/docs/creating-
templates to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.CreateInspectTemplateRequest, dict]):
The request object. Request message for
CreateInspectTemplate.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
inspect_template (google.cloud.dlp_v2.types.InspectTemplate):
Required. The InspectTemplate to
create.
This corresponds to the ``inspect_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.InspectTemplate:
The inspectTemplate contains a
configuration (set of types of sensitive
data to be detected) to be used anywhere
you otherwise would normally specify
InspectConfig. See
https://cloud.google.com/dlp/docs/concepts-
templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, inspect_template])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CreateInspectTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CreateInspectTemplateRequest):
request = dlp.CreateInspectTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if inspect_template is not None:
request.inspect_template = inspect_template
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_inspect_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_inspect_template(
self,
request: Union[dlp.UpdateInspectTemplateRequest, dict] = None,
*,
name: str = None,
inspect_template: dlp.InspectTemplate = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.InspectTemplate:
r"""Updates the InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.UpdateInspectTemplateRequest, dict]):
The request object. Request message for
UpdateInspectTemplate.
name (str):
Required. Resource name of organization and
inspectTemplate to be updated, for example
``organizations/433245324/inspectTemplates/432452342``
or projects/project-id/inspectTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
inspect_template (google.cloud.dlp_v2.types.InspectTemplate):
New InspectTemplate value.
This corresponds to the ``inspect_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask to control which fields get
updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.InspectTemplate:
The inspectTemplate contains a
configuration (set of types of sensitive
data to be detected) to be used anywhere
you otherwise would normally specify
InspectConfig. See
https://cloud.google.com/dlp/docs/concepts-
templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, inspect_template, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.UpdateInspectTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.UpdateInspectTemplateRequest):
request = dlp.UpdateInspectTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if inspect_template is not None:
request.inspect_template = inspect_template
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_inspect_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_inspect_template(
self,
request: Union[dlp.GetInspectTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.InspectTemplate:
r"""Gets an InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.GetInspectTemplateRequest, dict]):
The request object. Request message for
GetInspectTemplate.
name (str):
Required. Resource name of the organization and
inspectTemplate to be read, for example
``organizations/433245324/inspectTemplates/432452342``
or projects/project-id/inspectTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.InspectTemplate:
The inspectTemplate contains a
configuration (set of types of sensitive
data to be detected) to be used anywhere
you otherwise would normally specify
InspectConfig. See
https://cloud.google.com/dlp/docs/concepts-
templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.GetInspectTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.GetInspectTemplateRequest):
request = dlp.GetInspectTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_inspect_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_inspect_templates(
self,
request: Union[dlp.ListInspectTemplatesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListInspectTemplatesPager:
r"""Lists InspectTemplates.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListInspectTemplatesRequest, dict]):
The request object. Request message for
ListInspectTemplates.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.services.dlp_service.pagers.ListInspectTemplatesPager:
Response message for
ListInspectTemplates.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListInspectTemplatesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListInspectTemplatesRequest):
request = dlp.ListInspectTemplatesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_inspect_templates]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListInspectTemplatesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
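    # Hedged usage sketch for list_inspect_templates (comments only): the returned
    # pager can be iterated directly and resolves additional pages on demand.
    #
    #   for template in client.list_inspect_templates(parent="projects/<PROJECT_ID>"):
    #       print(template.name)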
def delete_inspect_template(
self,
request: Union[dlp.DeleteInspectTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.DeleteInspectTemplateRequest, dict]):
The request object. Request message for
DeleteInspectTemplate.
name (str):
Required. Resource name of the organization and
inspectTemplate to be deleted, for example
``organizations/433245324/inspectTemplates/432452342``
or projects/project-id/inspectTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeleteInspectTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeleteInspectTemplateRequest):
request = dlp.DeleteInspectTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_inspect_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_deidentify_template(
self,
request: Union[dlp.CreateDeidentifyTemplateRequest, dict] = None,
*,
parent: str = None,
deidentify_template: dlp.DeidentifyTemplate = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DeidentifyTemplate:
r"""Creates a DeidentifyTemplate for re-using frequently
used configuration for de-identifying content, images,
and storage. See
https://cloud.google.com/dlp/docs/creating-templates-
deid to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.CreateDeidentifyTemplateRequest, dict]):
The request object. Request message for
CreateDeidentifyTemplate.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
deidentify_template (google.cloud.dlp_v2.types.DeidentifyTemplate):
Required. The DeidentifyTemplate to
create.
This corresponds to the ``deidentify_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DeidentifyTemplate:
DeidentifyTemplates contains
instructions on how to de-identify
content. See
https://cloud.google.com/dlp/docs/concepts-
templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, deidentify_template])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CreateDeidentifyTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CreateDeidentifyTemplateRequest):
request = dlp.CreateDeidentifyTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if deidentify_template is not None:
request.deidentify_template = deidentify_template
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_deidentify_template
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_deidentify_template(
self,
request: Union[dlp.UpdateDeidentifyTemplateRequest, dict] = None,
*,
name: str = None,
deidentify_template: dlp.DeidentifyTemplate = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DeidentifyTemplate:
r"""Updates the DeidentifyTemplate.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.UpdateDeidentifyTemplateRequest, dict]):
The request object. Request message for
UpdateDeidentifyTemplate.
name (str):
Required. Resource name of organization and deidentify
template to be updated, for example
``organizations/433245324/deidentifyTemplates/432452342``
or projects/project-id/deidentifyTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
deidentify_template (google.cloud.dlp_v2.types.DeidentifyTemplate):
New DeidentifyTemplate value.
This corresponds to the ``deidentify_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask to control which fields get
updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DeidentifyTemplate:
DeidentifyTemplates contains
instructions on how to de-identify
content. See
https://cloud.google.com/dlp/docs/concepts-
templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, deidentify_template, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.UpdateDeidentifyTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.UpdateDeidentifyTemplateRequest):
request = dlp.UpdateDeidentifyTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if deidentify_template is not None:
request.deidentify_template = deidentify_template
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.update_deidentify_template
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
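# Standalone sketch of update_deidentify_template that changes only the display
# name by passing a FieldMask; the template resource name is a placeholder.
def _example_update_deidentify_template():
    from google.cloud import dlp_v2
    from google.protobuf import field_mask_pb2

    client = dlp_v2.DlpServiceClient()
    return client.update_deidentify_template(
        name="projects/example-project/deidentifyTemplates/432452342",  # placeholder
        deidentify_template=dlp_v2.DeidentifyTemplate(display_name="new-name"),
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )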
def get_deidentify_template(
self,
request: Union[dlp.GetDeidentifyTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DeidentifyTemplate:
r"""Gets a DeidentifyTemplate.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.GetDeidentifyTemplateRequest, dict]):
The request object. Request message for
GetDeidentifyTemplate.
name (str):
Required. Resource name of the organization and
deidentify template to be read, for example
``organizations/433245324/deidentifyTemplates/432452342``
or projects/project-id/deidentifyTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DeidentifyTemplate:
DeidentifyTemplates contains
instructions on how to de-identify
content. See
https://cloud.google.com/dlp/docs/concepts-
templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.GetDeidentifyTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.GetDeidentifyTemplateRequest):
request = dlp.GetDeidentifyTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_deidentify_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_deidentify_templates(
self,
request: Union[dlp.ListDeidentifyTemplatesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDeidentifyTemplatesPager:
r"""Lists DeidentifyTemplates.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListDeidentifyTemplatesRequest, dict]):
The request object. Request message for
ListDeidentifyTemplates.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.services.dlp_service.pagers.ListDeidentifyTemplatesPager:
Response message for
ListDeidentifyTemplates.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListDeidentifyTemplatesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListDeidentifyTemplatesRequest):
request = dlp.ListDeidentifyTemplatesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_deidentify_templates
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDeidentifyTemplatesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
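# Standalone sketch showing that the returned pager can be iterated directly;
# additional pages are fetched transparently. The parent value is a placeholder.
def _example_list_deidentify_templates():
    from google.cloud import dlp_v2

    client = dlp_v2.DlpServiceClient()
    parent = "projects/example-project/locations/europe-west3"  # placeholder
    for template in client.list_deidentify_templates(parent=parent):
        print(template.name, template.display_name)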
def delete_deidentify_template(
self,
request: Union[dlp.DeleteDeidentifyTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a DeidentifyTemplate.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.DeleteDeidentifyTemplateRequest, dict]):
The request object. Request message for
DeleteDeidentifyTemplate.
name (str):
Required. Resource name of the organization and
deidentify template to be deleted, for example
``organizations/433245324/deidentifyTemplates/432452342``
or projects/project-id/deidentifyTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeleteDeidentifyTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeleteDeidentifyTemplateRequest):
request = dlp.DeleteDeidentifyTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_deidentify_template
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
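# Standalone sketch of delete_deidentify_template; the method returns None, so
# success is simply the absence of an exception. The resource name is a
# placeholder.
def _example_delete_deidentify_template():
    from google.cloud import dlp_v2

    client = dlp_v2.DlpServiceClient()
    client.delete_deidentify_template(
        name="projects/example-project/deidentifyTemplates/432452342"  # placeholder
    )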
def create_job_trigger(
self,
request: Union[dlp.CreateJobTriggerRequest, dict] = None,
*,
parent: str = None,
job_trigger: dlp.JobTrigger = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.JobTrigger:
r"""Creates a job trigger to run DLP actions such as
scanning storage for sensitive information on a set
schedule. See
https://cloud.google.com/dlp/docs/creating-job-triggers
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.CreateJobTriggerRequest, dict]):
The request object. Request message for
CreateJobTrigger.
parent (str):
Required. Parent resource name.
The format of this value varies depending on whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job_trigger (google.cloud.dlp_v2.types.JobTrigger):
Required. The JobTrigger to create.
This corresponds to the ``job_trigger`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.JobTrigger:
Contains a configuration to make DLP
API calls on a repeating basis. See

https://cloud.google.com/dlp/docs/concepts-
job-triggers to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, job_trigger])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CreateJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CreateJobTriggerRequest):
request = dlp.CreateJobTriggerRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if job_trigger is not None:
request.job_trigger = job_trigger
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_job_trigger]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
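# Standalone sketch of create_job_trigger wiring a daily-scheduled inspection
# of a Cloud Storage bucket; the bucket URL, schedule, and infoType are
# illustrative assumptions, not a recommended configuration.
def _example_create_job_trigger():
    from google.cloud import dlp_v2
    from google.protobuf import duration_pb2

    client = dlp_v2.DlpServiceClient()
    trigger = dlp_v2.JobTrigger(
        display_name="daily-gcs-scan",  # placeholder name
        status=dlp_v2.JobTrigger.Status.HEALTHY,
        triggers=[
            dlp_v2.JobTrigger.Trigger(
                schedule=dlp_v2.Schedule(
                    # Run once every 24 hours.
                    recurrence_period_duration=duration_pb2.Duration(seconds=86400)
                )
            )
        ],
        inspect_job=dlp_v2.InspectJobConfig(
            storage_config=dlp_v2.StorageConfig(
                cloud_storage_options=dlp_v2.CloudStorageOptions(
                    file_set=dlp_v2.CloudStorageOptions.FileSet(
                        url="gs://example-bucket/**"  # placeholder bucket
                    )
                )
            ),
            inspect_config=dlp_v2.InspectConfig(
                info_types=[dlp_v2.InfoType(name="EMAIL_ADDRESS")]
            ),
        ),
    )
    return client.create_job_trigger(
        parent="projects/example-project/locations/europe-west3",  # placeholder
        job_trigger=trigger,
    )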
def update_job_trigger(
self,
request: Union[dlp.UpdateJobTriggerRequest, dict] = None,
*,
name: str = None,
job_trigger: dlp.JobTrigger = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.JobTrigger:
r"""Updates a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.UpdateJobTriggerRequest, dict]):
The request object. Request message for
UpdateJobTrigger.
name (str):
Required. Resource name of the project and the
triggeredJob, for example
``projects/dlp-test-project/jobTriggers/53234423``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job_trigger (google.cloud.dlp_v2.types.JobTrigger):
New JobTrigger value.
This corresponds to the ``job_trigger`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask to control which fields get
updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.JobTrigger:
Contains a configuration to make DLP
API calls on a repeating basis. See
https://cloud.google.com/dlp/docs/concepts-
job-triggers to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, job_trigger, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.UpdateJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.UpdateJobTriggerRequest):
request = dlp.UpdateJobTriggerRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if job_trigger is not None:
request.job_trigger = job_trigger
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_job_trigger]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def hybrid_inspect_job_trigger(
self,
request: Union[dlp.HybridInspectJobTriggerRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.HybridInspectResponse:
r"""Inspect hybrid content and store findings to a
trigger. The inspection will be processed
asynchronously. To review the findings, monitor the jobs
within the trigger.
Args:
request (Union[google.cloud.dlp_v2.types.HybridInspectJobTriggerRequest, dict]):
The request object. Request to search for potentially
sensitive info in a custom location.
name (str):
Required. Resource name of the trigger to execute a
hybrid inspect on, for example
``projects/dlp-test-project/jobTriggers/53234423``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.HybridInspectResponse:
Quota exceeded errors will be thrown
once quota has been met.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.HybridInspectJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.HybridInspectJobTriggerRequest):
request = dlp.HybridInspectJobTriggerRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.hybrid_inspect_job_trigger
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_job_trigger(
self,
request: Union[dlp.GetJobTriggerRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.JobTrigger:
r"""Gets a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.GetJobTriggerRequest, dict]):
The request object. Request message for GetJobTrigger.
name (str):
Required. Resource name of the project and the
triggeredJob, for example
``projects/dlp-test-project/jobTriggers/53234423``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.JobTrigger:
Contains a configuration to make DLP
API calls on a repeating basis. See
https://cloud.google.com/dlp/docs/concepts-
job-triggers to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.GetJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.GetJobTriggerRequest):
request = dlp.GetJobTriggerRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_job_trigger]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_job_triggers(
self,
request: Union[dlp.ListJobTriggersRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobTriggersPager:
r"""Lists job triggers.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListJobTriggersRequest, dict]):
The request object. Request message for ListJobTriggers.
parent (str):
Required. Parent resource name.
The format of this value varies depending on whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.services.dlp_service.pagers.ListJobTriggersPager:
Response message for ListJobTriggers.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListJobTriggersRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListJobTriggersRequest):
request = dlp.ListJobTriggersRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_job_triggers]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListJobTriggersPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
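# Standalone sketch of listing triggers; the pager hides pagination, and the
# parent project path is a placeholder.
def _example_list_job_triggers():
    from google.cloud import dlp_v2

    client = dlp_v2.DlpServiceClient()
    for trigger in client.list_job_triggers(parent="projects/example-project"):
        print(trigger.name, trigger.status)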
def delete_job_trigger(
self,
request: Union[dlp.DeleteJobTriggerRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.DeleteJobTriggerRequest, dict]):
The request object. Request message for
DeleteJobTrigger.
name (str):
Required. Resource name of the project and the
triggeredJob, for example
``projects/dlp-test-project/jobTriggers/53234423``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeleteJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeleteJobTriggerRequest):
request = dlp.DeleteJobTriggerRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_job_trigger]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def activate_job_trigger(
self,
request: Union[dlp.ActivateJobTriggerRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DlpJob:
r"""Activate a job trigger. Causes the immediate execute
of a trigger instead of waiting on the trigger event to
occur.
Args:
request (Union[google.cloud.dlp_v2.types.ActivateJobTriggerRequest, dict]):
The request object. Request message for
ActivateJobTrigger.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DlpJob:
Combines all of the information about
a DLP job.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ActivateJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ActivateJobTriggerRequest):
request = dlp.ActivateJobTriggerRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.activate_job_trigger]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
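# activate_job_trigger exposes no flattened fields, so the trigger name travels
# in the request object (a dict is coerced as shown above); the name below is a
# placeholder. Standalone sketch only.
def _example_activate_job_trigger():
    from google.cloud import dlp_v2

    client = dlp_v2.DlpServiceClient()
    return client.activate_job_trigger(
        request={"name": "projects/example-project/jobTriggers/53234423"}  # placeholder
    )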
def create_dlp_job(
self,
request: Union[dlp.CreateDlpJobRequest, dict] = None,
*,
parent: str = None,
inspect_job: dlp.InspectJobConfig = None,
risk_job: dlp.RiskAnalysisJobConfig = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DlpJob:
r"""Creates a new job to inspect storage or calculate
risk metrics. See
https://cloud.google.com/dlp/docs/inspecting-storage and
https://cloud.google.com/dlp/docs/compute-risk-analysis
to learn more.
When no InfoTypes or CustomInfoTypes are specified in
inspect jobs, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Args:
request (Union[google.cloud.dlp_v2.types.CreateDlpJobRequest, dict]):
The request object. Request message for
CreateDlpJobRequest. Used to initiate long running jobs
such as calculating risk metrics or inspecting Google
Cloud Storage.
parent (str):
Required. Parent resource name.
The format of this value varies depending on whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
inspect_job (google.cloud.dlp_v2.types.InspectJobConfig):
An inspection job scans a storage
repository for InfoTypes.
This corresponds to the ``inspect_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
risk_job (google.cloud.dlp_v2.types.RiskAnalysisJobConfig):
A risk analysis job calculates re-
identification risk metrics for a
BigQuery table.
This corresponds to the ``risk_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DlpJob:
Combines all of the information about
a DLP job.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, inspect_job, risk_job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CreateDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CreateDlpJobRequest):
request = dlp.CreateDlpJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if inspect_job is not None:
request.inspect_job = inspect_job
if risk_job is not None:
request.risk_job = risk_job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
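# Standalone sketch of starting a one-off inspection job over a BigQuery table;
# the project, dataset, and table IDs are placeholders. Exactly one of
# `inspect_job` or `risk_job` should be supplied.
def _example_create_dlp_job():
    from google.cloud import dlp_v2

    client = dlp_v2.DlpServiceClient()
    inspect_job = dlp_v2.InspectJobConfig(
        storage_config=dlp_v2.StorageConfig(
            big_query_options=dlp_v2.BigQueryOptions(
                table_reference=dlp_v2.BigQueryTable(
                    project_id="example-project",   # placeholder
                    dataset_id="example_dataset",   # placeholder
                    table_id="example_table",       # placeholder
                )
            )
        ),
        inspect_config=dlp_v2.InspectConfig(
            info_types=[dlp_v2.InfoType(name="PHONE_NUMBER")]
        ),
    )
    return client.create_dlp_job(
        parent="projects/example-project/locations/europe-west3",  # placeholder
        inspect_job=inspect_job,
    )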
def list_dlp_jobs(
self,
request: Union[dlp.ListDlpJobsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDlpJobsPager:
r"""Lists DlpJobs that match the specified filter in the
request. See
https://cloud.google.com/dlp/docs/inspecting-storage and
https://cloud.google.com/dlp/docs/compute-risk-analysis
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListDlpJobsRequest, dict]):
The request object. The request message for listing DLP
jobs.
parent (str):
Required. Parent resource name.
The format of this value varies depending on whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.services.dlp_service.pagers.ListDlpJobsPager:
The response message for listing DLP
jobs.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListDlpJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListDlpJobsRequest):
request = dlp.ListDlpJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_dlp_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDlpJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
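# Standalone sketch of listing jobs; only the flattened parent is set here (a
# placeholder), though the request object also carries filter and type fields
# when constructed explicitly.
def _example_list_dlp_jobs():
    from google.cloud import dlp_v2

    client = dlp_v2.DlpServiceClient()
    for job in client.list_dlp_jobs(parent="projects/example-project"):
        print(job.name, job.state)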
def get_dlp_job(
self,
request: Union[dlp.GetDlpJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DlpJob:
r"""Gets the latest state of a long-running DlpJob.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.GetDlpJobRequest, dict]):
The request object. The request message for
[DlpJobs.GetDlpJob][].
name (str):
Required. The name of the DlpJob
resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DlpJob:
Combines all of the information about
a DLP job.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.GetDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.GetDlpJobRequest):
request = dlp.GetDlpJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
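# Standalone sketch of polling a job with get_dlp_job until it reaches a
# terminal state; the job name is a placeholder and the fixed sleep interval is
# illustrative only.
def _example_wait_for_dlp_job(job_name="projects/example-project/dlpJobs/i-123"):
    import time

    from google.cloud import dlp_v2

    client = dlp_v2.DlpServiceClient()
    terminal_states = {
        dlp_v2.DlpJob.JobState.DONE,
        dlp_v2.DlpJob.JobState.CANCELED,
        dlp_v2.DlpJob.JobState.FAILED,
    }
    job = client.get_dlp_job(name=job_name)
    while job.state not in terminal_states:
        time.sleep(30)  # illustrative polling interval
        job = client.get_dlp_job(name=job_name)
    return job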
def delete_dlp_job(
self,
request: Union[dlp.DeleteDlpJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a long-running DlpJob. This method indicates
that the client is no longer interested in the DlpJob
result. The job will be cancelled if possible.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.DeleteDlpJobRequest, dict]):
The request object. The request message for deleting a
DLP job.
name (str):
Required. The name of the DlpJob
resource to be deleted.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeleteDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeleteDlpJobRequest):
request = dlp.DeleteDlpJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def cancel_dlp_job(
self,
request: Union[dlp.CancelDlpJobRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Starts asynchronous cancellation on a long-running
DlpJob. The server makes a best effort to cancel the
DlpJob, but success is not guaranteed.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.CancelDlpJobRequest, dict]):
The request object. The request message for canceling a
DLP job.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CancelDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CancelDlpJobRequest):
request = dlp.CancelDlpJobRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.cancel_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
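# Like activate_job_trigger, cancel_dlp_job has no flattened fields, so the job
# name is carried in the request object; cancellation is best effort and the
# job may still finish. The name is a placeholder. Standalone sketch only.
def _example_cancel_dlp_job():
    from google.cloud import dlp_v2

    client = dlp_v2.DlpServiceClient()
    client.cancel_dlp_job(
        request={"name": "projects/example-project/dlpJobs/i-1234567890"}  # placeholder
    )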
def create_stored_info_type(
self,
request: Union[dlp.CreateStoredInfoTypeRequest, dict] = None,
*,
parent: str = None,
config: dlp.StoredInfoTypeConfig = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.StoredInfoType:
r"""Creates a pre-built stored infoType to be used for
inspection. See
https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.CreateStoredInfoTypeRequest, dict]):
The request object. Request message for
CreateStoredInfoType.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
config (google.cloud.dlp_v2.types.StoredInfoTypeConfig):
Required. Configuration of the
storedInfoType to create.
This corresponds to the ``config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.StoredInfoType:
StoredInfoType resource message that
contains information about the current
version and any pending updates.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CreateStoredInfoTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CreateStoredInfoTypeRequest):
request = dlp.CreateStoredInfoTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if config is not None:
request.config = config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_stored_info_type]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
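# Standalone sketch of creating a regex-backed stored infoType; the pattern and
# names are placeholders. Building the stored infoType happens asynchronously
# on the service side, so inspect the returned StoredInfoType for version info.
def _example_create_stored_info_type():
    from google.cloud import dlp_v2

    client = dlp_v2.DlpServiceClient()
    config = dlp_v2.StoredInfoTypeConfig(
        display_name="internal-ticket-id",  # placeholder
        regex=dlp_v2.CustomInfoType.Regex(pattern=r"TICKET-\d{6}"),  # placeholder pattern
    )
    return client.create_stored_info_type(
        parent="projects/example-project/locations/europe-west3",  # placeholder
        config=config,
    )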
def update_stored_info_type(
self,
request: Union[dlp.UpdateStoredInfoTypeRequest, dict] = None,
*,
name: str = None,
config: dlp.StoredInfoTypeConfig = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.StoredInfoType:
r"""Updates the stored infoType by creating a new
version. The existing version will continue to be used
until the new version is ready. See
https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.UpdateStoredInfoTypeRequest, dict]):
The request object. Request message for
UpdateStoredInfoType.
name (str):
Required. Resource name of organization and
storedInfoType to be updated, for example
``organizations/433245324/storedInfoTypes/432452342`` or
projects/project-id/storedInfoTypes/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
config (google.cloud.dlp_v2.types.StoredInfoTypeConfig):
Updated configuration for the
storedInfoType. If not provided, a new
version of the storedInfoType will be
created with the existing configuration.
This corresponds to the ``config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask to control which fields get
updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.StoredInfoType:
StoredInfoType resource message that
contains information about the current
version and any pending updates.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, config, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.UpdateStoredInfoTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.UpdateStoredInfoTypeRequest):
request = dlp.UpdateStoredInfoTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if config is not None:
request.config = config
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_stored_info_type]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_stored_info_type(
self,
request: Union[dlp.GetStoredInfoTypeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.StoredInfoType:
r"""Gets a stored infoType.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.GetStoredInfoTypeRequest, dict]):
The request object. Request message for
GetStoredInfoType.
name (str):
Required. Resource name of the organization and
storedInfoType to be read, for example
``organizations/433245324/storedInfoTypes/432452342`` or
projects/project-id/storedInfoTypes/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.StoredInfoType:
StoredInfoType resource message that
contains information about the current
version and any pending updates.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.GetStoredInfoTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.GetStoredInfoTypeRequest):
request = dlp.GetStoredInfoTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_stored_info_type]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_stored_info_types(
self,
request: Union[dlp.ListStoredInfoTypesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListStoredInfoTypesPager:
r"""Lists stored infoTypes.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListStoredInfoTypesRequest, dict]):
The request object. Request message for
ListStoredInfoTypes.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.services.dlp_service.pagers.ListStoredInfoTypesPager:
Response message for
ListStoredInfoTypes.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListStoredInfoTypesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListStoredInfoTypesRequest):
request = dlp.ListStoredInfoTypesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_stored_info_types]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListStoredInfoTypesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
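# Standalone sketch of enumerating stored infoTypes under an organization-scoped
# parent; the organization ID and location are placeholders.
def _example_list_stored_info_types():
    from google.cloud import dlp_v2

    client = dlp_v2.DlpServiceClient()
    parent = "organizations/433245324/locations/europe-west3"  # placeholder
    for stored_info_type in client.list_stored_info_types(parent=parent):
        print(stored_info_type.name)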
def delete_stored_info_type(
self,
request: Union[dlp.DeleteStoredInfoTypeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a stored infoType.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.DeleteStoredInfoTypeRequest, dict]):
The request object. Request message for
DeleteStoredInfoType.
name (str):
Required. Resource name of the organization and
storedInfoType to be deleted, for example
``organizations/433245324/storedInfoTypes/432452342`` or
projects/project-id/storedInfoTypes/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeleteStoredInfoTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeleteStoredInfoTypeRequest):
request = dlp.DeleteStoredInfoTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_stored_info_type]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
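    # Usage sketch (illustrative): deleting a stored infoType by its resource
    # name. The method returns None on success; the resource name below is a
    # placeholder.
    #
    #     client.delete_stored_info_type(
    #         name="projects/example-project/storedInfoTypes/432452342"
    #     )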
def hybrid_inspect_dlp_job(
self,
request: Union[dlp.HybridInspectDlpJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.HybridInspectResponse:
r"""Inspect hybrid content and store findings to a job.
To review the findings, inspect the job. Inspection will
occur asynchronously.
Args:
request (Union[google.cloud.dlp_v2.types.HybridInspectDlpJobRequest, dict]):
The request object. Request to search for potentially
sensitive info in a custom location.
name (str):
Required. Resource name of the job to execute a hybrid
inspect on, for example
``projects/dlp-test-project/dlpJob/53234423``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.HybridInspectResponse:
Quota exceeded errors will be thrown
once quota has been met.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.HybridInspectDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.HybridInspectDlpJobRequest):
request = dlp.HybridInspectDlpJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.hybrid_inspect_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
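    # Usage sketch (illustrative): triggering a hybrid inspection against an
    # existing hybrid DLP job. The job resource name is a placeholder; the
    # hybrid item itself can be supplied via the full request object.
    #
    #     response = client.hybrid_inspect_dlp_job(
    #         name="projects/example-project/locations/global/dlpJobs/i-1234567890"
    #     )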
def finish_dlp_job(
self,
request: Union[dlp.FinishDlpJobRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Finish a running hybrid DlpJob. Triggers the
finalization steps and running of any enabled actions
that have not yet run.
Args:
request (Union[google.cloud.dlp_v2.types.FinishDlpJobRequest, dict]):
The request object. The request message for finishing a
DLP hybrid job.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.FinishDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.FinishDlpJobRequest):
request = dlp.FinishDlpJobRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.finish_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
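    # Usage sketch (illustrative): finishing a running hybrid job. FinishDlpJob
    # exposes no flattened fields, so the job name travels in the request object
    # (a dict is coerced above); the name shown is a placeholder.
    #
    #     client.finish_dlp_job(
    #         request={
    #             "name": "projects/example-project/locations/global/dlpJobs/i-1234567890"
    #         }
    #     )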
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
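# Usage sketch (illustrative): because __enter__/__exit__ are defined above, the
# client can be used as a context manager. The transport is closed on exit, so
# only do this when the transport is not shared with other clients.
#
#     with DlpServiceClient() as client:
#         client.list_info_types()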
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-dlp",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("DlpServiceClient",)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.dlp_v2.services.dlp_service import pagers
from google.cloud.dlp_v2.types import dlp
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import DlpServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import DlpServiceGrpcTransport
from .transports.grpc_asyncio import DlpServiceGrpcAsyncIOTransport
class DlpServiceClientMeta(type):
"""Metaclass for the DlpService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[DlpServiceTransport]]
_transport_registry["grpc"] = DlpServiceGrpcTransport
_transport_registry["grpc_asyncio"] = DlpServiceGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[DlpServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DlpServiceClient(metaclass=DlpServiceClientMeta):
"""The Cloud Data Loss Prevention (DLP) API is a service that
allows clients to detect the presence of Personally Identifiable
Information (PII) and other privacy-sensitive data in user-
supplied, unstructured data streams, like text blocks or images.
The service also includes methods for sensitive data redaction
and scheduling of data scans on Google Cloud Platform based data
sets.
To learn more about concepts and find how-to guides see
https://cloud.google.com/dlp/docs/.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
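    # Behaviour sketch (illustrative) of the helper above, following its regex
    # and replace logic:
    #
    #     _get_default_mtls_endpoint("dlp.googleapis.com")
    #         -> "dlp.mtls.googleapis.com"
    #     _get_default_mtls_endpoint("dlp.sandbox.googleapis.com")
    #         -> "dlp.mtls.sandbox.googleapis.com"
    #     _get_default_mtls_endpoint("dlp.mtls.googleapis.com")
    #         -> unchanged, already an mTLS endpoint
    #     _get_default_mtls_endpoint("localhost:8080")
    #         -> unchanged, not a *.googleapis.com host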
DEFAULT_ENDPOINT = "dlp.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DlpServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DlpServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
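    # Usage sketch (illustrative): constructing a client from a service account
    # key file; the JSON path below is a placeholder. from_service_account_json
    # is an alias for the same constructor.
    #
    #     client = DlpServiceClient.from_service_account_file(
    #         "/path/to/service-account.json"
    #     )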
@property
def transport(self) -> DlpServiceTransport:
"""Returns the transport used by the client instance.
Returns:
DlpServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def deidentify_template_path(organization: str, deidentify_template: str,) -> str:
"""Returns a fully-qualified deidentify_template string."""
return "organizations/{organization}/deidentifyTemplates/{deidentify_template}".format(
organization=organization, deidentify_template=deidentify_template,
)
@staticmethod
def parse_deidentify_template_path(path: str) -> Dict[str, str]:
"""Parses a deidentify_template path into its component segments."""
m = re.match(
r"^organizations/(?P<organization>.+?)/deidentifyTemplates/(?P<deidentify_template>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def dlp_content_path(project: str,) -> str:
"""Returns a fully-qualified dlp_content string."""
return "projects/{project}/dlpContent".format(project=project,)
@staticmethod
def parse_dlp_content_path(path: str) -> Dict[str, str]:
"""Parses a dlp_content path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/dlpContent$", path)
return m.groupdict() if m else {}
@staticmethod
def dlp_job_path(project: str, dlp_job: str,) -> str:
"""Returns a fully-qualified dlp_job string."""
return "projects/{project}/dlpJobs/{dlp_job}".format(
project=project, dlp_job=dlp_job,
)
@staticmethod
def parse_dlp_job_path(path: str) -> Dict[str, str]:
"""Parses a dlp_job path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/dlpJobs/(?P<dlp_job>.+?)$", path)
return m.groupdict() if m else {}
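    # Usage sketch (illustrative) of the dlp_job path helpers above, with
    # placeholder identifiers:
    #
    #     path = DlpServiceClient.dlp_job_path("example-project", "i-1234567890")
    #     # -> "projects/example-project/dlpJobs/i-1234567890"
    #     DlpServiceClient.parse_dlp_job_path(path)
    #     # -> {"project": "example-project", "dlp_job": "i-1234567890"}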
@staticmethod
def finding_path(project: str, location: str, finding: str,) -> str:
"""Returns a fully-qualified finding string."""
return "projects/{project}/locations/{location}/findings/{finding}".format(
project=project, location=location, finding=finding,
)
@staticmethod
def parse_finding_path(path: str) -> Dict[str, str]:
"""Parses a finding path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/findings/(?P<finding>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def inspect_template_path(organization: str, inspect_template: str,) -> str:
"""Returns a fully-qualified inspect_template string."""
return "organizations/{organization}/inspectTemplates/{inspect_template}".format(
organization=organization, inspect_template=inspect_template,
)
@staticmethod
def parse_inspect_template_path(path: str) -> Dict[str, str]:
"""Parses a inspect_template path into its component segments."""
m = re.match(
r"^organizations/(?P<organization>.+?)/inspectTemplates/(?P<inspect_template>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def job_trigger_path(project: str, job_trigger: str,) -> str:
"""Returns a fully-qualified job_trigger string."""
return "projects/{project}/jobTriggers/{job_trigger}".format(
project=project, job_trigger=job_trigger,
)
@staticmethod
def parse_job_trigger_path(path: str) -> Dict[str, str]:
"""Parses a job_trigger path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/jobTriggers/(?P<job_trigger>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def stored_info_type_path(organization: str, stored_info_type: str,) -> str:
"""Returns a fully-qualified stored_info_type string."""
return "organizations/{organization}/storedInfoTypes/{stored_info_type}".format(
organization=organization, stored_info_type=stored_info_type,
)
@staticmethod
def parse_stored_info_type_path(path: str) -> Dict[str, str]:
"""Parses a stored_info_type path into its component segments."""
m = re.match(
r"^organizations/(?P<organization>.+?)/storedInfoTypes/(?P<stored_info_type>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, DlpServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the dlp service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, DlpServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, DlpServiceTransport):
# transport is a DlpServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
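    # Usage sketch (illustrative): the most common instantiation relies on
    # Application Default Credentials and the default endpoint, but a private or
    # regional endpoint can be supplied via client_options (dicts are coerced
    # above). The endpoint value shown is a placeholder.
    #
    #     client = DlpServiceClient()
    #     client_with_override = DlpServiceClient(
    #         client_options={"api_endpoint": "dlp.example-endpoint.googleapis.com"}
    #     )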
def inspect_content(
self,
request: Union[dlp.InspectContentRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.InspectContentResponse:
r"""Finds potentially sensitive info in content.
This method has limits on input size, processing time,
and output size.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
        For how-to guides, see
        https://cloud.google.com/dlp/docs/inspecting-images and
        https://cloud.google.com/dlp/docs/inspecting-text.
Args:
request (Union[google.cloud.dlp_v2.types.InspectContentRequest, dict]):
The request object. Request to search for potentially
sensitive info in a ContentItem.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.InspectContentResponse:
Results of inspecting an item.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.InspectContentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.InspectContentRequest):
request = dlp.InspectContentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.inspect_content]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
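    # Usage sketch (illustrative): inspect_content has no flattened fields, so
    # the parent, item, and inspect_config travel in the request (a dict is
    # coerced into an InspectContentRequest above). "example-project" and the
    # sample text are placeholders.
    #
    #     response = client.inspect_content(
    #         request={
    #             "parent": "projects/example-project/locations/global",
    #             "item": {"value": "My email is test@example.com"},
    #             "inspect_config": {"info_types": [{"name": "EMAIL_ADDRESS"}]},
    #         }
    #     )
    #     for finding in response.result.findings:
    #         print(finding.info_type.name, finding.likelihood)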
def redact_image(
self,
request: Union[dlp.RedactImageRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.RedactImageResponse:
r"""Redacts potentially sensitive info from an image.
This method has limits on input size, processing time,
and output size. See
        https://cloud.google.com/dlp/docs/redacting-sensitive-data-images to learn more.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Args:
request (Union[google.cloud.dlp_v2.types.RedactImageRequest, dict]):
The request object. Request to search for potentially
sensitive info in an image and redact it by covering it
with a colored rectangle.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.RedactImageResponse:
Results of redacting an image.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.RedactImageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.RedactImageRequest):
request = dlp.RedactImageRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.redact_image]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def deidentify_content(
self,
request: Union[dlp.DeidentifyContentRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DeidentifyContentResponse:
r"""De-identifies potentially sensitive info from a
ContentItem. This method has limits on input size and
output size. See
        https://cloud.google.com/dlp/docs/deidentify-sensitive-data to learn more.
When no InfoTypes or CustomInfoTypes are specified in
this request, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Args:
request (Union[google.cloud.dlp_v2.types.DeidentifyContentRequest, dict]):
The request object. Request to de-identify a list of
items.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DeidentifyContentResponse:
Results of de-identifying a
ContentItem.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeidentifyContentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeidentifyContentRequest):
request = dlp.DeidentifyContentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.deidentify_content]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
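    # Usage sketch (illustrative): masking detected infoTypes in free text by
    # replacing each finding with its infoType name. The configuration shape
    # mirrors the public samples; identifiers and text are placeholders.
    #
    #     response = client.deidentify_content(
    #         request={
    #             "parent": "projects/example-project/locations/global",
    #             "item": {"value": "My email is test@example.com"},
    #             "inspect_config": {"info_types": [{"name": "EMAIL_ADDRESS"}]},
    #             "deidentify_config": {
    #                 "info_type_transformations": {
    #                     "transformations": [
    #                         {
    #                             "primitive_transformation": {
    #                                 "replace_with_info_type_config": {}
    #                             }
    #                         }
    #                     ]
    #                 }
    #             },
    #         }
    #     )
    #     print(response.item.value)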
def reidentify_content(
self,
request: Union[dlp.ReidentifyContentRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.ReidentifyContentResponse:
r"""Re-identifies content that has been de-identified. See
https://cloud.google.com/dlp/docs/pseudonymization#re-identification_in_free_text_code_example
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ReidentifyContentRequest, dict]):
The request object. Request to re-identify an item.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.ReidentifyContentResponse:
                Results of re-identifying an item.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ReidentifyContentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ReidentifyContentRequest):
request = dlp.ReidentifyContentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.reidentify_content]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_info_types(
self,
request: Union[dlp.ListInfoTypesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.ListInfoTypesResponse:
r"""Returns a list of the sensitive information types
that the DLP API supports. See
https://cloud.google.com/dlp/docs/infotypes-reference to
learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListInfoTypesRequest, dict]):
The request object. Request for the list of infoTypes.
parent (str):
The parent resource name.
The format of this value is as follows:
::
locations/<var>LOCATION_ID</var>
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.ListInfoTypesResponse:
Response to the ListInfoTypes
request.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListInfoTypesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListInfoTypesRequest):
request = dlp.ListInfoTypesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_info_types]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
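    # Usage sketch (illustrative): listing the built-in infoType detectors. The
    # parent is optional; when supplied it uses the locations/LOCATION_ID form
    # from the docstring above.
    #
    #     response = client.list_info_types(parent="locations/europe-west3")
    #     for info_type in response.info_types:
    #         print(info_type.name, "-", info_type.display_name)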
def create_inspect_template(
self,
request: Union[dlp.CreateInspectTemplateRequest, dict] = None,
*,
parent: str = None,
inspect_template: dlp.InspectTemplate = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.InspectTemplate:
r"""Creates an InspectTemplate for re-using frequently
used configuration for inspecting content, images, and
        storage. See https://cloud.google.com/dlp/docs/creating-templates to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.CreateInspectTemplateRequest, dict]):
The request object. Request message for
CreateInspectTemplate.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
inspect_template (google.cloud.dlp_v2.types.InspectTemplate):
Required. The InspectTemplate to
create.
This corresponds to the ``inspect_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.InspectTemplate:
                The inspectTemplate contains a configuration (set of types of
                sensitive data to be detected) to be used anywhere you would
                otherwise specify InspectConfig. See
                https://cloud.google.com/dlp/docs/concepts-templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, inspect_template])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CreateInspectTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CreateInspectTemplateRequest):
request = dlp.CreateInspectTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if inspect_template is not None:
request.inspect_template = inspect_template
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_inspect_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
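    # Usage sketch (illustrative): creating a reusable template from the
    # flattened arguments. The InspectTemplate message is built from the public
    # dlp_v2 types; identifiers are placeholders.
    #
    #     from google.cloud import dlp_v2
    #
    #     template = dlp_v2.InspectTemplate(
    #         display_name="email-only",
    #         inspect_config=dlp_v2.InspectConfig(
    #             info_types=[dlp_v2.InfoType(name="EMAIL_ADDRESS")]
    #         ),
    #     )
    #     created = client.create_inspect_template(
    #         parent="projects/example-project/locations/global",
    #         inspect_template=template,
    #     )
    #     print(created.name)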
def update_inspect_template(
self,
request: Union[dlp.UpdateInspectTemplateRequest, dict] = None,
*,
name: str = None,
inspect_template: dlp.InspectTemplate = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.InspectTemplate:
r"""Updates the InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.UpdateInspectTemplateRequest, dict]):
The request object. Request message for
UpdateInspectTemplate.
name (str):
Required. Resource name of organization and
inspectTemplate to be updated, for example
``organizations/433245324/inspectTemplates/432452342``
or projects/project-id/inspectTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
inspect_template (google.cloud.dlp_v2.types.InspectTemplate):
New InspectTemplate value.
This corresponds to the ``inspect_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask to control which fields get
updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.InspectTemplate:
                The inspectTemplate contains a configuration (set of types of
                sensitive data to be detected) to be used anywhere you would
                otherwise specify InspectConfig. See
                https://cloud.google.com/dlp/docs/concepts-templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, inspect_template, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.UpdateInspectTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.UpdateInspectTemplateRequest):
request = dlp.UpdateInspectTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if inspect_template is not None:
request.inspect_template = inspect_template
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_inspect_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_inspect_template(
self,
request: Union[dlp.GetInspectTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.InspectTemplate:
r"""Gets an InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.GetInspectTemplateRequest, dict]):
The request object. Request message for
GetInspectTemplate.
name (str):
Required. Resource name of the organization and
inspectTemplate to be read, for example
``organizations/433245324/inspectTemplates/432452342``
or projects/project-id/inspectTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.InspectTemplate:
                The inspectTemplate contains a configuration (set of types of
                sensitive data to be detected) to be used anywhere you would
                otherwise specify InspectConfig. See
                https://cloud.google.com/dlp/docs/concepts-templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.GetInspectTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.GetInspectTemplateRequest):
request = dlp.GetInspectTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_inspect_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_inspect_templates(
self,
request: Union[dlp.ListInspectTemplatesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListInspectTemplatesPager:
r"""Lists InspectTemplates.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListInspectTemplatesRequest, dict]):
The request object. Request message for
ListInspectTemplates.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.services.dlp_service.pagers.ListInspectTemplatesPager:
Response message for
ListInspectTemplates.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListInspectTemplatesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListInspectTemplatesRequest):
request = dlp.ListInspectTemplatesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_inspect_templates]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListInspectTemplatesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
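    # Usage sketch (illustrative): the pager above can also be consumed page by
    # page when per-page bookkeeping is needed; identifiers are placeholders.
    #
    #     pager = client.list_inspect_templates(
    #         parent="projects/example-project/locations/global"
    #     )
    #     for page in pager.pages:
    #         print("page with", len(page.inspect_templates), "templates")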
def delete_inspect_template(
self,
request: Union[dlp.DeleteInspectTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes an InspectTemplate.
See https://cloud.google.com/dlp/docs/creating-templates
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.DeleteInspectTemplateRequest, dict]):
The request object. Request message for
DeleteInspectTemplate.
name (str):
Required. Resource name of the organization and
inspectTemplate to be deleted, for example
``organizations/433245324/inspectTemplates/432452342``
or projects/project-id/inspectTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeleteInspectTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeleteInspectTemplateRequest):
request = dlp.DeleteInspectTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_inspect_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_deidentify_template(
self,
request: Union[dlp.CreateDeidentifyTemplateRequest, dict] = None,
*,
parent: str = None,
deidentify_template: dlp.DeidentifyTemplate = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DeidentifyTemplate:
r"""Creates a DeidentifyTemplate for re-using frequently
used configuration for de-identifying content, images,
and storage. See
        https://cloud.google.com/dlp/docs/creating-templates-deid to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.CreateDeidentifyTemplateRequest, dict]):
The request object. Request message for
CreateDeidentifyTemplate.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
deidentify_template (google.cloud.dlp_v2.types.DeidentifyTemplate):
Required. The DeidentifyTemplate to
create.
This corresponds to the ``deidentify_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DeidentifyTemplate:
                A DeidentifyTemplate contains instructions on how to
                de-identify content. See
                https://cloud.google.com/dlp/docs/concepts-templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, deidentify_template])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CreateDeidentifyTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CreateDeidentifyTemplateRequest):
request = dlp.CreateDeidentifyTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if deidentify_template is not None:
request.deidentify_template = deidentify_template
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_deidentify_template
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_deidentify_template(
self,
request: Union[dlp.UpdateDeidentifyTemplateRequest, dict] = None,
*,
name: str = None,
deidentify_template: dlp.DeidentifyTemplate = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DeidentifyTemplate:
r"""Updates the DeidentifyTemplate.
        See https://cloud.google.com/dlp/docs/creating-templates-deid to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.UpdateDeidentifyTemplateRequest, dict]):
The request object. Request message for
UpdateDeidentifyTemplate.
name (str):
Required. Resource name of organization and deidentify
template to be updated, for example
``organizations/433245324/deidentifyTemplates/432452342``
or projects/project-id/deidentifyTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
deidentify_template (google.cloud.dlp_v2.types.DeidentifyTemplate):
New DeidentifyTemplate value.
This corresponds to the ``deidentify_template`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask to control which fields get
updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DeidentifyTemplate:
                A DeidentifyTemplate contains instructions on how to
                de-identify content. See
                https://cloud.google.com/dlp/docs/concepts-templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, deidentify_template, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.UpdateDeidentifyTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.UpdateDeidentifyTemplateRequest):
request = dlp.UpdateDeidentifyTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if deidentify_template is not None:
request.deidentify_template = deidentify_template
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.update_deidentify_template
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_deidentify_template(
self,
request: Union[dlp.GetDeidentifyTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DeidentifyTemplate:
r"""Gets a DeidentifyTemplate.
        See https://cloud.google.com/dlp/docs/creating-templates-deid to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.GetDeidentifyTemplateRequest, dict]):
The request object. Request message for
GetDeidentifyTemplate.
name (str):
Required. Resource name of the organization and
deidentify template to be read, for example
``organizations/433245324/deidentifyTemplates/432452342``
or projects/project-id/deidentifyTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DeidentifyTemplate:
                A DeidentifyTemplate contains instructions on how to
                de-identify content. See
                https://cloud.google.com/dlp/docs/concepts-templates to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.GetDeidentifyTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.GetDeidentifyTemplateRequest):
request = dlp.GetDeidentifyTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_deidentify_template]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
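    # Usage sketch (hypothetical resource names; assumes an instantiated
    # ``client``). Passing the flattened ``name`` argument is equivalent to
    # building a ``GetDeidentifyTemplateRequest`` explicitly:
    #
    #     template = client.get_deidentify_template(
    #         name="projects/my-project/deidentifyTemplates/my-template"
    #     )
    #     print(template.display_name)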
def list_deidentify_templates(
self,
request: Union[dlp.ListDeidentifyTemplatesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDeidentifyTemplatesPager:
r"""Lists DeidentifyTemplates.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListDeidentifyTemplatesRequest, dict]):
The request object. Request message for
ListDeidentifyTemplates.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.services.dlp_service.pagers.ListDeidentifyTemplatesPager:
Response message for
ListDeidentifyTemplates.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListDeidentifyTemplatesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListDeidentifyTemplatesRequest):
request = dlp.ListDeidentifyTemplatesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_deidentify_templates
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDeidentifyTemplatesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
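    # Usage sketch (hypothetical parent; assumes an instantiated ``client``).
    # The returned pager resolves additional pages transparently while
    # iterating:
    #
    #     parent = "projects/my-project/locations/global"
    #     for template in client.list_deidentify_templates(parent=parent):
    #         print(template.name)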
def delete_deidentify_template(
self,
request: Union[dlp.DeleteDeidentifyTemplateRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a DeidentifyTemplate.
See https://cloud.google.com/dlp/docs/creating-
templates-deid to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.DeleteDeidentifyTemplateRequest, dict]):
The request object. Request message for
DeleteDeidentifyTemplate.
name (str):
Required. Resource name of the organization and
deidentify template to be deleted, for example
``organizations/433245324/deidentifyTemplates/432452342``
or projects/project-id/deidentifyTemplates/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeleteDeidentifyTemplateRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeleteDeidentifyTemplateRequest):
request = dlp.DeleteDeidentifyTemplateRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_deidentify_template
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_job_trigger(
self,
request: Union[dlp.CreateJobTriggerRequest, dict] = None,
*,
parent: str = None,
job_trigger: dlp.JobTrigger = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.JobTrigger:
r"""Creates a job trigger to run DLP actions such as
scanning storage for sensitive information on a set
schedule. See
https://cloud.google.com/dlp/docs/creating-job-triggers
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.CreateJobTriggerRequest, dict]):
The request object. Request message for
CreateJobTrigger.
parent (str):
Required. Parent resource name.
The format of this value varies depending on whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job_trigger (google.cloud.dlp_v2.types.JobTrigger):
Required. The JobTrigger to create.
This corresponds to the ``job_trigger`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.JobTrigger:
                Contains a configuration to make DLP
                API calls on a repeating basis. See
https://cloud.google.com/dlp/docs/concepts-
job-triggers to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, job_trigger])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CreateJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CreateJobTriggerRequest):
request = dlp.CreateJobTriggerRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if job_trigger is not None:
request.job_trigger = job_trigger
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_job_trigger]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
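    # Usage sketch (hypothetical names; assumes an instantiated ``client`` and
    # ``from google.cloud import dlp_v2``). Intentionally minimal: a real
    # trigger also needs an ``inspect_job`` configuration and a schedule in
    # ``triggers``:
    #
    #     trigger = dlp_v2.JobTrigger(
    #         display_name="Nightly storage scan",
    #         status=dlp_v2.JobTrigger.Status.HEALTHY,
    #     )
    #     created = client.create_job_trigger(
    #         parent="projects/my-project", job_trigger=trigger
    #     )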
def update_job_trigger(
self,
request: Union[dlp.UpdateJobTriggerRequest, dict] = None,
*,
name: str = None,
job_trigger: dlp.JobTrigger = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.JobTrigger:
r"""Updates a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.UpdateJobTriggerRequest, dict]):
The request object. Request message for
UpdateJobTrigger.
name (str):
Required. Resource name of the project and the
triggeredJob, for example
``projects/dlp-test-project/jobTriggers/53234423``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
job_trigger (google.cloud.dlp_v2.types.JobTrigger):
New JobTrigger value.
This corresponds to the ``job_trigger`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask to control which fields get
updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.JobTrigger:
                Contains a configuration to make DLP
                API calls on a repeating basis. See
https://cloud.google.com/dlp/docs/concepts-
job-triggers to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, job_trigger, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.UpdateJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.UpdateJobTriggerRequest):
request = dlp.UpdateJobTriggerRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if job_trigger is not None:
request.job_trigger = job_trigger
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_job_trigger]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def hybrid_inspect_job_trigger(
self,
request: Union[dlp.HybridInspectJobTriggerRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.HybridInspectResponse:
r"""Inspect hybrid content and store findings to a
trigger. The inspection will be processed
        asynchronously. To review the findings, monitor the jobs
within the trigger.
Args:
request (Union[google.cloud.dlp_v2.types.HybridInspectJobTriggerRequest, dict]):
The request object. Request to search for potentially
sensitive info in a custom location.
name (str):
Required. Resource name of the trigger to execute a
hybrid inspect on, for example
``projects/dlp-test-project/jobTriggers/53234423``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.HybridInspectResponse:
                Quota exceeded errors will be thrown
                once the quota has been reached.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.HybridInspectJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.HybridInspectJobTriggerRequest):
request = dlp.HybridInspectJobTriggerRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.hybrid_inspect_job_trigger
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_job_trigger(
self,
request: Union[dlp.GetJobTriggerRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.JobTrigger:
r"""Gets a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.GetJobTriggerRequest, dict]):
The request object. Request message for GetJobTrigger.
name (str):
Required. Resource name of the project and the
triggeredJob, for example
``projects/dlp-test-project/jobTriggers/53234423``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.JobTrigger:
                Contains a configuration to make DLP
                API calls on a repeating basis. See
https://cloud.google.com/dlp/docs/concepts-
job-triggers to learn more.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.GetJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.GetJobTriggerRequest):
request = dlp.GetJobTriggerRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_job_trigger]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_job_triggers(
self,
request: Union[dlp.ListJobTriggersRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobTriggersPager:
r"""Lists job triggers.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListJobTriggersRequest, dict]):
The request object. Request message for ListJobTriggers.
parent (str):
Required. Parent resource name.
The format of this value varies depending on whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.services.dlp_service.pagers.ListJobTriggersPager:
Response message for ListJobTriggers.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListJobTriggersRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListJobTriggersRequest):
request = dlp.ListJobTriggersRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_job_triggers]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListJobTriggersPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_job_trigger(
self,
request: Union[dlp.DeleteJobTriggerRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a job trigger.
See https://cloud.google.com/dlp/docs/creating-job-
triggers to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.DeleteJobTriggerRequest, dict]):
The request object. Request message for
DeleteJobTrigger.
name (str):
Required. Resource name of the project and the
triggeredJob, for example
``projects/dlp-test-project/jobTriggers/53234423``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeleteJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeleteJobTriggerRequest):
request = dlp.DeleteJobTriggerRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_job_trigger]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def activate_job_trigger(
self,
request: Union[dlp.ActivateJobTriggerRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DlpJob:
r"""Activate a job trigger. Causes the immediate execute
of a trigger instead of waiting on the trigger event to
occur.
Args:
request (Union[google.cloud.dlp_v2.types.ActivateJobTriggerRequest, dict]):
The request object. Request message for
ActivateJobTrigger.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DlpJob:
Combines all of the information about
a DLP job.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ActivateJobTriggerRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ActivateJobTriggerRequest):
request = dlp.ActivateJobTriggerRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.activate_job_trigger]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
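    # Usage sketch (hypothetical trigger name; assumes an instantiated
    # ``client`` and ``from google.cloud import dlp_v2``). This method exposes
    # no flattened fields, so the request object is built explicitly:
    #
    #     job = client.activate_job_trigger(
    #         request=dlp_v2.ActivateJobTriggerRequest(
    #             name="projects/my-project/jobTriggers/nightly-storage-scan"
    #         )
    #     )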
def create_dlp_job(
self,
request: Union[dlp.CreateDlpJobRequest, dict] = None,
*,
parent: str = None,
inspect_job: dlp.InspectJobConfig = None,
risk_job: dlp.RiskAnalysisJobConfig = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DlpJob:
r"""Creates a new job to inspect storage or calculate
risk metrics. See
https://cloud.google.com/dlp/docs/inspecting-storage and
https://cloud.google.com/dlp/docs/compute-risk-analysis
to learn more.
When no InfoTypes or CustomInfoTypes are specified in
inspect jobs, the system will automatically choose what
detectors to run. By default this may be all types, but
may change over time as detectors are updated.
Args:
request (Union[google.cloud.dlp_v2.types.CreateDlpJobRequest, dict]):
The request object. Request message for
CreateDlpJobRequest. Used to initiate long running jobs
such as calculating risk metrics or inspecting Google
Cloud Storage.
parent (str):
Required. Parent resource name.
The format of this value varies depending on whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
inspect_job (google.cloud.dlp_v2.types.InspectJobConfig):
An inspection job scans a storage
repository for InfoTypes.
This corresponds to the ``inspect_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
risk_job (google.cloud.dlp_v2.types.RiskAnalysisJobConfig):
                A risk analysis job calculates
                re-identification risk metrics for a
BigQuery table.
This corresponds to the ``risk_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DlpJob:
Combines all of the information about
a DLP job.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, inspect_job, risk_job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CreateDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CreateDlpJobRequest):
request = dlp.CreateDlpJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if inspect_job is not None:
request.inspect_job = inspect_job
if risk_job is not None:
request.risk_job = risk_job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
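    # Usage sketch (hypothetical project and bucket; assumes an instantiated
    # ``client`` and ``from google.cloud import dlp_v2``). An inspect job that
    # scans a Cloud Storage prefix for email addresses might look like:
    #
    #     job = client.create_dlp_job(
    #         parent="projects/my-project/locations/global",
    #         inspect_job=dlp_v2.InspectJobConfig(
    #             storage_config=dlp_v2.StorageConfig(
    #                 cloud_storage_options=dlp_v2.CloudStorageOptions(
    #                     file_set=dlp_v2.CloudStorageOptions.FileSet(
    #                         url="gs://my-bucket/**"
    #                     )
    #                 )
    #             ),
    #             inspect_config=dlp_v2.InspectConfig(
    #                 info_types=[dlp_v2.InfoType(name="EMAIL_ADDRESS")]
    #             ),
    #         ),
    #     )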
def list_dlp_jobs(
self,
request: Union[dlp.ListDlpJobsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDlpJobsPager:
r"""Lists DlpJobs that match the specified filter in the
request. See
https://cloud.google.com/dlp/docs/inspecting-storage and
https://cloud.google.com/dlp/docs/compute-risk-analysis
to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListDlpJobsRequest, dict]):
The request object. The request message for listing DLP
jobs.
parent (str):
Required. Parent resource name.
The format of this value varies depending on whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.services.dlp_service.pagers.ListDlpJobsPager:
The response message for listing DLP
jobs.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListDlpJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListDlpJobsRequest):
request = dlp.ListDlpJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_dlp_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDlpJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_dlp_job(
self,
request: Union[dlp.GetDlpJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.DlpJob:
r"""Gets the latest state of a long-running DlpJob.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.GetDlpJobRequest, dict]):
The request object. The request message for
[DlpJobs.GetDlpJob][].
name (str):
Required. The name of the DlpJob
resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.DlpJob:
Combines all of the information about
a DLP job.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.GetDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.GetDlpJobRequest):
request = dlp.GetDlpJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
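    # Usage sketch (hypothetical job name; assumes an instantiated ``client``
    # and ``from google.cloud import dlp_v2``). Polling the job until it
    # reaches a terminal state is a common pattern:
    #
    #     job = client.get_dlp_job(
    #         name="projects/my-project/dlpJobs/i-1234567890"
    #     )
    #     if job.state == dlp_v2.DlpJob.JobState.DONE:
    #         print("job finished:", job.name)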
def delete_dlp_job(
self,
request: Union[dlp.DeleteDlpJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a long-running DlpJob. This method indicates
that the client is no longer interested in the DlpJob
result. The job will be cancelled if possible.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.DeleteDlpJobRequest, dict]):
The request object. The request message for deleting a
DLP job.
name (str):
Required. The name of the DlpJob
resource to be deleted.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeleteDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeleteDlpJobRequest):
request = dlp.DeleteDlpJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
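    # Usage sketch (hypothetical job name; assumes an instantiated ``client``).
    # The call returns ``None``; if the job is still running it is cancelled
    # where possible:
    #
    #     client.delete_dlp_job(name="projects/my-project/dlpJobs/i-1234567890")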
def cancel_dlp_job(
self,
request: Union[dlp.CancelDlpJobRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Starts asynchronous cancellation on a long-running
DlpJob. The server makes a best effort to cancel the
DlpJob, but success is not guaranteed.
See https://cloud.google.com/dlp/docs/inspecting-storage
and https://cloud.google.com/dlp/docs/compute-risk-
analysis to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.CancelDlpJobRequest, dict]):
The request object. The request message for canceling a
DLP job.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CancelDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CancelDlpJobRequest):
request = dlp.CancelDlpJobRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.cancel_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_stored_info_type(
self,
request: Union[dlp.CreateStoredInfoTypeRequest, dict] = None,
*,
parent: str = None,
config: dlp.StoredInfoTypeConfig = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.StoredInfoType:
r"""Creates a pre-built stored infoType to be used for
inspection. See
https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.CreateStoredInfoTypeRequest, dict]):
The request object. Request message for
CreateStoredInfoType.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
config (google.cloud.dlp_v2.types.StoredInfoTypeConfig):
Required. Configuration of the
storedInfoType to create.
This corresponds to the ``config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.StoredInfoType:
StoredInfoType resource message that
contains information about the current
version and any pending updates.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.CreateStoredInfoTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.CreateStoredInfoTypeRequest):
request = dlp.CreateStoredInfoTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if config is not None:
request.config = config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_stored_info_type]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
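    # Usage sketch (hypothetical names; assumes an instantiated ``client`` and
    # ``from google.cloud import dlp_v2``). A large custom dictionary built
    # from a term list in Cloud Storage could be configured like this:
    #
    #     stored = client.create_stored_info_type(
    #         parent="projects/my-project/locations/global",
    #         config=dlp_v2.StoredInfoTypeConfig(
    #             display_name="Customer code words",
    #             large_custom_dictionary=dlp_v2.LargeCustomDictionaryConfig(
    #                 output_path=dlp_v2.CloudStoragePath(
    #                     path="gs://my-bucket/dictionary-output"
    #                 ),
    #                 cloud_storage_file_set=dlp_v2.CloudStorageFileSet(
    #                     url="gs://my-bucket/terms.txt"
    #                 ),
    #             ),
    #         ),
    #     )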
def update_stored_info_type(
self,
request: Union[dlp.UpdateStoredInfoTypeRequest, dict] = None,
*,
name: str = None,
config: dlp.StoredInfoTypeConfig = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.StoredInfoType:
r"""Updates the stored infoType by creating a new
version. The existing version will continue to be used
until the new version is ready. See
https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.UpdateStoredInfoTypeRequest, dict]):
The request object. Request message for
UpdateStoredInfoType.
name (str):
                Required. Resource name of the organization and
storedInfoType to be updated, for example
``organizations/433245324/storedInfoTypes/432452342`` or
projects/project-id/storedInfoTypes/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
config (google.cloud.dlp_v2.types.StoredInfoTypeConfig):
Updated configuration for the
storedInfoType. If not provided, a new
version of the storedInfoType will be
created with the existing configuration.
This corresponds to the ``config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask to control which fields get
updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.StoredInfoType:
StoredInfoType resource message that
contains information about the current
version and any pending updates.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, config, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.UpdateStoredInfoTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.UpdateStoredInfoTypeRequest):
request = dlp.UpdateStoredInfoTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if config is not None:
request.config = config
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_stored_info_type]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_stored_info_type(
self,
request: Union[dlp.GetStoredInfoTypeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.StoredInfoType:
r"""Gets a stored infoType.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.GetStoredInfoTypeRequest, dict]):
The request object. Request message for
GetStoredInfoType.
name (str):
Required. Resource name of the organization and
storedInfoType to be read, for example
``organizations/433245324/storedInfoTypes/432452342`` or
projects/project-id/storedInfoTypes/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.StoredInfoType:
StoredInfoType resource message that
contains information about the current
version and any pending updates.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.GetStoredInfoTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.GetStoredInfoTypeRequest):
request = dlp.GetStoredInfoTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_stored_info_type]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_stored_info_types(
self,
request: Union[dlp.ListStoredInfoTypesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListStoredInfoTypesPager:
r"""Lists stored infoTypes.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.ListStoredInfoTypesRequest, dict]):
The request object. Request message for
ListStoredInfoTypes.
parent (str):
Required. Parent resource name.
The format of this value varies depending on the scope
of the request (project or organization) and whether you
have `specified a processing
location <https://cloud.google.com/dlp/docs/specifying-location>`__:
- Projects scope, location specified:
``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID
- Projects scope, no location specified (defaults to
global): ``projects/``\ PROJECT_ID
- Organizations scope, location specified:
``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID
- Organizations scope, no location specified (defaults
to global): ``organizations/``\ ORG_ID
The following example ``parent`` string specifies a
parent project with the identifier ``example-project``,
and specifies the ``europe-west3`` location for
processing data:
::
parent=projects/example-project/locations/europe-west3
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.services.dlp_service.pagers.ListStoredInfoTypesPager:
Response message for
ListStoredInfoTypes.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.ListStoredInfoTypesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.ListStoredInfoTypesRequest):
request = dlp.ListStoredInfoTypesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_stored_info_types]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListStoredInfoTypesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_stored_info_type(
self,
request: Union[dlp.DeleteStoredInfoTypeRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a stored infoType.
See https://cloud.google.com/dlp/docs/creating-stored-
infotypes to learn more.
Args:
request (Union[google.cloud.dlp_v2.types.DeleteStoredInfoTypeRequest, dict]):
The request object. Request message for
DeleteStoredInfoType.
name (str):
Required. Resource name of the organization and
storedInfoType to be deleted, for example
``organizations/433245324/storedInfoTypes/432452342`` or
projects/project-id/storedInfoTypes/432452342.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.DeleteStoredInfoTypeRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.DeleteStoredInfoTypeRequest):
request = dlp.DeleteStoredInfoTypeRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_stored_info_type]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def hybrid_inspect_dlp_job(
self,
request: Union[dlp.HybridInspectDlpJobRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dlp.HybridInspectResponse:
r"""Inspect hybrid content and store findings to a job.
To review the findings, inspect the job. Inspection will
occur asynchronously.
Args:
request (Union[google.cloud.dlp_v2.types.HybridInspectDlpJobRequest, dict]):
The request object. Request to search for potentially
sensitive info in a custom location.
name (str):
Required. Resource name of the job to execute a hybrid
inspect on, for example
``projects/dlp-test-project/dlpJob/53234423``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dlp_v2.types.HybridInspectResponse:
                Quota exceeded errors will be thrown
                once the quota has been reached.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dlp.HybridInspectDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.HybridInspectDlpJobRequest):
request = dlp.HybridInspectDlpJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.hybrid_inspect_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def finish_dlp_job(
self,
request: Union[dlp.FinishDlpJobRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Finish a running hybrid DlpJob. Triggers the
finalization steps and running of any enabled actions
that have not yet run.
Args:
request (Union[google.cloud.dlp_v2.types.FinishDlpJobRequest, dict]):
The request object. The request message for finishing a
DLP hybrid job.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a dlp.FinishDlpJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, dlp.FinishDlpJobRequest):
request = dlp.FinishDlpJobRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.finish_dlp_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
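    # Editorial note (not generated code): unlike most methods above,
    # ``finish_dlp_job`` exposes no flattened ``name`` argument, so a request
    # object or dict is passed directly once all hybrid items for the job
    # have been submitted; the job name is illustrative:
    #
    #     client.finish_dlp_job(
    #         request={"name": "projects/example-project/dlpJobs/i-1234567890"}
    #     )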
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
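# Editorial sketch (not part of the generated client): a minimal, hypothetical
# illustration of the context-manager pattern documented in ``__exit__`` above
# together with the hybrid job methods. The project and job identifiers are
# placeholders and the calls assume valid default credentials; wrapping the
# sketch in a function keeps it from executing at import time.
def _example_hybrid_inspect_flow():
    # The ``with`` block owns the client's transport, so the client must not
    # be shared with other instances (see the warning in ``__exit__``).
    with DlpServiceClient() as client:
        job_name = "projects/example-project/dlpJobs/i-1234567890"
        # Submit hybrid content for asynchronous inspection.
        client.hybrid_inspect_dlp_job(name=job_name)
        # Signal that no further hybrid items will be sent for this job.
        client.finish_dlp_job(request={"name": job_name})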
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-dlp",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
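# Editorial note: the ``except`` branch above falls back to a versionless
# ClientInfo when the "google-cloud-dlp" distribution metadata cannot be
# resolved (for example, when the package is vendored rather than installed).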
__all__ = ("DlpServiceClient",) | en | 0.738998 | # -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # type: ignore # type: ignore # type: ignore # type: ignore # type: ignore # pragma: NO COVER # type: ignore # type: ignore # type: ignore Metaclass for the DlpService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. # type: Dict[str, Type[DlpServiceTransport]] Returns an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. # If a specific transport is requested, return that one. # No transport is requested; return the default (that is, the first one # in the dictionary). The Cloud Data Loss Prevention (DLP) API is a service that allows clients to detect the presence of Personally Identifiable Information (PII) and other privacy-sensitive data in user- supplied, unstructured data streams, like text blocks or images. The service also includes methods for sensitive data redaction and scheduling of data scans on Google Cloud Platform based data sets. To learn more about concepts and find how-to guides see https://cloud.google.com/dlp/docs/. Converts api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. # type: ignore Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: DlpServiceClient: The constructed client. Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: DlpServiceClient: The constructed client. Returns the transport used by the client instance. Returns: DlpServiceTransport: The transport used by the client instance. Returns a fully-qualified deidentify_template string. Parses a deidentify_template path into its component segments. Returns a fully-qualified dlp_content string. Parses a dlp_content path into its component segments. Returns a fully-qualified dlp_job string. Parses a dlp_job path into its component segments. Returns a fully-qualified finding string. Parses a finding path into its component segments. Returns a fully-qualified inspect_template string. Parses a inspect_template path into its component segments. Returns a fully-qualified job_trigger string. Parses a job_trigger path into its component segments. Returns a fully-qualified stored_info_type string. 
Parses a stored_info_type path into its component segments. Returns a fully-qualified billing_account string. Parse a billing_account path into its component segments. Returns a fully-qualified folder string. Parse a folder path into its component segments. Returns a fully-qualified organization string. Parse a organization path into its component segments. Returns a fully-qualified project string. Parse a project path into its component segments. Returns a fully-qualified location string. Parse a location path into its component segments. Instantiates the dlp service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, DlpServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. # Create SSL credentials for mutual TLS if needed. # Figure out which api endpoint to use. # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. # transport is a DlpServiceTransport instance. Finds potentially sensitive info in content. This method has limits on input size, processing time, and output size. When no InfoTypes or CustomInfoTypes are specified in this request, the system will automatically choose what detectors to run. By default this may be all types, but may change over time as detectors are updated. For how to guides, see https://cloud.google.com/dlp/docs/inspecting-images and https://cloud.google.com/dlp/docs/inspecting-text, Args: request (Union[google.cloud.dlp_v2.types.InspectContentRequest, dict]): The request object. Request to search for potentially sensitive info in a ContentItem. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
Returns: google.cloud.dlp_v2.types.InspectContentResponse: Results of inspecting an item. # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a dlp.InspectContentRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Redacts potentially sensitive info from an image. This method has limits on input size, processing time, and output size. See https://cloud.google.com/dlp/docs/redacting-sensitive- data-images to learn more. When no InfoTypes or CustomInfoTypes are specified in this request, the system will automatically choose what detectors to run. By default this may be all types, but may change over time as detectors are updated. Args: request (Union[google.cloud.dlp_v2.types.RedactImageRequest, dict]): The request object. Request to search for potentially sensitive info in an image and redact it by covering it with a colored rectangle. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.RedactImageResponse: Results of redacting an image. # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a dlp.RedactImageRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. De-identifies potentially sensitive info from a ContentItem. This method has limits on input size and output size. See https://cloud.google.com/dlp/docs/deidentify-sensitive- data to learn more. When no InfoTypes or CustomInfoTypes are specified in this request, the system will automatically choose what detectors to run. By default this may be all types, but may change over time as detectors are updated. Args: request (Union[google.cloud.dlp_v2.types.DeidentifyContentRequest, dict]): The request object. Request to de-identify a list of items. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.DeidentifyContentResponse: Results of de-identifying a ContentItem. # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a dlp.DeidentifyContentRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Re-identifies content that has been de-identified. See https://cloud.google.com/dlp/docs/pseudonymization#re-identification_in_free_text_code_example to learn more. 
Args: request (Union[google.cloud.dlp_v2.types.ReidentifyContentRequest, dict]): The request object. Request to re-identify an item. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.ReidentifyContentResponse: Results of re-identifying a item. # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a dlp.ReidentifyContentRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Returns a list of the sensitive information types that the DLP API supports. See https://cloud.google.com/dlp/docs/infotypes-reference to learn more. Args: request (Union[google.cloud.dlp_v2.types.ListInfoTypesRequest, dict]): The request object. Request for the list of infoTypes. parent (str): The parent resource name. The format of this value is as follows: :: locations/<var>LOCATION_ID</var> This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.ListInfoTypesResponse: Response to the ListInfoTypes request. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.ListInfoTypesRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Send the request. # Done; return the response. Creates an InspectTemplate for re-using frequently used configuration for inspecting content, images, and storage. See https://cloud.google.com/dlp/docs/creating- templates to learn more. Args: request (Union[google.cloud.dlp_v2.types.CreateInspectTemplateRequest, dict]): The request object. Request message for CreateInspectTemplate. parent (str): Required. Parent resource name. 
The format of this value varies depending on the scope of the request (project or organization) and whether you have `specified a processing location <https://cloud.google.com/dlp/docs/specifying-location>`__: - Projects scope, location specified: ``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID - Projects scope, no location specified (defaults to global): ``projects/``\ PROJECT_ID - Organizations scope, location specified: ``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID - Organizations scope, no location specified (defaults to global): ``organizations/``\ ORG_ID The following example ``parent`` string specifies a parent project with the identifier ``example-project``, and specifies the ``europe-west3`` location for processing data: :: parent=projects/example-project/locations/europe-west3 This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. inspect_template (google.cloud.dlp_v2.types.InspectTemplate): Required. The InspectTemplate to create. This corresponds to the ``inspect_template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.InspectTemplate: The inspectTemplate contains a configuration (set of types of sensitive data to be detected) to be used anywhere you otherwise would normally specify InspectConfig. See https://cloud.google.com/dlp/docs/concepts- templates to learn more. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.CreateInspectTemplateRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Updates the InspectTemplate. See https://cloud.google.com/dlp/docs/creating-templates to learn more. Args: request (Union[google.cloud.dlp_v2.types.UpdateInspectTemplateRequest, dict]): The request object. Request message for UpdateInspectTemplate. name (str): Required. Resource name of organization and inspectTemplate to be updated, for example ``organizations/433245324/inspectTemplates/432452342`` or projects/project-id/inspectTemplates/432452342. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. inspect_template (google.cloud.dlp_v2.types.InspectTemplate): New InspectTemplate value. This corresponds to the ``inspect_template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Mask to control which fields get updated. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.InspectTemplate: The inspectTemplate contains a configuration (set of types of sensitive data to be detected) to be used anywhere you otherwise would normally specify InspectConfig. See https://cloud.google.com/dlp/docs/concepts- templates to learn more. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.UpdateInspectTemplateRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Gets an InspectTemplate. See https://cloud.google.com/dlp/docs/creating-templates to learn more. Args: request (Union[google.cloud.dlp_v2.types.GetInspectTemplateRequest, dict]): The request object. Request message for GetInspectTemplate. name (str): Required. Resource name of the organization and inspectTemplate to be read, for example ``organizations/433245324/inspectTemplates/432452342`` or projects/project-id/inspectTemplates/432452342. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.InspectTemplate: The inspectTemplate contains a configuration (set of types of sensitive data to be detected) to be used anywhere you otherwise would normally specify InspectConfig. See https://cloud.google.com/dlp/docs/concepts- templates to learn more. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.GetInspectTemplateRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Lists InspectTemplates. See https://cloud.google.com/dlp/docs/creating-templates to learn more. Args: request (Union[google.cloud.dlp_v2.types.ListInspectTemplatesRequest, dict]): The request object. Request message for ListInspectTemplates. parent (str): Required. Parent resource name. 
The format of this value varies depending on the scope of the request (project or organization) and whether you have `specified a processing location <https://cloud.google.com/dlp/docs/specifying-location>`__: - Projects scope, location specified: ``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID - Projects scope, no location specified (defaults to global): ``projects/``\ PROJECT_ID - Organizations scope, location specified: ``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID - Organizations scope, no location specified (defaults to global): ``organizations/``\ ORG_ID The following example ``parent`` string specifies a parent project with the identifier ``example-project``, and specifies the ``europe-west3`` location for processing data: :: parent=projects/example-project/locations/europe-west3 This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.services.dlp_service.pagers.ListInspectTemplatesPager: Response message for ListInspectTemplates. Iterating over this object will yield results and resolve additional pages automatically. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.ListInspectTemplatesRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. # Done; return the response. Deletes an InspectTemplate. See https://cloud.google.com/dlp/docs/creating-templates to learn more. Args: request (Union[google.cloud.dlp_v2.types.DeleteInspectTemplateRequest, dict]): The request object. Request message for DeleteInspectTemplate. name (str): Required. Resource name of the organization and inspectTemplate to be deleted, for example ``organizations/433245324/inspectTemplates/432452342`` or projects/project-id/inspectTemplates/432452342. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.DeleteInspectTemplateRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. 
# Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. Creates a DeidentifyTemplate for re-using frequently used configuration for de-identifying content, images, and storage. See https://cloud.google.com/dlp/docs/creating-templates- deid to learn more. Args: request (Union[google.cloud.dlp_v2.types.CreateDeidentifyTemplateRequest, dict]): The request object. Request message for CreateDeidentifyTemplate. parent (str): Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have `specified a processing location <https://cloud.google.com/dlp/docs/specifying-location>`__: - Projects scope, location specified: ``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID - Projects scope, no location specified (defaults to global): ``projects/``\ PROJECT_ID - Organizations scope, location specified: ``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID - Organizations scope, no location specified (defaults to global): ``organizations/``\ ORG_ID The following example ``parent`` string specifies a parent project with the identifier ``example-project``, and specifies the ``europe-west3`` location for processing data: :: parent=projects/example-project/locations/europe-west3 This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. deidentify_template (google.cloud.dlp_v2.types.DeidentifyTemplate): Required. The DeidentifyTemplate to create. This corresponds to the ``deidentify_template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.DeidentifyTemplate: DeidentifyTemplates contains instructions on how to de-identify content. See https://cloud.google.com/dlp/docs/concepts- templates to learn more. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.CreateDeidentifyTemplateRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Updates the DeidentifyTemplate. See https://cloud.google.com/dlp/docs/creating- templates-deid to learn more. Args: request (Union[google.cloud.dlp_v2.types.UpdateDeidentifyTemplateRequest, dict]): The request object. Request message for UpdateDeidentifyTemplate. name (str): Required. Resource name of organization and deidentify template to be updated, for example ``organizations/433245324/deidentifyTemplates/432452342`` or projects/project-id/deidentifyTemplates/432452342. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
deidentify_template (google.cloud.dlp_v2.types.DeidentifyTemplate): New DeidentifyTemplate value. This corresponds to the ``deidentify_template`` field on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Mask to control which fields get updated. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.DeidentifyTemplate: DeidentifyTemplates contains instructions on how to de-identify content. See https://cloud.google.com/dlp/docs/concepts- templates to learn more. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.UpdateDeidentifyTemplateRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Gets a DeidentifyTemplate. See https://cloud.google.com/dlp/docs/creating- templates-deid to learn more. Args: request (Union[google.cloud.dlp_v2.types.GetDeidentifyTemplateRequest, dict]): The request object. Request message for GetDeidentifyTemplate. name (str): Required. Resource name of the organization and deidentify template to be read, for example ``organizations/433245324/deidentifyTemplates/432452342`` or projects/project-id/deidentifyTemplates/432452342. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.DeidentifyTemplate: DeidentifyTemplates contains instructions on how to de-identify content. See https://cloud.google.com/dlp/docs/concepts- templates to learn more. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.GetDeidentifyTemplateRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Lists DeidentifyTemplates. See https://cloud.google.com/dlp/docs/creating- templates-deid to learn more. Args: request (Union[google.cloud.dlp_v2.types.ListDeidentifyTemplatesRequest, dict]): The request object. 
Request message for ListDeidentifyTemplates. parent (str): Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have `specified a processing location <https://cloud.google.com/dlp/docs/specifying-location>`__: - Projects scope, location specified: ``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID - Projects scope, no location specified (defaults to global): ``projects/``\ PROJECT_ID - Organizations scope, location specified: ``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID - Organizations scope, no location specified (defaults to global): ``organizations/``\ ORG_ID The following example ``parent`` string specifies a parent project with the identifier ``example-project``, and specifies the ``europe-west3`` location for processing data: :: parent=projects/example-project/locations/europe-west3 This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.services.dlp_service.pagers.ListDeidentifyTemplatesPager: Response message for ListDeidentifyTemplates. Iterating over this object will yield results and resolve additional pages automatically. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.ListDeidentifyTemplatesRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. # Done; return the response. Deletes a DeidentifyTemplate. See https://cloud.google.com/dlp/docs/creating- templates-deid to learn more. Args: request (Union[google.cloud.dlp_v2.types.DeleteDeidentifyTemplateRequest, dict]): The request object. Request message for DeleteDeidentifyTemplate. name (str): Required. Resource name of the organization and deidentify template to be deleted, for example ``organizations/433245324/deidentifyTemplates/432452342`` or projects/project-id/deidentifyTemplates/432452342. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.DeleteDeidentifyTemplateRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. 
# If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. Creates a job trigger to run DLP actions such as scanning storage for sensitive information on a set schedule. See https://cloud.google.com/dlp/docs/creating-job-triggers to learn more. Args: request (Union[google.cloud.dlp_v2.types.CreateJobTriggerRequest, dict]): The request object. Request message for CreateJobTrigger. parent (str): Required. Parent resource name. The format of this value varies depending on whether you have `specified a processing location <https://cloud.google.com/dlp/docs/specifying-location>`__: - Projects scope, location specified: ``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID - Projects scope, no location specified (defaults to global): ``projects/``\ PROJECT_ID The following example ``parent`` string specifies a parent project with the identifier ``example-project``, and specifies the ``europe-west3`` location for processing data: :: parent=projects/example-project/locations/europe-west3 This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job_trigger (google.cloud.dlp_v2.types.JobTrigger): Required. The JobTrigger to create. This corresponds to the ``job_trigger`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.JobTrigger: Contains a configuration to make dlp api calls on a repeating basis. See https://cloud.google.com/dlp/docs/concepts- job-triggers to learn more. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.CreateJobTriggerRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Updates a job trigger. See https://cloud.google.com/dlp/docs/creating-job- triggers to learn more. Args: request (Union[google.cloud.dlp_v2.types.UpdateJobTriggerRequest, dict]): The request object. Request message for UpdateJobTrigger. name (str): Required. Resource name of the project and the triggeredJob, for example ``projects/dlp-test-project/jobTriggers/53234423``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. job_trigger (google.cloud.dlp_v2.types.JobTrigger): New JobTrigger value. This corresponds to the ``job_trigger`` field on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Mask to control which fields get updated. 
This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.JobTrigger: Contains a configuration to make dlp api calls on a repeating basis. See https://cloud.google.com/dlp/docs/concepts- job-triggers to learn more. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.UpdateJobTriggerRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Inspect hybrid content and store findings to a trigger. The inspection will be processed asynchronously. To review the findings monitor the jobs within the trigger. Args: request (Union[google.cloud.dlp_v2.types.HybridInspectJobTriggerRequest, dict]): The request object. Request to search for potentially sensitive info in a custom location. name (str): Required. Resource name of the trigger to execute a hybrid inspect on, for example ``projects/dlp-test-project/jobTriggers/53234423``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.HybridInspectResponse: Quota exceeded errors will be thrown once quota has been met. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.HybridInspectJobTriggerRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Gets a job trigger. See https://cloud.google.com/dlp/docs/creating-job- triggers to learn more. Args: request (Union[google.cloud.dlp_v2.types.GetJobTriggerRequest, dict]): The request object. Request message for GetJobTrigger. name (str): Required. Resource name of the project and the triggeredJob, for example ``projects/dlp-test-project/jobTriggers/53234423``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.JobTrigger: Contains a configuration to make dlp api calls on a repeating basis. See https://cloud.google.com/dlp/docs/concepts- job-triggers to learn more. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.GetJobTriggerRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Lists job triggers. See https://cloud.google.com/dlp/docs/creating-job- triggers to learn more. Args: request (Union[google.cloud.dlp_v2.types.ListJobTriggersRequest, dict]): The request object. Request message for ListJobTriggers. parent (str): Required. Parent resource name. The format of this value varies depending on whether you have `specified a processing location <https://cloud.google.com/dlp/docs/specifying-location>`__: - Projects scope, location specified: ``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID - Projects scope, no location specified (defaults to global): ``projects/``\ PROJECT_ID The following example ``parent`` string specifies a parent project with the identifier ``example-project``, and specifies the ``europe-west3`` location for processing data: :: parent=projects/example-project/locations/europe-west3 This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.services.dlp_service.pagers.ListJobTriggersPager: Response message for ListJobTriggers. Iterating over this object will yield results and resolve additional pages automatically. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.ListJobTriggersRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. # Done; return the response. Deletes a job trigger. See https://cloud.google.com/dlp/docs/creating-job- triggers to learn more. Args: request (Union[google.cloud.dlp_v2.types.DeleteJobTriggerRequest, dict]): The request object. Request message for DeleteJobTrigger. name (str): Required. Resource name of the project and the triggeredJob, for example ``projects/dlp-test-project/jobTriggers/53234423``. 
This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.DeleteJobTriggerRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. Activate a job trigger. Causes the immediate execute of a trigger instead of waiting on the trigger event to occur. Args: request (Union[google.cloud.dlp_v2.types.ActivateJobTriggerRequest, dict]): The request object. Request message for ActivateJobTrigger. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.DlpJob: Combines all of the information about a DLP job. # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a dlp.ActivateJobTriggerRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Creates a new job to inspect storage or calculate risk metrics. See https://cloud.google.com/dlp/docs/inspecting-storage and https://cloud.google.com/dlp/docs/compute-risk-analysis to learn more. When no InfoTypes or CustomInfoTypes are specified in inspect jobs, the system will automatically choose what detectors to run. By default this may be all types, but may change over time as detectors are updated. Args: request (Union[google.cloud.dlp_v2.types.CreateDlpJobRequest, dict]): The request object. Request message for CreateDlpJobRequest. Used to initiate long running jobs such as calculating risk metrics or inspecting Google Cloud Storage. parent (str): Required. Parent resource name. The format of this value varies depending on whether you have `specified a processing location <https://cloud.google.com/dlp/docs/specifying-location>`__: - Projects scope, location specified: ``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID - Projects scope, no location specified (defaults to global): ``projects/``\ PROJECT_ID The following example ``parent`` string specifies a parent project with the identifier ``example-project``, and specifies the ``europe-west3`` location for processing data: :: parent=projects/example-project/locations/europe-west3 This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
inspect_job (google.cloud.dlp_v2.types.InspectJobConfig): An inspection job scans a storage repository for InfoTypes. This corresponds to the ``inspect_job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. risk_job (google.cloud.dlp_v2.types.RiskAnalysisJobConfig): A risk analysis job calculates re- dentification risk metrics for a BigQuery table. This corresponds to the ``risk_job`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.DlpJob: Combines all of the information about a DLP job. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.CreateDlpJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Lists DlpJobs that match the specified filter in the request. See https://cloud.google.com/dlp/docs/inspecting-storage and https://cloud.google.com/dlp/docs/compute-risk-analysis to learn more. Args: request (Union[google.cloud.dlp_v2.types.ListDlpJobsRequest, dict]): The request object. The request message for listing DLP jobs. parent (str): Required. Parent resource name. The format of this value varies depending on whether you have `specified a processing location <https://cloud.google.com/dlp/docs/specifying-location>`__: - Projects scope, location specified: ``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID - Projects scope, no location specified (defaults to global): ``projects/``\ PROJECT_ID The following example ``parent`` string specifies a parent project with the identifier ``example-project``, and specifies the ``europe-west3`` location for processing data: :: parent=projects/example-project/locations/europe-west3 This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.services.dlp_service.pagers.ListDlpJobsPager: The response message for listing DLP jobs. Iterating over this object will yield results and resolve additional pages automatically. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.ListDlpJobsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. 
# Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. # Done; return the response. Gets the latest state of a long-running DlpJob. See https://cloud.google.com/dlp/docs/inspecting-storage and https://cloud.google.com/dlp/docs/compute-risk- analysis to learn more. Args: request (Union[google.cloud.dlp_v2.types.GetDlpJobRequest, dict]): The request object. The request message for [DlpJobs.GetDlpJob][]. name (str): Required. The name of the DlpJob resource. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.DlpJob: Combines all of the information about a DLP job. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.GetDlpJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Deletes a long-running DlpJob. This method indicates that the client is no longer interested in the DlpJob result. The job will be cancelled if possible. See https://cloud.google.com/dlp/docs/inspecting-storage and https://cloud.google.com/dlp/docs/compute-risk- analysis to learn more. Args: request (Union[google.cloud.dlp_v2.types.DeleteDlpJobRequest, dict]): The request object. The request message for deleting a DLP job. name (str): Required. The name of the DlpJob resource to be deleted. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.DeleteDlpJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. Starts asynchronous cancellation on a long-running DlpJob. The server makes a best effort to cancel the DlpJob, but success is not guaranteed. 
See https://cloud.google.com/dlp/docs/inspecting-storage and https://cloud.google.com/dlp/docs/compute-risk- analysis to learn more. Args: request (Union[google.cloud.dlp_v2.types.CancelDlpJobRequest, dict]): The request object. The request message for canceling a DLP job. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a dlp.CancelDlpJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. Creates a pre-built stored infoType to be used for inspection. See https://cloud.google.com/dlp/docs/creating-stored- infotypes to learn more. Args: request (Union[google.cloud.dlp_v2.types.CreateStoredInfoTypeRequest, dict]): The request object. Request message for CreateStoredInfoType. parent (str): Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have `specified a processing location <https://cloud.google.com/dlp/docs/specifying-location>`__: - Projects scope, location specified: ``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID - Projects scope, no location specified (defaults to global): ``projects/``\ PROJECT_ID - Organizations scope, location specified: ``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID - Organizations scope, no location specified (defaults to global): ``organizations/``\ ORG_ID The following example ``parent`` string specifies a parent project with the identifier ``example-project``, and specifies the ``europe-west3`` location for processing data: :: parent=projects/example-project/locations/europe-west3 This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. config (google.cloud.dlp_v2.types.StoredInfoTypeConfig): Required. Configuration of the storedInfoType to create. This corresponds to the ``config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.StoredInfoType: StoredInfoType resource message that contains information about the current version and any pending updates. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.CreateStoredInfoTypeRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. 
Updates the stored infoType by creating a new version. The existing version will continue to be used until the new version is ready. See https://cloud.google.com/dlp/docs/creating-stored- infotypes to learn more. Args: request (Union[google.cloud.dlp_v2.types.UpdateStoredInfoTypeRequest, dict]): The request object. Request message for UpdateStoredInfoType. name (str): Required. Resource name of organization and storedInfoType to be updated, for example ``organizations/433245324/storedInfoTypes/432452342`` or projects/project-id/storedInfoTypes/432452342. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. config (google.cloud.dlp_v2.types.StoredInfoTypeConfig): Updated configuration for the storedInfoType. If not provided, a new version of the storedInfoType will be created with the existing configuration. This corresponds to the ``config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Mask to control which fields get updated. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.StoredInfoType: StoredInfoType resource message that contains information about the current version and any pending updates. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.UpdateStoredInfoTypeRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Gets a stored infoType. See https://cloud.google.com/dlp/docs/creating-stored- infotypes to learn more. Args: request (Union[google.cloud.dlp_v2.types.GetStoredInfoTypeRequest, dict]): The request object. Request message for GetStoredInfoType. name (str): Required. Resource name of the organization and storedInfoType to be read, for example ``organizations/433245324/storedInfoTypes/432452342`` or projects/project-id/storedInfoTypes/432452342. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.StoredInfoType: StoredInfoType resource message that contains information about the current version and any pending updates. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.GetStoredInfoTypeRequest. 
# There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Lists stored infoTypes. See https://cloud.google.com/dlp/docs/creating-stored- infotypes to learn more. Args: request (Union[google.cloud.dlp_v2.types.ListStoredInfoTypesRequest, dict]): The request object. Request message for ListStoredInfoTypes. parent (str): Required. Parent resource name. The format of this value varies depending on the scope of the request (project or organization) and whether you have `specified a processing location <https://cloud.google.com/dlp/docs/specifying-location>`__: - Projects scope, location specified: ``projects/``\ PROJECT_ID\ ``/locations/``\ LOCATION_ID - Projects scope, no location specified (defaults to global): ``projects/``\ PROJECT_ID - Organizations scope, location specified: ``organizations/``\ ORG_ID\ ``/locations/``\ LOCATION_ID - Organizations scope, no location specified (defaults to global): ``organizations/``\ ORG_ID The following example ``parent`` string specifies a parent project with the identifier ``example-project``, and specifies the ``europe-west3`` location for processing data: :: parent=projects/example-project/locations/europe-west3 This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.services.dlp_service.pagers.ListStoredInfoTypesPager: Response message for ListStoredInfoTypes. Iterating over this object will yield results and resolve additional pages automatically. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.ListStoredInfoTypesRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. # Done; return the response. Deletes a stored infoType. See https://cloud.google.com/dlp/docs/creating-stored- infotypes to learn more. Args: request (Union[google.cloud.dlp_v2.types.DeleteStoredInfoTypeRequest, dict]): The request object. Request message for DeleteStoredInfoType. name (str): Required. Resource name of the organization and storedInfoType to be deleted, for example ``organizations/433245324/storedInfoTypes/432452342`` or projects/project-id/storedInfoTypes/432452342. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.DeleteStoredInfoTypeRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. Inspect hybrid content and store findings to a job. To review the findings, inspect the job. Inspection will occur asynchronously. Args: request (Union[google.cloud.dlp_v2.types.HybridInspectDlpJobRequest, dict]): The request object. Request to search for potentially sensitive info in a custom location. name (str): Required. Resource name of the job to execute a hybrid inspect on, for example ``projects/dlp-test-project/dlpJob/53234423``. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.dlp_v2.types.HybridInspectResponse: Quota exceeded errors will be thrown once quota has been met. # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. # Minor optimization to avoid making a copy if the user passes # in a dlp.HybridInspectDlpJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # If we have keyword arguments corresponding to fields on the # request, apply these. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. # Done; return the response. Finish a running hybrid DlpJob. Triggers the finalization steps and running of any enabled actions that have not yet run. Args: request (Union[google.cloud.dlp_v2.types.FinishDlpJobRequest, dict]): The request object. The request message for finishing a DLP hybrid job. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. # Create or coerce a protobuf request object. # Minor optimization to avoid making a copy if the user passes # in a dlp.FinishDlpJobRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. # Certain fields should be provided within the metadata header; # add these here. # Send the request. Releases underlying transport's resources. .. 
warning:: ONLY use as a context manager if the transport is NOT shared with other clients! Exiting the with block will CLOSE the transport and may cause errors in other clients! | 1.456275 | 1 |
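A minimal sketch of how the job-level calls described above are typically issued from the Python client, assuming the google-cloud-dlp package is installed; the project and job identifiers are placeholders, and only get_dlp_job/delete_dlp_job use the flattened ``name`` argument documented above (cancel_dlp_job takes a request object).

from google.cloud import dlp_v2

client = dlp_v2.DlpServiceClient()
job_name = "projects/example-project/dlpJobs/i-1234567890"  # placeholder resource name

job = client.get_dlp_job(name=job_name)            # latest state of the long-running DlpJob
client.cancel_dlp_job(request={"name": job_name})  # best-effort cancellation
client.delete_dlp_job(name=job_name)               # signal no further interest in the result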
heatsim2/boundary_insulating.py | isuthermography/heatsim2 | 2 | 6631977 |
# Insulating boundary condition for heatsim2: no heat flows across the boundary
# face, so every flux component below evaluates to zero.
def qx(kmat55m,kmat55p,
dz,dy,dx,
T55m,T55p,
T45m,T45p,
T65m,T65p,
T54m,T54p,
T56m,T56p,
T46m,T46p,
T64m,T64p):
return 0.0
def qy(kmat5m5,kmat5p5,
dz,dy,dx,
T5m5,T5p5,
T4m5,T4p5,
T6m5,T6p5,
T5m4,T5p4,
T5m6,T5p6,
T4m6,T4p6,
T6m4,T6p4):
return 0.0
def qz(kmatm55,kmatp55,
dz,dy,dx,
Tm55,Tp55,
Tm45,Tp45,
Tm65,Tp65,
Tm54,Tp54,
Tm56,Tp56,
Tm46,Tp46,
Tm64,Tp64):
return 0.0
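# --- Hedged usage sketch (added; not part of the original module) ---
# The insulating boundary ignores its material and temperature arguments entirely,
# so any call evaluates to zero flux; the argument values below are placeholders.
if __name__ == "__main__":
    zero_flux = qx(1.0, 1.0,            # conductivities on either side of the face
                   1e-3, 1e-3, 1e-3,    # dz, dy, dx
                   *([300.0] * 14))     # the 14 surrounding temperature samples
    assert zero_flux == 0.0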
|
def qx(kmat55m,kmat55p,
dz,dy,dx,
T55m,T55p,
T45m,T45p,
T65m,T65p,
T54m,T54p,
T56m,T56p,
T46m,T46p,
T64m,T64p):
return 0.0
def qy(kmat5m5,kmat5p5,
dz,dy,dx,
T5m5,T5p5,
T4m5,T4p5,
T6m5,T6p5,
T5m4,T5p4,
T5m6,T5p6,
T4m6,T4p6,
T6m4,T6p4):
return 0.0
def qz(kmatm55,kmatp55,
dz,dy,dx,
Tm55,Tp55,
Tm45,Tp45,
Tm65,Tp65,
Tm54,Tp54,
Tm56,Tp56,
Tm46,Tp46,
Tm64,Tp64):
return 0.0
| none | 1 | 1.890569 | 2 |
|
mmseg/models/backbones/vit_mae.py | Eiphodos/SwinSemanticSegmentation | 1 | 6631978 | <reponame>Eiphodos/SwinSemanticSegmentation<filename>mmseg/models/backbones/vit_mae.py<gh_stars>1-10
import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from mmcv_custom import load_checkpoint
from mmseg.utils import get_root_logger
from ..builder import BACKBONES
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0., window_size=None):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, self.heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token 2 cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
            # trunc_normal_(self.relative_position_bias_table, std=.0)
        else:
            # No windowed relative position bias; forward() checks this against None.
            self.window_size = None
            self.relative_position_bias_table = None
def forward(self, x, rel_pos_bias=None):
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
if self.relative_position_bias_table is not None:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
dots = dots + relative_position_bias.unsqueeze(0)
if rel_pos_bias is not None:
dots = dots + rel_pos_bias
attn = self.attend(dots)
out = torch.matmul(attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0., window_size=None):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout, window_size=window_size)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
@BACKBONES.register_module()
class ViTMAE(nn.Module):
def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3,
dim_head = 64, dropout = 0., emb_dropout = 0., out_indices=[3, 5, 7, 11], use_rel_pos_bias=False):
super().__init__()
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(patch_size)
assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
self.Hp = (image_height // patch_height)
self.Wp = (image_width // patch_width)
num_patches = self.Hp * self.Wp
patch_dim = channels * patch_height * patch_width
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),
nn.Linear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.dropout = nn.Dropout(emb_dropout)
self.use_rel_pos_bias = use_rel_pos_bias
self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout,
window_size=(self.Hp, self.Wp) if self.use_rel_pos_bias else None)
self.out_indices = out_indices
if patch_size == 16:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2),
nn.SyncBatchNorm(dim),
nn.GELU(),
nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Sequential(
nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2),
)
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
elif patch_size == 8:
self.fpn1 = nn.Sequential(
                nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Identity()
self.fpn3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fpn4 = nn.Sequential(
nn.MaxPool2d(kernel_size=4, stride=4),
)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if isinstance(pretrained, str):
self.apply(_init_weights)
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
self.apply(_init_weights)
else:
raise TypeError('pretrained must be a str or None')
def forward_features(self, img):
x = self.to_patch_embedding(img)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
x = torch.cat((cls_tokens, x), dim=1)
x += self.pos_embedding[:, :(n + 1)]
x = self.dropout(x)
features = []
for i, (attn, ff) in enumerate(self.transformer.layers):
x = attn(x) + x
x = ff(x) + x
if i in self.out_indices:
xp = x[:, 1:, :].permute(0, 2, 1).reshape(b, -1, self.Hp, self.Wp)
features.append(xp.contiguous())
        # Rescale the selected feature maps into a multi-scale (FPN-style) pyramid.
        ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for i in range(len(features)):
features[i] = ops[i](features[i])
return tuple(features)
def forward(self, img):
x = self.forward_features(img)
return x
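# --- Hedged usage sketch (added; parameter values are illustrative, not the repo's config) ---
# ViTMAE is a plain nn.Module, so it can be instantiated directly; within mmseg it would
# normally be built from a config through the BACKBONES registry. Note that the
# patch_size == 16 branch uses nn.SyncBatchNorm, which needs an initialised distributed
# process group (GPU) before a forward pass will run:
#
#   backbone = ViTMAE(image_size=512, patch_size=16, num_classes=150, dim=768,
#                     depth=12, heads=12, mlp_dim=3072)
#   feats = backbone(torch.randn(1, 3, 512, 512))  # tuple of 4 multi-scale feature maps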
| import torch
from torch import nn
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from mmcv_custom import load_checkpoint
from mmseg.utils import get_root_logger
from ..builder import BACKBONES
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0., window_size=None):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, self.heads)) # 2*Wh-1 * 2*Ww-1, nH
# cls to token & token 2 cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
            # trunc_normal_(self.relative_position_bias_table, std=.0)
        else:
            # No windowed relative position bias; forward() checks this against None.
            self.window_size = None
            self.relative_position_bias_table = None
def forward(self, x, rel_pos_bias=None):
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
if self.relative_position_bias_table is not None:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
dots = dots + relative_position_bias.unsqueeze(0)
if rel_pos_bias is not None:
dots = dots + rel_pos_bias
attn = self.attend(dots)
out = torch.matmul(attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0., window_size=None):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout, window_size=window_size)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
@BACKBONES.register_module()
class ViTMAE(nn.Module):
def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, pool = 'cls', channels = 3,
dim_head = 64, dropout = 0., emb_dropout = 0., out_indices=[3, 5, 7, 11], use_rel_pos_bias=False):
super().__init__()
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(patch_size)
assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
self.Hp = (image_height // patch_height)
self.Wp = (image_width // patch_width)
num_patches = self.Hp * self.Wp
patch_dim = channels * patch_height * patch_width
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),
nn.Linear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.dropout = nn.Dropout(emb_dropout)
self.use_rel_pos_bias = use_rel_pos_bias
self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout,
window_size=(self.Hp, self.Wp) if self.use_rel_pos_bias else None)
self.out_indices = out_indices
if patch_size == 16:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2),
nn.SyncBatchNorm(dim),
nn.GELU(),
nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Sequential(
nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2),
)
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
elif patch_size == 8:
self.fpn1 = nn.Sequential(
                nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Identity()
self.fpn3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fpn4 = nn.Sequential(
nn.MaxPool2d(kernel_size=4, stride=4),
)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if isinstance(pretrained, str):
self.apply(_init_weights)
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
self.apply(_init_weights)
else:
raise TypeError('pretrained must be a str or None')
def forward_features(self, img):
x = self.to_patch_embedding(img)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
x = torch.cat((cls_tokens, x), dim=1)
x += self.pos_embedding[:, :(n + 1)]
x = self.dropout(x)
features = []
for i, (attn, ff) in enumerate(self.transformer.layers):
x = attn(x) + x
x = ff(x) + x
if i in self.out_indices:
xp = x[:, 1:, :].permute(0, 2, 1).reshape(b, -1, self.Hp, self.Wp)
features.append(xp.contiguous())
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for i in range(len(features)):
features[i] = ops[i](features[i])
return tuple(features)
def forward(self, img):
x = self.forward_features(img)
return x | en | 0.54028 | # helpers # classes # 2*Wh-1 * 2*Ww-1, nH # cls to token & token 2 cls & cls to cls # get pair-wise relative position index for each token inside the window # 2, Wh, Ww # 2, Wh*Ww # 2, Wh*Ww, Wh*Ww # Wh*Ww, Wh*Ww, 2 # shift to start from 0 # Wh*Ww, Wh*Ww # trunc_normal_(self.relative_position_bias_table, std=.0) # Wh*Ww,Wh*Ww,nH # nH, Wh*Ww, Wh*Ww Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. | 1.820077 | 2 |
tests/bugs/core_2579_test.py | reevespaul/firebird-qa | 1 | 6631979 | #coding:utf-8
#
# id: bugs.core_2579
# title: Parameters and variables cannot be used as expressions in EXECUTE PROCEDURE parameters without a colon prefix
# description:
# tracker_id: CORE-2579
# min_versions: []
# versions: 2.5.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """set term ^ ;
create procedure P123 (param int)
as
begin
execute procedure p123 (param);
end ^
set term ; ^
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=2.5.0')
def test_1(act_1: Action):
act_1.execute()
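# Hedged usage note (added): with the firebird-qa plugin installed and a Firebird
# server configured for the test databases, pytest collects this module directly,
# e.g. `pytest tests/bugs/core_2579_test.py`.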
| #coding:utf-8
#
# id: bugs.core_2579
# title: Parameters and variables cannot be used as expressions in EXECUTE PROCEDURE parameters without a colon prefix
# description:
# tracker_id: CORE-2579
# min_versions: []
# versions: 2.5.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
test_script_1 = """set term ^ ;
create procedure P123 (param int)
as
begin
execute procedure p123 (param);
end ^
set term ; ^
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=2.5.0')
def test_1(act_1: Action):
act_1.execute()
| en | 0.60055 | #coding:utf-8 # # id: bugs.core_2579 # title: Parameters and variables cannot be used as expressions in EXECUTE PROCEDURE parameters without a colon prefix # decription: # tracker_id: CORE-2579 # min_versions: [] # versions: 2.5.0 # qmid: None # version: 2.5.0 # resources: None set term ^ ; create procedure P123 (param int) as begin execute procedure p123 (param); end ^ set term ; ^ | 1.467499 | 1 |
nimbus/nimbus.py | sidmohite/nimbus-astro | 6 | 6631980 | """
The main module of nimbus that sets up the Bayesian formalism.
Classes:
Kilonova_Inference
"""
__author__ = '<NAME>'
import numpy as np
from scipy.stats import norm, truncnorm
from scipy.integrate import quad
from scipy.special import expit
from multiprocessing import Pool
from functools import partial
class Kilonova_Inference():
"""
Initializes utility functions for inference and defines the model.
Attributes
----------
lc_model_funcs : array-like
The array whose elements are band-specific functions that define the
light-curve evolution as a function of time.
nullevent_mlim_pdf : func
The function that evaluates the pdf for the observed upper limits when
the event is either not in the observed fields or is terrestrial.
Usage
-----
    kne_inf = Kilonova_Inference(lc_model_funcs, nullevent_mlim_pdf)
"""
def __init__(self, lc_model_funcs, nullevent_mlim_pdf):
print("Initializing inference framework...")
self.lc_model_funcs = lc_model_funcs
self.nbands = len(lc_model_funcs)
self.nullevent_mlim_pdf = nullevent_mlim_pdf
def lc_model_powerlaw(self, M_0, gamma, t_0, t):
"""
Returns the absolute magnitude evolution as a power law.
Parameters
----------
M_0 : float
The peak absolute magnitude of the light curve.
gamma : float
Power law index for the light curve decay.
t_0 : float
Initial time of the event.
t : float or array
Array of observation times.
Returns
-------
M : float or array
Absolute magnitude light curve as a function of time (same shape as
t).
"""
return (M_0 * pow(t_0/t, gamma))
def lc_model_linear(self, M_0, alpha, t_0, t):
"""
Returns the absolute magnitude evolution as a linear decay/rise.
Parameters
----------
M_0 : float
The peak absolute magnitude of the light curve.
alpha : float
Linear decay/rise index for the light curve.
t_0 : float
Initial time of the event.
t : float or array
Array of observation times.
Returns
-------
M : float or array
Absolute magnitude light curve as a function of time (same shape as
t).
"""
return M_0 + alpha*(t-t_0)
def M_to_m(self, M, distance):
"""
Returns the apparent magnitude using a distance and absolute
magnitude.
Parameters
----------
M : float or array
Absolute magnitude of object.
distance : float or array
Distance of the object (must have same size as M).
Returns
-------
m : float or array
Apparent magnitude of the object (same size as M or distance).
"""
return (M + 5 * np.log10(distance * 1e6) - 5)
def dlim(self, mlim, M):
"""
Returns the limiting distance for a model with absolute magnitude M
and limiting magnitude mlim.
Parameters
----------
mlim : float or array
            Limiting magnitude from observations.
M : float or array
Absolute magnitude from model (must have same shape as mlim).
Returns
-------
dlim : float or array (same shape as mlim)
Limiting distance for given parameters.
"""
return 10**((mlim - M)/5.) * 10 * 1e-6
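    # Worked example (added note): an upper limit mlim = 22 for a model with M = -16
    # gives dlim = 10**((22 + 16)/5.) * 10 * 1e-6 ~ 398 Mpc, and M_to_m(-16, 398.1)
    # returns ~22 again, i.e. dlim() inverts M_to_m() at fixed absolute magnitude.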
def create_distance_dist(self, mu_f, sigma_f):
"""
Returns a truncated normal distribution as the distance distribution.
Parameters
----------
mu_f : float
Mean of the distance distribution.
sigma_f : float
Standard deviation of the distance distribution.
Returns
-------
        distance_dist : scipy.stats frozen truncated-normal distribution
            The frozen truncated normal distribution; call its .pdf method to
            evaluate the probability density.
"""
#set min,max distances as 0 Mpc, 4000 Mpc
a = (0. - mu_f)/sigma_f
b = (4000. - mu_f)/sigma_f
return truncnorm(a, b, mu_f, sigma_f)
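    # Example (added note): create_distance_dist(100., 30.).pdf(120.) evaluates the
    # truncated-normal density at 120 Mpc for a field with mean 100 Mpc and sigma 30 Mpc.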
def calc_expit_argument(self,d_lim,maglim_err=0.1):
"""
Returns a logistic/expit function that accounts for errors in the
measurement of limiting magnitudes.
Parameters
----------
d_lim : float
Limiting distance corresponding to the observed limiting
magnitude.
maglim_err : float
Error in the limiting magnitude measurement (default=0.1 mag).
Returns
-------
expit_func : func
            Logistic function based on errors in the limiting magnitude.
"""
if maglim_err==0.:
maglim_err = 0.1
dlow = d_lim*10**-(3*maglim_err/5) # set dlow at 3-sigma
dmid = d_lim*10**-(maglim_err/5) # set dmid at 1-sigma
        a = np.log(0.021/0.979)/(dlow - dmid)  # slope set so that expit(dlow) ~ 0.021
        b = -1.0*dmid                          # centre the logistic at dmid, i.e. expit(dmid) = 0.5
        return lambda x : expit(a*(x + b))
def calc_likelihood_integral(self, M, expit_func, dist_samples,
mlow, mhigh):
"""
Returns the single observation likelihood integral evaluated using
posterior samples drawn from the distance distribution.
"""
dist_samples_survey = dist_samples[(dist_samples>self.dlim(mlow,M))
&(dist_samples<=self.dlim(mhigh,M))]
dist_samples_high = dist_samples[dist_samples>self.dlim(mhigh,M)]
N_samples_survey = len(dist_samples_survey)
N_samples_high = len(dist_samples_high)
N_total = N_samples_survey + N_samples_high
if (N_samples_survey==0)&(N_samples_high!=0):
return 1./(mhigh-mlow)
elif (N_samples_survey!=0)&(N_samples_high==0):
return np.sum((1./(
np.vectorize(
self.M_to_m)(M, dist_samples_survey) -mlow))*\
np.vectorize(expit_func)(dist_samples_survey))/\
N_samples_survey
elif (N_samples_survey!=0)&(N_samples_high!=0):
            # Weight the survey-range term by its sample fraction using the mean over
            # survey samples (consistent with the branch above), plus the beyond-survey term.
            return N_samples_survey/N_total * np.mean((1./(
                np.vectorize(self.M_to_m)(M, dist_samples_survey) - mlow))*\
                np.vectorize(expit_func)(dist_samples_survey)) +\
                (N_samples_high/N_total) * (1./(mhigh-mlow))
return 0.
def create_dlim_pdf(self, M, d_lim, maglim_err, norm_factor, p_d, d_min,
d_max):
"""
Returns the likelihood of a single observation for a given field and
model, under the astrophysical hypothesis and using distance limits.
Parameters
----------
M : float
The absolute magnitude of the model.
d_lim : float
The observed limiting distance below which non-detection is
invalid.
maglim_err : float
Error in limiting magnitude measurement.
norm_factor : float
Pre-computed normalization factor for the likelihood.
p_d : func
The probability density function (pdf) of the distance.
d_min : float
Lower limit of the distance distribution.
d_max : float
Upper limit of the distance distribution.
Returns
-------
dlim_pdf : float
The likelihood of obtaining the observed limiting magnitude
given model absolute magnitude M.
"""
expit_num = self.calc_expit_argument(d_lim, maglim_err)
num = quad(lambda d : (1./(self.M_to_m(M,d)-self.M_to_m(M,d_min)))*\
expit_num(d)*p_d(d), d_min+0.1, d_max)[0]
return num/norm_factor
def create_mlim_pdf(self, M, d_lim, maglim_err, p_d, m_low, m_high,
eps=0.1, dmax=3000):
"""
Returns the likelihood of a single observation for a given field and
model, under the astrophysical hypothesis and using survey upper limits.
Parameters
----------
M : float
The absolute magnitude of the model.
d_lim : float
The limiting distance below which non-detection is invalid.
maglim_err : float
Error in limiting magnitude measurement.
p_d : func
The probability density function (pdf) of the distance.
m_low : float
Lower limit of the limiting magnitude distribution.
m_high : float
Upper limit of the limiting magnitude distribution.
Returns
-------
mlim_pdf : float
The likelihood of obtaining the observed limiting magnitude
given model absolute magnitude M.
"""
expit_num = self.calc_expit_argument(d_lim, maglim_err)
num = quad(
lambda d : (1./(self.M_to_m(M, d) - m_low))*expit_num(d)*p_d(d),
self.dlim(m_low,M)+eps, self.dlim(m_high,M))[0] +\
quad(lambda d : (1./(m_high - m_low))*p_d(d), self.dlim(m_high,M),
dmax)[0]
den = quad(
lambda m: quad(
lambda d : (1./(self.M_to_m(M, d) - m_low))*\
self.calc_expit_argument(self.dlim(m,M),maglim_err)(d)*\
p_d(d), self.dlim(m_low,M)+eps,
self.dlim(m_high,M))[0], m_low, m_high)[0] +\
quad(lambda d : p_d(d), self.dlim(m_high,M), dmax)[0]
if den==0.:
return 0.
return num/den
def create_mlim_pdf_fromsamples(self, M, d_lim, maglim_err, dist_samples,
m_low, m_high):
"""
Returns the likelihood of a single observation for a given field and
model, under the astrophysical hypothesis using survey limits and
distance posterior samples.
"""
expit_num = self.calc_expit_argument(d_lim, maglim_err)
num = self.calc_likelihood_integral(M, expit_num, dist_samples,
m_low, m_high)
den = quad(
lambda m: self.calc_likelihood_integral(M,
self.calc_expit_argument(self.dlim(m,M), maglim_err), dist_samples,
m_low, m_high), m_low, m_high)[0]
if den==0.:
return 0.
return num/den
def calc_infield_filter_dlim_likelihood(
self, params, fid, mlims, t0, T,
p_d_f, maglimerrs, dmin, dmax,
mlow_t, mhigh_t, norm_factors):
"""
Returns the likelihood of observations for a single field in a single
filter (filter ID : fid) using distance limits, under the astrophysical
hypothesis.
Parameters
----------
params : array-like
List or array of model parameters for the kilonova light-curve.
fid : integer
Filter ID number of the corresponding filter.
mlims : array
Array of observed filter-specific limiting magnitudes
corresponding to the time array T.
t0 : float
Initial time of the event.
T : array
            Array of observation times corresponding to the array of upper
limits mlims.
p_d_f : func
The field-specific probability density function (pdf) of the
distance.
maglimerrs : array
Array of measurement errors in the limiting magnitudes (mlims).
dmin : float
Lower limit of the distance distribution.
dmax : float
Upper limit of the distance distribution.
mlow_t : float
Lower limit of the limiting magnitude distribution in the
null-event hypothesis.
mhigh_t : float
Upper limit of the limiting magnitude distribution in the
null-event hypothesis.
norm_factors : array
Array of normalization factors for each observation.
Returns
-------
plims_f : float
Likelihood of observations for a single field in a single
filter (filter ID : fid) using distance limits, under the
astrophysical hypothesis.
"""
M = np.array([self.lc_model_funcs[fid-1](*params, t_0=t0, t=t)\
for t in T])
dlims = np.array(list(map(self.dlim, mlims, M)))
pool = Pool(processes=2)
plims_f_t = pool.starmap(partial(self.create_dlim_pdf, p_d=p_d_f,
d_min=dmin, d_max=dmax),
np.c_[M,dlims,maglimerrs,norm_factors])
pool.close()
plims_f_t_nondet = np.array([self.nullevent_mlim_pdf(mlow_t,mhigh_t)\
for m in mlims])
plims_f = np.product(plims_f_t/plims_f_t_nondet)
return plims_f
def calc_infield_dlim_likelihood(
self, params, filter_ids, mlims_array, t0,
filter_obs_times, p_d_f, P_f,
maglimerr_array, dmin, dmax, m_low_t,
m_high_t, norm_factor_array):
"""
Returns the overall likelihood of observations for a single field using
distance limits, under the astrophysical hypothesis.
Parameters
----------
params : array-like
List of lists or array of arrays of model parameters for the
kilonova light-curve corresponding to each filter.
filter_ids : array-like
Array of Filter ID numbers (integers) as per the survey.
mlims_array : array-like
List/Array of arrays with observed filter-specific limiting
magnitudes corresponding to the time array filter_obs_times.
t0 : float
Initial time of the event.
filter_obs_times : array-like
            List/Array of arrays of observation times corresponding to the
array of upper limits mlims (shape same as mlims_array).
p_d_f : func
The field-specific probability density function (pdf) of the
distance.
P_f : float
Sky probability of the event being in the given field (f).
        maglimerr_array : array
List/Array of arrays with measurement errors in the limiting
magnitudes (shape same as mlims_array).
dmin : float
Lower limit of the distance distribution.
dmax : float
Upper limit of the distance distribution.
m_low_t : float
Lower limit of the limiting magnitude distribution in the
null-event hypothesis.
m_high_t : float
Upper limit of the limiting magnitude distribution in the
null-event hypothesis.
norm_factor_array : array
List/Array of arrays with normalization factors for each
observation (shape same as mlims_array).
Returns
-------
likelihood : float
Overall likelihood of observations for a single field using
distance limits, under the astrophysical hypothesis.
"""
plims = np.array([self.calc_infield_filter_dlim_likelihood(
params[2*(fid-1):2*fid], fid, mlims_array[i], t0,
filter_obs_times[i], p_d_f, maglimerr_array[i], dmin,
dmax, m_low_t, m_high_t, norm_factor_array[i])\
for i,fid in enumerate(filter_ids)])
return np.product(plims)*P_f
def calc_infield_filter_mlim_likelihood(
self, params, fid, mlims, t0, T,
p_d_f, maglimerrs, mlow_a, mhigh_a,
mlow_t, mhigh_t):
"""
Returns the likelihood of observations for a single field in a single
filter (filter ID : fid) using survey limits, under the astrophysical
hypothesis.
Parameters
----------
params : array-like
List or array of model parameters for the kilonova light-curve.
fid : integer
Filter ID number of the corresponding filter.
mlims : array
Array of observed filter-specific limiting magnitudes
corresponding to the time array T.
t0 : float
Initial time of the event.
T : array
            Array of observation times corresponding to the array of upper
limits mlims.
p_d_f : func
The field-specific probability density function (pdf) of the
distance.
maglimerrs : array
Array of measurement errors in the limiting magnitudes (mlims).
mlow_a : float
Lower limit of the limiting magnitude distribution in the
astrophysical hypothesis.
mhigh_a : float
Upper limit of the limiting magnitude distribution in the
astrophysical hypothesis.
mlow_t : float
Lower limit of the limiting magnitude distribution in the
null-event hypothesis.
mhigh_t : float
Upper limit of the limiting magnitude distribution in the
null-event hypothesis.
Returns
-------
plims_f : float
Likelihood of observations for a single field in a single
filter (filter ID : fid) using survey limits, under the
astrophysical hypothesis.
"""
M = np.array([self.lc_model_funcs[fid-1](*params, t_0=t0, t=t)\
for t in T])
dlims = np.array(list(map(self.dlim, mlims, M)))
pool = Pool(processes=2)
plims_f_t = pool.starmap(partial(self.create_mlim_pdf, p_d=p_d_f,
m_low=mlow_a,m_high=mhigh_a),
np.c_[M,dlims,maglimerrs])
pool.close()
plims_f_t_nondet = np.array([self.nullevent_mlim_pdf(mlow_t,mhigh_t)\
for m in mlims])
plims_f = np.product(plims_f_t/plims_f_t_nondet)
return plims_f
def calc_infield_mlim_likelihood(
self, params, filter_ids, mlims_array, t0,
filter_obs_times, p_d_f, P_f,
maglimerr_array, m_low_a, m_high_a,
m_low_t, m_high_t):
"""
Returns the overall likelihood of observations for a single field using
survey limits, under the astrophysical hypothesis.
Parameters
----------
params : array-like
List of lists or array of arrays of model parameters for the
kilonova light-curve corresponding to each filter.
filter_ids : array-like
Array of Filter ID numbers (integers) as per the survey.
mlims_array : array-like
List/Array of arrays with observed filter-specific limiting
magnitudes corresponding to the time array filter_obs_times.
t0 : float
Initial time of the event.
filter_obs_times : array-like
            List/Array of arrays of observation times corresponding to the
array of upper limits mlims (shape same as mlims_array).
p_d_f : func
The field-specific probability density function (pdf) of the
distance.
P_f : float
Sky probability of the event being in the given field (f).
        maglimerr_array : array
List/Array of arrays with measurement errors in the limiting
magnitudes (shape same as mlims_array).
m_low_a : float
Lower limit of the limiting magnitude distribution in the
astrophysical hypothesis.
m_high_a : float
Upper limit of the limiting magnitude distribution in the
astrophysical hypothesis.
m_low_t : float
Lower limit of the limiting magnitude distribution in the
null-event hypothesis.
m_high_t : float
Upper limit of the limiting magnitude distribution in the
null-event hypothesis.
Returns
-------
likelihood : float
Overall likelihood of observations for a single field using
survey limits, under the astrophysical hypothesis.
"""
plims = np.array([self.calc_infield_filter_mlim_likelihood(
params[2*(fid-1):2*fid], fid, mlims_array[i], t0,
filter_obs_times[i], p_d_f, maglimerr_array[i],
m_low_a, m_high_a, m_low_t, m_high_t)\
for i,fid in enumerate(filter_ids)])
return np.product(plims)*P_f
def calc_infield_filter_mlim_likelihood_fromsamples(
self, params, fid,
mlims, t0, T, d_samples,
maglimerrs, mlow_a,
mhigh_a, mlow_t,
mhigh_t):
"""
Returns the likelihood of observations for a single field in a single
filter (filter ID : fid) using survey limits and distance posterior
samples, under the astrophysical hypothesis.
"""
M = np.array([self.lc_model_funcs[fid-1](*params, t_0=t0, t=t)\
for t in T])
dlims = np.array(list(map(self.dlim, mlims, M)))
pool = Pool(processes=2)
plims_f_t = pool.starmap(partial(self.create_mlim_pdf_fromsamples,
dist_samples=d_samples, m_low=mlow_a,
m_high=mhigh_a), np.c_[M,dlims,maglimerrs])
pool.close()
plims_f_t_nondet = np.array([self.nullevent_mlim_pdf(mlow_t,mhigh_t)\
for m in mlims])
plims_f = np.product(plims_f_t/plims_f_t_nondet)
return plims_f
def calc_infield_mlim_likelihood_fromsamples(
self, params, filter_ids,
mlims_array, t0,
filter_obs_times, d_samples,
P_f, maglimerr_array, m_low_a,
m_high_a, m_low_t, m_high_t):
"""
Returns the overall likelihood of observations for a single field using
survey limits and distance posterior samples, under the astrophysical
hypothesis.
"""
plims = np.array([self.calc_infield_filter_mlim_likelihood_fromsamples(
params[2*(fid-1):2*fid], fid, mlims_array[i], t0,
filter_obs_times[i], d_samples, maglimerr_array[i],
m_low_a, m_high_a, m_low_t, m_high_t)\
for i,fid in enumerate(filter_ids)])
return np.product(plims)*P_f
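# --- Hedged usage sketch (added; illustrative inputs, not part of the original module) ---
if __name__ == "__main__":
    # Two bands modelled with a linear light curve; a flat pdf over the survey's
    # limiting-magnitude window stands in for nullevent_mlim_pdf (both are assumptions).
    lc_linear = lambda M_0, alpha, t_0, t: M_0 + alpha * (t - t_0)
    kne_inf = Kilonova_Inference(
        lc_model_funcs=[lc_linear, lc_linear],
        nullevent_mlim_pdf=lambda m_low, m_high: 1. / (m_high - m_low))
    # Limiting distance (Mpc) probed by a 22 mag upper limit for a source with M = -16:
    print(kne_inf.dlim(22., -16.))  # ~398 Mpc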
| """
The main module of nimbus that sets up the Bayesian formalism.
Classes:
Kilonova_Inference
"""
__author__ = '<NAME>'
import numpy as np
from scipy.stats import norm, truncnorm
from scipy.integrate import quad
from scipy.special import expit
from multiprocessing import Pool
from functools import partial
class Kilonova_Inference():
"""
Initializes utility functions for inference and defines the model.
Attributes
----------
lc_model_funcs : array-like
The array whose elements are band-specific functions that define the
light-curve evolution as a function of time.
nullevent_mlim_pdf : func
The function that evaluates the pdf for the observed upper limits when
the event is either not in the observed fields or is terrestrial.
Usage
-----
    kne_inf = Kilonova_Inference(lc_model_funcs, nullevent_mlim_pdf)
"""
def __init__(self, lc_model_funcs, nullevent_mlim_pdf):
print("Initializing inference framework...")
self.lc_model_funcs = lc_model_funcs
self.nbands = len(lc_model_funcs)
self.nullevent_mlim_pdf = nullevent_mlim_pdf
def lc_model_powerlaw(self, M_0, gamma, t_0, t):
"""
Returns the absolute magnitude evolution as a power law.
Parameters
----------
M_0 : float
The peak absolute magnitude of the light curve.
gamma : float
Power law index for the light curve decay.
t_0 : float
Initial time of the event.
t : float or array
Array of observation times.
Returns
-------
M : float or array
Absolute magnitude light curve as a function of time (same shape as
t).
"""
return (M_0 * pow(t_0/t, gamma))
def lc_model_linear(self, M_0, alpha, t_0, t):
"""
Returns the absolute magnitude evolution as a linear decay/rise.
Parameters
----------
M_0 : float
The peak absolute magnitude of the light curve.
alpha : float
Linear decay/rise index for the light curve.
t_0 : float
Initial time of the event.
t : float or array
Array of observation times.
Returns
-------
M : float or array
Absolute magnitude light curve as a function of time (same shape as
t).
"""
return M_0 + alpha*(t-t_0)
def M_to_m(self, M, distance):
"""
Returns the apparent magnitude using a distance and absolute
magnitude.
Parameters
----------
M : float or array
Absolute magnitude of object.
distance : float or array
Distance of the object (must have same size as M).
Returns
-------
m : float or array
Apparent magnitude of the object (same size as M or distance).
"""
return (M + 5 * np.log10(distance * 1e6) - 5)
def dlim(self, mlim, M):
"""
Returns the limiting distance for a model with absolute magnitude M
and limiting magnitude mlim.
Parameters
----------
mlim : float or array
            Limiting magnitude from observations.
M : float or array
Absolute magnitude from model (must have same shape as mlim).
Returns
-------
dlim : float or array (same shape as mlim)
Limiting distance for given parameters.
"""
return 10**((mlim - M)/5.) * 10 * 1e-6
def create_distance_dist(self, mu_f, sigma_f):
"""
Returns a truncated normal distribution as the distance distribution.
Parameters
----------
mu_f : float
Mean of the distance distribution.
sigma_f : float
Standard deviation of the distance distribution.
Returns
-------
        distance_dist : scipy.stats frozen truncated-normal distribution
            The frozen truncated normal distribution; call its .pdf method to
            evaluate the probability density.
"""
#set min,max distances as 0 Mpc, 4000 Mpc
a = (0. - mu_f)/sigma_f
b = (4000. - mu_f)/sigma_f
return truncnorm(a, b, mu_f, sigma_f)
def calc_expit_argument(self,d_lim,maglim_err=0.1):
"""
Returns a logistic/expit function that accounts for errors in the
measurement of limiting magnitudes.
Parameters
----------
d_lim : float
Limiting distance corresponding to the observed limiting
magnitude.
maglim_err : float
Error in the limiting magnitude measurement (default=0.1 mag).
Returns
-------
expit_func : func
            Logistic function based on errors in the limiting magnitude.
"""
if maglim_err==0.:
maglim_err = 0.1
dlow = d_lim*10**-(3*maglim_err/5) # set dlow at 3-sigma
dmid = d_lim*10**-(maglim_err/5) # set dmid at 1-sigma
a = np.log(0.021/0.979)/(dlow - dmid)
b = -1.0*dmid
return lambda x : expit(a*(x + b))
def calc_likelihood_integral(self, M, expit_func, dist_samples,
mlow, mhigh):
"""
Returns the single observation likelihood integral evaluated using
posterior samples drawn from the distance distribution.
"""
dist_samples_survey = dist_samples[(dist_samples>self.dlim(mlow,M))
&(dist_samples<=self.dlim(mhigh,M))]
dist_samples_high = dist_samples[dist_samples>self.dlim(mhigh,M)]
N_samples_survey = len(dist_samples_survey)
N_samples_high = len(dist_samples_high)
N_total = N_samples_survey + N_samples_high
if (N_samples_survey==0)&(N_samples_high!=0):
return 1./(mhigh-mlow)
elif (N_samples_survey!=0)&(N_samples_high==0):
return np.sum((1./(
np.vectorize(
self.M_to_m)(M, dist_samples_survey) -mlow))*\
np.vectorize(expit_func)(dist_samples_survey))/\
N_samples_survey
elif (N_samples_survey!=0)&(N_samples_high!=0):
            # Weight the survey-range term by its sample fraction using the mean over
            # survey samples (consistent with the branch above), plus the beyond-survey term.
            return N_samples_survey/N_total * np.mean((1./(
                np.vectorize(self.M_to_m)(M, dist_samples_survey) - mlow))*\
                np.vectorize(expit_func)(dist_samples_survey)) +\
                (N_samples_high/N_total) * (1./(mhigh-mlow))
return 0.
def create_dlim_pdf(self, M, d_lim, maglim_err, norm_factor, p_d, d_min,
d_max):
"""
Returns the likelihood of a single observation for a given field and
model, under the astrophysical hypothesis and using distance limits.
Parameters
----------
M : float
The absolute magnitude of the model.
d_lim : float
The observed limiting distance below which non-detection is
invalid.
maglim_err : float
Error in limiting magnitude measurement.
norm_factor : float
Pre-computed normalization factor for the likelihood.
p_d : func
The probability density function (pdf) of the distance.
d_min : float
Lower limit of the distance distribution.
d_max : float
Upper limit of the distance distribution.
Returns
-------
dlim_pdf : float
The likelihood of obtaining the observed limiting magnitude
given model absolute magnitude M.
"""
expit_num = self.calc_expit_argument(d_lim, maglim_err)
num = quad(lambda d : (1./(self.M_to_m(M,d)-self.M_to_m(M,d_min)))*\
expit_num(d)*p_d(d), d_min+0.1, d_max)[0]
return num/norm_factor
def create_mlim_pdf(self, M, d_lim, maglim_err, p_d, m_low, m_high,
eps=0.1, dmax=3000):
"""
Returns the likelihood of a single observation for a given field and
model, under the astrophysical hypothesis and using survey upper limits.
Parameters
----------
M : float
The absolute magnitude of the model.
d_lim : float
The limiting distance below which non-detection is invalid.
maglim_err : float
Error in limiting magnitude measurement.
p_d : func
The probability density function (pdf) of the distance.
m_low : float
Lower limit of the limiting magnitude distribution.
m_high : float
Upper limit of the limiting magnitude distribution.
Returns
-------
mlim_pdf : float
The likelihood of obtaining the observed limiting magnitude
given model absolute magnitude M.
"""
expit_num = self.calc_expit_argument(d_lim, maglim_err)
num = quad(
lambda d : (1./(self.M_to_m(M, d) - m_low))*expit_num(d)*p_d(d),
self.dlim(m_low,M)+eps, self.dlim(m_high,M))[0] +\
quad(lambda d : (1./(m_high - m_low))*p_d(d), self.dlim(m_high,M),
dmax)[0]
den = quad(
lambda m: quad(
lambda d : (1./(self.M_to_m(M, d) - m_low))*\
self.calc_expit_argument(self.dlim(m,M),maglim_err)(d)*\
p_d(d), self.dlim(m_low,M)+eps,
self.dlim(m_high,M))[0], m_low, m_high)[0] +\
quad(lambda d : p_d(d), self.dlim(m_high,M), dmax)[0]
if den==0.:
return 0.
return num/den
def create_mlim_pdf_fromsamples(self, M, d_lim, maglim_err, dist_samples,
m_low, m_high):
"""
Returns the likelihood of a single observation for a given field and
model, under the astrophysical hypothesis using survey limits and
distance posterior samples.
"""
expit_num = self.calc_expit_argument(d_lim, maglim_err)
num = self.calc_likelihood_integral(M, expit_num, dist_samples,
m_low, m_high)
den = quad(
lambda m: self.calc_likelihood_integral(M,
self.calc_expit_argument(self.dlim(m,M), maglim_err), dist_samples,
m_low, m_high), m_low, m_high)[0]
if den==0.:
return 0.
return num/den
def calc_infield_filter_dlim_likelihood(
self, params, fid, mlims, t0, T,
p_d_f, maglimerrs, dmin, dmax,
mlow_t, mhigh_t, norm_factors):
"""
Returns the likelihood of observations for a single field in a single
filter (filter ID : fid) using distance limits, under the astrophysical
hypothesis.
Parameters
----------
params : array-like
List or array of model parameters for the kilonova light-curve.
fid : integer
Filter ID number of the corresponding filter.
mlims : array
Array of observed filter-specific limiting magnitudes
corresponding to the time array T.
t0 : float
Initial time of the event.
T : array
            Array of observation times corresponding to the array of upper
limits mlims.
p_d_f : func
The field-specific probability density function (pdf) of the
distance.
maglimerrs : array
Array of measurement errors in the limiting magnitudes (mlims).
dmin : float
Lower limit of the distance distribution.
dmax : float
Upper limit of the distance distribution.
mlow_t : float
Lower limit of the limiting magnitude distribution in the
null-event hypothesis.
mhigh_t : float
Upper limit of the limiting magnitude distribution in the
null-event hypothesis.
norm_factors : array
Array of normalization factors for each observation.
Returns
-------
plims_f : float
Likelihood of observations for a single field in a single
filter (filter ID : fid) using distance limits, under the
astrophysical hypothesis.
"""
M = np.array([self.lc_model_funcs[fid-1](*params, t_0=t0, t=t)\
for t in T])
dlims = np.array(list(map(self.dlim, mlims, M)))
pool = Pool(processes=2)
plims_f_t = pool.starmap(partial(self.create_dlim_pdf, p_d=p_d_f,
d_min=dmin, d_max=dmax),
np.c_[M,dlims,maglimerrs,norm_factors])
pool.close()
plims_f_t_nondet = np.array([self.nullevent_mlim_pdf(mlow_t,mhigh_t)\
for m in mlims])
plims_f = np.product(plims_f_t/plims_f_t_nondet)
return plims_f
def calc_infield_dlim_likelihood(
self, params, filter_ids, mlims_array, t0,
filter_obs_times, p_d_f, P_f,
maglimerr_array, dmin, dmax, m_low_t,
m_high_t, norm_factor_array):
"""
Returns the overall likelihood of observations for a single field using
distance limits, under the astrophysical hypothesis.
Parameters
----------
params : array-like
List of lists or array of arrays of model parameters for the
kilonova light-curve corresponding to each filter.
filter_ids : array-like
Array of Filter ID numbers (integers) as per the survey.
mlims_array : array-like
List/Array of arrays with observed filter-specific limiting
magnitudes corresponding to the time array filter_obs_times.
t0 : float
Initial time of the event.
filter_obs_times : array-like
            List/Array of arrays of observation times corresponding to the
array of upper limits mlims (shape same as mlims_array).
p_d_f : func
The field-specific probability density function (pdf) of the
distance.
P_f : float
Sky probability of the event being in the given field (f).
        maglimerr_array : array
List/Array of arrays with measurement errors in the limiting
magnitudes (shape same as mlims_array).
dmin : float
Lower limit of the distance distribution.
dmax : float
Upper limit of the distance distribution.
m_low_t : float
Lower limit of the limiting magnitude distribution in the
null-event hypothesis.
m_high_t : float
Upper limit of the limiting magnitude distribution in the
null-event hypothesis.
norm_factor_array : array
List/Array of arrays with normalization factors for each
observation (shape same as mlims_array).
Returns
-------
likelihood : float
Overall likelihood of observations for a single field using
distance limits, under the astrophysical hypothesis.
"""
plims = np.array([self.calc_infield_filter_dlim_likelihood(
params[2*(fid-1):2*fid], fid, mlims_array[i], t0,
filter_obs_times[i], p_d_f, maglimerr_array[i], dmin,
dmax, m_low_t, m_high_t, norm_factor_array[i])\
for i,fid in enumerate(filter_ids)])
        return np.prod(plims)*P_f
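    # Hedged sketch of the field-level call (names and values assumed): the flat
    # ``params`` vector carries two light-curve parameters per filter, sliced
    # internally as params[2*(fid-1):2*fid], and the per-filter likelihoods are
    # multiplied and weighted by the field's sky probability P_f.
    #
    #   L_field = kne_inf.calc_infield_dlim_likelihood(
    #       params=[-16.0, 1.0, -15.5, 1.2], filter_ids=[1, 2],
    #       mlims_array=[mlims_g, mlims_r], t0=58598.0,
    #       filter_obs_times=[T_g, T_r], p_d_f=p_d_field, P_f=0.03,
    #       maglimerr_array=[errs_g, errs_r], dmin=0., dmax=4000.,
    #       m_low_t=19.0, m_high_t=23.0, norm_factor_array=[norms_g, norms_r])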
def calc_infield_filter_mlim_likelihood(
self, params, fid, mlims, t0, T,
p_d_f, maglimerrs, mlow_a, mhigh_a,
mlow_t, mhigh_t):
"""
Returns the likelihood of observations for a single field in a single
filter (filter ID : fid) using survey limits, under the astrophysical
hypothesis.
Parameters
----------
params : array-like
List or array of model parameters for the kilonova light-curve.
fid : integer
Filter ID number of the corresponding filter.
mlims : array
Array of observed filter-specific limiting magnitudes
corresponding to the time array T.
t0 : float
Initial time of the event.
T : array
            Array of observation times corresponding to the array of upper
limits mlims.
p_d_f : func
The field-specific probability density function (pdf) of the
distance.
maglimerrs : array
Array of measurement errors in the limiting magnitudes (mlims).
mlow_a : float
Lower limit of the limiting magnitude distribution in the
astrophysical hypothesis.
mhigh_a : float
Upper limit of the limiting magnitude distribution in the
astrophysical hypothesis.
mlow_t : float
Lower limit of the limiting magnitude distribution in the
null-event hypothesis.
mhigh_t : float
Upper limit of the limiting magnitude distribution in the
null-event hypothesis.
Returns
-------
plims_f : float
Likelihood of observations for a single field in a single
filter (filter ID : fid) using survey limits, under the
astrophysical hypothesis.
"""
M = np.array([self.lc_model_funcs[fid-1](*params, t_0=t0, t=t)\
for t in T])
dlims = np.array(list(map(self.dlim, mlims, M)))
pool = Pool(processes=2)
plims_f_t = pool.starmap(partial(self.create_mlim_pdf, p_d=p_d_f,
m_low=mlow_a,m_high=mhigh_a),
np.c_[M,dlims,maglimerrs])
pool.close()
plims_f_t_nondet = np.array([self.nullevent_mlim_pdf(mlow_t,mhigh_t)\
for m in mlims])
        plims_f = np.prod(plims_f_t/plims_f_t_nondet)
return plims_f
def calc_infield_mlim_likelihood(
self, params, filter_ids, mlims_array, t0,
filter_obs_times, p_d_f, P_f,
maglimerr_array, m_low_a, m_high_a,
m_low_t, m_high_t):
"""
Returns the overall likelihood of observations for a single field using
survey limits, under the astrophysical hypothesis.
Parameters
----------
params : array-like
List of lists or array of arrays of model parameters for the
kilonova light-curve corresponding to each filter.
filter_ids : array-like
Array of Filter ID numbers (integers) as per the survey.
mlims_array : array-like
List/Array of arrays with observed filter-specific limiting
magnitudes corresponding to the time array filter_obs_times.
t0 : float
Initial time of the event.
filter_obs_times : array-like
            List/Array of arrays of observation times corresponding to the
array of upper limits mlims (shape same as mlims_array).
p_d_f : func
The field-specific probability density function (pdf) of the
distance.
P_f : float
Sky probability of the event being in the given field (f).
        maglimerr_array : array
List/Array of arrays with measurement errors in the limiting
magnitudes (shape same as mlims_array).
m_low_a : float
Lower limit of the limiting magnitude distribution in the
astrophysical hypothesis.
m_high_a : float
Upper limit of the limiting magnitude distribution in the
astrophysical hypothesis.
m_low_t : float
Lower limit of the limiting magnitude distribution in the
null-event hypothesis.
m_high_t : float
Upper limit of the limiting magnitude distribution in the
null-event hypothesis.
Returns
-------
likelihood : float
Overall likelihood of observations for a single field using
survey limits, under the astrophysical hypothesis.
"""
plims = np.array([self.calc_infield_filter_mlim_likelihood(
params[2*(fid-1):2*fid], fid, mlims_array[i], t0,
filter_obs_times[i], p_d_f, maglimerr_array[i],
m_low_a, m_high_a, m_low_t, m_high_t)\
for i,fid in enumerate(filter_ids)])
        return np.prod(plims)*P_f
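    # Hedged sketch (assumed inputs): the survey-limit variant mirrors the
    # distance-limit call above, but each observation is normalized over the
    # astrophysical limiting-magnitude range [m_low_a, m_high_a] instead of
    # relying on precomputed normalization factors.
    #
    #   L_field = kne_inf.calc_infield_mlim_likelihood(
    #       params=[-16.0, 1.0, -15.5, 1.2], filter_ids=[1, 2],
    #       mlims_array=[mlims_g, mlims_r], t0=58598.0,
    #       filter_obs_times=[T_g, T_r], p_d_f=p_d_field, P_f=0.03,
    #       maglimerr_array=[errs_g, errs_r], m_low_a=19.0, m_high_a=23.0,
    #       m_low_t=19.0, m_high_t=23.0)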
def calc_infield_filter_mlim_likelihood_fromsamples(
self, params, fid,
mlims, t0, T, d_samples,
maglimerrs, mlow_a,
mhigh_a, mlow_t,
mhigh_t):
"""
Returns the likelihood of observations for a single field in a single
filter (filter ID : fid) using survey limits and distance posterior
samples, under the astrophysical hypothesis.
"""
M = np.array([self.lc_model_funcs[fid-1](*params, t_0=t0, t=t)\
for t in T])
dlims = np.array(list(map(self.dlim, mlims, M)))
pool = Pool(processes=2)
plims_f_t = pool.starmap(partial(self.create_mlim_pdf_fromsamples,
dist_samples=d_samples, m_low=mlow_a,
m_high=mhigh_a), np.c_[M,dlims,maglimerrs])
pool.close()
plims_f_t_nondet = np.array([self.nullevent_mlim_pdf(mlow_t,mhigh_t)\
for m in mlims])
        plims_f = np.prod(plims_f_t/plims_f_t_nondet)
return plims_f
def calc_infield_mlim_likelihood_fromsamples(
self, params, filter_ids,
mlims_array, t0,
filter_obs_times, d_samples,
P_f, maglimerr_array, m_low_a,
m_high_a, m_low_t, m_high_t):
"""
Returns the overall likelihood of observations for a single field using
survey limits and distance posterior samples, under the astrophysical
hypothesis.
"""
plims = np.array([self.calc_infield_filter_mlim_likelihood_fromsamples(
params[2*(fid-1):2*fid], fid, mlims_array[i], t0,
filter_obs_times[i], d_samples, maglimerr_array[i],
m_low_a, m_high_a, m_low_t, m_high_t)\
for i,fid in enumerate(filter_ids)])
        return np.prod(plims)*P_f
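# Illustrative end-to-end sketch (assumed inputs; the constructor usage follows
# the class docstring's ``kne_inf = Kilonova_Inference(lc_model_func)`` note):
#
#   kne_inf = Kilonova_Inference(lc_model_func)
#   L_field = kne_inf.calc_infield_mlim_likelihood_fromsamples(
#       params=[-16.0, 1.0, -15.5, 1.2], filter_ids=[1, 2],
#       mlims_array=[mlims_g, mlims_r], t0=58598.0,
#       filter_obs_times=[T_g, T_r], d_samples=d_samples, P_f=0.03,
#       maglimerr_array=[errs_g, errs_r], m_low_a=19.0, m_high_a=23.0,
#       m_low_t=19.0, m_high_t=23.0)
#
# Combining several observed fields (e.g. summing these P_f-weighted field
# likelihoods) is left to the caller in this sketch.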
| en | 0.68593 | The main module of nimbus that sets up the Bayesian formalism. Classes: Kilonova_Inference Initializes utility functions for inference and defines the model. Attributes ---------- lc_model_funcs : array-like The array whose elements are band-specific functions that define the light-curve evolution as a function of time. nullevent_mlim_pdf : func The function that evaluates the pdf for the observed upper limits when the event is either not in the observed fields or is terrestrial. Usage ----- kne_inf = Kilonova_Inference(lc_model_func) Returns the absolute magnitude evolution as a power law. Parameters ---------- M_0 : float The peak absolute magnitude of the light curve. gamma : float Power law index for the light curve decay. t_0 : float Initial time of the event. t : float or array Array of observation times. Returns ------- M : float or array Absolute magnitude light curve as a function of time (same shape as t). Returns the absolute magnitude evolution as a linear decay/rise. Parameters ---------- M_0 : float The peak absolute magnitude of the light curve. alpha : float Linear decay/rise index for the light curve. t_0 : float Initial time of the event. t : float or array Array of observation times. Returns ------- M : float or array Absolute magnitude light curve as a function of time (same shape as t). Returns the apparent magnitude using a distance and absolute magnitude. Parameters ---------- M : float or array Absolute magnitude of object. distance : float or array Distance of the object (must have same size as M). Returns ------- m : float or array Apparent magnitude of the object (same size as M or distance). Returns the limiting distance for a model with absolute magnitude M and limiting magnitude mlim. Parameters ---------- mlim : float or array Limitng magnitude from observations. M : float or array Absolute magnitude from model (must have same shape as mlim). Returns ------- dlim : float or array (same shape as mlim) Limiting distance for given parameters. Returns a truncated normal distribution as the distance distribution. Parameters ---------- mu_f : float Mean of the distance distribution. sigma_f : float Standard deviation of the distance distribution. Returns ------- distance_dist : scipy.stats.rv_continuous.pdf object The probability density function of the truncated normal distribution. #set min,max distances as 0 Mpc, 4000 Mpc Returns a logistic/expit function that accounts for errors in the measurement of limiting magnitudes. Parameters ---------- d_lim : float Limiting distance corresponding to the observed limiting magnitude. maglim_err : float Error in the limiting magnitude measurement (default=0.1 mag). Returns ------- expit_func : func Logitic function based on errors in the limiting magnitude. # set dlow at 3-sigma # set dmid at 1-sigma Returns the single observation likelihood integral evaluated using posterior samples drawn from the distance distribution. Returns the likelihood of a single observation for a given field and model, under the astrophysical hypothesis and using distance limits. Parameters ---------- M : float The absolute magnitude of the model. d_lim : float The observed limiting distance below which non-detection is invalid. maglim_err : float Error in limiting magnitude measurement. norm_factor : float Pre-computed normalization factor for the likelihood. p_d : func The probability density function (pdf) of the distance. d_min : float Lower limit of the distance distribution. 
d_max : float Upper limit of the distance distribution. Returns ------- dlim_pdf : float The likelihood of obtaining the observed limiting magnitude given model absolute magnitude M. Returns the likelihood of a single observation for a given field and model, under the astrophysical hypothesis and using survey upper limits. Parameters ---------- M : float The absolute magnitude of the model. d_lim : float The limiting distance below which non-detection is invalid. maglim_err : float Error in limiting magnitude measurement. p_d : func The probability density function (pdf) of the distance. m_low : float Lower limit of the limiting magnitude distribution. m_high : float Upper limit of the limiting magnitude distribution. Returns ------- mlim_pdf : float The likelihood of obtaining the observed limiting magnitude given model absolute magnitude M. Returns the likelihood of a single observation for a given field and model, under the astrophysical hypothesis using survey limits and distance posterior samples. Returns the likelihood of observations for a single field in a single filter (filter ID : fid) using distance limits, under the astrophysical hypothesis. Parameters ---------- params : array-like List or array of model parameters for the kilonova light-curve. fid : integer Filter ID number of the corresponding filter. mlims : array Array of observed filter-specific limiting magnitudes corresponding to the time array T. t0 : float Initial time of the event. T : array Array of observation times corrsponding to the array of upper limits mlims. p_d_f : func The field-specific probability density function (pdf) of the distance. maglimerrs : array Array of measurement errors in the limiting magnitudes (mlims). dmin : float Lower limit of the distance distribution. dmax : float Upper limit of the distance distribution. mlow_t : float Lower limit of the limiting magnitude distribution in the null-event hypothesis. mhigh_t : float Upper limit of the limiting magnitude distribution in the null-event hypothesis. norm_factors : array Array of normalization factors for each observation. Returns ------- plims_f : float Likelihood of observations for a single field in a single filter (filter ID : fid) using distance limits, under the astrophysical hypothesis. Returns the overall likelihood of observations for a single field using distance limits, under the astrophysical hypothesis. Parameters ---------- params : array-like List of lists or array of arrays of model parameters for the kilonova light-curve corresponding to each filter. filter_ids : array-like Array of Filter ID numbers (integers) as per the survey. mlims_array : array-like List/Array of arrays with observed filter-specific limiting magnitudes corresponding to the time array filter_obs_times. t0 : float Initial time of the event. filter_obs_times : array-like List/Array of arrays of observation times corrsponding to the array of upper limits mlims (shape same as mlims_array). p_d_f : func The field-specific probability density function (pdf) of the distance. P_f : float Sky probability of the event being in the given field (f). maglimerrs_array : array List/Array of arrays with measurement errors in the limiting magnitudes (shape same as mlims_array). dmin : float Lower limit of the distance distribution. dmax : float Upper limit of the distance distribution. m_low_t : float Lower limit of the limiting magnitude distribution in the null-event hypothesis. 
m_high_t : float Upper limit of the limiting magnitude distribution in the null-event hypothesis. norm_factor_array : array List/Array of arrays with normalization factors for each observation (shape same as mlims_array). Returns ------- likelihood : float Overall likelihood of observations for a single field using distance limits, under the astrophysical hypothesis. Returns the likelihood of observations for a single field in a single filter (filter ID : fid) using survey limits, under the astrophysical hypothesis. Parameters ---------- params : array-like List or array of model parameters for the kilonova light-curve. fid : integer Filter ID number of the corresponding filter. mlims : array Array of observed filter-specific limiting magnitudes corresponding to the time array T. t0 : float Initial time of the event. T : array Array of observation times corrsponding to the array of upper limits mlims. p_d_f : func The field-specific probability density function (pdf) of the distance. maglimerrs : array Array of measurement errors in the limiting magnitudes (mlims). mlow_a : float Lower limit of the limiting magnitude distribution in the astrophysical hypothesis. mhigh_a : float Upper limit of the limiting magnitude distribution in the astrophysical hypothesis. mlow_t : float Lower limit of the limiting magnitude distribution in the null-event hypothesis. mhigh_t : float Upper limit of the limiting magnitude distribution in the null-event hypothesis. Returns ------- plims_f : float Likelihood of observations for a single field in a single filter (filter ID : fid) using survey limits, under the astrophysical hypothesis. Returns the overall likelihood of observations for a single field using survey limits, under the astrophysical hypothesis. Parameters ---------- params : array-like List of lists or array of arrays of model parameters for the kilonova light-curve corresponding to each filter. filter_ids : array-like Array of Filter ID numbers (integers) as per the survey. mlims_array : array-like List/Array of arrays with observed filter-specific limiting magnitudes corresponding to the time array filter_obs_times. t0 : float Initial time of the event. filter_obs_times : array-like List/Array of arrays of observation times corrsponding to the array of upper limits mlims (shape same as mlims_array). p_d_f : func The field-specific probability density function (pdf) of the distance. P_f : float Sky probability of the event being in the given field (f). maglimerrs_array : array List/Array of arrays with measurement errors in the limiting magnitudes (shape same as mlims_array). m_low_a : float Lower limit of the limiting magnitude distribution in the astrophysical hypothesis. m_high_a : float Upper limit of the limiting magnitude distribution in the astrophysical hypothesis. m_low_t : float Lower limit of the limiting magnitude distribution in the null-event hypothesis. m_high_t : float Upper limit of the limiting magnitude distribution in the null-event hypothesis. Returns ------- likelihood : float Overall likelihood of observations for a single field using survey limits, under the astrophysical hypothesis. Returns the likelihood of observations for a single field in a single filter (filter ID : fid) using survey limits and distance posterior samples, under the astrophysical hypothesis. Returns the overall likelihood of observations for a single field using survey limits and distance posterior samples, under the astrophysical hypothesis. | 2.739511 | 3 |
granule_ingester/tests/processors/test_ForceAscendingLatitude.py | skorper/incubator-sdap-ingester | 0 | 6631981 | <gh_stars>0
import unittest
import xarray as xr
import numpy as np
from os import path
from nexusproto import DataTile_pb2 as nexusproto
from nexusproto.serialization import from_shaped_array, to_shaped_array
from granule_ingester.processors import ForceAscendingLatitude
from granule_ingester.processors.reading_processors.GridReadingProcessor import GridReadingProcessor
class TestForceAscendingLatitude(unittest.TestCase):
def read_tile(self):
reading_processor = GridReadingProcessor('B03', 'lat', 'lon', time='time')
granule_path = path.join(path.dirname(__file__), '../granules/HLS.S30.T11SPC.2020001.v1.4.hdf.nc')
input_tile = nexusproto.NexusTile()
input_tile.summary.granule = granule_path
dimensions_to_slices = {
'time': slice(0, 1),
'lat': slice(0, 30),
'lon': slice(0, 30)
}
with xr.open_dataset(granule_path) as ds:
return reading_processor._generate_tile(ds, dimensions_to_slices, input_tile)
def test_process(self):
processor = ForceAscendingLatitude()
tile = self.read_tile()
tile_type = tile.tile.WhichOneof("tile_type")
tile_data = getattr(tile.tile, tile_type)
latitudes = from_shaped_array(tile_data.latitude)
variable_data = from_shaped_array(tile_data.variable_data)
print(latitudes)
print(variable_data)
flipped_tile = processor.process(tile)
the_flipped_tile_type = flipped_tile.tile.WhichOneof("tile_type")
the_flipped_tile_data = getattr(flipped_tile.tile, the_flipped_tile_type)
flipped_latitudes = from_shaped_array(the_flipped_tile_data.latitude)
flipped_data = from_shaped_array(the_flipped_tile_data.variable_data)
print(flipped_latitudes[1])
np.testing.assert_almost_equal(flipped_latitudes[1], 38.72608, decimal=5, err_msg='', verbose=True)
print(flipped_data[1,1])
np.testing.assert_almost_equal(flipped_data[1,1], 0.3116, decimal=4, err_msg='', verbose=True)
| import unittest
import xarray as xr
import numpy as np
from os import path
from nexusproto import DataTile_pb2 as nexusproto
from nexusproto.serialization import from_shaped_array, to_shaped_array
from granule_ingester.processors import ForceAscendingLatitude
from granule_ingester.processors.reading_processors.GridReadingProcessor import GridReadingProcessor
class TestForceAscendingLatitude(unittest.TestCase):
def read_tile(self):
reading_processor = GridReadingProcessor('B03', 'lat', 'lon', time='time')
granule_path = path.join(path.dirname(__file__), '../granules/HLS.S30.T11SPC.2020001.v1.4.hdf.nc')
input_tile = nexusproto.NexusTile()
input_tile.summary.granule = granule_path
dimensions_to_slices = {
'time': slice(0, 1),
'lat': slice(0, 30),
'lon': slice(0, 30)
}
with xr.open_dataset(granule_path) as ds:
return reading_processor._generate_tile(ds, dimensions_to_slices, input_tile)
def test_process(self):
processor = ForceAscendingLatitude()
tile = self.read_tile()
tile_type = tile.tile.WhichOneof("tile_type")
tile_data = getattr(tile.tile, tile_type)
latitudes = from_shaped_array(tile_data.latitude)
variable_data = from_shaped_array(tile_data.variable_data)
print(latitudes)
print(variable_data)
flipped_tile = processor.process(tile)
the_flipped_tile_type = flipped_tile.tile.WhichOneof("tile_type")
the_flipped_tile_data = getattr(flipped_tile.tile, the_flipped_tile_type)
flipped_latitudes = from_shaped_array(the_flipped_tile_data.latitude)
flipped_data = from_shaped_array(the_flipped_tile_data.variable_data)
print(flipped_latitudes[1])
np.testing.assert_almost_equal(flipped_latitudes[1], 38.72608, decimal=5, err_msg='', verbose=True)
print(flipped_data[1,1])
np.testing.assert_almost_equal(flipped_data[1,1], 0.3116, decimal=4, err_msg='', verbose=True) | none | 1 | 2.408847 | 2 |
|
tests/__init__.py | lisong996/akshare | 4,202 | 6631982 | <filename>tests/__init__.py
# -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2019/12/12 18:16
Desc:
"""
| <filename>tests/__init__.py
# -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2019/12/12 18:16
Desc:
"""
| en | 0.40024 | # -*- coding:utf-8 -*- #!/usr/bin/env python Date: 2019/12/12 18:16 Desc: | 1.040434 | 1 |
pytensor/ops/__init__.py | xinjli/pyml | 13 | 6631983 | from pytensor.ops.array_ops import *
from pytensor.ops.embedding_ops import *
from pytensor.ops.loss_ops import *
from pytensor.ops.lstm_ops import *
from pytensor.ops.math_ops import *
from pytensor.ops.rnn_ops import *
from pytensor.ops.rnn_util_ops import * | from pytensor.ops.array_ops import *
from pytensor.ops.embedding_ops import *
from pytensor.ops.loss_ops import *
from pytensor.ops.lstm_ops import *
from pytensor.ops.math_ops import *
from pytensor.ops.rnn_ops import *
from pytensor.ops.rnn_util_ops import * | none | 1 | 1.1935 | 1 |
|
sdk/python/pulumi_azure/privatelink/get_service.py | aangelisc/pulumi-azure | 0 | 6631984 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetServiceResult',
'AwaitableGetServiceResult',
'get_service',
]
@pulumi.output_type
class GetServiceResult:
"""
A collection of values returned by getService.
"""
def __init__(__self__, alias=None, auto_approval_subscription_ids=None, enable_proxy_protocol=None, id=None, load_balancer_frontend_ip_configuration_ids=None, location=None, name=None, nat_ip_configurations=None, resource_group_name=None, tags=None, visibility_subscription_ids=None):
if alias and not isinstance(alias, str):
raise TypeError("Expected argument 'alias' to be a str")
pulumi.set(__self__, "alias", alias)
if auto_approval_subscription_ids and not isinstance(auto_approval_subscription_ids, list):
raise TypeError("Expected argument 'auto_approval_subscription_ids' to be a list")
pulumi.set(__self__, "auto_approval_subscription_ids", auto_approval_subscription_ids)
if enable_proxy_protocol and not isinstance(enable_proxy_protocol, bool):
raise TypeError("Expected argument 'enable_proxy_protocol' to be a bool")
pulumi.set(__self__, "enable_proxy_protocol", enable_proxy_protocol)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if load_balancer_frontend_ip_configuration_ids and not isinstance(load_balancer_frontend_ip_configuration_ids, list):
raise TypeError("Expected argument 'load_balancer_frontend_ip_configuration_ids' to be a list")
pulumi.set(__self__, "load_balancer_frontend_ip_configuration_ids", load_balancer_frontend_ip_configuration_ids)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if nat_ip_configurations and not isinstance(nat_ip_configurations, list):
raise TypeError("Expected argument 'nat_ip_configurations' to be a list")
pulumi.set(__self__, "nat_ip_configurations", nat_ip_configurations)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if visibility_subscription_ids and not isinstance(visibility_subscription_ids, list):
raise TypeError("Expected argument 'visibility_subscription_ids' to be a list")
pulumi.set(__self__, "visibility_subscription_ids", visibility_subscription_ids)
@property
@pulumi.getter
def alias(self) -> str:
"""
        The alias is a globally unique name for your private link service which Azure generates for you. You can use this alias to request a connection to your private link service.
"""
return pulumi.get(self, "alias")
@property
@pulumi.getter(name="autoApprovalSubscriptionIds")
def auto_approval_subscription_ids(self) -> Sequence[str]:
"""
The list of subscription(s) globally unique identifiers that will be auto approved to use the private link service.
"""
return pulumi.get(self, "auto_approval_subscription_ids")
@property
@pulumi.getter(name="enableProxyProtocol")
def enable_proxy_protocol(self) -> bool:
"""
Does the Private Link Service support the Proxy Protocol?
"""
return pulumi.get(self, "enable_proxy_protocol")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="loadBalancerFrontendIpConfigurationIds")
def load_balancer_frontend_ip_configuration_ids(self) -> Sequence[str]:
"""
The list of Standard Load Balancer(SLB) resource IDs. The Private Link service is tied to the frontend IP address of a SLB. All traffic destined for the private link service will reach the frontend of the SLB. You can configure SLB rules to direct this traffic to appropriate backend pools where your applications are running.
"""
return pulumi.get(self, "load_balancer_frontend_ip_configuration_ids")
@property
@pulumi.getter
def location(self) -> str:
"""
The supported Azure location where the resource exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of private link service NAT IP configuration.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="natIpConfigurations")
def nat_ip_configurations(self) -> Sequence['outputs.GetServiceNatIpConfigurationResult']:
"""
The `nat_ip_configuration` block as defined below.
"""
return pulumi.get(self, "nat_ip_configurations")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="visibilitySubscriptionIds")
def visibility_subscription_ids(self) -> Sequence[str]:
"""
The list of subscription(s) globally unique identifiers(GUID) that will be able to see the private link service.
"""
return pulumi.get(self, "visibility_subscription_ids")
class AwaitableGetServiceResult(GetServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServiceResult(
alias=self.alias,
auto_approval_subscription_ids=self.auto_approval_subscription_ids,
enable_proxy_protocol=self.enable_proxy_protocol,
id=self.id,
load_balancer_frontend_ip_configuration_ids=self.load_balancer_frontend_ip_configuration_ids,
location=self.location,
name=self.name,
nat_ip_configurations=self.nat_ip_configurations,
resource_group_name=self.resource_group_name,
tags=self.tags,
visibility_subscription_ids=self.visibility_subscription_ids)
def get_service(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceResult:
"""
Use this data source to access information about an existing Private Link Service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.privatelink.get_service(name="myPrivateLinkService",
resource_group_name="PrivateLinkServiceRG")
pulumi.export("privateLinkServiceId", example.id)
```
:param str name: The name of the private link service.
:param str resource_group_name: The name of the resource group in which the private link service resides.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:privatelink/getService:getService', __args__, opts=opts, typ=GetServiceResult).value
return AwaitableGetServiceResult(
alias=__ret__.alias,
auto_approval_subscription_ids=__ret__.auto_approval_subscription_ids,
enable_proxy_protocol=__ret__.enable_proxy_protocol,
id=__ret__.id,
load_balancer_frontend_ip_configuration_ids=__ret__.load_balancer_frontend_ip_configuration_ids,
location=__ret__.location,
name=__ret__.name,
nat_ip_configurations=__ret__.nat_ip_configurations,
resource_group_name=__ret__.resource_group_name,
tags=__ret__.tags,
visibility_subscription_ids=__ret__.visibility_subscription_ids)
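# Hedged usage sketch (not tfgen-generated; the stack output names below are
# made up for illustration):
#
#   example = azure.privatelink.get_service(name="myPrivateLinkService",
#                                           resource_group_name="PrivateLinkServiceRG")
#   pulumi.export("privateLinkAlias", example.alias)
#   pulumi.export("natIpConfigurations", example.nat_ip_configurations)
#
# ``alias`` is the globally unique name that consumers can use when requesting
# a private endpoint connection to this service.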
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetServiceResult',
'AwaitableGetServiceResult',
'get_service',
]
@pulumi.output_type
class GetServiceResult:
"""
A collection of values returned by getService.
"""
def __init__(__self__, alias=None, auto_approval_subscription_ids=None, enable_proxy_protocol=None, id=None, load_balancer_frontend_ip_configuration_ids=None, location=None, name=None, nat_ip_configurations=None, resource_group_name=None, tags=None, visibility_subscription_ids=None):
if alias and not isinstance(alias, str):
raise TypeError("Expected argument 'alias' to be a str")
pulumi.set(__self__, "alias", alias)
if auto_approval_subscription_ids and not isinstance(auto_approval_subscription_ids, list):
raise TypeError("Expected argument 'auto_approval_subscription_ids' to be a list")
pulumi.set(__self__, "auto_approval_subscription_ids", auto_approval_subscription_ids)
if enable_proxy_protocol and not isinstance(enable_proxy_protocol, bool):
raise TypeError("Expected argument 'enable_proxy_protocol' to be a bool")
pulumi.set(__self__, "enable_proxy_protocol", enable_proxy_protocol)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if load_balancer_frontend_ip_configuration_ids and not isinstance(load_balancer_frontend_ip_configuration_ids, list):
raise TypeError("Expected argument 'load_balancer_frontend_ip_configuration_ids' to be a list")
pulumi.set(__self__, "load_balancer_frontend_ip_configuration_ids", load_balancer_frontend_ip_configuration_ids)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if nat_ip_configurations and not isinstance(nat_ip_configurations, list):
raise TypeError("Expected argument 'nat_ip_configurations' to be a list")
pulumi.set(__self__, "nat_ip_configurations", nat_ip_configurations)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if visibility_subscription_ids and not isinstance(visibility_subscription_ids, list):
raise TypeError("Expected argument 'visibility_subscription_ids' to be a list")
pulumi.set(__self__, "visibility_subscription_ids", visibility_subscription_ids)
@property
@pulumi.getter
def alias(self) -> str:
"""
        The alias is a globally unique name for your private link service which Azure generates for you. You can use this alias to request a connection to your private link service.
"""
return pulumi.get(self, "alias")
@property
@pulumi.getter(name="autoApprovalSubscriptionIds")
def auto_approval_subscription_ids(self) -> Sequence[str]:
"""
The list of subscription(s) globally unique identifiers that will be auto approved to use the private link service.
"""
return pulumi.get(self, "auto_approval_subscription_ids")
@property
@pulumi.getter(name="enableProxyProtocol")
def enable_proxy_protocol(self) -> bool:
"""
Does the Private Link Service support the Proxy Protocol?
"""
return pulumi.get(self, "enable_proxy_protocol")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="loadBalancerFrontendIpConfigurationIds")
def load_balancer_frontend_ip_configuration_ids(self) -> Sequence[str]:
"""
The list of Standard Load Balancer(SLB) resource IDs. The Private Link service is tied to the frontend IP address of a SLB. All traffic destined for the private link service will reach the frontend of the SLB. You can configure SLB rules to direct this traffic to appropriate backend pools where your applications are running.
"""
return pulumi.get(self, "load_balancer_frontend_ip_configuration_ids")
@property
@pulumi.getter
def location(self) -> str:
"""
The supported Azure location where the resource exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of private link service NAT IP configuration.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="natIpConfigurations")
def nat_ip_configurations(self) -> Sequence['outputs.GetServiceNatIpConfigurationResult']:
"""
The `nat_ip_configuration` block as defined below.
"""
return pulumi.get(self, "nat_ip_configurations")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="visibilitySubscriptionIds")
def visibility_subscription_ids(self) -> Sequence[str]:
"""
The list of subscription(s) globally unique identifiers(GUID) that will be able to see the private link service.
"""
return pulumi.get(self, "visibility_subscription_ids")
class AwaitableGetServiceResult(GetServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServiceResult(
alias=self.alias,
auto_approval_subscription_ids=self.auto_approval_subscription_ids,
enable_proxy_protocol=self.enable_proxy_protocol,
id=self.id,
load_balancer_frontend_ip_configuration_ids=self.load_balancer_frontend_ip_configuration_ids,
location=self.location,
name=self.name,
nat_ip_configurations=self.nat_ip_configurations,
resource_group_name=self.resource_group_name,
tags=self.tags,
visibility_subscription_ids=self.visibility_subscription_ids)
def get_service(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceResult:
"""
Use this data source to access information about an existing Private Link Service.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.privatelink.get_service(name="myPrivateLinkService",
resource_group_name="PrivateLinkServiceRG")
pulumi.export("privateLinkServiceId", example.id)
```
:param str name: The name of the private link service.
:param str resource_group_name: The name of the resource group in which the private link service resides.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:privatelink/getService:getService', __args__, opts=opts, typ=GetServiceResult).value
return AwaitableGetServiceResult(
alias=__ret__.alias,
auto_approval_subscription_ids=__ret__.auto_approval_subscription_ids,
enable_proxy_protocol=__ret__.enable_proxy_protocol,
id=__ret__.id,
load_balancer_frontend_ip_configuration_ids=__ret__.load_balancer_frontend_ip_configuration_ids,
location=__ret__.location,
name=__ret__.name,
nat_ip_configurations=__ret__.nat_ip_configurations,
resource_group_name=__ret__.resource_group_name,
tags=__ret__.tags,
visibility_subscription_ids=__ret__.visibility_subscription_ids)
| en | 0.803209 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** A collection of values returned by getService. The alias is a globally unique name for your private link service which Azure generates for you. Your can use this alias to request a connection to your private link service. The list of subscription(s) globally unique identifiers that will be auto approved to use the private link service. Does the Private Link Service support the Proxy Protocol? The provider-assigned unique ID for this managed resource. The list of Standard Load Balancer(SLB) resource IDs. The Private Link service is tied to the frontend IP address of a SLB. All traffic destined for the private link service will reach the frontend of the SLB. You can configure SLB rules to direct this traffic to appropriate backend pools where your applications are running. The supported Azure location where the resource exists. The name of private link service NAT IP configuration. The `nat_ip_configuration` block as defined below. A mapping of tags to assign to the resource. The list of subscription(s) globally unique identifiers(GUID) that will be able to see the private link service. # pylint: disable=using-constant-test Use this data source to access information about an existing Private Link Service. ## Example Usage ```python import pulumi import pulumi_azure as azure example = azure.privatelink.get_service(name="myPrivateLinkService", resource_group_name="PrivateLinkServiceRG") pulumi.export("privateLinkServiceId", example.id) ``` :param str name: The name of the private link service. :param str resource_group_name: The name of the resource group in which the private link service resides. | 1.626419 | 2 |
modules/api/functional_test/live_tests/internal/status_test.py | slandry90/vinyldns | 333 | 6631985 | <gh_stars>100-1000
import pytest
import time
from hamcrest import *
from vinyldns_python import VinylDNSClient
from vinyldns_context import VinylDNSTestContext
from utils import *
def test_get_status_success(shared_zone_test_context):
"""
Tests that the status endpoint returns the current processing status, color, key name and version
"""
client = shared_zone_test_context.ok_vinyldns_client
result = client.get_status()
assert_that([True, False], has_item(result['processingDisabled']))
assert_that(["green","blue"], has_item(result['color']))
assert_that(result['keyName'], not_none())
assert_that(result['version'], not_none())
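# For reference, get_status() is expected to return JSON of roughly this shape
# (placeholder values, not from a real deployment):
#   {"processingDisabled": false, "color": "green", "keyName": "<key-name>", "version": "<build-version>"}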
@pytest.mark.serial
@pytest.mark.skip_production
def test_toggle_processing(shared_zone_test_context):
"""
Test that updating a zone when processing is disabled does not happen
"""
client = shared_zone_test_context.ok_vinyldns_client
ok_zone = shared_zone_test_context.ok_zone
# disable processing
client.post_status(True)
status = client.get_status()
assert_that(status['processingDisabled'], is_(True))
client.post_status(False)
status = client.get_status()
assert_that(status['processingDisabled'], is_(False))
# Create changes to make sure we can process after the toggle
# attempt to perform an update
ok_zone['email'] = '<EMAIL>'
zone_change_result = client.update_zone(ok_zone, status=202)
    # attempt to create a record
new_rs = {
'zoneId': ok_zone['id'],
'name': 'test-status-disable-processing',
'type': 'A',
'ttl': 100,
'records': [
{
'address': '10.1.1.1'
},
{
'address': '10.2.2.2'
}
]
}
record_change = client.create_recordset(new_rs, status=202)
assert_that(record_change['status'], is_('Pending'))
# Make sure that the changes are processed
client.wait_until_zone_change_status_synced(zone_change_result)
client.wait_until_recordset_change_status(record_change, 'Complete')
recordset_length = len(client.list_recordsets_by_zone(ok_zone['id'])['recordSets'])
client.delete_recordset(ok_zone['id'], record_change['recordSet']['id'], status=202)
client.wait_until_recordset_deleted(ok_zone['id'], record_change['recordSet']['id'])
assert_that(client.list_recordsets_by_zone(ok_zone['id'])['recordSets'], has_length(recordset_length - 1))
| import pytest
import time
from hamcrest import *
from vinyldns_python import VinylDNSClient
from vinyldns_context import VinylDNSTestContext
from utils import *
def test_get_status_success(shared_zone_test_context):
"""
Tests that the status endpoint returns the current processing status, color, key name and version
"""
client = shared_zone_test_context.ok_vinyldns_client
result = client.get_status()
assert_that([True, False], has_item(result['processingDisabled']))
assert_that(["green","blue"], has_item(result['color']))
assert_that(result['keyName'], not_none())
assert_that(result['version'], not_none())
@pytest.mark.serial
@pytest.mark.skip_production
def test_toggle_processing(shared_zone_test_context):
"""
Test that updating a zone when processing is disabled does not happen
"""
client = shared_zone_test_context.ok_vinyldns_client
ok_zone = shared_zone_test_context.ok_zone
# disable processing
client.post_status(True)
status = client.get_status()
assert_that(status['processingDisabled'], is_(True))
client.post_status(False)
status = client.get_status()
assert_that(status['processingDisabled'], is_(False))
# Create changes to make sure we can process after the toggle
# attempt to perform an update
ok_zone['email'] = '<EMAIL>'
zone_change_result = client.update_zone(ok_zone, status=202)
    # attempt to create a record
new_rs = {
'zoneId': ok_zone['id'],
'name': 'test-status-disable-processing',
'type': 'A',
'ttl': 100,
'records': [
{
'address': '10.1.1.1'
},
{
'address': '10.2.2.2'
}
]
}
record_change = client.create_recordset(new_rs, status=202)
assert_that(record_change['status'], is_('Pending'))
# Make sure that the changes are processed
client.wait_until_zone_change_status_synced(zone_change_result)
client.wait_until_recordset_change_status(record_change, 'Complete')
recordset_length = len(client.list_recordsets_by_zone(ok_zone['id'])['recordSets'])
client.delete_recordset(ok_zone['id'], record_change['recordSet']['id'], status=202)
client.wait_until_recordset_deleted(ok_zone['id'], record_change['recordSet']['id'])
assert_that(client.list_recordsets_by_zone(ok_zone['id'])['recordSets'], has_length(recordset_length - 1)) | en | 0.875443 | Tests that the status endpoint returns the current processing status, color, key name and version Test that updating a zone when processing is disabled does not happen # disable processing # Create changes to make sure we can process after the toggle # attempt to perform an update # attempt to a create a record # Make sure that the changes are processed | 2.17536 | 2 |
democracy_club/apps/projects/urls.py | DemocracyClub/Website | 3 | 6631986 | from django.urls import path
from django.urls import reverse_lazy
from django.views.generic import RedirectView, TemplateView
from core.report_helpers.views import MarkdownFileView
app_name = "projects"
urlpatterns = [
path(
"",
TemplateView.as_view(template_name="projects/projects_home.html"),
name="home",
),
path(
"past/",
TemplateView.as_view(template_name="projects/past.html"),
name="past",
),
path(
"reports/",
TemplateView.as_view(template_name="projects/reports_home.html"),
name="reports",
),
path(
"reports/registers/",
MarkdownFileView.as_view(
markdown_file="apps/projects/templates/projects/report_odi_registers.md"
),
name="reports_registers",
),
path(
"polling-stations/",
TemplateView.as_view(
template_name="projects/polling-stations/home.html"
),
name="polling_one_pager",
),
path(
"polling-stations/technical/",
RedirectView.as_view(url=reverse_lazy("polling_data_upload")),
name="polling_technical_explainer",
),
path(
"projects/polling-stations/faqs/",
RedirectView.as_view(url=reverse_lazy("projects:polling_one_pager")),
name="polling_faqs",
),
path(
"polling-stations/embed/",
TemplateView.as_view(
template_name="projects/polling-stations/embed_code.html"
),
name="polling_embed_code",
),
path(
"polling-stations/upload/",
TemplateView.as_view(
template_name="projects/polling-stations/upload_data.html"
),
name="polling_data_upload",
),
path(
"polling-stations/techincal/",
RedirectView.as_view(
url=reverse_lazy("projects:polling_technical_explainer")
),
),
path(
"election-ids/reference/",
RedirectView.as_view(
url="https://elections.democracyclub.org.uk/reference_definition"
),
name="election_ids_reference",
),
path(
"election-ids/",
RedirectView.as_view(url=reverse_lazy("projects:every_election")),
name="election_ids",
),
path(
"every-election/",
TemplateView.as_view(template_name="projects/every_election.html"),
name="every_election",
),
path(
"election-ids/",
RedirectView.as_view(url=reverse_lazy("projects:every_election")),
name="election_ids",
),
path(
"who-can-i-vote-for/",
RedirectView.as_view(url=reverse_lazy("projects:candidates")),
name="whocanivotefor",
),
path(
"election-widget/",
TemplateView.as_view(template_name="projects/election-widget.html"),
name="election_widget",
),
path(
"candidates-wiki/",
TemplateView.as_view(template_name="projects/candidates.html"),
name="candidates",
),
path(
"data/",
TemplateView.as_view(template_name="projects/data.html"),
name="data",
),
path(
"election-leaflets/",
TemplateView.as_view(template_name="projects/electionleaflets.html"),
name="election_leaflets",
),
path(
"csv/",
TemplateView.as_view(template_name="projects/cvs.html"),
name="cvs",
),
path(
"representatives/",
TemplateView.as_view(template_name="projects/representatives.html"),
name="representatives",
),
]
| from django.urls import path
from django.urls import reverse_lazy
from django.views.generic import RedirectView, TemplateView
from core.report_helpers.views import MarkdownFileView
app_name = "projects"
urlpatterns = [
path(
"",
TemplateView.as_view(template_name="projects/projects_home.html"),
name="home",
),
path(
"past/",
TemplateView.as_view(template_name="projects/past.html"),
name="past",
),
path(
"reports/",
TemplateView.as_view(template_name="projects/reports_home.html"),
name="reports",
),
path(
"reports/registers/",
MarkdownFileView.as_view(
markdown_file="apps/projects/templates/projects/report_odi_registers.md"
),
name="reports_registers",
),
path(
"polling-stations/",
TemplateView.as_view(
template_name="projects/polling-stations/home.html"
),
name="polling_one_pager",
),
path(
"polling-stations/technical/",
RedirectView.as_view(url=reverse_lazy("polling_data_upload")),
name="polling_technical_explainer",
),
path(
"projects/polling-stations/faqs/",
RedirectView.as_view(url=reverse_lazy("projects:polling_one_pager")),
name="polling_faqs",
),
path(
"polling-stations/embed/",
TemplateView.as_view(
template_name="projects/polling-stations/embed_code.html"
),
name="polling_embed_code",
),
path(
"polling-stations/upload/",
TemplateView.as_view(
template_name="projects/polling-stations/upload_data.html"
),
name="polling_data_upload",
),
path(
"polling-stations/techincal/",
RedirectView.as_view(
url=reverse_lazy("projects:polling_technical_explainer")
),
),
path(
"election-ids/reference/",
RedirectView.as_view(
url="https://elections.democracyclub.org.uk/reference_definition"
),
name="election_ids_reference",
),
path(
"election-ids/",
RedirectView.as_view(url=reverse_lazy("projects:every_election")),
name="election_ids",
),
path(
"every-election/",
TemplateView.as_view(template_name="projects/every_election.html"),
name="every_election",
),
path(
"election-ids/",
RedirectView.as_view(url=reverse_lazy("projects:every_election")),
name="election_ids",
),
path(
"who-can-i-vote-for/",
RedirectView.as_view(url=reverse_lazy("projects:candidates")),
name="whocanivotefor",
),
path(
"election-widget/",
TemplateView.as_view(template_name="projects/election-widget.html"),
name="election_widget",
),
path(
"candidates-wiki/",
TemplateView.as_view(template_name="projects/candidates.html"),
name="candidates",
),
path(
"data/",
TemplateView.as_view(template_name="projects/data.html"),
name="data",
),
path(
"election-leaflets/",
TemplateView.as_view(template_name="projects/electionleaflets.html"),
name="election_leaflets",
),
path(
"csv/",
TemplateView.as_view(template_name="projects/cvs.html"),
name="cvs",
),
path(
"representatives/",
TemplateView.as_view(template_name="projects/representatives.html"),
name="representatives",
),
]
| none | 1 | 2.092001 | 2 |
|
controller/pbutton_rpi.py | huberthoegl/tsgrain | 1 | 6631987 | <filename>controller/pbutton_rpi.py
'''Pushbuttons on RPi with MCP23017 (I2C)
'''
import mcp23017
# Panelkeys
PB1 = 'PB1'
PB2 = 'PB2'
PB3 = 'PB3'
PB4 = 'PB4'
PB5 = 'PB5'
PB6 = 'PB6'
PB7 = 'PB7'
PBAutoOff = 'PBAutoOff'
MAN_KEYS = (PB1, PB2, PB3, PB4, PB5, PB6, PB7)
def key_to_index(key):
if key == PB1:
return 0
elif key == PB2:
return 1
elif key == PB3:
return 2
elif key == PB4:
return 3
elif key == PB5:
return 4
elif key == PB6:
return 5
elif key == PB7:
return 6
elif key == PBAutoOff:
return 7
_instance = None
def pb_press_handler(ir_nr, key_nr):
_instance._press(key_nr)
def pb_release_handler(ir_nr, key_nr):
_instance._release(key_nr)
class PButtons:
def __init__(self):
# only one instantiation is allowed!
global _instance
self.cblist = []
self.panelkey = None
self.pressed = False
_instance = self
mcp23017.add_press_handler(pb_press_handler)
mcp23017.add_release_handler(pb_release_handler)
def _press(self, key_nr):
'''The _press() method can be called multiple times, but it calls
the callback functions only at the first time. This is a good
behaviour when the low-level key fires repeatedly on a longer
press duration.
'''
if key_nr == 0:
self.panelkey = PB1
elif key_nr == 1:
self.panelkey = PB2
elif key_nr == 2:
self.panelkey = PB3
elif key_nr == 3:
self.panelkey = PB4
elif key_nr == 4:
self.panelkey = PB5
elif key_nr == 5:
self.panelkey = PB6
elif key_nr == 6:
self.panelkey = PB7
elif key_nr == 7:
self.panelkey = PBAutoOff
else:
return
if not self.pressed:
self.pressed = True
for f in self.cblist:
f(self.panelkey)
def _release(self, key_nr):
self.pressed = False
self.panelkey = None
def subscribe(self, cb):
# XXX future extension: it should be possible to subscribe only
# a single button, e.g. PBAutoOff
self.cblist.append(cb)
def unsubscribe(self, cb):
self.cblist.remove(cb)
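    # Hedged sketch for the XXX note above: until per-button subscription is
    # implemented, a caller can filter inside its own callback, e.g.
    #
    #   def on_auto_off(panelkey):
    #       if panelkey == PBAutoOff:
    #           pass  # react only to the Auto-Off key
    #
    #   buttons.subscribe(on_auto_off)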
if __name__ == "__main__":
import time
def hello(panelkey):
print("callback hello:", panelkey)
def bunny(panelkey):
print("callback bunny:", panelkey)
b = PButtons()
b.subscribe(hello)
b.subscribe(bunny)
n = 0
while n < 10:
time.sleep(1)
n = n + 1
| <filename>controller/pbutton_rpi.py
'''Pushbuttons on RPi with MCP23017 (I2C)
'''
import mcp23017
# Panelkeys
PB1 = 'PB1'
PB2 = 'PB2'
PB3 = 'PB3'
PB4 = 'PB4'
PB5 = 'PB5'
PB6 = 'PB6'
PB7 = 'PB7'
PBAutoOff = 'PBAutoOff'
MAN_KEYS = (PB1, PB2, PB3, PB4, PB5, PB6, PB7)
def key_to_index(key):
if key == PB1:
return 0
elif key == PB2:
return 1
elif key == PB3:
return 2
elif key == PB4:
return 3
elif key == PB5:
return 4
elif key == PB6:
return 5
elif key == PB7:
return 6
elif key == PBAutoOff:
return 7
_instance = None
def pb_press_handler(ir_nr, key_nr):
_instance._press(key_nr)
def pb_release_handler(ir_nr, key_nr):
_instance._release(key_nr)
class PButtons:
def __init__(self):
# only one instantiation is allowed!
global _instance
self.cblist = []
self.panelkey = None
self.pressed = False
_instance = self
mcp23017.add_press_handler(pb_press_handler)
mcp23017.add_release_handler(pb_release_handler)
def _press(self, key_nr):
'''The _press() method can be called multiple times, but it calls
the callback functions only at the first time. This is a good
behaviour when the low-level key fires repeatedly on a longer
press duration.
'''
if key_nr == 0:
self.panelkey = PB1
elif key_nr == 1:
self.panelkey = PB2
elif key_nr == 2:
self.panelkey = PB3
elif key_nr == 3:
self.panelkey = PB4
elif key_nr == 4:
self.panelkey = PB5
elif key_nr == 5:
self.panelkey = PB6
elif key_nr == 6:
self.panelkey = PB7
elif key_nr == 7:
self.panelkey = PBAutoOff
else:
return
if not self.pressed:
self.pressed = True
for f in self.cblist:
f(self.panelkey)
def _release(self, key_nr):
self.pressed = False
self.panelkey = None
def subscribe(self, cb):
# XXX future extension: it should be possible to subscribe only
# a single button, e.g. PBAutoOff
self.cblist.append(cb)
def unsubscribe(self, cb):
self.cblist.remove(cb)
if __name__ == "__main__":
import time
def hello(panelkey):
print("callback hello:", panelkey)
def bunny(panelkey):
print("callback bunny:", panelkey)
b = PButtons()
b.subscribe(hello)
b.subscribe(bunny)
n = 0
while n < 10:
time.sleep(1)
n = n + 1
| en | 0.852322 | Pushbuttons on RPi with MCP23017 (I2C) # Panelkeys # only one instantiation is allowed! The _press() method can be called multiple times, but it calls the callback functions only at the first time. This is a good behaviour when the low-level key fires repeatedly on a longer press duration. # XXX future extension: it should be possible to subscribe only # a single button, e.g. PBAutoOff | 2.915039 | 3 |
pycharm/module/changefile/__init__.py | jinbao-x/python | 0 | 6631988 | # from . import readfile # import the module here in __init__ first to initialize it; then a file outside the package can call it via `from package import *`
# from . import writefile
# __init__
# 1. Declares that the folder is a package
# 2. Can perform initialization work
# 3. Can declare __all__ to control `from package import *`; only the names listed in __all__ get imported
# Of course, this __init__ file can, just like any ordinary module file, use __all__ = ['readfile'] to declare which of the module's names are available
# This handles the star-import style `from changefile import *` in other module files, so that a star import does not pull in every method of the modules specified in __init__
| # from . import readfile # import the module here in __init__ first to initialize it; then a file outside the package can call it via `from package import *`
# from . import writefile
# __init__
# 1. Declares that the folder is a package
# 2. Can perform initialization work
# 3. Can declare __all__ to control `from package import *`; only the names listed in __all__ get imported
# Of course, this __init__ file can, just like any ordinary module file, use __all__ = ['readfile'] to declare which of the module's names are available
# This handles the star-import style `from changefile import *` in other module files, so that a star import does not pull in every method of the modules specified in __init__
| zh | 0.968314 | # from . import readfile # 先在init里导入一下模块,先初始化一下,然后,包之外的另外一个文件就可以使用from 包 imoprt *来调用了 # from . import writefile # __init__ # 1、声明文件夹是个包 # 2、可以做初始化操作 # 3、可以声明__all__影响 from 包 import *导入,在__all__里写的才会导入 # 当然这个init文件也可以想其他普通模块文件内一样,可以使用__all__ = ['readfile']这样的方式来确认模块里的哪些方法可以使用 # 这种是为了解决其他模块文件里用from changefile import *这种星花的导入方式,避免其他模块文件里用星花的方式引用了init里指定的模块的所有的方法 | 2.066516 | 2 |
tests/test_pipelinetree.py | vsbogd/language-learning | 21 | 6631989 | <gh_stars>10-100
import unittest
from src.pipeline.pipelinetree import PipelineTreeNode2, build_tree, prepare_parameters
config = [
{
'component': 'grammar-learner',
'common-parameters': {
'space': 'connectors-DRK-connectors',
'input_parses': '~/data/parses/POC-Turtle/LG/parses',
'output_categories': '%LEAF',
'output_grammar': '%LEAF',
'output_statistics': '',
'cluster_criteria': 'silhouette',
'cluster_level': 1,
'tmpath': '/var/tmp/',
'verbose': 'min',
'categories_generalization': 'off',
'context': 1,
'word_space': 'vectors',
'clustering': ['kmeans', 'kmeans++', 18],
'cluster_range': [2, 50, 9],
'grammar_rules': 2,
'temp_dir': ''
},
'specific-parameters': [
{'!space': 'connectors-DRK-connectors', 'wtf': 'MST-fixed-manually', 'left_wall': 'LW', 'period': True},
{'!space': 'connectors-DRK-connectors', 'wtf': 'MST-fixed-manually', 'left_wall': '', 'period': False}
]
},
{
'component': 'grammar-tester',
'common-parameters': {
'input_grammar': '%PREV/',
'corpus_path': '%ROOT/aging3.txt',
'output_path': '%PREV/',
'linkage_limit': '1000',
'rm_grammar_dir': True,
'use_link_parser': True,
'ull_input': True,
'ignore_left_wall': True,
'ignore_period': True,
'calc_parse_quality': True
},
'specific-parameters': [
{'parse_format': 'ull'},
{'parse_format': 'diagram', "follow_exec_path": False},
{'parse_format': 'postscript', "follow_exec_path": False},
{'parse_format': 'constituent', "follow_exec_path": False}
]
}
]
# {
# 'component': 'dash-board',
# 'common-parameters': {
# 'input_grammar': '%PREV/',
# 'corpus_path': '%ROOT/aging3.txt',
# 'output_path': '%PREV/',
# 'linkage_limit': '1000',
# 'rm_grammar_dir': True,
# 'use_link_parser': True,
# 'ull_input': True,
# 'ignore_left_wall': True,
# 'ignore_period': True,
# 'calc_parse_quality': True
# },
# 'specific-parameters': [
# {'row': '1', 'col': '8', 'value': '%PREV.PA'},
# {'row': '2', 'col': '9', 'value': '%PREV.PQ'},
# ]
# }
class PipelineTreeTestCase(unittest.TestCase):
@unittest.skip
def test_init(self):
root = PipelineTreeNode2("grammar-learner", {"space": "cDRKc"}, {"input_parses": "~/data/parses/poc-turtle"})
self.assertEqual("grammar-learner", root._component_name)
self.assertEqual({"space": "cDRKc"}, root._specific_parameters)
self.assertEqual({"input_parses": "~/data/parses/poc-turtle"}, root._common_parameters)
self.assertEqual({}, root._environment)
@unittest.skip
def test_add_siblings(self):
root = PipelineTreeNode2("grammar-learner", {"space": "cDRKc"}, {"input_parses": "~/data/parses/poc-turtle"})
root.add_sibling(PipelineTreeNode2("grammar-tester", {"A": "a"}))
root.add_sibling(PipelineTreeNode2("grammar-tester", {"B": "b"}))
root.add_sibling(PipelineTreeNode2("grammar-tester", {"C": "c"}))
self.assertEqual(3, len(root._siblings))
@unittest.skip
def test_traverse(self):
root = PipelineTreeNode2(0, "grammar-learner", {"space": "cDRKc"}, {"input_parses": "~/data/parses/poc-turtle"})
PipelineTreeNode2(1, "grammar-tester", {"A": "a"}, None, None, root)
PipelineTreeNode2(1, "grammar-tester", {"B": "b"}, None, None, root)
node = PipelineTreeNode2(1, "grammar-tester", {"C": "c"}, None, None, root)
PipelineTreeNode2(2, "grammar-tester", {"CC": "cc"}, None, None, node)
# root.traverse(lambda n: print(n._component_name), root.root)
PipelineTreeNode2.traverse_all(lambda n: print("\t"*n.seq_no+n._component_name+": "+str(n._specific_parameters)))
self.assertEqual(True, True)
def test_prepare_parameters(self):
p, e = prepare_parameters(None, {"path_to_somewhere": "%ROOT/abc", "another_path": "%LEAF"},
{"path_to_elsewhere": "%ROOT/efg", "X": "xx", "n": 1},
{"ROOT": "~/data/2018-09-01"}, "%", True)
# print(p, e, sep="\n")
self.assertEqual(p["path_to_somewhere"], "~/data/2018-09-01/abc")
self.assertEqual(p["path_to_elsewhere"], "~/data/2018-09-01/efg")
self.assertEqual(e["LEAF"], "~/data/2018-09-01/_X:xx_n:1")
# @unittest.skip
def test_build_tree(self):
globals = {"ROOT": "~/data/2018-09-01"}
roots = build_tree(config, globals)
# PipelineTreeNode2.traverse_all(lambda n: print("\t"*n.seq_no+n._component_name))
PipelineTreeNode2.traverse_all(lambda n: print("\t"*n.seq_no+n._component_name+": "+str(n._parameters)))
print(globals)
self.assertEqual(True, True)
@unittest.skip
def test_traverse_all(self):
tp = [
PipelineTreeNode2(0, "text-parser", {"dummy": "1"}),
PipelineTreeNode2(0, "text-parser", {"dummy": "2"}),
PipelineTreeNode2(0, "text-parser", {"dummy": "3"})
]
for parent in tp:
parent.add_sibling(PipelineTreeNode2(1, "grammar-learner", {"dummy": "a"}, None, None, parent))
parent.add_sibling(PipelineTreeNode2(1, "grammar-learner", {"dummy": "b"}, None, None, parent))
parent.add_sibling(PipelineTreeNode2(1, "grammar-learner", {"dummy": "c"}, None, None, parent))
print(PipelineTreeNode2.roots)
PipelineTreeNode2.traverse_all(lambda p, e: print(p))
self.assertEqual(True, True)
| import unittest
from src.pipeline.pipelinetree import PipelineTreeNode2, build_tree, prepare_parameters
config = [
{
'component': 'grammar-learner',
'common-parameters': {
'space': 'connectors-DRK-connectors',
'input_parses': '~/data/parses/POC-Turtle/LG/parses',
'output_categories': '%LEAF',
'output_grammar': '%LEAF',
'output_statistics': '',
'cluster_criteria': 'silhouette',
'cluster_level': 1,
'tmpath': '/var/tmp/',
'verbose': 'min',
'categories_generalization': 'off',
'context': 1,
'word_space': 'vectors',
'clustering': ['kmeans', 'kmeans++', 18],
'cluster_range': [2, 50, 9],
'grammar_rules': 2,
'temp_dir': ''
},
'specific-parameters': [
{'!space': 'connectors-DRK-connectors', 'wtf': 'MST-fixed-manually', 'left_wall': 'LW', 'period': True},
{'!space': 'connectors-DRK-connectors', 'wtf': 'MST-fixed-manually', 'left_wall': '', 'period': False}
]
},
{
'component': 'grammar-tester',
'common-parameters': {
'input_grammar': '%PREV/',
'corpus_path': '%ROOT/aging3.txt',
'output_path': '%PREV/',
'linkage_limit': '1000',
'rm_grammar_dir': True,
'use_link_parser': True,
'ull_input': True,
'ignore_left_wall': True,
'ignore_period': True,
'calc_parse_quality': True
},
'specific-parameters': [
{'parse_format': 'ull'},
{'parse_format': 'diagram', "follow_exec_path": False},
{'parse_format': 'postscript', "follow_exec_path": False},
{'parse_format': 'constituent', "follow_exec_path": False}
]
}
]
# {
# 'component': 'dash-board',
# 'common-parameters': {
# 'input_grammar': '%PREV/',
# 'corpus_path': '%ROOT/aging3.txt',
# 'output_path': '%PREV/',
# 'linkage_limit': '1000',
# 'rm_grammar_dir': True,
# 'use_link_parser': True,
# 'ull_input': True,
# 'ignore_left_wall': True,
# 'ignore_period': True,
# 'calc_parse_quality': True
# },
# 'specific-parameters': [
# {'row': '1', 'col': '8', 'value': '%PREV.PA'},
# {'row': '2', 'col': '9', 'value': '%PREV.PQ'},
# ]
# }
class PipelineTreeTestCase(unittest.TestCase):
@unittest.skip
def test_init(self):
root = PipelineTreeNode2("grammar-learner", {"space": "cDRKc"}, {"input_parses": "~/data/parses/poc-turtle"})
self.assertEqual("grammar-learner", root._component_name)
self.assertEqual({"space": "cDRKc"}, root._specific_parameters)
self.assertEqual({"input_parses": "~/data/parses/poc-turtle"}, root._common_parameters)
self.assertEqual({}, root._environment)
@unittest.skip
def test_add_siblings(self):
root = PipelineTreeNode2("grammar-learner", {"space": "cDRKc"}, {"input_parses": "~/data/parses/poc-turtle"})
root.add_sibling(PipelineTreeNode2("grammar-tester", {"A": "a"}))
root.add_sibling(PipelineTreeNode2("grammar-tester", {"B": "b"}))
root.add_sibling(PipelineTreeNode2("grammar-tester", {"C": "c"}))
self.assertEqual(3, len(root._siblings))
@unittest.skip
def test_traverse(self):
root = PipelineTreeNode2(0, "grammar-learner", {"space": "cDRKc"}, {"input_parses": "~/data/parses/poc-turtle"})
PipelineTreeNode2(1, "grammar-tester", {"A": "a"}, None, None, root)
PipelineTreeNode2(1, "grammar-tester", {"B": "b"}, None, None, root)
node = PipelineTreeNode2(1, "grammar-tester", {"C": "c"}, None, None, root)
PipelineTreeNode2(2, "grammar-tester", {"CC": "cc"}, None, None, node)
# root.traverse(lambda n: print(n._component_name), root.root)
PipelineTreeNode2.traverse_all(lambda n: print("\t"*n.seq_no+n._component_name+": "+str(n._specific_parameters)))
self.assertEqual(True, True)
def test_prepare_parameters(self):
p, e = prepare_parameters(None, {"path_to_somewhere": "%ROOT/abc", "another_path": "%LEAF"},
{"path_to_elsewhere": "%ROOT/efg", "X": "xx", "n": 1},
{"ROOT": "~/data/2018-09-01"}, "%", True)
# print(p, e, sep="\n")
self.assertEqual(p["path_to_somewhere"], "~/data/2018-09-01/abc")
self.assertEqual(p["path_to_elsewhere"], "~/data/2018-09-01/efg")
self.assertEqual(e["LEAF"], "~/data/2018-09-01/_X:xx_n:1")
# @unittest.skip
def test_build_tree(self):
globals = {"ROOT": "~/data/2018-09-01"}
roots = build_tree(config, globals)
# PipelineTreeNode2.traverse_all(lambda n: print("\t"*n.seq_no+n._component_name))
PipelineTreeNode2.traverse_all(lambda n: print("\t"*n.seq_no+n._component_name+": "+str(n._parameters)))
print(globals)
self.assertEqual(True, True)
@unittest.skip
def test_traverse_all(self):
tp = [
PipelineTreeNode2(0, "text-parser", {"dummy": "1"}),
PipelineTreeNode2(0, "text-parser", {"dummy": "2"}),
PipelineTreeNode2(0, "text-parser", {"dummy": "3"})
]
for parent in tp:
parent.add_sibling(PipelineTreeNode2(1, "grammar-learner", {"dummy": "a"}, None, None, parent))
parent.add_sibling(PipelineTreeNode2(1, "grammar-learner", {"dummy": "b"}, None, None, parent))
parent.add_sibling(PipelineTreeNode2(1, "grammar-learner", {"dummy": "c"}, None, None, parent))
print(PipelineTreeNode2.roots)
PipelineTreeNode2.traverse_all(lambda p, e: print(p))
self.assertEqual(True, True) | en | 0.043554 | # { # 'component': 'dash-board', # 'common-parameters': { # 'input_grammar': '%PREV/', # 'corpus_path': '%ROOT/aging3.txt', # 'output_path': '%PREV/', # 'linkage_limit': '1000', # 'rm_grammar_dir': True, # 'use_link_parser': True, # 'ull_input': True, # 'ignore_left_wall': True, # 'ignore_period': True, # 'calc_parse_quality': True # }, # 'specific-parameters': [ # {'row': '1', 'col': '8', 'value': '%PREV.PA'}, # {'row': '2', 'col': '9', 'value': '%PREV.PQ'}, # ] # } # root.traverse(lambda n: print(n._component_name), root.root) # print(p, e, sep="\n") # @unittest.skip # PipelineTreeNode2.traverse_all(lambda n: print("\t"*n.seq_no+n._component_name)) | 1.965006 | 2 |
2016/21_scrambled_letters_and_hash_test.py | pchudzik/adventofcode | 0 | 6631990 | import importlib
import pytest
module = importlib.import_module("21_scrambled_letters_and_hash")
parse_cmd = module.parse_cmd
password_generator = module.password_generator
unscrabble_password = module.unscrabble_password
def test_password_generator():
cmds = [
"swap position 4 with position 0",
"swap letter d with letter b",
"reverse positions 0 through 4",
"rotate left 1 step",
"move position 1 to position 4",
"move position 3 to position 0",
"rotate based on position of letter b",
"rotate based on position of letter d"
]
assert password_generator(cmds, "abcde") == "<PASSWORD>"
def test_password_unscrabble():
cmds = [
"swap position 4 with position 0",
"swap letter d with letter b",
"reverse positions 0 through 4",
"rotate left 1 step",
"move position 1 to position 4",
"move position 3 to position 0",
"rotate based on position of letter b",
"rotate based on position of letter d"
]
assert unscrabble_password(cmds, "<PASSWORD>") == "<PASSWORD>"
@pytest.mark.parametrize(
"cmd, input, expected", [
("swap position 4 with position 0", "abcde", "ebcda"),
("swap position 0 with position 4", "abcde", "ebcda"),
("swap position 0 with position 1", "abcde", "bacde"),
("swap position 0 with position 1", "abcde", "bacde")])
def test_swap_position(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("swap position 4 with position 0", "ebcda", "abcde"),
("swap position 0 with position 4", "ebcda", "abcde"),
("swap position 0 with position 1", "bacde", "abcde"),
("swap position 0 with position 1", "bacde", "abcde")])
def test_swap_position_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("swap letter d with letter b", "ebcda", "edcba"),
("swap letter a with letter b", "abcde", "bacde")])
def test_swap_letter(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("swap letter d with letter b", "edcba", "ebcda"),
("swap letter a with letter b", "bacde", "abcde")])
def test_swap_letter_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("rotate left 1 step", "abcde", "bcdea"),
("rotate left 3 steps", "abcde", "deabc"),
("rotate right 1 step", "abcde", "eabcd"),
("rotate right 2 steps", "abcde", "deabc")])
def test_rotate(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("rotate left 1 step", "bcdea", "abcde"),
("rotate left 3 steps", "deabc", "abcde"),
("rotate right 1 step", "eabcd", "abcde"),
("rotate right 2 steps", "deabc", "abcde")])
def test_rotate_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("rotate based on position of letter b", "abdec", "ecabd"),
("rotate based on position of letter d", "ecabd", "decab"),
("rotate based on position of letter g", "ghefbcad", "dghefbca")
])
def test_rotate_based_on_position(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("rotate based on position of letter X", "7X123456", "X1234567"),
("rotate based on position of letter X", "670X2345", "0X234567"),
("rotate based on position of letter X", "56701X34", "01X34567"),
("rotate based on position of letter X", "4567012X", "012X4567"),
("rotate based on position of letter X", "23X56701", "0123X567"),
("rotate based on position of letter X", "1234X670", "01234X67"),
("rotate based on position of letter X", "012345X7", "012345X7"),
("rotate based on position of letter X", "X0123456", "0123456X")])
def test_rotate_based_on_position_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("reverse positions 0 through 4", "edcba", "abcde"),
("reverse positions 0 through 1", "abcd", "bacd")])
def test_reverse_positions(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("reverse positions 0 through 4", "abcde", "edcba"),
("reverse positions 0 through 1", "bacd", "abcd")])
def test_reverse_positions_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("move position 1 to position 4", "bcdea", "bdeac"),
("move position 3 to position 0", "bdeac", "abdec")
])
def test_move_position(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("move position 1 to position 4", "bdeac", "bcdea"),
("move position 3 to position 0", "abdec", "bdeac")])
def test_move_position_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
| import importlib
import pytest
module = importlib.import_module("21_scrambled_letters_and_hash")
parse_cmd = module.parse_cmd
password_generator = module.password_generator
unscrabble_password = module.unscrabble_password
def test_password_generator():
cmds = [
"swap position 4 with position 0",
"swap letter d with letter b",
"reverse positions 0 through 4",
"rotate left 1 step",
"move position 1 to position 4",
"move position 3 to position 0",
"rotate based on position of letter b",
"rotate based on position of letter d"
]
assert password_generator(cmds, "abcde") == "<PASSWORD>"
def test_password_unscrabble():
cmds = [
"swap position 4 with position 0",
"swap letter d with letter b",
"reverse positions 0 through 4",
"rotate left 1 step",
"move position 1 to position 4",
"move position 3 to position 0",
"rotate based on position of letter b",
"rotate based on position of letter d"
]
assert unscrabble_password(cmds, "<PASSWORD>") == "<PASSWORD>"
@pytest.mark.parametrize(
"cmd, input, expected", [
("swap position 4 with position 0", "abcde", "ebcda"),
("swap position 0 with position 4", "abcde", "ebcda"),
("swap position 0 with position 1", "abcde", "bacde"),
("swap position 0 with position 1", "abcde", "bacde")])
def test_swap_position(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("swap position 4 with position 0", "ebcda", "abcde"),
("swap position 0 with position 4", "ebcda", "abcde"),
("swap position 0 with position 1", "bacde", "abcde"),
("swap position 0 with position 1", "bacde", "abcde")])
def test_swap_position_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("swap letter d with letter b", "ebcda", "edcba"),
("swap letter a with letter b", "abcde", "bacde")])
def test_swap_letter(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("swap letter d with letter b", "edcba", "ebcda"),
("swap letter a with letter b", "bacde", "abcde")])
def test_swap_letter_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("rotate left 1 step", "abcde", "bcdea"),
("rotate left 3 steps", "abcde", "deabc"),
("rotate right 1 step", "abcde", "eabcd"),
("rotate right 2 steps", "abcde", "deabc")])
def test_rotate(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("rotate left 1 step", "bcdea", "abcde"),
("rotate left 3 steps", "deabc", "abcde"),
("rotate right 1 step", "eabcd", "abcde"),
("rotate right 2 steps", "deabc", "abcde")])
def test_rotate_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("rotate based on position of letter b", "abdec", "ecabd"),
("rotate based on position of letter d", "ecabd", "decab"),
("rotate based on position of letter g", "ghefbcad", "dghefbca")
])
def test_rotate_based_on_position(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("rotate based on position of letter X", "7X123456", "X1234567"),
("rotate based on position of letter X", "670X2345", "0X234567"),
("rotate based on position of letter X", "56701X34", "01X34567"),
("rotate based on position of letter X", "4567012X", "012X4567"),
("rotate based on position of letter X", "23X56701", "0123X567"),
("rotate based on position of letter X", "1234X670", "01234X67"),
("rotate based on position of letter X", "012345X7", "012345X7"),
("rotate based on position of letter X", "X0123456", "0123456X")])
def test_rotate_based_on_position_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("reverse positions 0 through 4", "edcba", "abcde"),
("reverse positions 0 through 1", "abcd", "bacd")])
def test_reverse_positions(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("reverse positions 0 through 4", "abcde", "edcba"),
("reverse positions 0 through 1", "bacd", "abcd")])
def test_reverse_positions_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("move position 1 to position 4", "bcdea", "bdeac"),
("move position 3 to position 0", "bdeac", "abdec")
])
def test_move_position(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd(input) == expected
@pytest.mark.parametrize(
"cmd, input, expected", [
("move position 1 to position 4", "bdeac", "bcdea"),
("move position 3 to position 0", "abdec", "bdeac")])
def test_move_position_undo(cmd, input, expected):
cmd = parse_cmd(cmd)
assert cmd.undo(input) == expected
| none | 1 | 2.729848 | 3 |
|
url_manager/views.py | haandol/url_shortener | 1 | 6631991 | <filename>url_manager/views.py
#coding: utf-8
import re
import urllib
from models import WrongURL, LongURL, ShortURL
BASE62 = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
nBASE62 = len(BASE62)
PATTERN = r'http://[^/^\s]+[.]\w{2,4}\S/?'
def get_short_id(url):
    u'''Takes a long URL and returns the id of the short URL'''
if not url:
return WrongURL(0)
elif not re.match(PATTERN, url):
return WrongURL(1)
longUrl, isnew = LongURL.objects.get_or_create(url=url)
if isnew:
id = encode_basen(longUrl.id)
try:
return ShortURL.objects.create(id=id, longUrl=longUrl).id
except:
raise WrongURL(2)
else:
try:
return ShortURL.objects.get(longUrl=longUrl).id
except ShortURL.DoesNotExist:
raise WrongURL(3)
def get_long_url(id):
    u'''Takes an id and returns the URL-encoded long URL'''
try:
return urllib.quote(ShortURL.objects.get(id=id).longUrl.url.encode('utf-8'), safe=':/=\?&')
except:
return ''
def encode_basen(id, n=nBASE62):
    u'''Takes an id and encodes it in base n'''
base = id
rests = []
while base!=0:
quotient, rest = divmod(base, n)
rests.append(BASE62[rest])
base = quotient
return ''.join(rests)
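# Illustrative counterpart (not in the original module): decode a short id produced by
# encode_basen back into the integer primary key. Assumes the BASE62 alphabet above
# contains no duplicate characters.
def decode_basen(encoded, n=nBASE62):
    u'''Takes a base-n string and returns the original integer id'''
    value = 0
    # encode_basen emits the least-significant digit first, so walk the string in reverse
    for ch in reversed(encoded):
        value = value * n + BASE62.index(ch)
    return value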
| <filename>url_manager/views.py
#coding: utf-8
import re
import urllib
from models import WrongURL, LongURL, ShortURL
BASE62 = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
nBASE62 = len(BASE62)
PATTERN = r'http://[^/^\s]+[.]\w{2,4}\S/?'
def get_short_id(url):
    u'''Takes a long URL and returns the id of the short URL'''
if not url:
return WrongURL(0)
elif not re.match(PATTERN, url):
return WrongURL(1)
longUrl, isnew = LongURL.objects.get_or_create(url=url)
if isnew:
id = encode_basen(longUrl.id)
try:
return ShortURL.objects.create(id=id, longUrl=longUrl).id
except:
raise WrongURL(2)
else:
try:
return ShortURL.objects.get(longUrl=longUrl).id
except ShortURL.DoesNotExist:
raise WrongURL(3)
def get_long_url(id):
    u'''Takes an id and returns the URL-encoded long URL'''
try:
return urllib.quote(ShortURL.objects.get(id=id).longUrl.url.encode('utf-8'), safe=':/=\?&')
except:
return ''
def encode_basen(id, n=nBASE62):
    u'''Takes an id and encodes it in base n'''
base = id
rests = []
while base!=0:
quotient, rest = divmod(base, n)
rests.append(BASE62[rest])
base = quotient
return ''.join(rests)
| ko | 0.999615 | #coding: utf-8 Takes a long URL and returns the id of the short URL Takes an id and returns the URL-encoded long URL Takes an id and encodes it in base n | 2.28553 | 2
FAReinforcement/CartPoleDemo.py | jamartinh/ReinforcementLearning | 0 | 6631992 | <reponame>jamartinh/ReinforcementLearning
from rltools.FARLBasic import *
#from Environments.CartPoleEnvironment import CartPoleEnvironment
#from Environments.CartPoleEnvironmentG import CartPoleEnvironment
from Environments.CartPoleEnvironmentGN import CartPoleEnvironment
#from kNNQ import kNNQ
#from rltools.kNNQC import kNNQC
from rltools.ExaSCIPY import Exa as kNNQC
#from NeuroQ import NeuroQ
#from RNeuroQ import RNeuroQ
#from SNeuroQ import SNeuroQ
#from SOMQ import SOMQ
from rltools.ActionSelection import *
import pickle
#from pylab import *
import time
def CartPoleExperiment(Episodes=100,nk=0):
print()
print('===================================================================')
print(' INIT EXPERIMENT','k='+str(nk+1))
# results of the experiment
x = list(range(1,Episodes+1))
y =[]
yr =[]
#Build the Environment
Env = CartPoleEnvironment()
# Build a function approximator
#Q = kNNQ(nactions=Env.nactions,input_ranges=Env.input_ranges,nelemns=[2,3,10,2],npoints=False,k=1,alpha=0.25)
#Q = kNNQ(nactions=Env.nactions,input_ranges=Env.input_ranges,nelemns=[2+7,3+7,10+3,2+7],npoints=False,k=nk+1,alpha=0.3,lm=0.95)
#Q = NeuroQ(Env.nactions, Env.input_ranges, 20, Env.reward_ranges,Env.deep_in,Env.deep_out,alpha=0.3)
#Q = SNeuroQ(Env.nactions, Env.input_ranges, 6, Env.output_ranges,alpha=0.2)
#Experiments
    #Q = kNNQC(Env.input_ranges,[2+2,3+2,10+1,2+1],Env.output_ranges,[11],nk+1,0.3,0.90) #excellent
    #BEST
    Q = kNNQC(Env.input_ranges,[2+2,3+2,10+1,2+1],Env.output_ranges,[11],4,0.6,0.90,0.0) #excellent
    #Q = kNNQC(Env.input_ranges,[2+4,3+4,10+3,2+4],Env.output_ranges,[11],9,0.3,0.90) #not bad
#Q = kNNQC(Env.input_ranges,[10,10,10,10],Env.output_ranges,[11],32,2.0,0.90) #good
# Get the Action Selector
As = e_greedy_selection(epsilon=0.0)
#As = e_softmax_selection(epsilon=0.1)
#As = None
#Build the Agent
CP = FARLBase(Q,Env,As,gamma=1.0)
CP.Environment.graphs=True
for i in range(Episodes):
#result = CP.SARSAEpisode(1000)
#result = CP.NeuroQEpisode(1000)
t1=time.clock()
result = CP.kNNCQEpisode(1000)
t2=time.clock()
#result = CP.QLearningEpisode(1000)
CP.SelectAction.epsilon = CP.SelectAction.epsilon * 0.9
CP.PlotLearningCurve(i,result[1],CP.SelectAction.epsilon)
print("Episode:",str(i),'Total Reward:',str(result[0]),'Steps:',str(result[1]),"time",t2-t1)
y.append(result[1])
yr.append(result[0])
## if i==50:
## miny =min(y)
## figure(i)
## plot(range(1,len(y)+1),y,'k')
## title(r'$ k = 4, \quad \lambda=0.9, \quad \alpha=0.3 $')
## grid('on')
## axis([1, i, 0, 1100])
## xlabel('Episodes')
## ylabel('Steps')
## savefig('cpresultcontinuous.pdf')
##            print "saved"
## close(i)
CP.LearningCurveGraph.display.visible = False
return [[x,y,nk],[x,yr,nk]]
def Experiments():
results1=[]
results2=[]
for i in range(0,10):
x = CartPoleExperiment(Episodes=200,nk=i)
results1.append( x[0] )
results2.append( x[1] )
pickle.dump(results1,open('cartpolestepscq.dat','w'))
pickle.dump(results2,open('cartpolerewardcq.dat','w'))
if __name__ == '__main__':
#Experiments()
x = CartPoleExperiment(50,3)
pickle.dump(x[0],open('contiuouscartpolesteps.dat','w'))
| from rltools.FARLBasic import *
#from Environments.CartPoleEnvironment import CartPoleEnvironment
#from Environments.CartPoleEnvironmentG import CartPoleEnvironment
from Environments.CartPoleEnvironmentGN import CartPoleEnvironment
#from kNNQ import kNNQ
#from rltools.kNNQC import kNNQC
from rltools.ExaSCIPY import Exa as kNNQC
#from NeuroQ import NeuroQ
#from RNeuroQ import RNeuroQ
#from SNeuroQ import SNeuroQ
#from SOMQ import SOMQ
from rltools.ActionSelection import *
import pickle
#from pylab import *
import time
def CartPoleExperiment(Episodes=100,nk=0):
print()
print('===================================================================')
print(' INIT EXPERIMENT','k='+str(nk+1))
# results of the experiment
x = list(range(1,Episodes+1))
y =[]
yr =[]
#Build the Environment
Env = CartPoleEnvironment()
# Build a function approximator
#Q = kNNQ(nactions=Env.nactions,input_ranges=Env.input_ranges,nelemns=[2,3,10,2],npoints=False,k=1,alpha=0.25)
#Q = kNNQ(nactions=Env.nactions,input_ranges=Env.input_ranges,nelemns=[2+7,3+7,10+3,2+7],npoints=False,k=nk+1,alpha=0.3,lm=0.95)
#Q = NeuroQ(Env.nactions, Env.input_ranges, 20, Env.reward_ranges,Env.deep_in,Env.deep_out,alpha=0.3)
#Q = SNeuroQ(Env.nactions, Env.input_ranges, 6, Env.output_ranges,alpha=0.2)
#Experiments
    #Q = kNNQC(Env.input_ranges,[2+2,3+2,10+1,2+1],Env.output_ranges,[11],nk+1,0.3,0.90) #excellent
    #BEST
    Q = kNNQC(Env.input_ranges,[2+2,3+2,10+1,2+1],Env.output_ranges,[11],4,0.6,0.90,0.0) #excellent
    #Q = kNNQC(Env.input_ranges,[2+4,3+4,10+3,2+4],Env.output_ranges,[11],9,0.3,0.90) #not bad
#Q = kNNQC(Env.input_ranges,[10,10,10,10],Env.output_ranges,[11],32,2.0,0.90) #good
# Get the Action Selector
As = e_greedy_selection(epsilon=0.0)
#As = e_softmax_selection(epsilon=0.1)
#As = None
#Build the Agent
CP = FARLBase(Q,Env,As,gamma=1.0)
CP.Environment.graphs=True
for i in range(Episodes):
#result = CP.SARSAEpisode(1000)
#result = CP.NeuroQEpisode(1000)
t1=time.clock()
result = CP.kNNCQEpisode(1000)
t2=time.clock()
#result = CP.QLearningEpisode(1000)
CP.SelectAction.epsilon = CP.SelectAction.epsilon * 0.9
CP.PlotLearningCurve(i,result[1],CP.SelectAction.epsilon)
print("Episode:",str(i),'Total Reward:',str(result[0]),'Steps:',str(result[1]),"time",t2-t1)
y.append(result[1])
yr.append(result[0])
## if i==50:
## miny =min(y)
## figure(i)
## plot(range(1,len(y)+1),y,'k')
## title(r'$ k = 4, \quad \lambda=0.9, \quad \alpha=0.3 $')
## grid('on')
## axis([1, i, 0, 1100])
## xlabel('Episodes')
## ylabel('Steps')
## savefig('cpresultcontinuous.pdf')
##            print "saved"
## close(i)
CP.LearningCurveGraph.display.visible = False
return [[x,y,nk],[x,yr,nk]]
def Experiments():
results1=[]
results2=[]
for i in range(0,10):
x = CartPoleExperiment(Episodes=200,nk=i)
results1.append( x[0] )
results2.append( x[1] )
pickle.dump(results1,open('cartpolestepscq.dat','w'))
pickle.dump(results2,open('cartpolerewardcq.dat','w'))
if __name__ == '__main__':
#Experiments()
x = CartPoleExperiment(50,3)
pickle.dump(x[0],open('contiuouscartpolesteps.dat','w')) | en | 0.322041 | #from Environments.CartPoleEnvironment import CartPoleEnvironment #from Environments.CartPoleEnvironmentG import CartPoleEnvironment #from kNNQ import kNNQ #from rltools.kNNQC import kNNQC #from NeuroQ import NeuroQ #from RNeuroQ import RNeuroQ #from SNeuroQ import SNeuroQ #from SOMQ import SOMQ #from pylab import * # results of the experiment #Build the Environment # Build a function approximator #Q = kNNQ(nactions=Env.nactions,input_ranges=Env.input_ranges,nelemns=[2,3,10,2],npoints=False,k=1,alpha=0.25) #Q = kNNQ(nactions=Env.nactions,input_ranges=Env.input_ranges,nelemns=[2+7,3+7,10+3,2+7],npoints=False,k=nk+1,alpha=0.3,lm=0.95) #Q = NeuroQ(Env.nactions, Env.input_ranges, 20, Env.reward_ranges,Env.deep_in,Env.deep_out,alpha=0.3) #Q = SNeuroQ(Env.nactions, Env.input_ranges, 6, Env.output_ranges,alpha=0.2) #Experiments #Q = kNNQC(Env.input_ranges,[2+2,3+2,10+1,2+1],Env.output_ranges,[11],nk+1,0.3,0.90) #excelent #BEST #excelent #Q = kNNQC(Env.input_ranges,[2+4,3+4,10+3,2+4],Env.output_ranges,[11],9,0.3,0.90) #notbad #Q = kNNQC(Env.input_ranges,[10,10,10,10],Env.output_ranges,[11],32,2.0,0.90) #good # Get the Action Selector #As = e_softmax_selection(epsilon=0.1) #As = None #Build the Agent #result = CP.SARSAEpisode(1000) #result = CP.NeuroQEpisode(1000) #result = CP.QLearningEpisode(1000) ## if i==50: ## miny =min(y) ## figure(i) ## plot(range(1,len(y)+1),y,'k') ## title(r'$ k = 4, \quad \lambda=0.9, \quad \alpha=0.3 $') ## grid('on') ## axis([1, i, 0, 1100]) ## xlabel('Episodes') ## ylabel('Steps') ## savefig('cpresultcontinuous.pdf') ## print "salvado" ## close(i) #Experiments() | 2.175601 | 2 |
watchapp/migrations/0001_initial.py | kepha-okari/the-watch | 0 | 6631993 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-09 16:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Business',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('business_name', models.CharField(max_length=30, null=True)),
('email', models.EmailField(blank=True, max_length=70)),
('hood_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Neighborhood',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('neighborhood_name', models.CharField(max_length=30, null=True)),
('neighborhood_location', models.CharField(max_length=30, null=True)),
('population', models.PositiveIntegerField(default=0)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, null=True, upload_to='photos/')),
('image_name', models.CharField(max_length=30)),
('message', models.TextField(blank=True, max_length=100, null=True)),
('date_uploaded', models.DateTimeField(auto_now_add=True, null=True)),
],
options={
'ordering': ['-date_uploaded'],
},
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_photo', models.ImageField(null=True, upload_to='profiles/')),
('name', models.CharField(max_length=30, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='post',
name='profile',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='watchapp.Profile'),
),
migrations.AddField(
model_name='post',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-09 16:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Business',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('business_name', models.CharField(max_length=30, null=True)),
('email', models.EmailField(blank=True, max_length=70)),
('hood_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Neighborhood',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('neighborhood_name', models.CharField(max_length=30, null=True)),
('neighborhood_location', models.CharField(max_length=30, null=True)),
('population', models.PositiveIntegerField(default=0)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, null=True, upload_to='photos/')),
('image_name', models.CharField(max_length=30)),
('message', models.TextField(blank=True, max_length=100, null=True)),
('date_uploaded', models.DateTimeField(auto_now_add=True, null=True)),
],
options={
'ordering': ['-date_uploaded'],
},
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_photo', models.ImageField(null=True, upload_to='profiles/')),
('name', models.CharField(max_length=30, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='post',
name='profile',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='watchapp.Profile'),
),
migrations.AddField(
model_name='post',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| en | 0.7577 | # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-02-09 16:38 | 1.658047 | 2 |
config.py | AdrianM0Hdz/Linkr | 0 | 6631994 | <gh_stars>0
import os
basedir = os.path.abspath(os.path.dirname(__name__))
class Config:
SECRET_KEY = 'PassWdntid&&dddgy234256dsbbdaafssd2dddd'
SQLALCHEMY_TRACK_MODIFICATIONS = False
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
FLASK_DEBUG = True
SQLALCHEMY_DATABASE_URI = os.getenv('DEV_DATABASE_URI') or \
'sqlite:///' + os.path.join(basedir, 'dev-data.sqlite')
class ProductionConfig(Config):
FLASK_DEBUG = False
SQLALCHEMY_DATABASE_URI = os.getenv('PROD_DATABASE_URI') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
} | import os
basedir = os.path.abspath(os.path.dirname(__name__))
class Config:
SECRET_KEY = 'PassWdntid&&dddgy234256dsbbdaafssd2dddd'
SQLALCHEMY_TRACK_MODIFICATIONS = False
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
FLASK_DEBUG = True
SQLALCHEMY_DATABASE_URI = os.getenv('DEV_DATABASE_URI') or \
'sqlite:///' + os.path.join(basedir, 'dev-data.sqlite')
class ProductionConfig(Config):
FLASK_DEBUG = False
SQLALCHEMY_DATABASE_URI = os.getenv('PROD_DATABASE_URI') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
} | none | 1 | 2.022684 | 2 |
|
timm/loss/__init__.py | cxxgtxy/pytorch-image-models | 41 | 6631995 | from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
| from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
| none | 1 | 1.236548 | 1 |
|
peeringdb_server/migrations/0039_delete_duplicateipnetworkixlan.py | tbaschak/peeringdb | 0 | 6631996 | # Generated by Django 2.2.13 on 2020-07-08 20:48
from django.db import migrations, models
import django_countries.fields
import django_inet.models
import django_peeringdb.models.abstract
class Migration(migrations.Migration):
dependencies = [
("peeringdb_server", "0038_netixlan_ipaddr_unique"),
]
operations = [migrations.DeleteModel(name="DuplicateIPNetworkIXLan",)]
| # Generated by Django 2.2.13 on 2020-07-08 20:48
from django.db import migrations, models
import django_countries.fields
import django_inet.models
import django_peeringdb.models.abstract
class Migration(migrations.Migration):
dependencies = [
("peeringdb_server", "0038_netixlan_ipaddr_unique"),
]
operations = [migrations.DeleteModel(name="DuplicateIPNetworkIXLan",)]
| en | 0.719934 | # Generated by Django 2.2.13 on 2020-07-08 20:48 | 1.536449 | 2 |
data/build_probe_1_data.py | googleinterns/e2e-convrec | 4 | 6631997 | <reponame>googleinterns/e2e-convrec<gh_stars>1-10
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scripts For Building Probe 1 (Movie-to-Movie Recommendations)."""
import collections
import json
import random
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tqdm import tqdm
from trainer import constants
FLAGS = flags.FLAGS
flags.DEFINE_enum("mode", "auto", ["ids", "probes", "all", "auto"],
"auto to build whatever's missing, ids to rebuild ids, "
+ "cooccurrence and MI, all to do all, probes to generate "
+ "just the probe data without rebuilding the movie id or "
+ "pmi matricies")
flags.DEFINE_integer("random_seed", 1, "seed for random movie selection. Choose"
+ "-1 for a randomly picked seed")
flags.DEFINE_integer("probe_min_pop", 30, "minimum popularity to be in probe")
flags.DEFINE_integer("popular_min_pop", 138, "minimum popularity to be"
+ " considered a popular movie")
flags.DEFINE_enum("format", "normal", ["normal", "sequences"],
"specify the probe format: normal for pairs in dialogue, "
+ "sequences for movie only probes for sequences task")
def create_pmi(co_matrix, movie_ids):
"""Build pointwise mutual info matrix from cooccurrence matrix.
Args:
    co_matrix: a cooccurrence matrix of all the movies in the movielens
sequences dataset
movie_ids: a dictionary containing the movie to id mapping generated in this
script
Returns:
a matrix pmi_matrix where:
pmi_matrix[i][j] = pointwise_mutual_info(movie_i, movie_j)
"""
popularities = []
for x in range(len(movie_ids["all_movies"])):
popularities.append(movie_ids["popularity"][movie_ids["id_to_movie"][x]])
popularities = np.array(popularities)
popularities[popularities < 1] = 1
# PMI^2 is calculated as log(P(X, Y)^2 / (P(X) * P(Y)))
pxy = co_matrix / movie_ids["num_sequences"]
pxy[pxy == 0] = 1e-12
px = (popularities / movie_ids["num_sequences"]).reshape((-1, 1))
py = (popularities / movie_ids["num_sequences"]).reshape((1, -1))
pmi = np.log((pxy**2) / np.matmul(px, py))
return pmi
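# Worked toy example of the PMI^2 formula above (numbers are made up for illustration):
# with 100 sequences, movie A in 10 of them, movie B in 20, and A and B together in 5,
#   P(A,B) = 5/100, P(A) = 10/100, P(B) = 20/100
#   PMI^2(A,B) = log(0.05**2 / (0.10 * 0.20)) = log(0.125) ~ -2.08
# Larger (less negative) values mean the pair co-occurs more often than chance predicts.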
def create_cooccurrence(sequences, movie_ids):
"""Build cooccurrence matrix from list of sequences.
Args:
sequences: a list of lists of strings containing the 10 movies in each
sequences in the movielens sequences dataset
movie_ids: a dictionary containing the movie to id mapping generated in this
script
Returns:
a matrix co_matrix where:
co_matrix[i][j] = number of sequences containing both movie_i and movie_j
"""
co_matrix = np.zeros((len(movie_ids["all_movies"]),
len(movie_ids["all_movies"])))
for seq in tqdm(sequences):
for movie1 in seq:
for movie2 in seq:
id1 = movie_ids["movie_to_id"][movie1]
id2 = movie_ids["movie_to_id"][movie2]
co_matrix[id1][id2] += 1
return co_matrix
def create_movie_ids(sequences_data):
"""Build cooccurrence matrix from list of sequences.
Args:
sequences_data: a list of lists of strings containing the 10 movies in each
sequences in the movielens sequences dataset
Returns:
    a dictionary movie_ids which keeps track of a movie to id mapping for each
movie as well as the movie popularity and number of sequences information
"""
movie_set = set()
popularity = collections.defaultdict(int)
# record each movie's popularity (# of sequences containing it)
for seq in sequences_data:
for movie in seq:
movie_set.add(movie)
popularity[movie] += 1
num_sequences = len(sequences_data)
movie_set = sorted(movie_set)
vocab_size = len(movie_set)
movie_to_id = dict(zip(movie_set, list(range(vocab_size))))
id_to_movie = dict(zip(list(range(vocab_size)), movie_set))
movie_ids = {
"all_movies": movie_set,
"movie_count": vocab_size,
"movie_to_id": movie_to_id,
"id_to_movie": id_to_movie,
"popularity": popularity,
"num_sequences": num_sequences
}
return movie_ids
def get_related_movies(movie, movie_ids, pmi_matrix, filtered_set, k=5):
"""Get the k closest related movies as sorted by pmi.
The results are filtered so that the related movies are above
FLAGS.probe_min_pop popularity.
Args:
movie: a string representing the title of the query movie
movie_ids: dictionary containing the movie-id mappings
pmi_matrix: matrix containing the pmi values
filtered_set: set of movies to filter with
k: an int representing the number of related movies to retrieve
Returns:
a list of strings: the titles of the k most related movies
"""
movie_id = movie_ids["movie_to_id"][movie]
row = pmi_matrix[movie_id]
related_ids = list(np.argsort(row)[::-1])
# convert to strings and ignore the 1st most related movie (itself)
related_ids.remove(movie_id)
movie_titles = [movie_ids["id_to_movie"][str(x)] for x in related_ids]
# filter out movies with popularity < FLAGS.probe_min_pop
movie_titles = [x for x in movie_titles if x in filtered_set]
return movie_titles[:k]
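# Illustrative sketch of the selection logic on a toy PMI row (values are made up):
#   row = [0.1, 2.3, -0.5, 1.7]            # PMI of the query movie against ids 0..3
#   np.argsort(row)[::-1] -> [1, 3, 0, 2]  # highest-PMI ids first
# The query movie's own id is then dropped and any title whose popularity falls below
# FLAGS.probe_min_pop is filtered out before the top k remaining titles are returned.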
def main(_):
"""Generate probe 1 data from movielens sequences."""
if (not tf.io.gfile.exists(constants.MATRIX_PATHS["movie_ids"]) or
FLAGS.mode in ["all", "ids"]):
logging.info("generating movie_id_info.json")
def parse_sequence(sequence_str):
sequence_str = sequence_str.replace("\n", "")
sequence_str = sequence_str.replace("\t", "")
return [x.strip() for x in sequence_str.split("@") if x.strip()]
with tf.io.gfile.GFile(constants.ML_SEQ_TSV_PATH["full_train"], "r") as f:
sequence_list = list(f)
sequences_data = []
for sequence_str in tqdm(sequence_list):
sequences_data.append(parse_sequence(sequence_str))
with tf.io.gfile.GFile(constants.ML_SEQ_TSV_PATH["full_validation"],
"r") as f:
sequence_list = list(f)
for sequence_str in tqdm(sequence_list):
sequences_data.append(parse_sequence(sequence_str))
movie_ids = create_movie_ids(sequences_data)
with tf.io.gfile.GFile(constants.MATRIX_PATHS["movie_ids"], "w") as f:
json.dump(movie_ids, f)
logging.info("generating co_matrix.npy and pmi_matrix.npy")
co_matrix = create_cooccurrence(sequences_data, movie_ids)
pmi_matrix = create_pmi(co_matrix, movie_ids)
logging.info("writing_matricies")
with tf.io.gfile.GFile(constants.MATRIX_PATHS["co_matrix"], "w") as f:
np.save(f, co_matrix)
with tf.io.gfile.GFile(constants.MATRIX_PATHS["pmi_matrix"], "w") as f:
np.save(f, pmi_matrix)
if (not tf.io.gfile.exists(constants.PROBE_1_TSV_PATH["validation"]) or
FLAGS.mode in ["all", "probes"]):
logging.info("generating probe_1.tsv")
# set random seed for picking random movies
if FLAGS.random_seed != -1:
random.seed(FLAGS.random_seed)
with tf.io.gfile.GFile(constants.MATRIX_PATHS["co_matrix"], "rb") as f:
co_matrix = np.load(f)
with tf.io.gfile.GFile(constants.MATRIX_PATHS["pmi_matrix"], "rb") as f:
pmi_matrix = np.load(f)
with tf.io.gfile.GFile(constants.MATRIX_PATHS["movie_ids"], "r") as f:
movie_ids = json.load(f)
# define "popular" set as movie which appear in over FLAGS.popular_min_pop
# user sequences
popular_movies = [x for x in movie_ids["all_movies"]
if movie_ids["popularity"][x] >= FLAGS.popular_min_pop]
logging.info("popular movies: filtered %d movies where popularity > %d",
len(popular_movies), FLAGS.popular_min_pop)
# define "filtered" set as movie which appear in over FLAGS.probe_min_pop
# user sequences
filtered_movies = [x for x in movie_ids["all_movies"]
if movie_ids["popularity"][x] >= FLAGS.probe_min_pop]
logging.info("filtered movies: filtered %d movies where popularity > %d",
len(filtered_movies), FLAGS.probe_min_pop)
filtered_set = set(filtered_movies)
probes = []
for movie in tqdm(filtered_movies):
related_list = get_related_movies(movie, movie_ids, pmi_matrix,
filtered_set, k=10)
random_list = random.sample(popular_movies, k=10)
for related, rand in zip(related_list, random_list):
if FLAGS.format == "sequences":
probes.append(f"@ {movie} @\t{related}")
probes.append(f"@ {movie} @\t{rand}")
path, extension = constants.PROBE_1_TSV_PATH["validation"].split(".")
probe_1_path = path + "_sequences" + "." + extension
else:
prompt = f"[User] Can you recommend me a movie like @ {movie} @"
probes.append(f"{prompt}\tSure, have you seen @ {related} @?")
probes.append(f"{prompt}\tSure, have you seen @ {rand} @?")
probe_1_path = constants.PROBE_1_TSV_PATH["validation"]
logging.info("%d pairs generated", len(probes))
with tf.io.gfile.GFile(probe_1_path, "w") as f:
for line in probes:
f.write(f"{line}\n")
if __name__ == "__main__":
app.run(main)
| # Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scripts For Building Probe 1 (Movie-to-Movie Recommendations)."""
import collections
import json
import random
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tqdm import tqdm
from trainer import constants
FLAGS = flags.FLAGS
flags.DEFINE_enum("mode", "auto", ["ids", "probes", "all", "auto"],
"auto to build whatever's missing, ids to rebuild ids, "
+ "cooccurrence and MI, all to do all, probes to generate "
+ "just the probe data without rebuilding the movie id or "
+ "pmi matricies")
flags.DEFINE_integer("random_seed", 1, "seed for random movie selection. Choose"
+ "-1 for a randomly picked seed")
flags.DEFINE_integer("probe_min_pop", 30, "minimum popularity to be in probe")
flags.DEFINE_integer("popular_min_pop", 138, "minimum popularity to be"
+ " considered a popular movie")
flags.DEFINE_enum("format", "normal", ["normal", "sequences"],
"specify the probe format: normal for pairs in dialogue, "
+ "sequences for movie only probes for sequences task")
def create_pmi(co_matrix, movie_ids):
"""Build pointwise mutual info matrix from cooccurrence matrix.
Args:
    co_matrix: a cooccurrence matrix of all the movies in the movielens
sequences dataset
movie_ids: a dictionary containing the movie to id mapping generated in this
script
Returns:
a matrix pmi_matrix where:
pmi_matrix[i][j] = pointwise_mutual_info(movie_i, movie_j)
"""
popularities = []
for x in range(len(movie_ids["all_movies"])):
popularities.append(movie_ids["popularity"][movie_ids["id_to_movie"][x]])
popularities = np.array(popularities)
popularities[popularities < 1] = 1
# PMI^2 is calculated as log(P(X, Y)^2 / (P(X) * P(Y)))
pxy = co_matrix / movie_ids["num_sequences"]
pxy[pxy == 0] = 1e-12
px = (popularities / movie_ids["num_sequences"]).reshape((-1, 1))
py = (popularities / movie_ids["num_sequences"]).reshape((1, -1))
pmi = np.log((pxy**2) / np.matmul(px, py))
return pmi
def create_cooccurrence(sequences, movie_ids):
"""Build cooccurrence matrix from list of sequences.
Args:
sequences: a list of lists of strings containing the 10 movies in each
sequences in the movielens sequences dataset
movie_ids: a dictionary containing the movie to id mapping generated in this
script
Returns:
a matrix co_matrix where:
co_matrix[i][j] = number of sequences containing both movie_i and movie_j
"""
co_matrix = np.zeros((len(movie_ids["all_movies"]),
len(movie_ids["all_movies"])))
for seq in tqdm(sequences):
for movie1 in seq:
for movie2 in seq:
id1 = movie_ids["movie_to_id"][movie1]
id2 = movie_ids["movie_to_id"][movie2]
co_matrix[id1][id2] += 1
return co_matrix
def create_movie_ids(sequences_data):
"""Build cooccurrence matrix from list of sequences.
Args:
sequences_data: a list of lists of strings containing the 10 movies in each
sequences in the movielens sequences dataset
Returns:
    a dictionary movie_ids which keeps track of a movie to id mapping for each
movie as well as the movie popularity and number of sequences information
"""
movie_set = set()
popularity = collections.defaultdict(int)
# record each movie's popularity (# of sequences containing it)
for seq in sequences_data:
for movie in seq:
movie_set.add(movie)
popularity[movie] += 1
num_sequences = len(sequences_data)
movie_set = sorted(movie_set)
vocab_size = len(movie_set)
movie_to_id = dict(zip(movie_set, list(range(vocab_size))))
id_to_movie = dict(zip(list(range(vocab_size)), movie_set))
movie_ids = {
"all_movies": movie_set,
"movie_count": vocab_size,
"movie_to_id": movie_to_id,
"id_to_movie": id_to_movie,
"popularity": popularity,
"num_sequences": num_sequences
}
return movie_ids
def get_related_movies(movie, movie_ids, pmi_matrix, filtered_set, k=5):
"""Get the k closest related movies as sorted by pmi.
The results are filtered so that the related movies are above
FLAGS.probe_min_pop popularity.
Args:
movie: a string representing the title of the query movie
movie_ids: dictionary containing the movie-id mappings
pmi_matrix: matrix containing the pmi values
filtered_set: set of movies to filter with
k: an int representing the number of related movies to retrieve
Returns:
a list of strings: the titles of the k most related movies
"""
movie_id = movie_ids["movie_to_id"][movie]
row = pmi_matrix[movie_id]
related_ids = list(np.argsort(row)[::-1])
# convert to strings and ignore the 1st most related movie (itself)
related_ids.remove(movie_id)
movie_titles = [movie_ids["id_to_movie"][str(x)] for x in related_ids]
# filter out movies with popularity < FLAGS.probe_min_pop
movie_titles = [x for x in movie_titles if x in filtered_set]
return movie_titles[:k]
def main(_):
"""Generate probe 1 data from movielens sequences."""
if (not tf.io.gfile.exists(constants.MATRIX_PATHS["movie_ids"]) or
FLAGS.mode in ["all", "ids"]):
logging.info("generating movie_id_info.json")
def parse_sequence(sequence_str):
sequence_str = sequence_str.replace("\n", "")
sequence_str = sequence_str.replace("\t", "")
return [x.strip() for x in sequence_str.split("@") if x.strip()]
with tf.io.gfile.GFile(constants.ML_SEQ_TSV_PATH["full_train"], "r") as f:
sequence_list = list(f)
sequences_data = []
for sequence_str in tqdm(sequence_list):
sequences_data.append(parse_sequence(sequence_str))
with tf.io.gfile.GFile(constants.ML_SEQ_TSV_PATH["full_validation"],
"r") as f:
sequence_list = list(f)
for sequence_str in tqdm(sequence_list):
sequences_data.append(parse_sequence(sequence_str))
movie_ids = create_movie_ids(sequences_data)
with tf.io.gfile.GFile(constants.MATRIX_PATHS["movie_ids"], "w") as f:
json.dump(movie_ids, f)
logging.info("generating co_matrix.npy and pmi_matrix.npy")
co_matrix = create_cooccurrence(sequences_data, movie_ids)
pmi_matrix = create_pmi(co_matrix, movie_ids)
logging.info("writing_matricies")
with tf.io.gfile.GFile(constants.MATRIX_PATHS["co_matrix"], "w") as f:
np.save(f, co_matrix)
with tf.io.gfile.GFile(constants.MATRIX_PATHS["pmi_matrix"], "w") as f:
np.save(f, pmi_matrix)
if (not tf.io.gfile.exists(constants.PROBE_1_TSV_PATH["validation"]) or
FLAGS.mode in ["all", "probes"]):
logging.info("generating probe_1.tsv")
# set random seed for picking random movies
if FLAGS.random_seed != -1:
random.seed(FLAGS.random_seed)
with tf.io.gfile.GFile(constants.MATRIX_PATHS["co_matrix"], "rb") as f:
co_matrix = np.load(f)
with tf.io.gfile.GFile(constants.MATRIX_PATHS["pmi_matrix"], "rb") as f:
pmi_matrix = np.load(f)
with tf.io.gfile.GFile(constants.MATRIX_PATHS["movie_ids"], "r") as f:
movie_ids = json.load(f)
# define "popular" set as movie which appear in over FLAGS.popular_min_pop
# user sequences
popular_movies = [x for x in movie_ids["all_movies"]
if movie_ids["popularity"][x] >= FLAGS.popular_min_pop]
logging.info("popular movies: filtered %d movies where popularity > %d",
len(popular_movies), FLAGS.popular_min_pop)
# define "filtered" set as movie which appear in over FLAGS.probe_min_pop
# user sequences
filtered_movies = [x for x in movie_ids["all_movies"]
if movie_ids["popularity"][x] >= FLAGS.probe_min_pop]
logging.info("filtered movies: filtered %d movies where popularity > %d",
len(filtered_movies), FLAGS.probe_min_pop)
filtered_set = set(filtered_movies)
probes = []
for movie in tqdm(filtered_movies):
related_list = get_related_movies(movie, movie_ids, pmi_matrix,
filtered_set, k=10)
random_list = random.sample(popular_movies, k=10)
for related, rand in zip(related_list, random_list):
if FLAGS.format == "sequences":
probes.append(f"@ {movie} @\t{related}")
probes.append(f"@ {movie} @\t{rand}")
path, extension = constants.PROBE_1_TSV_PATH["validation"].split(".")
probe_1_path = path + "_sequences" + "." + extension
else:
prompt = f"[User] Can you recommend me a movie like @ {movie} @"
probes.append(f"{prompt}\tSure, have you seen @ {related} @?")
probes.append(f"{prompt}\tSure, have you seen @ {rand} @?")
probe_1_path = constants.PROBE_1_TSV_PATH["validation"]
logging.info("%d pairs generated", len(probes))
with tf.io.gfile.GFile(probe_1_path, "w") as f:
for line in probes:
f.write(f"{line}\n")
if __name__ == "__main__":
app.run(main) | en | 0.843384 | # Copyright 2020 Google LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # https://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Scripts For Building Probe 1 (Movie-to-Movie Recommendations). Build pointwise mutual info matrix from cooccurrence matrix. Args: co_matrix: a cooccurence matrix off all the movies in the movielens sequences dataset movie_ids: a dictionary containing the movie to id mapping generated in this script Returns: a matrix pmi_matrix where: pmi_matrix[i][j] = pointwise_mutual_info(movie_i, movie_j) # PMI^2 is calculated as log(P(X, Y)^2 / (P(X) * P(Y))) Build cooccurrence matrix from list of sequences. Args: sequences: a list of lists of strings containing the 10 movies in each sequences in the movielens sequences dataset movie_ids: a dictionary containing the movie to id mapping generated in this script Returns: a matrix co_matrix where: co_matrix[i][j] = number of sequences containing both movie_i and movie_j Build cooccurrence matrix from list of sequences. Args: sequences_data: a list of lists of strings containing the 10 movies in each sequences in the movielens sequences dataset Returns: a dictionary movie_ids wwhich keeps track of a movie to id mapping for each movie as well as the movie popularity and number of sequences information # record each movie's popularity (# of sequences containing it) Get the k closest related movies as sorted by pmi. The results are filtered so that the related movies are above FLAGS.probe_min_pop popularity. Args: movie: a string representing the title of the query movie movie_ids: dictionary containing the movie-id mappings pmi_matrix: matrix containing the pmi values filtered_set: set of movies to filter with k: an int representing the number of related movies to retrieve Returns: a list of strings: the titles of the k most related movies # convert to strings and ignore the 1st most related movie (itself) # filter out movies with popularity < FLAGS.probe_min_pop Generate probe 1 data from movielens sequences. # set random seed for picking random movies # define "popular" set as movie which appear in over FLAGS.popular_min_pop # user sequences # define "filtered" set as movie which appear in over FLAGS.probe_min_pop # user sequences | 1.84453 | 2 |
wooey/tests/mixins.py | macdaliot/Wooey | 1 | 6631998 | import shutil
import os
from ..models import ScriptVersion, WooeyFile, WooeyJob
from ..backend import utils
from .. import settings as wooey_settings
from . import factories, config
# TODO: Track down where file handles are not being closed. This is not a problem on Linux/Mac, but is on Windows
# and likely reflects being careless somewhere as opposed to Windows being a PITA
try:
WindowsError
except NameError:
WindowsError = None
class FileCleanupMixin(object):
def tearDown(self):
for i in WooeyFile.objects.all():
try:
path = i.filepath.name
utils.get_storage().delete(path)
if wooey_settings.WOOEY_EPHEMERAL_FILES:
utils.get_storage(local=False).delete(path)
except WindowsError:
print('unable to delete {}'.format(path))
# delete job dirs
local_storage = utils.get_storage(local=True)
for i in WooeyJob.objects.all():
path = i.get_output_path()
try:
shutil.rmtree(local_storage.path(path))
except WindowsError:
print('unable to delete {}'.format(path))
super(FileCleanupMixin, self).tearDown()
class ScriptFactoryMixin(object):
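    """Generate the example scripts for a test and delete their ScriptVersion files afterwards."""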
def tearDown(self):
for i in ScriptVersion.objects.all():
path = i.script_path.name
# import pdb; pdb.set_trace();
utils.get_storage().delete(path)
if wooey_settings.WOOEY_EPHEMERAL_FILES:
utils.get_storage(local=False).delete(path)
path += 'c' # handle pyc junk
utils.get_storage().delete(path)
ScriptVersion.objects.all().delete()
super(ScriptFactoryMixin, self).tearDown()
def setUp(self):
self.translate_script = factories.generate_script(os.path.join(config.WOOEY_TEST_SCRIPTS, 'translate.py'))
self.choice_script = factories.generate_script(os.path.join(config.WOOEY_TEST_SCRIPTS, 'choices.py'))
self.without_args = factories.generate_script(os.path.join(config.WOOEY_TEST_SCRIPTS, 'without_args.py'))
self.version1_script = factories.generate_script(
os.path.join(config.WOOEY_TEST_SCRIPTS, 'versioned_script', 'v1.py'),
script_name='version_test',
)
self.version2_script = factories.generate_script(
os.path.join(config.WOOEY_TEST_SCRIPTS, 'versioned_script', 'v2.py'),
script_name='version_test',
)
super(ScriptFactoryMixin, self).setUp()
class FileMixin(object):
def setUp(self):
self.storage = utils.get_storage(local=not wooey_settings.WOOEY_EPHEMERAL_FILES)
self.filename_func = lambda x: os.path.join(wooey_settings.WOOEY_SCRIPT_DIR, x)
super(FileMixin, self).setUp()
def get_any_file(self):
script = os.path.join(config.WOOEY_TEST_SCRIPTS, 'command_order.py')
return self.storage.save(self.filename_func('command_order.py'), open(script)) | import shutil
import os
from ..models import ScriptVersion, WooeyFile, WooeyJob
from ..backend import utils
from .. import settings as wooey_settings
from . import factories, config
# TODO: Track down where file handles are not being closed. This is not a problem on Linux/Mac, but is on Windows
# and likely reflects being careless somewhere as opposed to Windows being a PITA
try:
WindowsError
except NameError:
WindowsError = None
class FileCleanupMixin(object):
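    """Remove WooeyFile storage files and WooeyJob output directories left behind by a test."""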
def tearDown(self):
for i in WooeyFile.objects.all():
try:
path = i.filepath.name
utils.get_storage().delete(path)
if wooey_settings.WOOEY_EPHEMERAL_FILES:
utils.get_storage(local=False).delete(path)
except WindowsError:
print('unable to delete {}'.format(path))
# delete job dirs
local_storage = utils.get_storage(local=True)
for i in WooeyJob.objects.all():
path = i.get_output_path()
try:
shutil.rmtree(local_storage.path(path))
except WindowsError:
print('unable to delete {}'.format(path))
super(FileCleanupMixin, self).tearDown()
class ScriptFactoryMixin(object):
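    """Generate the example scripts for a test and delete their ScriptVersion files afterwards."""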
def tearDown(self):
for i in ScriptVersion.objects.all():
path = i.script_path.name
# import pdb; pdb.set_trace();
utils.get_storage().delete(path)
if wooey_settings.WOOEY_EPHEMERAL_FILES:
utils.get_storage(local=False).delete(path)
path += 'c' # handle pyc junk
utils.get_storage().delete(path)
ScriptVersion.objects.all().delete()
super(ScriptFactoryMixin, self).tearDown()
def setUp(self):
self.translate_script = factories.generate_script(os.path.join(config.WOOEY_TEST_SCRIPTS, 'translate.py'))
self.choice_script = factories.generate_script(os.path.join(config.WOOEY_TEST_SCRIPTS, 'choices.py'))
self.without_args = factories.generate_script(os.path.join(config.WOOEY_TEST_SCRIPTS, 'without_args.py'))
self.version1_script = factories.generate_script(
os.path.join(config.WOOEY_TEST_SCRIPTS, 'versioned_script', 'v1.py'),
script_name='version_test',
)
self.version2_script = factories.generate_script(
os.path.join(config.WOOEY_TEST_SCRIPTS, 'versioned_script', 'v2.py'),
script_name='version_test',
)
super(ScriptFactoryMixin, self).setUp()
class FileMixin(object):
def setUp(self):
self.storage = utils.get_storage(local=not wooey_settings.WOOEY_EPHEMERAL_FILES)
self.filename_func = lambda x: os.path.join(wooey_settings.WOOEY_SCRIPT_DIR, x)
super(FileMixin, self).setUp()
def get_any_file(self):
script = os.path.join(config.WOOEY_TEST_SCRIPTS, 'command_order.py')
return self.storage.save(self.filename_func('command_order.py'), open(script)) | en | 0.959902 | # TODO: Track down where file handles are not being closed. This is not a problem on Linux/Mac, but is on Windows # and likely reflects being careless somewhere as opposed to Windows being a PITA # delete job dirs # import pdb; pdb.set_trace(); # handle pyc junk | 2.012215 | 2 |
checker/layout.py | fausecteam/faustctf-2021-thelostbottle | 0 | 6631999 | <gh_stars>0
import random
import subprocess
import string
def gen_name(x = 10, y = 15):
length = random.randint(x, y)
return "".join(random.choice(string.ascii_letters) for _ in range(length))
# should always print 1 / ..., because we don't shuffle the first
def bottlecheck(bs, target, num = 10000):
ids = list(range(len(bs)))
yes = 0
for _ in range(num):
y = 0
for i in ids:
percent, volume = bs[i]
y = (y * (4 - volume) + percent * volume) / 4
if y >= target:
yes += 1
break
random.shuffle(ids)
print(f">> {yes}\t/ {num}")
if yes > 2:
print(sorted(bs))
class Room:
def __init__(self, saveable, large = False):
if large:
self.w = random.randint(15, 20)
self.h = random.randint(15, 20)
else:
self.w = random.randint(5, 20)
self.h = random.randint(5, 20)
self.name = gen_name(20, 30)
self.saveable = saveable
self.exits = []
self.blocks = []
self.elements = {}
self.spaces = [ # 1. dimension is x (right) !
[None] * self.h
for _ in range(self.w)
]
self.fill_blocks()
self.fill_barrels()
def fill_blocks(self):
objs = [
":resources:/images/tiles/cactus.png",
":resources:/images/tiles/bush.png",
":resources:/images/tiles/rock.png",
":resources:/images/space_shooter/meteorGrey_big3.png"
]
poss = [[] for _ in range(len(objs))]
self.blocks = []
m = self.h // 2
for i in range(2, self.w - 2):
if random.randint(0, 2) == 0:
r = random.randint(0, len(objs) - 1)
if r < len(objs):
poss[r] += [[i, m]]
self.spaces[i][m] = "block"
for i in range(len(objs)):
if len(poss[i]) > 0:
self.blocks += [{"img": objs[i], "pos": poss[i]}]
def fill_barrels(self):
bc = random.randint(0, 5)
for _ in range(bc):
self.add_element(gen_name(5, 10), {
"id": 4,
"value": random.choice(["intact"] * 5 + ["broken"])
})
def add_element(self, uuid, el):
if uuid in self.elements:
print("FAIL duplicate uuid")
el["x"], el["y"] = self.free_pos("element")
self.elements[uuid] = el
return el
def free_pos(self, fill_with):
tries = 0
while tries < self.w * self.h * 3:
x = random.randint(1, self.w - 2)
y = random.randint(1, self.h - 2)
if not self.spaces[x][y]:
self.spaces[x][y] = fill_with
return x, y
print("FAIL no empty spaces")
return None
def find_exit(self):
tries = 0
while tries < 50:
if random.randint(0, 1) == 0: # sides
x = random.choice([0, self.w - 1])
y = random.randint(1, self.h - 2)
else: # top/bot
x = random.randint(1, self.w - 2)
y = random.choice([0, self.h - 1])
if not self.spaces[x][y]:
return (x, y)
print("Failed")
return (1, 0)
def add_exit(self, pos, other, otherpos, pwd):
ox, oy = otherpos
if ox == 0:
ox = 1
elif oy == 0:
oy = 1
elif ox == other.w - 1:
ox = other.w - 2
else:
oy = other.h - 2
self.exits += [{
"x": pos[0],
"y": pos[1],
"room": other.name,
"targetx": ox,
"targety": oy,
"open": True,
"keypad": pwd
}]
self.spaces[pos[0]][pos[1]] = "exit"
def dump(self):
return self.name, {
"w": self.w,
"h": self.h,
"saveable": self.saveable,
"background": "cave.png",
"exits": self.exits,
"blocks": self.blocks,
"elements": self.elements
}
class Graph:
def __init__(self, n):
self.n = n
self.parent = [i for i in range(n)] # union-find
self.groups = n
self.edges = []
self._marks = [[] for _ in range(n)] # for debug only
def find(self, a):
if self.parent[a] != a:
self.parent[a] = self.find(self.parent[a])
return self.parent[a]
def union(self, a, b):
a = self.find(a)
b = self.find(b)
if a == b:
return
self.parent[a] = b
self.groups -= 1
def randomfill(self):
# make connected
while self.groups > 1:
a = random.randint(0, self.n-1)
b = random.randint(0, self.n-1)
if a == b:
continue
if a > b:
a, b = b, a
if (a, b) in self.edges:
continue
self.edges += [(a, b)]
self.union(a, b)
def find_path(self, start, end, cur):
if start == end:
return cur + [end]
for a, b in self.edges:
if b == start:
a, b = b, a
if a == start:
if not b in cur:
p = self.find_path(b, end, cur + [start])
if p:
return p
return None
def to_dot(self):
s = "graph {\n"
for a, b in self.edges:
s += f"a{a} -- a{b};\n"
for ni in range(self.n):
if len(self._marks[ni]) > 0:
s += f"a{ni}[label=\"{ni}|" + "|".join(self._marks[ni]) + "\"];\n"
s += "}\n"
return s
def _marknode(self, ni, s):
self._marks[ni] += [s]
def add_edge(a, b, pwd = None): # rooms a, b
ea = a.find_exit()
eb = b.find_exit()
a.add_exit(ea, b, eb, pwd)
b.add_exit(eb, a, ea, None)
"""
target := % value to achieve for success
"""
def gen_bottles(target):
bottles = []
for i in range(random.randint(15, 22)): # never enough
bottles += [(random.randint(0, target - 1), random.randint(1, 2))]
bottles = list(sorted(bottles))
cur = 0
for i in range(len(bottles)):
cur = drink(cur, bottles[i][1], bottles[i][0])
while cur < target + 0.0001: # just in case
# use v = 2 as many as needed
v = 2
p = int(2 * (target - cur) + cur) + 1
if p > 99:
p = target # small percent that makes enough progress to eventually terminate
bottles += [(p, v)]
cur = drink(cur, v, p)
return bottles
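# drink() mixes a new drink into the running percentage: 'volume' parts at 'percent' are averaged into a fixed total of 4 parts.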
def drink(oldpercent, volume, percent):
return (oldpercent * (4 - volume) + percent * volume) / 4
# input: list of room ids
# output: list of exits to use (including keypad)
def path_to_exits(rooms, path):
p = []
for i in range(1, len(path)):
for e in rooms[path[i-1]].exits:
if e["room"] == rooms[path[i]].name:
p += [(e["x"], e["y"], e["keypad"])]
break
else:
raise RuntimeError("Failed converting path to exit sequence")
return p
dreamlist = [
"You become relaxed and dream of an island far far away with a hidden treasure...",
"There is a giant owl behind you. You turn around and it vanished...",
"Digging for a treasure you unveil an old artifact. However, it is useless...",
"Life is beautiful. You are on your own island filled with wildlife and listen to the singing of birds above you..."
]
def gen_map(flag = "INSERT FLAG", check_bottles = False, num = 10000):
g1 = Graph(random.randint(3, 7))
g1.randomfill()
g2 = Graph(random.randint(3, 7))
g2.randomfill()
rooms = []
bottleroomid = random.randint(0, g1.n - 1) # need this one large
for i in range(g1.n):
rooms += [Room(True, large = (i == bottleroomid))]
for a, b in g1.edges:
add_edge(rooms[a], rooms[b])
for i in range(g2.n):
rooms += [Room(False)]
for a, b in g2.edges:
add_edge(rooms[a+g1.n], rooms[b+g1.n])
a = random.randint(0, g1.n-1)
b = g1.n + random.randint(0, g2.n - 1)
add_edge(rooms[a], rooms[b], pwd = gen_name(8, 8))
starterid = random.randint(0, g1.n-1)
targetroomid = g1.n + random.randint(0, g2.n-1)
starter = rooms[starterid]
bottleroom = rooms[bottleroomid]
signpath = g1.find_path(starterid, a, []) + [g1.n + x for x in g2.find_path(b - g1.n, targetroomid - g1.n, [])]
# debugging
if False:
g1._marknode(starterid, "start")
g1._marknode(a, "portal")
g1._marknode(bottleroomid, "bottles")
with open("/tmp/a.dot", "w") as outf:
outf.write(g1.to_dot())
r = subprocess.check_output([
"dot",
"-Tpdf",
"/tmp/a.dot"
])
with open("/tmp/a.pdf", "wb") as outf:
outf.write(r)
signpath = path_to_exits(rooms, signpath)
bottlepath = g1.find_path(starterid, bottleroomid, [])
bottlepath = path_to_exits(rooms, bottlepath)
# sign
sign = rooms[targetroomid].add_element(
gen_name(10, 15),
{
"id": 1,
"value": flag
})
# bottles
target = random.randint(70, 85)
bottles = gen_bottles(target)
bottleorder = []
for b in bottles:
e = bottleroom.add_element(
gen_name(10, 15),
{
"id": 5,
"value": "{}|{}%".format(b[1], b[0])
})
bottleorder += [(e["x"], e["y"])]
# shuffle bottle order in dictionary
bottlelist = [(k, bottleroom.elements[k]) for k in bottleroom.elements]
random.shuffle(bottlelist)
bottleroom.elements = {k : v for k, v in bottlelist}
if check_bottles:
bottlecheck(bottles, target, num)
# map
roomdict = {}
for r in rooms:
n, d = r.dump()
roomdict[n] = d
startx, starty = starter.free_pos("start")
m = {
"rooms": roomdict,
"globals": {
"posx": startx,
"posy": starty,
"room": starter.name
},
"dreams": []
}
targets = list(sorted([random.randint(0, 70) for _ in range(2)]))
for t in targets:
m["dreams"] += [[t, random.choice(dreamlist)]]
m["dreams"] += [[target, "You start to hallucinating about the meaning of life and start to beliebe that the answer to life is " + flag]]
# debugging
if False:
with open("/tmp/a.dot", "w") as outf:
outf.write(g1.to_dot())
r = subprocess.check_output([
"dot",
"-Tpdf",
"/tmp/a.dot"
])
with open("/tmp/a.pdf", "wb") as outf:
outf.write(r)
return {
"map": m,
"signpos": (sign["x"], sign["y"]),
"signpath": signpath,
"bottlepath": bottlepath,
"bottleorder": bottleorder
}
## just for test generation
# check_bottles := checks how easy it is to brute force bottle order
def gen_test(check_bottles = False, num = 10000):
import json
for i in range(100):
d = gen_map("FAUST_" + "".join(random.choice(string.ascii_lowercase) for _ in range(10)), check_bottles, num)
d["map"]["map"] = "testmaps/m_%d.json" % i
with open("testmaps/m_%d.json" % i, "w") as outf:
json.dump(d["map"], outf, indent='\t')
if __name__ == "__main__":
import sys
if len(sys.argv) > 1 and sys.argv[1] in ["brute", "check"]:
if len(sys.argv) > 2:
num = int(sys.argv[2])
else:
num = 10000
gen_test(True, num)
else:
gen_test(False)
| import random
import subprocess
import string
def gen_name(x = 10, y = 15):
length = random.randint(x, y)
return "".join(random.choice(string.ascii_letters) for _ in range(length))
# should always print 1 / ..., because we don't shuffle the first
def bottlecheck(bs, target, num = 10000):
ids = list(range(len(bs)))
yes = 0
for _ in range(num):
y = 0
for i in ids:
percent, volume = bs[i]
y = (y * (4 - volume) + percent * volume) / 4
if y >= target:
yes += 1
break
random.shuffle(ids)
print(f">> {yes}\t/ {num}")
if yes > 2:
print(sorted(bs))
class Room:
def __init__(self, saveable, large = False):
if large:
self.w = random.randint(15, 20)
self.h = random.randint(15, 20)
else:
self.w = random.randint(5, 20)
self.h = random.randint(5, 20)
self.name = gen_name(20, 30)
self.saveable = saveable
self.exits = []
self.blocks = []
self.elements = {}
self.spaces = [ # 1. dimension is x (right) !
[None] * self.h
for _ in range(self.w)
]
self.fill_blocks()
self.fill_barrels()
def fill_blocks(self):
objs = [
":resources:/images/tiles/cactus.png",
":resources:/images/tiles/bush.png",
":resources:/images/tiles/rock.png",
":resources:/images/space_shooter/meteorGrey_big3.png"
]
poss = [[] for _ in range(len(objs))]
self.blocks = []
m = self.h // 2
for i in range(2, self.w - 2):
if random.randint(0, 2) == 0:
r = random.randint(0, len(objs) - 1)
if r < len(objs):
poss[r] += [[i, m]]
self.spaces[i][m] = "block"
for i in range(len(objs)):
if len(poss[i]) > 0:
self.blocks += [{"img": objs[i], "pos": poss[i]}]
def fill_barrels(self):
bc = random.randint(0, 5)
for _ in range(bc):
self.add_element(gen_name(5, 10), {
"id": 4,
"value": random.choice(["intact"] * 5 + ["broken"])
})
def add_element(self, uuid, el):
if uuid in self.elements:
print("FAIL duplicate uuid")
el["x"], el["y"] = self.free_pos("element")
self.elements[uuid] = el
return el
def free_pos(self, fill_with):
tries = 0
while tries < self.w * self.h * 3:
x = random.randint(1, self.w - 2)
y = random.randint(1, self.h - 2)
if not self.spaces[x][y]:
self.spaces[x][y] = fill_with
return x, y
print("FAIL no empty spaces")
return None
def find_exit(self):
tries = 0
while tries < 50:
if random.randint(0, 1) == 0: # sides
x = random.choice([0, self.w - 1])
y = random.randint(1, self.h - 2)
else: # top/bot
x = random.randint(1, self.w - 2)
y = random.choice([0, self.h - 1])
if not self.spaces[x][y]:
return (x, y)
print("Failed")
return (1, 0)
def add_exit(self, pos, other, otherpos, pwd):
ox, oy = otherpos
if ox == 0:
ox = 1
elif oy == 0:
oy = 1
elif ox == other.w - 1:
ox = other.w - 2
else:
oy = other.h - 2
self.exits += [{
"x": pos[0],
"y": pos[1],
"room": other.name,
"targetx": ox,
"targety": oy,
"open": True,
"keypad": pwd
}]
self.spaces[pos[0]][pos[1]] = "exit"
def dump(self):
return self.name, {
"w": self.w,
"h": self.h,
"saveable": self.saveable,
"background": "cave.png",
"exits": self.exits,
"blocks": self.blocks,
"elements": self.elements
}
class Graph:
def __init__(self, n):
self.n = n
self.parent = [i for i in range(n)] # union-find
self.groups = n
self.edges = []
self._marks = [[] for _ in range(n)] # for debug only
def find(self, a):
if self.parent[a] != a:
self.parent[a] = self.find(self.parent[a])
return self.parent[a]
def union(self, a, b):
a = self.find(a)
b = self.find(b)
if a == b:
return
self.parent[a] = b
self.groups -= 1
def randomfill(self):
# make connected
while self.groups > 1:
a = random.randint(0, self.n-1)
b = random.randint(0, self.n-1)
if a == b:
continue
if a > b:
a, b = b, a
if (a, b) in self.edges:
continue
self.edges += [(a, b)]
self.union(a, b)
def find_path(self, start, end, cur):
if start == end:
return cur + [end]
for a, b in self.edges:
if b == start:
a, b = b, a
if a == start:
if not b in cur:
p = self.find_path(b, end, cur + [start])
if p:
return p
return None
def to_dot(self):
s = "graph {\n"
for a, b in self.edges:
s += f"a{a} -- a{b};\n"
for ni in range(self.n):
if len(self._marks[ni]) > 0:
s += f"a{ni}[label=\"{ni}|" + "|".join(self._marks[ni]) + "\"];\n"
s += "}\n"
return s
def _marknode(self, ni, s):
self._marks[ni] += [s]
def add_edge(a, b, pwd = None): # rooms a, b
ea = a.find_exit()
eb = b.find_exit()
a.add_exit(ea, b, eb, pwd)
b.add_exit(eb, a, ea, None)
"""
target := % value to achieve for success
"""
def gen_bottles(target):
bottles = []
for i in range(random.randint(15, 22)): # never enough
bottles += [(random.randint(0, target - 1), random.randint(1, 2))]
bottles = list(sorted(bottles))
cur = 0
for i in range(len(bottles)):
cur = drink(cur, bottles[i][1], bottles[i][0])
while cur < target + 0.0001: # just in case
# use v = 2 as many as needed
v = 2
p = int(2 * (target - cur) + cur) + 1
if p > 99:
p = target # small percent that makes enough progress to eventually terminate
bottles += [(p, v)]
cur = drink(cur, v, p)
return bottles
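# drink() mixes a new drink into the running percentage: 'volume' parts at 'percent' are averaged into a fixed total of 4 parts.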
def drink(oldpercent, volume, percent):
return (oldpercent * (4 - volume) + percent * volume) / 4
# input: list of room ids
# output: list of exits to use (including keypad)
def path_to_exits(rooms, path):
p = []
for i in range(1, len(path)):
for e in rooms[path[i-1]].exits:
if e["room"] == rooms[path[i]].name:
p += [(e["x"], e["y"], e["keypad"])]
break
else:
raise RuntimeError("Failed converting path to exit sequence")
return p
dreamlist = [
"You become relaxed and dream of an island far far away with a hidden treasure...",
"There is a giant owl behind you. You turn around and it vanished...",
"Digging for a treasure you unveil an old artifact. However, it is useless...",
"Life is beautiful. You are on your own island filled with wildlife and listen to the singing of birds above you..."
]
def gen_map(flag = "INSERT FLAG", check_bottles = False, num = 10000):
g1 = Graph(random.randint(3, 7))
g1.randomfill()
g2 = Graph(random.randint(3, 7))
g2.randomfill()
rooms = []
bottleroomid = random.randint(0, g1.n - 1) # need this one large
for i in range(g1.n):
rooms += [Room(True, large = (i == bottleroomid))]
for a, b in g1.edges:
add_edge(rooms[a], rooms[b])
for i in range(g2.n):
rooms += [Room(False)]
for a, b in g2.edges:
add_edge(rooms[a+g1.n], rooms[b+g1.n])
a = random.randint(0, g1.n-1)
b = g1.n + random.randint(0, g2.n - 1)
add_edge(rooms[a], rooms[b], pwd = gen_name(8, 8))
starterid = random.randint(0, g1.n-1)
targetroomid = g1.n + random.randint(0, g2.n-1)
starter = rooms[starterid]
bottleroom = rooms[bottleroomid]
signpath = g1.find_path(starterid, a, []) + [g1.n + x for x in g2.find_path(b - g1.n, targetroomid - g1.n, [])]
# debugging
if False:
g1._marknode(starterid, "start")
g1._marknode(a, "portal")
g1._marknode(bottleroomid, "bottles")
with open("/tmp/a.dot", "w") as outf:
outf.write(g1.to_dot())
r = subprocess.check_output([
"dot",
"-Tpdf",
"/tmp/a.dot"
])
with open("/tmp/a.pdf", "wb") as outf:
outf.write(r)
signpath = path_to_exits(rooms, signpath)
bottlepath = g1.find_path(starterid, bottleroomid, [])
bottlepath = path_to_exits(rooms, bottlepath)
# sign
sign = rooms[targetroomid].add_element(
gen_name(10, 15),
{
"id": 1,
"value": flag
})
# bottles
target = random.randint(70, 85)
bottles = gen_bottles(target)
bottleorder = []
for b in bottles:
e = bottleroom.add_element(
gen_name(10, 15),
{
"id": 5,
"value": "{}|{}%".format(b[1], b[0])
})
bottleorder += [(e["x"], e["y"])]
# shuffle bottle order in dictionary
bottlelist = [(k, bottleroom.elements[k]) for k in bottleroom.elements]
random.shuffle(bottlelist)
bottleroom.elements = {k : v for k, v in bottlelist}
if check_bottles:
bottlecheck(bottles, target, num)
# map
roomdict = {}
for r in rooms:
n, d = r.dump()
roomdict[n] = d
startx, starty = starter.free_pos("start")
m = {
"rooms": roomdict,
"globals": {
"posx": startx,
"posy": starty,
"room": starter.name
},
"dreams": []
}
targets = list(sorted([random.randint(0, 70) for _ in range(2)]))
for t in targets:
m["dreams"] += [[t, random.choice(dreamlist)]]
m["dreams"] += [[target, "You start to hallucinating about the meaning of life and start to beliebe that the answer to life is " + flag]]
# debugging
if False:
with open("/tmp/a.dot", "w") as outf:
outf.write(g1.to_dot())
r = subprocess.check_output([
"dot",
"-Tpdf",
"/tmp/a.dot"
])
with open("/tmp/a.pdf", "wb") as outf:
outf.write(r)
return {
"map": m,
"signpos": (sign["x"], sign["y"]),
"signpath": signpath,
"bottlepath": bottlepath,
"bottleorder": bottleorder
}
## just for test generation
# check_bottles := checks how easy it is to brute force bottle order
def gen_test(check_bottles = False, num = 10000):
import json
for i in range(100):
d = gen_map("FAUST_" + "".join(random.choice(string.ascii_lowercase) for _ in range(10)), check_bottles, num)
d["map"]["map"] = "testmaps/m_%d.json" % i
with open("testmaps/m_%d.json" % i, "w") as outf:
json.dump(d["map"], outf, indent='\t')
if __name__ == "__main__":
import sys
if len(sys.argv) > 1 and sys.argv[1] in ["brute", "check"]:
if len(sys.argv) > 2:
num = int(sys.argv[2])
else:
num = 10000
gen_test(True, num)
else:
gen_test(False) | en | 0.911941 | # should always print 1 / ..., because we don't shuffle the first # 1. dimension is x (right) ! # sides # top/bot # union-find # for debug only # make connected # rooms a, b target := % value to achieve for success # never enough # just in case # use v = 2 as many as needed # small percent that makes enough progress to eventually terminate # input: list of room ids # output: list of exits to use (including keypad) # need this one large # debugging # sign # bottles # shuffle bottle order in dictionary # map # debugging ## just for test generation # check_bottles := checks how easy it is to brute force bottle order | 2.993709 | 3 |
prosodyextruct.py | EmergentSystemLabStudent/Prosodic-DAA | 0 | 6632000 | import os
import re
import pyreaper
import numpy as np
import matplotlib.pyplot as plt
from python_speech_features import delta as delta_mfcc
from speech_feature_extraction import Extractor
from speech_feature_extraction.util import WavLoader
from scipy.io import wavfile
try:
from tqdm import tqdm
except:
def tqdm(x): return x
def get_names(dataset_dir):
names = np.loadtxt("data/" + dataset_dir + "files.txt", dtype=str)
np.savetxt("results/files.txt", names, fmt="%s")
return names
def load_data(name, dataset_dir):
file = "data/" + dataset_dir + name + ".wav"
fs, x = wavfile.read(file)
if x.ndim == 2:
x = x[:,0].copy(order='C')
#x = x[:,1].copy(order='C')
#x = x.mean(axis=0)
return (x,fs)
def load_lab_conv(name, length, fs, pdict, wdict, dataset_dir, wrddist):
phn = np.loadtxt("data/" + dataset_dir + name + ".lab", dtype=[('col1', 'f16'), ('col2', 'f16'), ('col3', 'S10')])
wrd = np.loadtxt("data/" + dataset_dir + name + ".lab2", dtype=[('col1', 'f16'), ('col2', 'f16'), ('col3', 'S10')])
phn_frm = np.empty(length)
lab_len = len(phn)
adj = length / (phn[-1][1] * fs)
prev = 0
for i in range(lab_len):
if i == lab_len - 1:
end = length
else:
end = int(phn[i][1] * fs * adj)
if phn[i][2] in pdict:
phn_frm[prev:end] = pdict[phn[i][2]]
else:
pdict[phn[i][2]] = len(pdict)
phn_frm[prev:end] = pdict[phn[i][2]]
prev = end
wrd_frm = np.empty(length)
lab_len = len(wrd)
adj = length / (wrd[-1][1] * fs)
prev = 0
for i in range(len(wrd)):
if i == lab_len - 1:
end = length
else:
end = int(wrd[i][1] * fs * adj)
if wrd[i][2] in wdict:
wrddist[wdict[wrd[i][2]]]+=1
wrd_frm[prev:end] = wdict[wrd[i][2]]
else:
wdict[wrd[i][2]] = len(wdict)
wrd_frm[prev:end] = wdict[wrd[i][2]]
wrddist[wdict[wrd[i][2]]]+=1
prev = end + 1
return (phn_frm, wrd_frm, pdict, wdict)
def plotfig(name,x,fs,f0,sil):
time = len(x)/fs
reaper_time = np.linspace(0, time, len(f0))
plt.clf()
plt.figure(figsize=(16, 9), dpi=100)
ax1 = plt.subplot2grid((5,1), (0,0))
ax2 = plt.subplot2grid((5,1), (1,0))
ax3 = plt.subplot2grid((5,1), (2,0))
ax4 = plt.subplot2grid((5,1), (3,0))
ax5 = plt.subplot2grid((5,1), (4,0))
ax1.set_title('spectrogram')
ax1.set_ylabel('frequency')
pxx, freqs, bins, im = ax1.specgram(x, Fs=fs)
ax2.set_title('f0 contour')
ax2.set_ylabel('frequency')
ax2.set_xlim(0, np.max(time))
ax2.plot(reaper_time, f0[:,0], linewidth=1)
ax2.set_ylim(0, )
ax3.set_title('f0 delta')
ax3.set_ylabel('f0 delta')
ax3.set_xlim(0, np.max(time))
ax3.plot(reaper_time, f0[:,1], linewidth=1)
ax4.set_title('f0 delta delta')
ax4.set_ylabel('f0 delta delta')
ax4.set_xlim(0, np.max(time))
ax4.plot(reaper_time, f0[:,2], linewidth=1)
ax5.set_title('silent interval')
ax5.set_xlabel('time [sec]')
ax5.set_ylabel('length [msec]')
ax5.set_xlim(0, np.max(time))
ax5.plot(reaper_time, sil, linewidth=1)
ax2.set_ylim(0, )
plt.tight_layout()
plt.savefig("results/figures/" + name + ".png")
plt.close()
def delta(sdata, window = 1, order = 1):
data = np.pad(sdata, (window, window), mode='constant', constant_values=-1)
#data = np.pad(sdata, (window, window), mode='edge')
difdata = np.zeros(len(sdata))
for i in range(len(sdata)):
difdata[i] = np.dot(np.arange(-window, window+1), data[i : i+2*window+1]) / (2 * sum([j**2 for j in range(1, window+1)]))
if order > 1:
difdata = np.vstack((difdata, delta(sdata=difdata, window=window, order=order-1)))
return difdata
def sil_cut(sdata, phn, wrd, fs, sil_len = 0.2, sil_thr = -16, sil_edg = 0.01):
data_len = len(sdata)
sil_feature = np.zeros(data_len)
sil_len = int(sil_len * fs)
if sil_len > data_len or sil_len < sil_edg:
return (sdata, sil_feature, phn, wrd)
if sil_thr != None:
sil_thr = (10 ** (sil_thr/10)) * sdata.max()
else:
print(sdata.min(), (10 ** (-16/10)) * sdata.max())
sil_thr = 10
sil_det = np.where(sdata <= sil_thr)
if not sil_det:
return (sdata, sil_feature, phn, wrd)
sil_int = []
start = sil_det[0][0]
prev = sil_det[0][0]
cont = 0
sil_det_len = len(sil_det[0])
for i in range(sil_det_len):
if sil_det[0][i] - prev != 1 or i == sil_det_len - 1:
if cont == 1:
sil_int.insert(0, [start, sil_det[0][i]])
cont = 0
start = sil_det[0][i]
elif cont == 0 and (sil_det[0][i] - start) >= sil_len:
cont = 1
prev = sil_det[0][i]
if not sil_int:
return (sdata, sil_feature, phn, wrd)
sil_edg = int(sil_edg * fs)
data = sdata
for i, j in sil_int:
if i != 0:
i += sil_edg
data = np.delete(data, range(i,j+1))
sil_feature = np.delete(sil_feature, range(i,j+1))
phn = np.delete(phn, range(i,j+1))
wrd = np.delete(wrd, range(i,j+1))
if i != 0:
sil_feature[i - 1] = (j+1 - i) / fs
sil_feature[-1] = 0
return (data, sil_feature, phn, wrd)
def silent_fit(silent, fs, frame_period=0.01, window_len=0.025):
window_len *= fs
frame_period *= fs
silent_fit = []
if int(frame_period - ((len(silent) - window_len) % frame_period)) != frame_period:
silent = np.pad(silent, (0, int(frame_period - ((len(silent) - window_len) % frame_period))), mode='constant', constant_values=0)
for i in range(int((len(silent) - window_len + frame_period) / frame_period)):
silent_fit = np.append(silent_fit, np.sum(silent[int(i * frame_period):int(i * frame_period + window_len - 1)]))
return silent_fit
def label_fit(phn, wrd, fs, frame_period=0.01, window_len=0.025):
window_len *= fs
frame_period *= fs
if int(frame_period - ((len(phn) - window_len) % frame_period)) != frame_period:
phn = np.pad(phn, (0, int(frame_period - ((len(phn) - window_len) % frame_period))), mode='edge')
wrd = np.pad(wrd, (0, int(frame_period - ((len(wrd) - window_len) % frame_period))), mode='edge')
phn_fit = []
wrd_fit = []
for i in range(int((len(phn) - window_len + frame_period) / frame_period)):
phn_fit = np.append(phn_fit, phn[int(i * frame_period + (window_len / 2))])
wrd_fit = np.append(wrd_fit, wrd[int(i * frame_period + (window_len / 2))])
return phn_fit, wrd_fit
if not os.path.exists("results"):
os.mkdir("results")
# if not os.path.exists("results/WAVE"):
# os.mkdir("results/WAVE")
# if not os.path.exists("results/figures"):
# os.mkdir("results/figures")
dataset_dir = "aioi_dataset/"
extractor = Extractor(WavLoader)
names = get_names(dataset_dir)
pdict = {}
wdict = {}
mfcc = {}
mfccd = {}
mfccdd = {}
f0dd = {}
silent = {}
phn_lab = {}
wrd_lab = {}
f0dd_max = 0
sil_max = 0
wrddist = np.zeros(50)
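# Per utterance: cut silent intervals, align labels and silence lengths to the MFCC frame grid, then extract MFCC (+deltas), F0 delta-delta and silence features.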
for name in tqdm(names):
y,fs = load_data(name, dataset_dir)
phn, wrd, pdict, wdict = load_lab_conv(name, len(y), fs, pdict, wdict, dataset_dir, wrddist)
x, sil, phn, wrd = sil_cut(y, phn, wrd, fs, sil_len=0.01, sil_thr=-8, sil_edg=0) #aioi_dataset
#x, sil, phn, wrd = sil_cut(y, phn, wrd, fs, sil_len=0.03, sil_thr=-24, sil_edg=0) #murakami_dataset
#x, sil, phn, wrd = sil_cut(y, phn, wrd, fs, sil_len=0.35, sil_thr=-10, sil_edg=0.15) #murakami_dataset
sil = silent_fit(sil, fs, frame_period=0.01, window_len=0.025)
phn, wrd = label_fit(phn, wrd, fs, frame_period=0.01, window_len=0.025)
pm_times, pm, f0_times, f0, corr = pyreaper.reaper(x, fs, minf0=40.0, maxf0=300.0, frame_period=0.01)
f0 = np.pad(f0, (0, len(sil)-len(f0)), 'constant')
f0_delta = delta(sdata = f0, window = 2, order=2)
s = extractor._mfcc_cord(x, fs)
if f0dd_max < f0_delta[1].max():
f0dd_max = f0_delta[1].max()
if sil_max < sil.max():
sil_max = sil.max()
d = delta_mfcc(s, 2)
dd = delta_mfcc(d, 2)
mfcc[name] = s
mfccd[name] = d
mfccdd[name] = dd
phn_lab[name] = phn
wrd_lab[name] = wrd
silent[name] = sil
f0dd[name] = f0_delta[1]
check = s.shape[0]
if check != d.shape[0] or check != dd.shape[0] or check != phn.shape[0] or check != wrd.shape[0] or check != sil.shape[0] or check != f0_delta[1].shape[0]:
print(name, s.shape, d.shape, dd.shape, phn.shape, wrd.shape, sil.shape, f0_delta[1].shape)
assert 0
# wavfile.write("results/WAVE/" + name + ".wav", fs, x)
# plotfig(name, x, fs, np.vstack((f0, f0_delta)).T, sil)
print(pdict, wdict, wrddist)
for key in names:
f0dd[key][np.where(f0dd[key] < 0)] = 0
if f0dd_max > 0:
f0dd[key] /= f0dd_max
if sil_max > 0:
silent[key] /= sil_max
silent[key][-1] = 1
np.savez("results/mfcc_12dim.npz", **mfcc)
np.savez("results/mfcc_delta_12dim.npz", **mfccd)
np.savez("results/mfcc_delta_delta_12dim.npz", **mfccdd)
np.savez("results/phoneme_label.npz", **phn_lab)
np.savez("results/word_label.npz", **wrd_lab)
np.savez("results/silent_feature.npz", **silent)
np.savez("results/f0_delta_delta.npz", **f0dd)
| import os
import re
import pyreaper
import numpy as np
import matplotlib.pyplot as plt
from python_speech_features import delta as delta_mfcc
from speech_feature_extraction import Extractor
from speech_feature_extraction.util import WavLoader
from scipy.io import wavfile
try:
from tqdm import tqdm
except:
def tqdm(x): return x
def get_names(dataset_dir):
names = np.loadtxt("data/" + dataset_dir + "files.txt", dtype=str)
np.savetxt("results/files.txt", names, fmt="%s")
return names
def load_data(name, dataset_dir):
file = "data/" + dataset_dir + name + ".wav"
fs, x = wavfile.read(file)
if x.ndim == 2:
x = x[:,0].copy(order='C')
#x = x[:,1].copy(order='C')
#x = x.mean(axis=0)
return (x,fs)
def load_lab_conv(name, length, fs, pdict, wdict, dataset_dir, wrddist):
phn = np.loadtxt("data/" + dataset_dir + name + ".lab", dtype=[('col1', 'f16'), ('col2', 'f16'), ('col3', 'S10')])
wrd = np.loadtxt("data/" + dataset_dir + name + ".lab2", dtype=[('col1', 'f16'), ('col2', 'f16'), ('col3', 'S10')])
phn_frm = np.empty(length)
lab_len = len(phn)
adj = length / (phn[-1][1] * fs)
prev = 0
for i in range(lab_len):
if i == lab_len - 1:
end = length
else:
end = int(phn[i][1] * fs * adj)
if phn[i][2] in pdict:
phn_frm[prev:end] = pdict[phn[i][2]]
else:
pdict[phn[i][2]] = len(pdict)
phn_frm[prev:end] = pdict[phn[i][2]]
prev = end
wrd_frm = np.empty(length)
lab_len = len(wrd)
adj = length / (wrd[-1][1] * fs)
prev = 0
for i in range(len(wrd)):
if i == lab_len - 1:
end = length
else:
end = int(wrd[i][1] * fs * adj)
if wrd[i][2] in wdict:
wrddist[wdict[wrd[i][2]]]+=1
wrd_frm[prev:end] = wdict[wrd[i][2]]
else:
wdict[wrd[i][2]] = len(wdict)
wrd_frm[prev:end] = wdict[wrd[i][2]]
wrddist[wdict[wrd[i][2]]]+=1
prev = end + 1
return (phn_frm, wrd_frm, pdict, wdict)
def plotfig(name,x,fs,f0,sil):
time = len(x)/fs
reaper_time = np.linspace(0, time, len(f0))
plt.clf()
plt.figure(figsize=(16, 9), dpi=100)
ax1 = plt.subplot2grid((5,1), (0,0))
ax2 = plt.subplot2grid((5,1), (1,0))
ax3 = plt.subplot2grid((5,1), (2,0))
ax4 = plt.subplot2grid((5,1), (3,0))
ax5 = plt.subplot2grid((5,1), (4,0))
ax1.set_title('spectrogram')
ax1.set_ylabel('frequency')
pxx, freqs, bins, im = ax1.specgram(x, Fs=fs)
ax2.set_title('f0 contour')
ax2.set_ylabel('frequency')
ax2.set_xlim(0, np.max(time))
ax2.plot(reaper_time, f0[:,0], linewidth=1)
ax2.set_ylim(0, )
ax3.set_title('f0 delta')
ax3.set_ylabel('f0 delta')
ax3.set_xlim(0, np.max(time))
ax3.plot(reaper_time, f0[:,1], linewidth=1)
ax4.set_title('f0 delta delta')
ax4.set_ylabel('f0 delta delta')
ax4.set_xlim(0, np.max(time))
ax4.plot(reaper_time, f0[:,2], linewidth=1)
ax5.set_title('silent interval')
ax5.set_xlabel('time [sec]')
ax5.set_ylabel('length [msec]')
ax5.set_xlim(0, np.max(time))
ax5.plot(reaper_time, sil, linewidth=1)
ax2.set_ylim(0, )
plt.tight_layout()
plt.savefig("results/figures/" + name + ".png")
plt.close()
def delta(sdata, window = 1, order = 1):
data = np.pad(sdata, (window, window), mode='constant', constant_values=-1)
#data = np.pad(sdata, (window, window), mode='edge')
difdata = np.zeros(len(sdata))
for i in range(len(sdata)):
difdata[i] = np.dot(np.arange(-window, window+1), data[i : i+2*window+1]) / (2 * sum([j**2 for j in range(1, window+1)]))
if order > 1:
difdata = np.vstack((difdata, delta(sdata=difdata, window=window, order=order-1)))
return difdata
def sil_cut(sdata, phn, wrd, fs, sil_len = 0.2, sil_thr = -16, sil_edg = 0.01):
data_len = len(sdata)
sil_feature = np.zeros(data_len)
sil_len = int(sil_len * fs)
if sil_len > data_len or sil_len < sil_edg:
return (sdata, sil_feature, phn, wrd)
if sil_thr != None:
sil_thr = (10 ** (sil_thr/10)) * sdata.max()
else:
print(sdata.min(), (10 ** (-16/10)) * sdata.max())
sil_thr = 10
sil_det = np.where(sdata <= sil_thr)
if not sil_det:
return (sdata, sil_feature, phn, wrd)
sil_int = []
start = sil_det[0][0]
prev = sil_det[0][0]
cont = 0
sil_det_len = len(sil_det[0])
for i in range(sil_det_len):
if sil_det[0][i] - prev != 1 or i == sil_det_len - 1:
if cont == 1:
sil_int.insert(0, [start, sil_det[0][i]])
cont = 0
start = sil_det[0][i]
elif cont == 0 and (sil_det[0][i] - start) >= sil_len:
cont = 1
prev = sil_det[0][i]
if not sil_int:
return (sdata, sil_feature, phn, wrd)
sil_edg = int(sil_edg * fs)
data = sdata
for i, j in sil_int:
if i != 0:
i += sil_edg
data = np.delete(data, range(i,j+1))
sil_feature = np.delete(sil_feature, range(i,j+1))
phn = np.delete(phn, range(i,j+1))
wrd = np.delete(wrd, range(i,j+1))
if i != 0:
sil_feature[i - 1] = (j+1 - i) / fs
sil_feature[-1] = 0
return (data, sil_feature, phn, wrd)
def silent_fit(silent, fs, frame_period=0.01, window_len=0.025):
window_len *= fs
frame_period *= fs
silent_fit = []
if int(frame_period - ((len(silent) - window_len) % frame_period)) != frame_period:
silent = np.pad(silent, (0, int(frame_period - ((len(silent) - window_len) % frame_period))), mode='constant', constant_values=0)
for i in range(int((len(silent) - window_len + frame_period) / frame_period)):
silent_fit = np.append(silent_fit, np.sum(silent[int(i * frame_period):int(i * frame_period + window_len - 1)]))
return silent_fit
def label_fit(phn, wrd, fs, frame_period=0.01, window_len=0.025):
window_len *= fs
frame_period *= fs
if int(frame_period - ((len(phn) - window_len) % frame_period)) != frame_period:
phn = np.pad(phn, (0, int(frame_period - ((len(phn) - window_len) % frame_period))), mode='edge')
wrd = np.pad(wrd, (0, int(frame_period - ((len(wrd) - window_len) % frame_period))), mode='edge')
phn_fit = []
wrd_fit = []
for i in range(int((len(phn) - window_len + frame_period) / frame_period)):
phn_fit = np.append(phn_fit, phn[int(i * frame_period + (window_len / 2))])
wrd_fit = np.append(wrd_fit, wrd[int(i * frame_period + (window_len / 2))])
return phn_fit, wrd_fit
if not os.path.exists("results"):
os.mkdir("results")
# if not os.path.exists("results/WAVE"):
# os.mkdir("results/WAVE")
# if not os.path.exists("results/figures"):
# os.mkdir("results/figures")
dataset_dir = "aioi_dataset/"
extractor = Extractor(WavLoader)
names = get_names(dataset_dir)
pdict = {}
wdict = {}
mfcc = {}
mfccd = {}
mfccdd = {}
f0dd = {}
silent = {}
phn_lab = {}
wrd_lab = {}
f0dd_max = 0
sil_max = 0
wrddist = np.zeros(50)
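# Per utterance: cut silent intervals, align labels and silence lengths to the MFCC frame grid, then extract MFCC (+deltas), F0 delta-delta and silence features.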
for name in tqdm(names):
y,fs = load_data(name, dataset_dir)
phn, wrd, pdict, wdict = load_lab_conv(name, len(y), fs, pdict, wdict, dataset_dir, wrddist)
x, sil, phn, wrd = sil_cut(y, phn, wrd, fs, sil_len=0.01, sil_thr=-8, sil_edg=0) #aioi_dataset
#x, sil, phn, wrd = sil_cut(y, phn, wrd, fs, sil_len=0.03, sil_thr=-24, sil_edg=0) #murakami_dataset
#x, sil, phn, wrd = sil_cut(y, phn, wrd, fs, sil_len=0.35, sil_thr=-10, sil_edg=0.15) #murakami_dataset
sil = silent_fit(sil, fs, frame_period=0.01, window_len=0.025)
phn, wrd = label_fit(phn, wrd, fs, frame_period=0.01, window_len=0.025)
pm_times, pm, f0_times, f0, corr = pyreaper.reaper(x, fs, minf0=40.0, maxf0=300.0, frame_period=0.01)
f0 = np.pad(f0, (0, len(sil)-len(f0)), 'constant')
f0_delta = delta(sdata = f0, window = 2, order=2)
s = extractor._mfcc_cord(x, fs)
if f0dd_max < f0_delta[1].max():
f0dd_max = f0_delta[1].max()
if sil_max < sil.max():
sil_max = sil.max()
d = delta_mfcc(s, 2)
dd = delta_mfcc(d, 2)
mfcc[name] = s
mfccd[name] = d
mfccdd[name] = dd
phn_lab[name] = phn
wrd_lab[name] = wrd
silent[name] = sil
f0dd[name] = f0_delta[1]
check = s.shape[0]
if check != d.shape[0] or check != dd.shape[0] or check != phn.shape[0] or check != wrd.shape[0] or check != sil.shape[0] or check != f0_delta[1].shape[0]:
print(name, s.shape, d.shape, dd.shape, phn.shape, wrd.shape, sil.shape, f0_delta[1].shape)
assert 0
# wavfile.write("results/WAVE/" + name + ".wav", fs, x)
# plotfig(name, x, fs, np.vstack((f0, f0_delta)).T, sil)
print(pdict, wdict, wrddist)
for key in names:
f0dd[key][np.where(f0dd[key] < 0)] = 0
if f0dd_max > 0:
f0dd[key] /= f0dd_max
if sil_max > 0:
silent[key] /= sil_max
silent[key][-1] = 1
np.savez("results/mfcc_12dim.npz", **mfcc)
np.savez("results/mfcc_delta_12dim.npz", **mfccd)
np.savez("results/mfcc_delta_delta_12dim.npz", **mfccdd)
np.savez("results/phoneme_label.npz", **phn_lab)
np.savez("results/word_label.npz", **wrd_lab)
np.savez("results/silent_feature.npz", **silent)
np.savez("results/f0_delta_delta.npz", **f0dd)
| en | 0.219183 | #x = x[:,1].copy(order='C') #x = x.mean(axis=0) #data = np.pad(sdata, (window, window), mode='edge') # if not os.path.exists("results/WAVE"): # os.mkdir("results/WAVE") # if not os.path.exists("results/figures"): # os.mkdir("results/figures") #aioi_dataset #x, sil, phn, wrd = sil_cut(y, phn, wrd, fs, sil_len=0.03, sil_thr=-24, sil_edg=0) #murakami_dataset #x, sil, phn, wrd = sil_cut(y, phn, wrd, fs, sil_len=0.35, sil_thr=-10, sil_edg=0.15) #murakami_dataset # wavfile.write("results/WAVE/" + name + ".wav", fs, x) # plotfig(name, x, fs, np.vstack((f0, f0_delta)).T, sil) | 2.495966 | 2 |
drop-linux/generate_key.py | ddesmond/clarisse-drop | 5 | 6632001 | <filename>drop-linux/generate_key.py
# author: https://github.com/ddesmond/clarisse-drop
# this file generates your personal encryption key.
# after you lose it you lose access to your online repository, so be careful and
# BACKUP THE GENERATED KEY FILE.
import os
from cryptography.fernet import Fernet
if os.path.isfile('keyfile.key'):
print "-------------------------------------------------------"
print "keyfile exists, please delete the old keyfile to reset."
f = open('keyfile.key', 'r')
print f.readline()
print "-------------------------------------------------------"
else:
print "-------------------------------------------------------"
print "creating new keyfile"
key = Fernet.generate_key()
f = open('keyfile.key', 'w')
f.write(key)
f.close()
print "Keyfile written", key
print "-------------------------------------------------------" | <filename>drop-linux/generate_key.py
# author: https://github.com/ddesmond/clarisse-drop
# this file generates your personal encryption key.
# after you lose it you lose access to your online repository, so be careful and
# BACKUP THE GENERATED KEY FILE.
import os
from cryptography.fernet import Fernet
if os.path.isfile('keyfile.key'):
print "-------------------------------------------------------"
print "keyfile exists, please delete the old keyfile to reset."
f = open('keyfile.key', 'r')
print f.readline()
print "-------------------------------------------------------"
else:
print "-------------------------------------------------------"
print "creating new keyfile"
key = Fernet.generate_key()
f = open('keyfile.key', 'w')
f.write(key)
f.close()
print "Keyfile written", key
print "-------------------------------------------------------" | en | 0.76988 | # author: https://github.com/ddesmond/clarisse-drop # this file generates youre personal encryption key. # after you loose it you loose acces to your online repository, so be careful and # BACKUP THE GENERATED KEY FILE. | 3.043741 | 3 |
daiquiri/conesearch/views.py | agy-why/daiquiri | 14 | 6632002 | <gh_stars>10-100
from django.http import HttpResponse
from daiquiri.core.renderers.voresource import VoresourceRenderer
from daiquiri.core.renderers.vosi import AvailabilityRenderer, CapabilitiesRenderer
from .vo import get_resource, get_availability, get_capabilities
def resource(request):
return HttpResponse(VoresourceRenderer().render(get_resource()), content_type="application/xml")
def availability(request):
return HttpResponse(AvailabilityRenderer().render(get_availability()), content_type="application/xml")
def capabilities(request):
return HttpResponse(CapabilitiesRenderer().render(get_capabilities()), content_type="application/xml")
| from django.http import HttpResponse
from daiquiri.core.renderers.voresource import VoresourceRenderer
from daiquiri.core.renderers.vosi import AvailabilityRenderer, CapabilitiesRenderer
from .vo import get_resource, get_availability, get_capabilities
def resource(request):
return HttpResponse(VoresourceRenderer().render(get_resource()), content_type="application/xml")
def availability(request):
return HttpResponse(AvailabilityRenderer().render(get_availability()), content_type="application/xml")
def capabilities(request):
return HttpResponse(CapabilitiesRenderer().render(get_capabilities()), content_type="application/xml") | none | 1 | 1.880977 | 2 |
|
tests/backends/dailymotion_test.py | Diolor/python-social-auth | 1 | 6632003 | import json
from tests.oauth import OAuth2Test
class DailymotionOAuth2Test(OAuth2Test):
backend_path = 'social.backends.dailymotion.DailymotionOAuth2'
user_data_url = 'https://api.dailymotion.com/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'id': 'foobar',
'screenname': 'foobar'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
| import json
from tests.oauth import OAuth2Test
class DailymotionOAuth2Test(OAuth2Test):
backend_path = 'social.backends.dailymotion.DailymotionOAuth2'
user_data_url = 'https://api.dailymotion.com/me/'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'id': 'foobar',
'screenname': 'foobar'
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
| none | 1 | 2.437485 | 2 |
|
code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/group_widget_definition.py | Valisback/hiring-engineers | 0 | 6632004 | <reponame>Valisback/hiring-engineers<gh_stars>0
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import (
ModelNormal,
cached_property,
)
def lazy_import():
from datadog_api_client.v1.model.group_widget_definition_type import GroupWidgetDefinitionType
from datadog_api_client.v1.model.widget import Widget
from datadog_api_client.v1.model.widget_layout_type import WidgetLayoutType
from datadog_api_client.v1.model.widget_text_align import WidgetTextAlign
globals()["GroupWidgetDefinitionType"] = GroupWidgetDefinitionType
globals()["Widget"] = Widget
globals()["WidgetLayoutType"] = WidgetLayoutType
globals()["WidgetTextAlign"] = WidgetTextAlign
class GroupWidgetDefinition(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
validations = {}
@cached_property
def openapi_types():
lazy_import()
return {
"background_color": (str,),
"banner_img": (str,),
"layout_type": (WidgetLayoutType,),
"show_title": (bool,),
"title": (str,),
"title_align": (WidgetTextAlign,),
"type": (GroupWidgetDefinitionType,),
"widgets": ([Widget],),
}
attribute_map = {
"layout_type": "layout_type",
"type": "type",
"widgets": "widgets",
"background_color": "background_color",
"banner_img": "banner_img",
"show_title": "show_title",
"title": "title",
"title_align": "title_align",
}
read_only_vars = {}
def __init__(self, layout_type, type, widgets, *args, **kwargs):
"""GroupWidgetDefinition - a model defined in OpenAPI
Args:
layout_type (WidgetLayoutType):
type (GroupWidgetDefinitionType):
widgets ([Widget]): List of widget groups.
Keyword Args:
background_color (str): [optional] Background color of the group title.
banner_img (str): [optional] URL of image to display as a banner for the group.
show_title (bool): [optional] Whether to show the title or not. If omitted the server will use the default value of True.
title (str): [optional] Title of the widget.
title_align (WidgetTextAlign): [optional]
"""
super().__init__(kwargs)
self._check_pos_args(args)
self.layout_type = layout_type
self.type = type
self.widgets = widgets
@classmethod
def _from_openapi_data(cls, layout_type, type, widgets, *args, **kwargs):
"""Helper creating a new instance from a response."""
self = super(GroupWidgetDefinition, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
self.layout_type = layout_type
self.type = type
self.widgets = widgets
return self
| # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import (
ModelNormal,
cached_property,
)
def lazy_import():
from datadog_api_client.v1.model.group_widget_definition_type import GroupWidgetDefinitionType
from datadog_api_client.v1.model.widget import Widget
from datadog_api_client.v1.model.widget_layout_type import WidgetLayoutType
from datadog_api_client.v1.model.widget_text_align import WidgetTextAlign
globals()["GroupWidgetDefinitionType"] = GroupWidgetDefinitionType
globals()["Widget"] = Widget
globals()["WidgetLayoutType"] = WidgetLayoutType
globals()["WidgetTextAlign"] = WidgetTextAlign
class GroupWidgetDefinition(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
validations = {}
@cached_property
def openapi_types():
lazy_import()
return {
"background_color": (str,),
"banner_img": (str,),
"layout_type": (WidgetLayoutType,),
"show_title": (bool,),
"title": (str,),
"title_align": (WidgetTextAlign,),
"type": (GroupWidgetDefinitionType,),
"widgets": ([Widget],),
}
attribute_map = {
"layout_type": "layout_type",
"type": "type",
"widgets": "widgets",
"background_color": "background_color",
"banner_img": "banner_img",
"show_title": "show_title",
"title": "title",
"title_align": "title_align",
}
read_only_vars = {}
def __init__(self, layout_type, type, widgets, *args, **kwargs):
"""GroupWidgetDefinition - a model defined in OpenAPI
Args:
layout_type (WidgetLayoutType):
type (GroupWidgetDefinitionType):
widgets ([Widget]): List of widget groups.
Keyword Args:
background_color (str): [optional] Background color of the group title.
banner_img (str): [optional] URL of image to display as a banner for the group.
show_title (bool): [optional] Whether to show the title or not. If omitted the server will use the default value of True.
title (str): [optional] Title of the widget.
title_align (WidgetTextAlign): [optional]
"""
super().__init__(kwargs)
self._check_pos_args(args)
self.layout_type = layout_type
self.type = type
self.widgets = widgets
@classmethod
def _from_openapi_data(cls, layout_type, type, widgets, *args, **kwargs):
"""Helper creating a new instance from a response."""
self = super(GroupWidgetDefinition, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
self.layout_type = layout_type
self.type = type
self.widgets = widgets
return self | en | 0.660073 | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2019-Present Datadog, Inc. NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. GroupWidgetDefinition - a model defined in OpenAPI Args: layout_type (WidgetLayoutType): type (GroupWidgetDefinitionType): widgets ([Widget]): List of widget groups. Keyword Args: background_color (str): [optional] Background color of the group title. banner_img (str): [optional] URL of image to display as a banner for the group. show_title (bool): [optional] Whether to show the title or not. If omitted the server will use the default value of True. title (str): [optional] Title of the widget. title_align (WidgetTextAlign): [optional] Helper creating a new instance from a response. | 1.679755 | 2 |
vnpy/trader/app/riskManager/__init__.py | ghjan/vnpy | 34 | 6632005 | <reponame>ghjan/vnpy
# encoding: UTF-8
from vnpy.trader.app.riskManager.rmEngine import RmEngine
from vnpy.trader.app.riskManager.uiRmWidget import RmEngineManager
appName = 'RiskManager'
appDisplayName = u'风险管理'
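# appDisplayName is the Chinese label for "Risk Management" shown in the UI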
appEngine = RmEngine
appWidget = RmEngineManager
appIco = 'rm.ico' | # encoding: UTF-8
from vnpy.trader.app.riskManager.rmEngine import RmEngine
from vnpy.trader.app.riskManager.uiRmWidget import RmEngineManager
appName = 'RiskManager'
appDisplayName = u'风险管理'
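# appDisplayName is the Chinese label for "Risk Management" shown in the UI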
appEngine = RmEngine
appWidget = RmEngineManager
appIco = 'rm.ico' | en | 0.156115 | # encoding: UTF-8 | 1.123078 | 1 |
zapier/exceptions.py | yunojuno/django-zapier-trigger | 1 | 6632006 | <gh_stars>1-10
class TokenAuthError(Exception):
"""Base token authentication/authorisation error."""
pass
class MissingTokenHeader(TokenAuthError):
"""Request is missing the X-Api-Token header."""
pass
class UnknownToken(TokenAuthError):
"""Token does not exist."""
pass
class TokenUserError(TokenAuthError):
"""User is inactive, or is not the same as request.user."""
pass
class TokenScopeError(TokenAuthError):
"""Token does not have the valid scope."""
pass
class JsonResponseError(Exception):
"""Response does not contain valid JSON."""
pass
| class TokenAuthError(Exception):
"""Base token authentication/authorisation error."""
pass
class MissingTokenHeader(TokenAuthError):
"""Request is missing the X-Api-Token header."""
pass
class UnknownToken(TokenAuthError):
"""Token does not exist."""
pass
class TokenUserError(TokenAuthError):
"""User is inactive, or is not the same as request.user."""
pass
class TokenScopeError(TokenAuthError):
"""Token does not have the valid scope."""
pass
class JsonResponseError(Exception):
"""Response does not contain valid JSON."""
pass | en | 0.856954 | Base token authentication/authorisation error. Request is missing the X-Api-Token header. Token does not exist. User is inactive, or is not the same as request.user. Token does not have the valid scope. Response does not contain valid JSON. | 2.834357 | 3 |
mupit/write_json_probands.py | jeremymcrae/mup | 4 | 6632007 | """
Copyright (c) 2016 Genome Research Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import json
from mupit import LOF_CQ, MISSENSE_CQ
def write_probands_by_gene(de_novos, fp):
""" Write a json-formatted list of probands per gene
We want a correctly-formatted JSON list of probands per gene, for analysis
of phenotype similarity between probands, see
https://github.com/jeremymcrae/hpo_similarity.
Args:
de_novos: dataframe of de-novo mutations per proband
fp: path or file handle to write the json data to.
"""
de_novos = de_novos.copy()
de_novos = de_novos[de_novos['consequence'].isin(LOF_CQ | MISSENSE_CQ)]
probands_by_gene = {}
for (gene, group) in de_novos.groupby("hgnc", sort=True):
probands_by_gene[gene] = sorted(group["person_id"])
try:
with open(fp, 'w') as handle:
json.dump(probands_by_gene, handle, indent=True)
except TypeError:
json.dump(probands_by_gene, fp, indent=True)
| """
Copyright (c) 2016 Genome Research Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import json
from mupit import LOF_CQ, MISSENSE_CQ
def write_probands_by_gene(de_novos, fp):
""" Write a json-formatted list of probands per gene
We want a correctly-formatted JSON list of probands per gene, for analysis
of phenotype similarity between probands, see
https://github.com/jeremymcrae/hpo_similarity.
Args:
de_novos: dataframe of de-novo mutations per proband
fp: path or file handle to write the json data to.
"""
de_novos = de_novos.copy()
de_novos = de_novos[de_novos['consequence'].isin(LOF_CQ | MISSENSE_CQ)]
probands_by_gene = {}
for (gene, group) in de_novos.groupby("hgnc", sort=True):
probands_by_gene[gene] = sorted(group["person_id"])
try:
with open(fp, 'w') as handle:
json.dump(probands_by_gene, handle, indent=True)
except TypeError:
json.dump(probands_by_gene, fp, indent=True)
| en | 0.740675 | Copyright (c) 2016 Genome Research Ltd. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Write a json-formatted list of probands per gene We want a correctly-formatted JSON list of probands per gene, for analysis of phenotype similarity between probands, see https://github.com/jeremymcrae/hpo_similarity. Args: de_novos: dataframe of de-novo mutations per proband fp: path or file handle to write the json data to. | 2.082495 | 2 |
main.py | Brfrance/energydatahack | 0 | 6632008 | """Main. Entry point of the program."""
import CNN
import tensorflow as tf
from CNN_data import get_data_sets
physical_devices = tf.config.list_physical_devices('GPU')
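# Allocate GPU memory on demand instead of reserving it all upfront.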
tf.config.experimental.set_memory_growth(physical_devices[0], True)
TrainX, Trainy, ValidX, Validy, TestX, Testy = get_data_sets(CNN.input_length)
cnn = CNN.compile_and_fit(TrainX, Trainy, ValidX, Validy, TestX, Testy)
print(cnn)
| """Main. Point d'entrer du programme."""
import CNN
import tensorflow as tf
from CNN_data import get_data_sets
physical_devices = tf.config.list_physical_devices('GPU')
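# Allocate GPU memory on demand instead of reserving it all upfront.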
tf.config.experimental.set_memory_growth(physical_devices[0], True)
TrainX, Trainy, ValidX, Validy, TestX, Testy = get_data_sets(CNN.input_length)
cnn = CNN.compile_and_fit(TrainX, Trainy, ValidX, Validy, TestX, Testy)
print(cnn)
| fr | 0.924255 | Main. Point d'entrer du programme. | 2.648436 | 3 |
tests/conftest.py | dued/andamio-base | 0 | 6632009 | import os
from pathlib import Path
import pytest
import yaml
from plumbum import local
from plumbum.cmd import git
with open("copier.yml") as copier_fd:
COPIER_SETTINGS = yaml.safe_load(copier_fd)
# Different tests for different Odoo versions
OLDEST_SUPPORTED_ODOO_VERSION = 8.0
ALL_ODOO_VERSIONS = tuple(COPIER_SETTINGS["odoo_version"]["choices"])
SUPPORTED_ODOO_VERSIONS = tuple(
v for v in ALL_ODOO_VERSIONS if v >= OLDEST_SUPPORTED_ODOO_VERSION
)
LAST_ODOO_VERSION = max(SUPPORTED_ODOO_VERSIONS)
SELECTED_ODOO_VERSIONS = (
frozenset(map(float, os.environ.get("SELECTED_ODOO_VERSIONS", "").split()))
or ALL_ODOO_VERSIONS
)
@pytest.fixture(params=ALL_ODOO_VERSIONS)
def any_odoo_version(request) -> float:
"""Devuelve cualquier version odoo utilizable."""
if request.param not in SELECTED_ODOO_VERSIONS:
pytest.skip("La version odoo no esta en el rango seleccionado")
return request.param
@pytest.fixture(params=SUPPORTED_ODOO_VERSIONS)
def supported_odoo_version(request) -> float:
"""Devuelve cualquier version odoo soportada."""
if request.param not in SELECTED_ODOO_VERSIONS:
pytest.skip("La version de Odoo Soportada no esta en el rango seleccionado")
return request.param
@pytest.fixture()
def cloned_template(tmp_path_factory):
"""Este repositorio clonado a un destino temporal.
El clon incluirá cambios sucios y tendrá una etiqueta de 'prueba' en su HEAD.
Devuelve el `Path` local al clon.
"""
patches = [git("diff", "--cached"), git("diff")]
with tmp_path_factory.mktemp("cloned_template_") as dirty_template_clone:
git("clone", ".", dirty_template_clone)
with local.cwd(dirty_template_clone):
for patch in patches:
if patch:
(git["apply", "--reject"] << patch)()
git("add", ".")
git(
"commit",
"--author=Test<test@test>",
"--message=dirty changes",
"--no-verify",
)
git("tag", "--force", "test")
yield dirty_template_clone
@pytest.fixture()
def versionless_odoo_autoskip(request):
"""Accesorio para omitir automaticamente las pruebas en versiones anteriores de odoo."""
is_version_specific_test = (
"any_odoo_version" in request.fixturenames
or "supported_odoo_version" in request.fixturenames
)
if LAST_ODOO_VERSION not in SELECTED_ODOO_VERSIONS and not is_version_specific_test:
pytest.skip(
"test version-independent en la sesión de prueba de odoo versionada antigua"
)
def teardown_function(function):
pre_commit_log = (
Path("~") / ".cache" / "pre-commit" / "pre-commit.log"
).expanduser()
if pre_commit_log.is_file():
print(pre_commit_log.read_text())
pre_commit_log.unlink()
| import os
from pathlib import Path
import pytest
import yaml
from plumbum import local
from plumbum.cmd import git
with open("copier.yml") as copier_fd:
COPIER_SETTINGS = yaml.safe_load(copier_fd)
# Different tests for different Odoo versions
OLDEST_SUPPORTED_ODOO_VERSION = 8.0
ALL_ODOO_VERSIONS = tuple(COPIER_SETTINGS["odoo_version"]["choices"])
SUPPORTED_ODOO_VERSIONS = tuple(
v for v in ALL_ODOO_VERSIONS if v >= OLDEST_SUPPORTED_ODOO_VERSION
)
LAST_ODOO_VERSION = max(SUPPORTED_ODOO_VERSIONS)
SELECTED_ODOO_VERSIONS = (
frozenset(map(float, os.environ.get("SELECTED_ODOO_VERSIONS", "").split()))
or ALL_ODOO_VERSIONS
)
@pytest.fixture(params=ALL_ODOO_VERSIONS)
def any_odoo_version(request) -> float:
"""Devuelve cualquier version odoo utilizable."""
if request.param not in SELECTED_ODOO_VERSIONS:
pytest.skip("La version odoo no esta en el rango seleccionado")
return request.param
@pytest.fixture(params=SUPPORTED_ODOO_VERSIONS)
def supported_odoo_version(request) -> float:
"""Devuelve cualquier version odoo soportada."""
if request.param not in SELECTED_ODOO_VERSIONS:
pytest.skip("La version de Odoo Soportada no esta en el rango seleccionado")
return request.param
@pytest.fixture()
def cloned_template(tmp_path_factory):
"""Este repositorio clonado a un destino temporal.
El clon incluirá cambios sucios y tendrá una etiqueta de 'prueba' en su HEAD.
Devuelve el `Path` local al clon.
"""
patches = [git("diff", "--cached"), git("diff")]
with tmp_path_factory.mktemp("cloned_template_") as dirty_template_clone:
git("clone", ".", dirty_template_clone)
with local.cwd(dirty_template_clone):
for patch in patches:
if patch:
(git["apply", "--reject"] << patch)()
git("add", ".")
git(
"commit",
"--author=Test<test@test>",
"--message=dirty changes",
"--no-verify",
)
git("tag", "--force", "test")
yield dirty_template_clone
@pytest.fixture()
def versionless_odoo_autoskip(request):
"""Accesorio para omitir automaticamente las pruebas en versiones anteriores de odoo."""
is_version_specific_test = (
"any_odoo_version" in request.fixturenames
or "supported_odoo_version" in request.fixturenames
)
if LAST_ODOO_VERSION not in SELECTED_ODOO_VERSIONS and not is_version_specific_test:
pytest.skip(
"test version-independent en la sesión de prueba de odoo versionada antigua"
)
def teardown_function(function):
pre_commit_log = (
Path("~") / ".cache" / "pre-commit" / "pre-commit.log"
).expanduser()
if pre_commit_log.is_file():
print(pre_commit_log.read_text())
pre_commit_log.unlink()
| es | 0.930951 | # Diferentes tests diferentes versiones de odoo Devuelve cualquier version odoo utilizable. Devuelve cualquier version odoo soportada. Este repositorio clonado a un destino temporal. El clon incluirá cambios sucios y tendrá una etiqueta de 'prueba' en su HEAD. Devuelve el `Path` local al clon. Accesorio para omitir automaticamente las pruebas en versiones anteriores de odoo. | 2.120951 | 2 |
2021/Day 2 - Dive!/1.py | Ashwin-op/Advent_of_Code | 2 | 6632010 | MOVEMENTS = {
'forward': lambda x, y, a: (x + a, y),
'down': lambda x, y, a: (x, y - a),
'up': lambda x, y, a: (x, y + a),
}
with open("input.txt") as fp:
movements = []
for line in fp.readlines():
direction, amount = line.strip().split()
movements.append((direction, int(amount)))
x, y = 0, 0
for direction, amount in movements:
x, y = MOVEMENTS[direction](x, y, amount)
print(abs(x) * abs(y))
| MOVEMENTS = {
'forward': lambda x, y, a: (x + a, y),
'down': lambda x, y, a: (x, y - a),
'up': lambda x, y, a: (x, y + a),
}
with open("input.txt") as fp:
movements = []
for line in fp.readlines():
direction, amount = line.strip().split()
movements.append((direction, int(amount)))
x, y = 0, 0
for direction, amount in movements:
x, y = MOVEMENTS[direction](x, y, amount)
print(abs(x) * abs(y))
| none | 1 | 3.199081 | 3 |
|
livemark/plugins/links/plugin.py | AyrtonB/livemark | 73 | 6632011 | <gh_stars>10-100
from copy import deepcopy
from ...plugin import Plugin
class LinksPlugin(Plugin):
identity = "links"
priority = 10
validity = {
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {"type": "string"},
"path": {"type": "string"},
},
},
},
},
}
# Context
@property
def items(self):
github = self.document.get_plugin("github")
items = deepcopy(self.config.get("items", []))
if github:
if github.report_url:
items.append({"name": "Report", "path": github.report_url})
if github.fork_url:
items.append({"name": "Fork", "path": github.fork_url})
if github.edit_url:
items.append({"name": "Edit", "path": github.edit_url})
return items
# Process
def process_markup(self, markup):
if self.items:
markup.add_style("style.css")
markup.add_markup("markup.html", target="#livemark-right")
| from copy import deepcopy
from ...plugin import Plugin
class LinksPlugin(Plugin):
identity = "links"
priority = 10
validity = {
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {"type": "string"},
"path": {"type": "string"},
},
},
},
},
}
# Context
@property
def items(self):
github = self.document.get_plugin("github")
items = deepcopy(self.config.get("items", []))
if github:
if github.report_url:
items.append({"name": "Report", "path": github.report_url})
if github.fork_url:
items.append({"name": "Fork", "path": github.fork_url})
if github.edit_url:
items.append({"name": "Edit", "path": github.edit_url})
return items
# Process
def process_markup(self, markup):
if self.items:
markup.add_style("style.css")
markup.add_markup("markup.html", target="#livemark-right") | en | 0.408743 | # Context # Process | 2.541228 | 3 |
neadva/Util.py | andrew-azarov/neadva | 0 | 6632012 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import hashlib
import hmac
import binascii
import base64
import os
if os.name == "nt":
import _locale
_locale._gdl_bak = _locale._getdefaultlocale
_locale._getdefaultlocale = (lambda *args: (_locale._gdl_bak()[0], 'utf8'))
from typing import Union
from Crypto.Cipher import AES
def crc32(text: str) -> str:
return str(binascii.crc32(text.encode()))
def sha512(text: str) -> str:
return hashlib.sha512(text.encode()).hexdigest().upper()
def sha3_512(text: Union[bytes, str]) -> str:
#hashout = hmac.new(b'', b'', 'sha3_512')
hashout = hashlib.sha3_512()
if isinstance(text, bytes):
hashout.update(text)
else:
hashout.update(text.encode())
return hashout.hexdigest().upper()
def aes128_decrypt(text: str, key: str) -> str:
# A workaround because NAV uses PHP version of unsafe openssl based AES-128-ECB which is not directly compatible with raw AES
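    # unpad strips PKCS#7-style padding: the last byte encodes how many padding characters to drop.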
def unpad(s): return s[0:-ord(s[-1])]
decobj = AES.new(key.encode()[:16], AES.MODE_ECB)
data = decobj.decrypt(base64.decodebytes(text.encode("utf8")))
return unpad(data.decode("utf8"))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import hashlib
import hmac
import binascii
import base64
import os
if os.name == "nt":
import _locale
_locale._gdl_bak = _locale._getdefaultlocale
_locale._getdefaultlocale = (lambda *args: (_locale._gdl_bak()[0], 'utf8'))
from typing import Union
from Crypto.Cipher import AES
def crc32(text: str) -> str:
return str(binascii.crc32(text.encode()))
def sha512(text: str) -> str:
return hashlib.sha512(text.encode()).hexdigest().upper()
def sha3_512(text: Union[bytes, str]) -> str:
#hashout = hmac.new(b'', b'', 'sha3_512')
hashout = hashlib.sha3_512()
if isinstance(text, bytes):
hashout.update(text)
else:
hashout.update(text.encode())
return hashout.hexdigest().upper()
def aes128_decrypt(text: str, key: str) -> str:
# A workaround because NAV uses PHP version of unsafe openssl based AES-128-ECB which is not directly compatible with raw AES
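    # unpad strips PKCS#7-style padding: the last byte encodes how many padding characters to drop.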
def unpad(s): return s[0:-ord(s[-1])]
decobj = AES.new(key.encode()[:16], AES.MODE_ECB)
data = decobj.decrypt(base64.decodebytes(text.encode("utf8")))
return unpad(data.decode("utf8"))
| en | 0.801508 | #!/usr/bin/env python # -*- coding: utf-8 -*- #hashout = hmac.new(b'', b'', 'sha3_512') # A workaround because NAV uses PHP version of unsafe openssl based AES-128-ECB which is not directly compatible with raw AES | 2.590737 | 3 |
src/plot_spec.py | vitrioil/Speech-Separation | 55 | 6632013 | <filename>src/plot_spec.py
import sys
import librosa
import librosa.display
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
from src import generate_audio
from src.models import Audio_Visual_Fusion as AVFusion
from src.loader import convert_to_spectrogram
def _plot(i, spec, title):
if len(spec.shape) == 3:
spec = spec[:, :, 0] + 1j*spec[:, :, 1]
plt.subplot(3, 2, i)
librosa.display.specshow(spec)
plt.title(title)
def plot_all(mixed_spec, pred_first_spec, pred_second_spec, true_first_spec, true_second_spec):
_plot(1, pred_first_spec, "First Prediction")
_plot(2, true_first_spec, "True First")
_plot(3, pred_second_spec, "Second Prediction")
_plot(4, true_second_spec, "True Second")
_plot(5, mixed_spec, "Mixed Input")
plt.show()
def plot(spec):
plt.subplot(3, 1, 1)
spec = spec[:, :, 0] + 1j*spec[:, :, 1]
librosa.display.specshow(spec)
plt.show()
def plot_row(model, df, row_idx, device):
row = df.iloc[row_idx]
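    # Columns 2 and 3 of the row hold the two clean speaker audio paths; the last column holds the mixed audio path.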
mixed_spec = convert_to_spectrogram(librosa.load(row[-1], sr=16_000)[0])
first_spec = convert_to_spectrogram(librosa.load(row[2], sr=16_000)[0])
second_spec = convert_to_spectrogram(librosa.load(row[3], sr=16_000)[0])
audio = row[-1]
video = [row[2], row[3]]
video = [i.replace("audio", "embed").replace("wav", "npy") for i in video]
audio = Path(audio)
video = [Path(i) for i in video]
output_audios = generate_audio(model, audio, video, device=device, save=False, return_spectrograms=True)
first = output_audios[0]
second = output_audios[1]
plot_all(mixed_spec, first, second, first_spec, second_spec)
if __name__ == "__main__":
import torch
from tqdm import trange
from pathlib import Path
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--file", default=Path("train.csv"), type=Path)
parser.add_argument("--n", default=3, type=int)
parser.add_argument("--model-path", default=Path("last_full.pth"), type=Path)
args = parser.parse_args()
df = pd.read_csv(args.file)
device = torch.device("cuda")
model = AVFusion().to(device)
model.load_state_dict(torch.load(args.model_path)["model_state_dict"])
for i in trange(args.n):
plot_row(model, df, i, device)
| <filename>src/plot_spec.py
import sys
import librosa
import librosa.display
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
from src import generate_audio
from src.models import Audio_Visual_Fusion as AVFusion
from src.loader import convert_to_spectrogram
def _plot(i, spec, title):
if len(spec.shape) == 3:
spec = spec[:, :, 0] + 1j*spec[:, :, 1]
plt.subplot(3, 2, i)
librosa.display.specshow(spec)
plt.title(title)
def plot_all(mixed_spec, pred_first_spec, pred_second_spec, true_first_spec, true_second_spec):
_plot(1, pred_first_spec, "First Prediction")
_plot(2, true_first_spec, "True First")
_plot(3, pred_second_spec, "Second Prediction")
_plot(4, true_second_spec, "True Second")
_plot(5, mixed_spec, "Mixed Input")
plt.show()
def plot(spec):
plt.subplot(3, 1, 1)
spec = spec[:, :, 0] + 1j*spec[:, :, 1]
librosa.display.specshow(spec)
plt.show()
def plot_row(model, df, row_idx, device):
row = df.iloc[row_idx]
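    # Columns 2 and 3 of the row hold the two clean speaker audio paths; the last column holds the mixed audio path.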
mixed_spec = convert_to_spectrogram(librosa.load(row[-1], sr=16_000)[0])
first_spec = convert_to_spectrogram(librosa.load(row[2], sr=16_000)[0])
second_spec = convert_to_spectrogram(librosa.load(row[3], sr=16_000)[0])
audio = row[-1]
video = [row[2], row[3]]
video = [i.replace("audio", "embed").replace("wav", "npy") for i in video]
audio = Path(audio)
video = [Path(i) for i in video]
output_audios = generate_audio(model, audio, video, device=device, save=False, return_spectrograms=True)
first = output_audios[0]
second = output_audios[1]
plot_all(mixed_spec, first, second, first_spec, second_spec)
if __name__ == "__main__":
import torch
from tqdm import trange
from pathlib import Path
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--file", default=Path("train.csv"), type=Path)
parser.add_argument("--n", default=3, type=int)
parser.add_argument("--model-path", default=Path("last_full.pth"), type=Path)
args = parser.parse_args()
df = pd.read_csv(args.file)
device = torch.device("cuda")
model = AVFusion().to(device)
model.load_state_dict(torch.load(args.model_path)["model_state_dict"])
for i in trange(args.n):
plot_row(model, df, i, device)
| none | 1 | 2.541232 | 3 |
|
homeassistant/components/vaillant/hub.py | pepsonEL/home-assistant | 0 | 6632014 | """Api hub and integration data."""
import logging
from pymultimatic.api import ApiError
from pymultimatic.model import (
Circulation,
HolidayMode,
HotWater,
OperatingModes,
QuickMode,
QuickModes,
QuickVeto,
Room,
System,
Zone,
ZoneCooling,
ZoneHeating,
)
import pymultimatic.systemmanager
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
CONF_SERIAL_NUMBER,
DEFAULT_QUICK_VETO_DURATION,
DEFAULT_SMART_PHONE_ID,
DOMAIN,
REFRESH_ENTITIES_EVENT,
)
from .utils import get_scan_interval
_LOGGER = logging.getLogger(__name__)
async def check_authentication(hass, username, password, serial):
"""Check if provided username an password are corrects."""
return await pymultimatic.systemmanager.SystemManager(
username,
password,
async_create_clientsession(hass),
DEFAULT_SMART_PHONE_ID,
serial,
).login(True)
class ApiHub(DataUpdateCoordinator):
"""Vaillant entry point for home-assistant."""
def __init__(self, hass, entry: ConfigEntry):
"""Initialize hub."""
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
serial = entry.data.get(CONF_SERIAL_NUMBER)
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=get_scan_interval(entry),
update_method=self._fetch_data,
)
session = async_create_clientsession(hass)
self._manager = pymultimatic.systemmanager.SystemManager(
username, password, session, DEFAULT_SMART_PHONE_ID, serial
)
self.system: System = None
self._hass = hass
async def authenticate(self):
"""Try to authenticate to the API."""
try:
return await self._manager.login(True)
except ApiError as err:
await self._handle_api_error(err)
return False
async def request_hvac_update(self):
"""Request is not on the classic update since it won't fetch data.
The request update will trigger something at vaillant API and it will
ask data to your system.
"""
try:
_LOGGER.debug("Will request_hvac_update")
await self._manager.request_hvac_update()
except ApiError as err:
if err.response.status == 409:
_LOGGER.warning("request_hvac_update is done too often")
else:
await self._handle_api_error(err)
await self.authenticate()
async def _fetch_data(self):
"""Fetch vaillant system."""
try:
self.system = await self._manager.get_system()
_LOGGER.debug("fetch_data successful")
except ApiError as err:
# update_system is called by all entities, if it fails for
# one entity, it will certainly fail for others.
# catching exception so the throttling is occurring
await self._handle_api_error(err)
await self.authenticate()
async def logout(self):
"""Logout from API."""
try:
await self._manager.logout()
except ApiError:
_LOGGER.warning("Cannot logout from vaillant API", exc_info=True)
return False
return True
async def _handle_api_error(self, api_err):
resp = await api_err.response.text()
_LOGGER.exception(
"Unable to fetch data from vaillant, API says: %s, status: %s",
resp,
api_err.response.status,
)
def find_component(self, comp):
"""Find a component in the system with the given id, no IO is done."""
if isinstance(comp, Zone):
for zone in self.system.zones:
if zone.id == comp.id:
return zone
if isinstance(comp, Room):
for room in self.system.rooms:
if room.id == comp.id:
return room
if isinstance(comp, HotWater):
if self.system.dhw.hotwater and self.system.dhw.hotwater.id == comp.id:
return self.system.dhw.hotwater
if isinstance(comp, Circulation):
if (
self.system.dhw.circulation
and self.system.dhw.circulation.id == comp.id
):
return self.system.dhw.circulation
return None
async def set_hot_water_target_temperature(self, entity, target_temp):
"""Set hot water target temperature.
        * If there is a quick mode impacting dhw, or holiday mode is active,
remove it.
* If dhw is ON or AUTO, modify the target temperature
* If dhw is OFF, change to ON and set target temperature
"""
hotwater = entity.component
touch_system = await self._remove_quick_mode_or_holiday(entity)
current_mode = self.system.get_active_mode_hot_water(hotwater).current
if current_mode == OperatingModes.OFF or touch_system:
await self._manager.set_hot_water_operating_mode(
hotwater.id, OperatingModes.ON
)
await self._manager.set_hot_water_setpoint_temperature(hotwater.id, target_temp)
self.system.hot_water = hotwater
await self._refresh(touch_system, entity)
async def set_room_target_temperature(self, entity, target_temp):
"""Set target temperature for a room.
        * If there is a quick mode impacting the room, or holiday mode is active,
        remove it.
        * If the room is in MANUAL mode, simply modify the target temperature.
        * If the room is not in MANUAL mode, create a quick veto.
"""
touch_system = await self._remove_quick_mode_or_holiday(entity)
room = entity.component
current_mode = self.system.get_active_mode_room(room).current
if current_mode == OperatingModes.MANUAL:
await self._manager.set_room_setpoint_temperature(room.id, target_temp)
room.target_temperature = target_temp
else:
if current_mode == OperatingModes.QUICK_VETO:
await self._manager.remove_room_quick_veto(room.id)
qveto = QuickVeto(DEFAULT_QUICK_VETO_DURATION, target_temp)
await self._manager.set_room_quick_veto(room.id, qveto)
room.quick_veto = qveto
self.system.set_room(room.id, room)
await self._refresh(touch_system, entity)
async def set_zone_target_temperature(self, entity, target_temp):
"""Set target temperature for a zone.
        * If there is a quick mode affecting the zone, or holiday mode is active,
        remove it.
        * If a quick veto is running, remove it and create a new one with the
        new target temperature.
        * For any other mode, create a quick veto.
"""
touch_system = await self._remove_quick_mode_or_holiday(entity)
zone = entity.component
current_mode = self.system.get_active_mode_zone(zone).current
if current_mode == OperatingModes.QUICK_VETO:
await self._manager.remove_zone_quick_veto(zone.id)
veto = QuickVeto(None, target_temp)
await self._manager.set_zone_quick_veto(zone.id, veto)
zone.quick_veto = veto
self.system.set_zone(zone.id, zone)
await self._refresh(touch_system, entity)
async def set_hot_water_operating_mode(self, entity, mode):
"""Set hot water operation mode.
        If there is a quick mode impacting hot water, or holiday
        mode is active, remove it.
"""
hotwater = entity.component
touch_system = await self._remove_quick_mode_or_holiday(entity)
await self._manager.set_hot_water_operating_mode(hotwater.id, mode)
hotwater.operating_mode = mode
self.system.dhw.hotwater = hotwater
await self._refresh(touch_system, entity)
async def set_room_operating_mode(self, entity, mode):
"""Set room operation mode.
        If there is a quick mode impacting the room, or holiday mode is active,
remove it.
"""
touch_system = await self._remove_quick_mode_or_holiday(entity)
room = entity.component
if room.quick_veto is not None:
await self._manager.remove_room_quick_veto(room.id)
room.quick_veto = None
if isinstance(mode, QuickMode):
await self._manager.set_quick_mode(mode)
self.system.quick_mode = mode
touch_system = True
else:
await self._manager.set_room_operating_mode(room.id, mode)
room.operating_mode = mode
self.system.set_room(room.id, room)
await self._refresh(touch_system, entity)
async def set_zone_operating_mode(self, entity, mode):
"""Set zone operation mode.
        If there is a quick mode impacting the zone, or holiday mode is active,
remove it.
"""
touch_system = await self._remove_quick_mode_or_holiday(entity)
zone = entity.component
if zone.quick_veto is not None:
await self._manager.remove_zone_quick_veto(zone.id)
zone.quick_veto = None
if isinstance(mode, QuickMode):
await self._manager.set_quick_mode(mode)
self.system.quick_mode = mode
touch_system = True
else:
if zone.heating and mode in ZoneHeating.MODES:
await self._manager.set_zone_heating_operating_mode(zone.id, mode)
zone.heating.operating_mode = mode
if zone.cooling and mode in ZoneCooling.MODES:
await self._manager.set_zone_cooling_operating_mode(zone.id, mode)
zone.cooling.operating_mode = mode
self.system.set_zone(zone.id, zone)
await self._refresh(touch_system, entity)
async def remove_quick_mode(self, entity=None):
"""Remove quick mode.
If entity is not None, only remove if the quick mode applies to the
given entity.
"""
if await self._remove_quick_mode_no_refresh(entity):
await self._refresh_entities()
async def remove_holiday_mode(self):
"""Remove holiday mode."""
if await self._remove_holiday_mode_no_refresh():
await self._refresh_entities()
async def set_holiday_mode(self, start_date, end_date, temperature):
"""Set holiday mode."""
await self._manager.set_holiday_mode(start_date, end_date, temperature)
self.system.holiday = HolidayMode(True, start_date, end_date, temperature)
await self._refresh_entities()
async def set_quick_mode(self, mode):
"""Set quick mode (remove previous one)."""
await self._remove_quick_mode_no_refresh()
qmode = QuickModes.get(mode)
await self._manager.set_quick_mode(qmode)
self.system.quick_mode = qmode
await self._refresh_entities()
async def set_quick_veto(self, entity, temperature, duration=None):
"""Set quick veto for the given entity."""
comp = self.find_component(entity.component)
q_duration = duration if duration else DEFAULT_QUICK_VETO_DURATION
qveto = QuickVeto(q_duration, temperature)
if isinstance(comp, Zone):
if comp.quick_veto:
await self._manager.remove_zone_quick_veto(comp.id)
await self._manager.set_zone_quick_veto(comp.id, qveto)
else:
if comp.quick_veto:
await self._manager.remove_room_quick_veto(comp.id)
await self._manager.set_room_quick_veto(comp.id, qveto)
comp.quick_veto = qveto
await self._refresh(False, entity)
async def remove_quick_veto(self, entity):
"""Remove quick veto for the given entity."""
comp = self.find_component(entity.component)
if comp and comp.quick_veto:
if isinstance(comp, Zone):
await self._manager.remove_zone_quick_veto(comp.id)
else:
await self._manager.remove_room_quick_veto(comp.id)
comp.quick_veto = None
await self._refresh(False, entity)
async def _remove_quick_mode_no_refresh(self, entity=None):
removed = False
if self.system.quick_mode is not None:
qmode = self.system.quick_mode
if entity:
if qmode.is_for(entity.component):
await self._hard_remove_quick_mode()
removed = True
else:
await self._hard_remove_quick_mode()
removed = True
return removed
async def _hard_remove_quick_mode(self):
await self._manager.remove_quick_mode()
self.system.quick_mode = None
async def _remove_holiday_mode_no_refresh(self):
removed = False
if self.system.holiday is not None and self.system.holiday.is_applied:
removed = True
await self._manager.remove_holiday_mode()
self.system.holiday = HolidayMode(False)
return removed
async def _remove_quick_mode_or_holiday(self, entity):
return (
await self._remove_holiday_mode_no_refresh()
| await self._remove_quick_mode_no_refresh(entity)
)
async def _refresh_entities(self):
"""Fetch vaillant data and force refresh of all listening entities."""
# await self.async_refresh()
self._hass.bus.async_fire(REFRESH_ENTITIES_EVENT, {})
async def _refresh(self, touch_system, entity):
if touch_system:
await self._refresh_entities()
else:
entity.async_schedule_update_ha_state(True)
| """Api hub and integration data."""
import logging
from pymultimatic.api import ApiError
from pymultimatic.model import (
Circulation,
HolidayMode,
HotWater,
OperatingModes,
QuickMode,
QuickModes,
QuickVeto,
Room,
System,
Zone,
ZoneCooling,
ZoneHeating,
)
import pymultimatic.systemmanager
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
CONF_SERIAL_NUMBER,
DEFAULT_QUICK_VETO_DURATION,
DEFAULT_SMART_PHONE_ID,
DOMAIN,
REFRESH_ENTITIES_EVENT,
)
from .utils import get_scan_interval
_LOGGER = logging.getLogger(__name__)
async def check_authentication(hass, username, password, serial):
"""Check if provided username an password are corrects."""
return await pymultimatic.systemmanager.SystemManager(
username,
password,
async_create_clientsession(hass),
DEFAULT_SMART_PHONE_ID,
serial,
).login(True)
class ApiHub(DataUpdateCoordinator):
"""Vaillant entry point for home-assistant."""
def __init__(self, hass, entry: ConfigEntry):
"""Initialize hub."""
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
serial = entry.data.get(CONF_SERIAL_NUMBER)
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=get_scan_interval(entry),
update_method=self._fetch_data,
)
session = async_create_clientsession(hass)
self._manager = pymultimatic.systemmanager.SystemManager(
username, password, session, DEFAULT_SMART_PHONE_ID, serial
)
self.system: System = None
self._hass = hass
async def authenticate(self):
"""Try to authenticate to the API."""
try:
return await self._manager.login(True)
except ApiError as err:
await self._handle_api_error(err)
return False
async def request_hvac_update(self):
"""Request is not on the classic update since it won't fetch data.
The request update will trigger something at vaillant API and it will
ask data to your system.
"""
try:
_LOGGER.debug("Will request_hvac_update")
await self._manager.request_hvac_update()
except ApiError as err:
if err.response.status == 409:
_LOGGER.warning("request_hvac_update is done too often")
else:
await self._handle_api_error(err)
await self.authenticate()
async def _fetch_data(self):
"""Fetch vaillant system."""
try:
self.system = await self._manager.get_system()
_LOGGER.debug("fetch_data successful")
except ApiError as err:
# update_system is called by all entities, if it fails for
# one entity, it will certainly fail for others.
# catching exception so the throttling is occurring
await self._handle_api_error(err)
await self.authenticate()
async def logout(self):
"""Logout from API."""
try:
await self._manager.logout()
except ApiError:
_LOGGER.warning("Cannot logout from vaillant API", exc_info=True)
return False
return True
async def _handle_api_error(self, api_err):
resp = await api_err.response.text()
_LOGGER.exception(
"Unable to fetch data from vaillant, API says: %s, status: %s",
resp,
api_err.response.status,
)
def find_component(self, comp):
"""Find a component in the system with the given id, no IO is done."""
if isinstance(comp, Zone):
for zone in self.system.zones:
if zone.id == comp.id:
return zone
if isinstance(comp, Room):
for room in self.system.rooms:
if room.id == comp.id:
return room
if isinstance(comp, HotWater):
if self.system.dhw.hotwater and self.system.dhw.hotwater.id == comp.id:
return self.system.dhw.hotwater
if isinstance(comp, Circulation):
if (
self.system.dhw.circulation
and self.system.dhw.circulation.id == comp.id
):
return self.system.dhw.circulation
return None
async def set_hot_water_target_temperature(self, entity, target_temp):
"""Set hot water target temperature.
        * If there is a quick mode impacting dhw, or holiday mode is active,
remove it.
* If dhw is ON or AUTO, modify the target temperature
* If dhw is OFF, change to ON and set target temperature
"""
hotwater = entity.component
touch_system = await self._remove_quick_mode_or_holiday(entity)
current_mode = self.system.get_active_mode_hot_water(hotwater).current
if current_mode == OperatingModes.OFF or touch_system:
await self._manager.set_hot_water_operating_mode(
hotwater.id, OperatingModes.ON
)
await self._manager.set_hot_water_setpoint_temperature(hotwater.id, target_temp)
self.system.hot_water = hotwater
await self._refresh(touch_system, entity)
async def set_room_target_temperature(self, entity, target_temp):
"""Set target temperature for a room.
        * If there is a quick mode impacting the room, or holiday mode is active,
        remove it.
        * If the room is in MANUAL mode, simply modify the target temperature.
        * If the room is not in MANUAL mode, create a quick veto.
"""
touch_system = await self._remove_quick_mode_or_holiday(entity)
room = entity.component
current_mode = self.system.get_active_mode_room(room).current
if current_mode == OperatingModes.MANUAL:
await self._manager.set_room_setpoint_temperature(room.id, target_temp)
room.target_temperature = target_temp
else:
if current_mode == OperatingModes.QUICK_VETO:
await self._manager.remove_room_quick_veto(room.id)
qveto = QuickVeto(DEFAULT_QUICK_VETO_DURATION, target_temp)
await self._manager.set_room_quick_veto(room.id, qveto)
room.quick_veto = qveto
self.system.set_room(room.id, room)
await self._refresh(touch_system, entity)
async def set_zone_target_temperature(self, entity, target_temp):
"""Set target temperature for a zone.
        * If there is a quick mode affecting the zone, or holiday mode is active,
        remove it.
        * If a quick veto is running, remove it and create a new one with the
        new target temperature.
        * For any other mode, create a quick veto.
"""
touch_system = await self._remove_quick_mode_or_holiday(entity)
zone = entity.component
current_mode = self.system.get_active_mode_zone(zone).current
if current_mode == OperatingModes.QUICK_VETO:
await self._manager.remove_zone_quick_veto(zone.id)
veto = QuickVeto(None, target_temp)
await self._manager.set_zone_quick_veto(zone.id, veto)
zone.quick_veto = veto
self.system.set_zone(zone.id, zone)
await self._refresh(touch_system, entity)
async def set_hot_water_operating_mode(self, entity, mode):
"""Set hot water operation mode.
        If there is a quick mode impacting hot water, or holiday
        mode is active, remove it.
"""
hotwater = entity.component
touch_system = await self._remove_quick_mode_or_holiday(entity)
await self._manager.set_hot_water_operating_mode(hotwater.id, mode)
hotwater.operating_mode = mode
self.system.dhw.hotwater = hotwater
await self._refresh(touch_system, entity)
async def set_room_operating_mode(self, entity, mode):
"""Set room operation mode.
        If there is a quick mode impacting the room, or holiday mode is active,
remove it.
"""
touch_system = await self._remove_quick_mode_or_holiday(entity)
room = entity.component
if room.quick_veto is not None:
await self._manager.remove_room_quick_veto(room.id)
room.quick_veto = None
if isinstance(mode, QuickMode):
await self._manager.set_quick_mode(mode)
self.system.quick_mode = mode
touch_system = True
else:
await self._manager.set_room_operating_mode(room.id, mode)
room.operating_mode = mode
self.system.set_room(room.id, room)
await self._refresh(touch_system, entity)
async def set_zone_operating_mode(self, entity, mode):
"""Set zone operation mode.
        If there is a quick mode impacting the zone, or holiday mode is active,
remove it.
"""
touch_system = await self._remove_quick_mode_or_holiday(entity)
zone = entity.component
if zone.quick_veto is not None:
await self._manager.remove_zone_quick_veto(zone.id)
zone.quick_veto = None
if isinstance(mode, QuickMode):
await self._manager.set_quick_mode(mode)
self.system.quick_mode = mode
touch_system = True
else:
if zone.heating and mode in ZoneHeating.MODES:
await self._manager.set_zone_heating_operating_mode(zone.id, mode)
zone.heating.operating_mode = mode
if zone.cooling and mode in ZoneCooling.MODES:
await self._manager.set_zone_cooling_operating_mode(zone.id, mode)
zone.cooling.operating_mode = mode
self.system.set_zone(zone.id, zone)
await self._refresh(touch_system, entity)
async def remove_quick_mode(self, entity=None):
"""Remove quick mode.
If entity is not None, only remove if the quick mode applies to the
given entity.
"""
if await self._remove_quick_mode_no_refresh(entity):
await self._refresh_entities()
async def remove_holiday_mode(self):
"""Remove holiday mode."""
if await self._remove_holiday_mode_no_refresh():
await self._refresh_entities()
async def set_holiday_mode(self, start_date, end_date, temperature):
"""Set holiday mode."""
await self._manager.set_holiday_mode(start_date, end_date, temperature)
self.system.holiday = HolidayMode(True, start_date, end_date, temperature)
await self._refresh_entities()
async def set_quick_mode(self, mode):
"""Set quick mode (remove previous one)."""
await self._remove_quick_mode_no_refresh()
qmode = QuickModes.get(mode)
await self._manager.set_quick_mode(qmode)
self.system.quick_mode = qmode
await self._refresh_entities()
async def set_quick_veto(self, entity, temperature, duration=None):
"""Set quick veto for the given entity."""
comp = self.find_component(entity.component)
q_duration = duration if duration else DEFAULT_QUICK_VETO_DURATION
qveto = QuickVeto(q_duration, temperature)
if isinstance(comp, Zone):
if comp.quick_veto:
await self._manager.remove_zone_quick_veto(comp.id)
await self._manager.set_zone_quick_veto(comp.id, qveto)
else:
if comp.quick_veto:
await self._manager.remove_room_quick_veto(comp.id)
await self._manager.set_room_quick_veto(comp.id, qveto)
comp.quick_veto = qveto
await self._refresh(False, entity)
async def remove_quick_veto(self, entity):
"""Remove quick veto for the given entity."""
comp = self.find_component(entity.component)
if comp and comp.quick_veto:
if isinstance(comp, Zone):
await self._manager.remove_zone_quick_veto(comp.id)
else:
await self._manager.remove_room_quick_veto(comp.id)
comp.quick_veto = None
await self._refresh(False, entity)
async def _remove_quick_mode_no_refresh(self, entity=None):
removed = False
if self.system.quick_mode is not None:
qmode = self.system.quick_mode
if entity:
if qmode.is_for(entity.component):
await self._hard_remove_quick_mode()
removed = True
else:
await self._hard_remove_quick_mode()
removed = True
return removed
async def _hard_remove_quick_mode(self):
await self._manager.remove_quick_mode()
self.system.quick_mode = None
async def _remove_holiday_mode_no_refresh(self):
removed = False
if self.system.holiday is not None and self.system.holiday.is_applied:
removed = True
await self._manager.remove_holiday_mode()
self.system.holiday = HolidayMode(False)
return removed
async def _remove_quick_mode_or_holiday(self, entity):
return (
await self._remove_holiday_mode_no_refresh()
| await self._remove_quick_mode_no_refresh(entity)
)
async def _refresh_entities(self):
"""Fetch vaillant data and force refresh of all listening entities."""
# await self.async_refresh()
self._hass.bus.async_fire(REFRESH_ENTITIES_EVENT, {})
async def _refresh(self, touch_system, entity):
if touch_system:
await self._refresh_entities()
else:
entity.async_schedule_update_ha_state(True)
| en | 0.806674 | Api hub and integration data. Check if provided username an password are corrects. Vaillant entry point for home-assistant. Initialize hub. Try to authenticate to the API. Request is not on the classic update since it won't fetch data. The request update will trigger something at vaillant API and it will ask data to your system. Fetch vaillant system. # update_system is called by all entities, if it fails for # one entity, it will certainly fail for others. # catching exception so the throttling is occurring Logout from API. Find a component in the system with the given id, no IO is done. Set hot water target temperature. * If there is a quick mode that impact dhw running on or holiday mode, remove it. * If dhw is ON or AUTO, modify the target temperature * If dhw is OFF, change to ON and set target temperature Set target temperature for a room. * If there is a quick mode that impact room running on or holiday mode, remove it. * If the room is in MANUAL mode, simply modify the target temperature. * if the room is not in MANUAL mode, create à quick veto. Set target temperature for a zone. * If there is a quick mode related to zone running or holiday mode, remove it. * If quick veto running on, remove it and create a new one with the new target temp * If any other mode, create a quick veto Set hot water operation mode. If there is a quick mode that impact hot warter running on or holiday mode, remove it. Set room operation mode. If there is a quick mode that impact room running on or holiday mode, remove it. Set zone operation mode. If there is a quick mode that impact zone running on or holiday mode, remove it. Remove quick mode. If entity is not None, only remove if the quick mode applies to the given entity. Remove holiday mode. Set holiday mode. Set quick mode (remove previous one). Set quick veto for the given entity. Remove quick veto for the given entity. Fetch vaillant data and force refresh of all listening entities. # await self.async_refresh() | 2.041571 | 2 |
pyclustering/nnet/tests/unit/ut_som.py | JosephChataignon/pyclustering | 1,013 | 6632015 | """!
@brief Unit-tests for self-organized feature map.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.nnet.tests.som_templates import SomTestTemplates
from pyclustering.nnet.som import som, type_conn, type_init, som_parameters
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES
class SomUnitTest(unittest.TestCase):
def testTwoNeuronsTwoClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], False, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], False, False)
def testTwoNeuronsTwoClustersStoreLoad(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], False, False, store_load=True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], False, False, store_load=True)
def testAutostopTwoNeuronsTwoClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], True, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], True, False)
def testAutostopTwoNeuronsTwoClustersStoreLoad(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], True, False, store_load=True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], True, False, store_load=True)
def testThreeNeuronsThreeClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, 3, 100, [5, 8, 10], False, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, 1, 100, [5, 8, 10], False, False)
def testAutostopThreeNeuronsThreeClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, 3, 100, [5, 8, 10], True, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, 1, 100, [5, 8, 10], True, False)
def testFourNeuronsFourClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 4, 100, [10, 10, 10, 30], False, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 2, 2, 100, [10, 10, 10, 30], False, False)
def testAutostopFourNeuronsFourClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 4, 100, [10, 10, 10, 30], True, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 2, 2, 100, [10, 10, 10, 30], True, False)
def testTwoNeuronsFourClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 2, 100, [30, 30], False, False)
def testAutostopTwoNeuronsFourClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 2, 100, [30, 30], True, False)
def testSevenNeuronsHeptaClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_HEPTA, 1, 7, 100, [30, 30, 30, 30, 30, 30, 32], False, False)
def testAutostopSevenNeuronsHeptaClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_HEPTA, 1, 7, 100, [30, 30, 30, 30, 30, 30, 32], True, False)
def testFourNeuronsTetraClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TETRA, 1, 4, 100, [100, 100, 100, 100], False, False)
def testAutostopFourNeuronsTetraClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TETRA, 1, 4, 100, [100, 100, 100, 100], True, False)
def testTwoNeuronsTwoDiamondsClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 1, 2, 100, [400, 400], False, False)
def testAutostopTwoNeuronsTwoDiamondsClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 1, 2, 100, [400, 400], True, False)
def testFiveNeuronsFiveClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 1, 5, 100, [15, 15, 15, 15, 15], False, False)
def testAutostopFiveNeuronsFiveClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 1, 5, 100, [15, 15, 15, 15, 15], True, False)
def testFourNeuronsSquareClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 2, 2, 100, [15, 15, 15, 15], False, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 1, 4, 100, [15, 15, 15, 15], False, False)
def testAutostopFourNeuronsSquareClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 2, 2, 100, [15, 15, 15, 15], True, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 1, 4, 100, [15, 15, 15, 15], True, False)
def testOneDimensionSampleSimple7Cluster(self):
parameters = som_parameters()
parameters.init_type = type_init.random_surface
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, 1, 100, [10, 10], True, False, parameters)
def testHighEpochs(self):
epochs = 1000
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, epochs, [5, 5], False, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 2, 2, epochs, [10, 10, 10, 30], False, False)
def testWinners(self):
SomTestTemplates.templateTestWinners(False)
def testDoubleTrain(self):
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
parameters = som_parameters()
network = som(2, 2, type_conn.grid_eight, parameters, ccore = False)
network.train(sample, 100, False)
network.train(sample, 100, False)
assert sum(network.awards) == len(sample)
total_capture_points = 0
for points in network.capture_objects:
total_capture_points += len(points)
assert total_capture_points == len(sample)
def testSimulateCheckWinnerFuncNeighbor(self):
SomTestTemplates.templateTestSimulate(type_conn.func_neighbor, False)
def testSimulateCheckWinnerFuncNeighborStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.func_neighbor, False, store_load=True)
def testSimulateCheckWinnerGridFour(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_four, False)
def testSimulateCheckWinnerGridFourStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_four, False, store_load=True)
def testSimulateCheckWinnerGridEight(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_eight, False)
def testSimulateCheckWinnerGridEightStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_eight, False, store_load=True)
def testSimulateCheckWinnerHoneycomb(self):
SomTestTemplates.templateTestSimulate(type_conn.honeycomb, False)
def testSimulateCheckWinnerHoneycombStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.honeycomb, False, store_load=True)
def testNetwork2x2RandomState5(self):
SomTestTemplates.random_state(2, 2, type_conn.honeycomb, 5, False)
def testNetwork2x2RandomState5FuncNeighbor(self):
SomTestTemplates.random_state(2, 2, type_conn.func_neighbor, 5, False)
def testNetwork2x2RandomState10(self):
SomTestTemplates.random_state(2, 2, type_conn.honeycomb, 10, False)
def testNetwork2x2RandomState10FuncNeighbor(self):
SomTestTemplates.random_state(2, 2, type_conn.func_neighbor, 10, False)
def testNetwork2x3RandomState5(self):
SomTestTemplates.random_state(2, 3, type_conn.honeycomb, 5, False)
def testNetwork2x3RandomState10(self):
SomTestTemplates.random_state(2, 3, type_conn.honeycomb, 10, False)
def testNetwork1x8RandomState5(self):
SomTestTemplates.random_state(1, 8, type_conn.honeycomb, 5, False)
def testNetwork1x8RandomState10(self):
SomTestTemplates.random_state(1, 8, type_conn.honeycomb, 10, False)
def testNetwork1x8GridFour(self):
SomTestTemplates.random_state(1, 8, type_conn.grid_four, 5, False)
SomTestTemplates.random_state(8, 1, type_conn.grid_four, 5, False)
def testNetwork1x8GridEight(self):
SomTestTemplates.random_state(1, 8, type_conn.grid_eight, 5, False)
SomTestTemplates.random_state(8, 1, type_conn.grid_eight, 5, False)
    def testNetwork1x8FuncNeighbor(self):
SomTestTemplates.random_state(1, 8, type_conn.func_neighbor, 5, False)
SomTestTemplates.random_state(8, 1, type_conn.func_neighbor, 5, False)
| """!
@brief Unit-tests for self-organized feature map.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.nnet.tests.som_templates import SomTestTemplates
from pyclustering.nnet.som import som, type_conn, type_init, som_parameters
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES
class SomUnitTest(unittest.TestCase):
def testTwoNeuronsTwoClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], False, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], False, False)
def testTwoNeuronsTwoClustersStoreLoad(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], False, False, store_load=True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], False, False, store_load=True)
def testAutostopTwoNeuronsTwoClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], True, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], True, False)
def testAutostopTwoNeuronsTwoClustersStoreLoad(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, 100, [5, 5], True, False, store_load=True)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, 1, 100, [5, 5], True, False, store_load=True)
def testThreeNeuronsThreeClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, 3, 100, [5, 8, 10], False, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, 1, 100, [5, 8, 10], False, False)
def testAutostopThreeNeuronsThreeClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, 3, 100, [5, 8, 10], True, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, 1, 100, [5, 8, 10], True, False)
def testFourNeuronsFourClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 4, 100, [10, 10, 10, 30], False, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 2, 2, 100, [10, 10, 10, 30], False, False)
def testAutostopFourNeuronsFourClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 4, 100, [10, 10, 10, 30], True, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 2, 2, 100, [10, 10, 10, 30], True, False)
def testTwoNeuronsFourClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 2, 100, [30, 30], False, False)
def testAutostopTwoNeuronsFourClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, 2, 100, [30, 30], True, False)
def testSevenNeuronsHeptaClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_HEPTA, 1, 7, 100, [30, 30, 30, 30, 30, 30, 32], False, False)
def testAutostopSevenNeuronsHeptaClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_HEPTA, 1, 7, 100, [30, 30, 30, 30, 30, 30, 32], True, False)
def testFourNeuronsTetraClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TETRA, 1, 4, 100, [100, 100, 100, 100], False, False)
def testAutostopFourNeuronsTetraClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TETRA, 1, 4, 100, [100, 100, 100, 100], True, False)
def testTwoNeuronsTwoDiamondsClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 1, 2, 100, [400, 400], False, False)
def testAutostopTwoNeuronsTwoDiamondsClusters(self):
SomTestTemplates.templateTestAwardNeurons(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 1, 2, 100, [400, 400], True, False)
def testFiveNeuronsFiveClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 1, 5, 100, [15, 15, 15, 15, 15], False, False)
def testAutostopFiveNeuronsFiveClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 1, 5, 100, [15, 15, 15, 15, 15], True, False)
def testFourNeuronsSquareClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 2, 2, 100, [15, 15, 15, 15], False, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 1, 4, 100, [15, 15, 15, 15], False, False)
def testAutostopFourNeuronsSquareClusters(self):
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 2, 2, 100, [15, 15, 15, 15], True, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 1, 4, 100, [15, 15, 15, 15], True, False)
def testOneDimensionSampleSimple7Cluster(self):
parameters = som_parameters()
parameters.init_type = type_init.random_surface
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, 1, 100, [10, 10], True, False, parameters)
def testHighEpochs(self):
epochs = 1000
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, 2, epochs, [5, 5], False, False)
SomTestTemplates.templateTestAwardNeurons(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 2, 2, epochs, [10, 10, 10, 30], False, False)
def testWinners(self):
SomTestTemplates.templateTestWinners(False)
def testDoubleTrain(self):
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1)
parameters = som_parameters()
network = som(2, 2, type_conn.grid_eight, parameters, ccore = False)
network.train(sample, 100, False)
network.train(sample, 100, False)
assert sum(network.awards) == len(sample)
total_capture_points = 0
for points in network.capture_objects:
total_capture_points += len(points)
assert total_capture_points == len(sample)
def testSimulateCheckWinnerFuncNeighbor(self):
SomTestTemplates.templateTestSimulate(type_conn.func_neighbor, False)
def testSimulateCheckWinnerFuncNeighborStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.func_neighbor, False, store_load=True)
def testSimulateCheckWinnerGridFour(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_four, False)
def testSimulateCheckWinnerGridFourStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_four, False, store_load=True)
def testSimulateCheckWinnerGridEight(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_eight, False)
def testSimulateCheckWinnerGridEightStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.grid_eight, False, store_load=True)
def testSimulateCheckWinnerHoneycomb(self):
SomTestTemplates.templateTestSimulate(type_conn.honeycomb, False)
def testSimulateCheckWinnerHoneycombStoreLoad(self):
SomTestTemplates.templateTestSimulate(type_conn.honeycomb, False, store_load=True)
def testNetwork2x2RandomState5(self):
SomTestTemplates.random_state(2, 2, type_conn.honeycomb, 5, False)
def testNetwork2x2RandomState5FuncNeighbor(self):
SomTestTemplates.random_state(2, 2, type_conn.func_neighbor, 5, False)
def testNetwork2x2RandomState10(self):
SomTestTemplates.random_state(2, 2, type_conn.honeycomb, 10, False)
def testNetwork2x2RandomState10FuncNeighbor(self):
SomTestTemplates.random_state(2, 2, type_conn.func_neighbor, 10, False)
def testNetwork2x3RandomState5(self):
SomTestTemplates.random_state(2, 3, type_conn.honeycomb, 5, False)
def testNetwork2x3RandomState10(self):
SomTestTemplates.random_state(2, 3, type_conn.honeycomb, 10, False)
def testNetwork1x8RandomState5(self):
SomTestTemplates.random_state(1, 8, type_conn.honeycomb, 5, False)
def testNetwork1x8RandomState10(self):
SomTestTemplates.random_state(1, 8, type_conn.honeycomb, 10, False)
def testNetwork1x8GridFour(self):
SomTestTemplates.random_state(1, 8, type_conn.grid_four, 5, False)
SomTestTemplates.random_state(8, 1, type_conn.grid_four, 5, False)
def testNetwork1x8GridEight(self):
SomTestTemplates.random_state(1, 8, type_conn.grid_eight, 5, False)
SomTestTemplates.random_state(8, 1, type_conn.grid_eight, 5, False)
    def testNetwork1x8FuncNeighbor(self):
SomTestTemplates.random_state(1, 8, type_conn.func_neighbor, 5, False)
SomTestTemplates.random_state(8, 1, type_conn.func_neighbor, 5, False)
| en | 0.70592 | !
@brief Unit-tests for self-organized feature map.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause # Generate images without having a window appear. | 2.369531 | 2 |
mnist/test_predict.py | rootpia/mnist_ai | 0 | 6632016 |
#!/usr/bin/env python
import argparse
import chainer
import numpy as np
from train_mnist import MLP
def print_predict(model):
# Load the MNIST dataset
_, test = chainer.datasets.get_mnist()
test, label = chainer.dataset.concat_examples(test)
pred = model(test)
pred = chainer.functions.softmax(pred).data
label_y = [np.argmax(pred[i]) for i in range(len(pred))]
for ii in range(1):
print('-------------------------------')
print('gt :{0}'.format(label[ii]))
print('pred :{0}'.format(label_y[ii]))
print('percentage:')
for jj in range(10):
print('[{0}]: {1:1.3f}'.format(jj, pred[ii][jj]))
def single_predictor(model, image):
test = np.array(image).reshape(1, -1)
pred = model(test)
pred = chainer.functions.softmax(pred).data
label_y = [np.argmax(pred[i]) for i in range(len(pred))]
return (pred[0], label_y[0])
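# Minimal usage sketch (never called): shows how single_predictor() is meant to be
# driven. The all-zero dummy image is an illustrative assumption; the unit count and
# model path mirror the defaults used in main() below.
def _example_single_prediction():
    model = MLP(100, 10)
    chainer.serializers.load_npz('result/pretrained_model', model)
    image = np.zeros(28 * 28, dtype=np.float32)  # one flattened MNIST-sized image
    probs, label = single_predictor(model, image)
    print('predicted digit: {0} (p={1:1.3f})'.format(label, probs[label]))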
def seq_predictor(model):
# Load the MNIST dataset
_, test = chainer.datasets.get_mnist()
test, label = chainer.dataset.concat_examples(test)
for ii in range(2):
pred = single_predictor(model, test[ii])
print('-------------------------------')
print('gt :{0}'.format(label[ii]))
print('pred :{0}'.format(pred[1]))
print('percentage:')
for jj in range(10):
print('[{0}]: {1:1.3f}'.format(jj, pred[0][jj]))
def main():
parser = argparse.ArgumentParser(description='regression of kWh')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--unit', '-u', type=int, default=100,
help='Number of units')
args = parser.parse_args()
# load model
model = MLP(args.unit, 10)
chainer.serializers.load_npz(args.out + '/pretrained_model', model)
# print_predict(model)
seq_predictor(model)
if __name__ == '__main__':
main()
| en | 0.214704 | #!/usr/bin/env python # Load the MNIST dataset # Load the MNIST dataset # load model # print_predict(model) | 2.851578 | 3 |
CIFAR10/models.py | ankanbansal/semi-supervised-learning | 0 | 6632017 | import numpy as np
# import ipdb
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
import torchvision.models as tv_models
from torchvision import transforms
from densenet import densenet_cifar
class WSODModel(nn.Module):
"""
The best performance is achieved by densenet_cifar model. Implementation has been obtained
    from: https://github.com/kuangliu/pytorch-cifar
"""
def __init__(self, options):
super(WSODModel, self).__init__()
self.arch = options['base_arch']
if self.arch == 'densenet_cifar':
self.pretrained_model = densenet_cifar()
elif self.arch == 'densenet121':
pretrained_model = tv_models.densenet121(pretrained=False,growth_rate=12)
self.features = pretrained_model.features
self.classifier = nn.Linear(pretrained_model.classifier.in_features,options['num_classes'])
def forward(self, img, options):
if self.arch == 'densenet_cifar':
lin_feat, logits = self.pretrained_model(img)
elif self.arch == 'densenet121':
feat_map = self.features(img)
feat_map_relu = F.relu(feat_map,inplace=True)
lin_feat = feat_map_relu.view(feat_map_relu.size(0),-1)
logits = self.classifier(lin_feat)
final_output = F.softmax(logits)
return lin_feat, logits, final_output
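# Minimal usage sketch (never called): builds WSODModel with the densenet_cifar
# backbone named in the docstring and runs a dummy batch through it. The options
# values and the 4x3x32x32 CIFAR-sized input are illustrative assumptions.
def _example_forward_pass():
    options = {'base_arch': 'densenet_cifar', 'num_classes': 10}
    model = WSODModel(options)
    dummy_batch = torch.randn(4, 3, 32, 32)  # four RGB images at CIFAR-10 resolution
    lin_feat, logits, probs = model(dummy_batch, options)
    print(lin_feat.shape, logits.shape, probs.shape)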
| en | 0.851353 | # import ipdb The best performance is achieved by densenet_cifar model. Implementation has been obtained from: https://github.com/kuangliu/pytorch-cifar''' | 2.673766 | 3 |
fairseq-apr19/paraphrase-rescore-batch.py | ninikolov/low_resource_summarization | 3 | 6632018 | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""
from collections import namedtuple
import fileinput
from tqdm import *
import torch
import numpy as np
from fairseq import data, options, tasks, tokenizer, utils
from fairseq.sequence_scorer import SequenceScorer
from fairseq.utils import import_user_module
from fairseq.data.data_utils import process_bpe_symbol
from fairseq.data import data_utils
import logging
from low_resource_summarization.summarization_systems.rwmdrank import RWMDRankSummarizer
from low_resource_summarization.summarization_systems.lexrank import LexRankSummarizer
Batch = namedtuple('Batch', 'ids src_tokens src_lengths')
Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments')
def set_overlap(source_set, target_set):
"""Compute the overlap score between a source and a target set.
It is the intersection of the two sets, divided by the length of the target set."""
word_overlap = target_set.intersection(source_set)
overlap = len(word_overlap) / float(len(target_set))
assert 0. <= overlap <= 1.
return overlap
def copy_rate(source, target):
"""
Compute copy rate
:param source:
:param target:
:param stem: whether to perform stemming using nltk
:return:
"""
source_set = set(source.split())
target_set = set(target.split())
if len(source_set) == 0 or len(target_set) == 0:
return 0.
return set_overlap(source_set, target_set)
def jaccard_similarity(source, target):
"""Compute the jaccard similarity between two texts."""
if type(source) == str:
source = source.split()
if type(target) == str:
target = target.split()
if len(source) == 0 or len(target) == 0:
return 0.
source_set = set(source)
target_set = set(target)
try:
return set_overlap(source_set, target_set.union(source_set))
except ZeroDivisionError as e:
logging.error(e)
return 0.
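# Minimal sketch (never called) of the lexical-overlap helpers above; the two
# sentences are made up. Note that jaccard_similarity(), as implemented, returns
# |source words| / |source words U target words| rather than the textbook Jaccard.
def _example_overlap_metrics():
    source = "the cat sat on the mat"
    target = "the cat ran away"
    print(copy_rate(source, target))           # |{the, cat}| / |{the, cat, ran, away}| = 0.5
    print(jaccard_similarity(source, target))  # 5 source words / 7 words in the union ~ 0.714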
def buffered_read(input, buffer_size):
buffer = []
with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h:
for src_str in h:
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if len(buffer) > 0:
yield buffer
def make_batches(lines, args, task, max_positions):
tokens = [
task.source_dictionary.encode_line(src_str, add_if_not_exist=False).long()
for src_str in lines
]
lengths = torch.LongTensor([t.numel() for t in tokens])
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(tokens, lengths),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
).next_epoch_itr(shuffle=False)
for batch in itr:
yield Batch(
ids=batch['id'],
src_tokens=batch['net_input']['src_tokens'],
src_lengths=batch['net_input']['src_lengths'],
)
def make_history_batches(lines, history, args, task, max_positions):
sentence_tokens = [
task.source_dictionary.encode_line(src_str, add_if_not_exist=False).long()
for src_str in lines
]
lengths = torch.LongTensor([t.numel() for t in sentence_tokens])
history_tokens = [
task.source_dictionary.encode_line(src_str, add_if_not_exist=False).long()
for src_str in history
]
article_itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(sentence_tokens, lengths),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
).next_epoch_itr(shuffle=False)
history_itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(history_tokens, lengths),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
).next_epoch_itr(shuffle=False)
for src_batch, history_batch in zip(article_itr, history_itr):
yield Batch(
ids=src_batch['id'],
src_tokens=src_batch['net_input']['src_tokens'],
src_lengths=src_batch['net_input']['src_lengths'],
), Batch(
ids=history_batch['id'],
src_tokens=history_batch['net_input']['src_tokens'],
src_lengths=history_batch['net_input']['src_lengths'],
)
def main(args):
import_user_module(args)
if args.buffer_size < 1:
args.buffer_size = 1
if args.max_tokens is None and args.max_sentences is None:
args.max_sentences = 1
if args.detokenize:
from nltk.tokenize.moses import MosesDetokenizer
detokenizer = MosesDetokenizer()
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
# assert not args.max_sentences or args.max_sentences <= args.buffer_size, \
# '--max-sentences/--batch-size cannot be larger than --buffer-size'
print(args)
print("\n * Summarization args: * \n\tSentences per summary: {} Max tokens per summary: {} ".format(
"all" if args.num_output_sentences == -1 else args.num_output_sentences,
args.max_summary_tokens))
print("Article buffer size: {} \tSentence batch size: {} \n".format(
args.buffer_size, args.max_sentences))
if args.num_output_sentences > 0:
if args.extractive_approach == "lexrank":
print(" * Using LexRank extractive summarizer * \n")
extractor = LexRankSummarizer()
elif args.extractive_approach == "rwmdrank":
print(" * Using RWMDRank extractive summarizer * \n")
extractor = RWMDRankSummarizer()
elif args.extractive_approach == "lead":
print(" * Using Lead extractive summarizer * \n")
else:
logging.error(" * Wrong extractive summarizer name. * ")
raise Exception()
use_cuda = torch.cuda.is_available() and not args.cpu
# Setup task, e.g., translation
task = tasks.setup_task(args)
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
models, _model_args = utils.load_ensemble_for_inference(
args.path.split(':'), task, model_arg_overrides=eval(args.model_overrides),
)
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
if len(models) == 2:
paraphrase_models = models
else:
paraphrase_models = models[:2]
# Initialize generator
generator = task.build_generator(args)
scorer = SequenceScorer(task.target_dictionary)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
max_positions = utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in [models[0]]]
)
print('| Type the input sentence and press return:')
start_id = 0
if args.output_file is not None:
if args.extractive_only:
args.output_file = "{}.extract-only={}.out_sent={}".format(
args.output_file, args.extractive_approach, args.num_output_sentences)
else:
args.output_file = "{}.ext={}.out_sent={}.max_tok={}".format(
args.output_file, args.extractive_approach, args.num_output_sentences,
args.max_summary_tokens)
out_file = open(args.output_file, "w")
print('| Writing output to: {}'.format(args.output_file))
selection_stat = {}
def make_history(history_sents, articles_sents, multiplier=1):
articles_history = []
for hist, art in zip(history_sents, articles_sents):
history_str = "<@@ s@@ > " + " <@@ s@@ > ".join(hist) + " <@@ s@@ >" if len(hist) > 0 else "<@@ s@@ >"
articles_history.append([history_str for i in range(len(art) * multiplier)])
return articles_history
def generate_paraphrases(paraphrase_models, article_sentences, history_sents,
return_n_best=1):
# Generate the batch of sentences to paraphrase
sentence_batch_flat = [sent for article in article_sentences for sent in article]
# Generate the history batch
history = make_history(history_sents, article_sentences)
history_batch_flat = [sent for article in history for sent in article]
results = []
input_idx = 0
# for sentence_batch, history_batch in zip(sentence_batch_iter, history_batch_iter):
for sentence_batch, history_batch in make_history_batches(sentence_batch_flat, history_batch_flat,
args, task, max_positions):
src_tokens = sentence_batch.src_tokens
src_lengths = sentence_batch.src_lengths
if use_cuda:
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
history_src_tokens = history_batch.src_tokens
history_src_lengths = history_batch.src_lengths
if use_cuda:
history_src_tokens = history_src_tokens.cuda()
history_src_lengths = history_src_lengths.cuda()
sample = {'net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
'history_tokens': history_src_tokens,
'history_src_lengths': history_src_lengths
}, 'alpha': 0.}
# Generate the next abstractive sentences
paraphrase_predictions = task.inference_step(
generator, paraphrase_models, sample)
input_idx += args.max_sentences
for i, (id, hypos) in enumerate(zip(sentence_batch.ids.tolist(), paraphrase_predictions)):
src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
results.append((start_id + id, src_tokens_i, hypos))
sentence_paraphrases = []
sentence_paraphrases_clean = []
scores = []
# sort output to match input order
for i, (id, src_tokens, hypos) in enumerate(sorted(results, key=lambda x: x[0])):
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.remove_bpe)
# Process top predictions
for hypo in hypos[:min(len(hypos), return_n_best)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
# print(hypo)
score = hypo["score"]
sentence_paraphrases.append(hypo_str)
# Remove BPE
sentence_paraphrases_clean.append(process_bpe_symbol(hypo_str, "@@ "))
scores.append(score)
# Split the paraphrases per article
curr_idx = 0
out_paraphrases = []
out_paraphrases_clean = []
out_scores = []
for article in article_sentences:
out_paraphrases.append(
sentence_paraphrases[curr_idx:curr_idx + len(article) * return_n_best])
out_paraphrases_clean.append(
sentence_paraphrases_clean[curr_idx:curr_idx + len(article) * return_n_best])
out_scores.append(
scores[curr_idx:curr_idx + len(article) * return_n_best])
curr_idx += len(article) * return_n_best
return out_paraphrases, out_paraphrases_clean, out_scores
def extractive_summarization(article, length):
assert type(article) == list
if args.extractive_approach == "lead":
return article[:length], list(range(length))
import copy
article_copy = copy.deepcopy(article)
# Remove BPE
article = [process_bpe_symbol(sent, "@@ ") for sent in article]
summary = []
order = []
for s, info in zip(*extractor(
article, length, order_by_article=True)):
order.append(info.order)
summary.append(article_copy[info.order])
return summary, order
import time
total_processed = 0
pbar = tqdm(desc="Summarized")
start_time_global = time.time()
for iteration_n, input_lines in enumerate(
buffered_read(args.input, args.buffer_size)):
start_time = time.time()
if args.num_output_sentences > 0:
max_summary_length = args.num_output_sentences
# Run extractive summarization
articles = [
extractive_summarization(inp.strip().split(" <@@ s@@ > "), max_summary_length)[0]
for inp in input_lines
]
else:
articles = [inp.strip().split(" <@@ s@@ > ") for inp in input_lines]
max_summary_length = len(articles[0])
article_lengths = [len(article) for article in articles]
finished_generation = [False for article in articles]
total_sentences_in_buffer = np.sum(article_lengths)
sentence_selection_indices = [[] for i in range(len(articles))]
summary_history = [[] for j in range(len(articles))]
final_clean_summaries = [[] for j in range(len(articles))]
final_clean_summaries_lengths = [len(s) for s in final_clean_summaries]
if total_sentences_in_buffer < args.max_sentences:
args.max_sentences = total_sentences_in_buffer
print("WARNING: you can increase your buffer size")
if args.extractive_only: # Only run extractive summarization
for i, article in enumerate(articles):
for sent in article:
final_clean_summaries[i].append(process_bpe_symbol(sent, "@@ "))
else:
for sentence_num in range(max_summary_length):
if all(finished_generation):
break
if sentence_num == 0: # Only regenerate paraphrases first time
# Sentence paraphrasing
paraphrases, paraphrases_clean, paraphrase_scores = \
generate_paraphrases(
paraphrase_models, articles, summary_history
)
for article_id, (article_paraphrases, article_paraphrases_clean) \
in enumerate(zip(paraphrases, paraphrases_clean)):
if sentence_num > len(article_paraphrases) - 1: # Article shorter than expected summary length
continue
next_sent = article_paraphrases[sentence_num]
next_sent_clean = article_paraphrases_clean[sentence_num]
final_clean_summaries[article_id].append(next_sent_clean)
final_clean_summaries_lengths[article_id] += len(next_sent_clean.split())
sentence_selection_indices[article_id].append(sentence_num)
summary_history[article_id].append(next_sent)
for sel_idx in sentence_selection_indices:
for j in sel_idx:
if j in selection_stat:
selection_stat[j] += 1
else:
selection_stat[j] = 1
if args.print_summary:
for article_sentences, summary_sentences, summary_history_sents in zip(
articles, final_clean_summaries, summary_history):
print("Input:")
for i, paraphrase in enumerate(article_sentences):
print(" {}) {}".format(i, paraphrase.replace("@@ ", " ")))
print()
print("Summary:")
for i, paraphrase in enumerate(summary_sentences):
print(" {}) {}".format(i, paraphrase))
print("Summary history:")
for i, paraphrase in enumerate(summary_history_sents):
print(" {}) {}".format(i, paraphrase))
print("*"*50)
print()
for final_clean_summary in final_clean_summaries:
if args.buffer_size == 1:
pbar.update()
if args.detokenize:
final_clean_summary = [detokenizer.detokenize(s.split(), return_str=True) for s in final_clean_summary]
out_file.write("{}\n".format(" ".join(final_clean_summary)))
else:
out_file.write("{}\n".format(" <s> ".join(final_clean_summary)))
out_file.flush()
total_processed += len(input_lines)
end_time = (time.time() - start_time)
if args.buffer_size > 1:
print("--- Processed {} articles ({}s, {}s/article) ---".format(
total_processed, np.round(end_time, 4),
np.round(end_time / len(input_lines), 4)))
# update running id counter
start_id += total_sentences_in_buffer
end_time_global = (time.time() - start_time_global)
print("--- Total time for {} articles ({}s, {}s/article) ---".format(
total_processed, np.round(end_time_global, 4),
np.round(end_time_global / total_processed, 4)))
print("Selection stat: {}".format(selection_stat))
out_file.close()
def cli_main():
parser = options.get_generation_parser(interactive=True)
parser.add_argument('--output_file', help='file to write the output to')
parser.add_argument('--num_output_sentences', help='file to write the output to',
default=-1, type=int)
parser.add_argument('--max_summary_tokens', help='file to write the output to',
default=-1, type=int)
parser.add_argument('--extractive_approach', help="Paraphrase then extract",
default="lead")
parser.add_argument('--extractive_only', help="Only extractive summarization",
default=False, type=bool)
parser.add_argument('--print_summary', help="print summaries to std out",
default=False, type=bool)
parser.add_argument('--detokenize',
help="detokenize output summary sentences with NLTK", default=False, type=bool)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == '__main__':
cli_main()
| en | 0.765581 | #!/usr/bin/env python3 -u # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. Translate raw text with a trained model. Batches data on-the-fly. Compute the overlap score between a source and a target set. It is the intersection of the two sets, divided by the length of the target set. Compute copy rate :param source: :param target: :param stem: whether to perform stemming using nltk :return: Compute the jaccard similarity between two texts. # assert not args.max_sentences or args.max_sentences <= args.buffer_size, \ # '--max-sentences/--batch-size cannot be larger than --buffer-size' # Setup task, e.g., translation # Load ensemble # Set dictionaries # Optimize ensemble for generation # Initialize generator # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) # Generate the batch of sentences to paraphrase # Generate the history batch # for sentence_batch, history_batch in zip(sentence_batch_iter, history_batch_iter): # Generate the next abstractive sentences # sort output to match input order # Process top predictions # print(hypo) # Remove BPE # Split the paraphrases per article # Remove BPE # Run extractive summarization # Only run extractive summarization # Only regenerate paraphrases first time # Sentence paraphrasing # Article shorter than expected summary length # update running id counter | 2.360264 | 2 |
tests/conftest.py | audeering/audbackend | 0 | 6632019 |
import glob
import os
import shutil
import pytest
import audeer
import audfactory
pytest.ROOT = audeer.safe_path(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'tmp',
)
)
pytest.ARTIFACTORY_HOST = 'https://audeering.jfrog.io/artifactory'
pytest.FILE_SYSTEM_HOST = os.path.join(pytest.ROOT, 'repo')
pytest.ID = audeer.uid()
pytest.REPOSITORY_NAME = 'unittests-public'
@pytest.fixture(scope='session', autouse=True)
def cleanup_session():
path = os.path.join(
pytest.ROOT,
'..',
'.coverage.*',
)
for file in glob.glob(path):
os.remove(file)
yield
if os.path.exists(pytest.ROOT):
shutil.rmtree(pytest.ROOT)
url = audfactory.path(
audfactory.url(
pytest.ARTIFACTORY_HOST,
repository=pytest.REPOSITORY_NAME,
group_id=pytest.ID,
),
)
if url.exists():
url.unlink()
| none | 1 | 1.939166 | 2 |
pyaib/components.py | loljoho-old/ainu | 0 | 6632020 |
#!/usr/bin/env python
#
# Copyright 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import inspect
import collections
from importlib import import_module
from gevent.event import AsyncResult
import gevent
from .util.decorator import EasyDecorator
from .irc import Message
__all__ = ['component_class',
'msg_parser',
'watches', 'observe', 'observes', 'handle', 'handles',
'every',
'triggers_on', 'keyword', 'keywords', 'trigger', 'triggers',
'ComponentManager']
#Used to mark classes for later inspection
CLASS_MARKER = '_PYAIB_COMPONENT'
def component_class(cls):
"""
Let the component loader know to load this class
If they pass a string argument to the decorator use it as a context
name for the instance
"""
if isinstance(cls, basestring):
context = cls
def wrapper(cls):
setattr(cls, CLASS_MARKER, context)
return cls
return wrapper
elif inspect.isclass(cls):
setattr(cls, CLASS_MARKER, True)
return cls
def _requires(*names):
def wrapper(cls):
cls.__requires__ = names
return cls
return wrapper
component_class.requires = _requires
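# Minimal usage sketch (never called): the component and requirement names below
# are made up. Passing a string to component_class stores the instance on the IRC
# context under that name; .requires() makes the loader wait for other components.
def _example_component_class_usage():
    @component_class('greeter')
    @component_class.requires('db')
    class Greeter(object):
        def __init__(self, irc_c, config):
            self.config = config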
def _get_plugs(method, kind):
""" Setup a place to put plugin hooks, allowing only one type per func """
if not hasattr(method, '__plugs__'):
method.__plugs__ = (kind, [])
elif method.__plugs__[0] != kind:
raise RuntimeError('Multiple Hook Types on a single method (%s)' %
method.__name__)
return method.__plugs__[1]
def msg_parser(*kinds, **kwargs):
"""
Defines that this method is a message type parser
@param kinds: List of IRC message types/numerics
@param kwargs: Accepts chain keyword, True or 'after' executes this after
the existing parser. 'before' execute before existing parsers.
default is to replace the existing parser
"""
chain = kwargs.pop('chain', False)
def wrapper(func):
parsers = _get_plugs(func, 'parsers')
parsers.extend([(kind, chain) for kind in kinds])
return func
return wrapper
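# Minimal usage sketch (never called): registers an extra parser for one message
# kind. 'PRIVMSG' and the attribute set on msg are illustrative assumptions; the
# (msg, irc_c) signature matches how chained parsers are invoked below.
def _example_msg_parser_usage():
    @msg_parser('PRIVMSG', chain='before')
    def tag_privmsg(msg, irc_c):
        msg.tagged = True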
def watches(*events):
""" Define a series of events to later be subscribed to """
def wrapper(func):
eplugs = _get_plugs(func, 'events')
eplugs.extend([event for event in events if event not in eplugs])
return func
return wrapper
observes = watches
observe = watches
handle = watches
handles = watches
class _Ignore(EasyDecorator):
"""Only pass if triggers is from user not ignored"""
def wrapper(dec, irc_c, msg, *args):
if dec.args and dec.kwargs.get('runtime'):
for attr in dec.args:
if hasattr(dec._instance, attr):
ignore_nicks = getattr(dec._instance, attr)
if isinstance(ignore_nicks, basestring)\
and msg.sender.nick == ignore_nicks:
return
elif isinstance(ignore_nicks, collections.Container)\
and msg.sender.nick in ignore_nicks:
return
elif dec.args and msg.sender.nick in dec.args:
return
return dec.call(irc_c, msg, *args)
watches.ignore = _Ignore
class _Channel(EasyDecorator):
"""Ignore triggers not in channels, or optionally a list of channels"""
def wrapper(dec, irc_c, msg, *args):
if msg.channel:
#Did they want to restrict which channels
#Should we lookup allowed channels at run time
if dec.args and dec.kwargs.get('runtime'):
for attr in dec.args:
ok = False
if hasattr(dec._instance, attr):
channel = getattr(dec._instance, attr)
if isinstance(channel, basestring)\
and msg.channel == channel:
ok = True
elif isinstance(channel, collections.Container)\
and msg.channel in channel:
ok = True
if not ok:
return
elif dec.args and msg.channel not in dec.args:
return
return dec.call(irc_c, msg, *args)
watches.channel = _Channel
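# Minimal usage sketch (never called): the event name, channel and nick are
# illustrative assumptions, as is the decorator ordering with the channel/ignore
# helpers attached to watches above.
def _example_watches_usage():
    @watches('IRC_MSG_PRIVMSG')
    @watches.channel('#example')
    @watches.ignore('noisy_bot')
    def on_privmsg(irc_c, msg):
        msg.reply('seen')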
def every(seconds, name=None):
""" Define a timer to execute every interval """
def wrapper(func):
timers = _get_plugs(func, 'timers')
timer = (name if name else func.__name__, seconds)
if timer not in timers:
timers.append(timer)
return func
return wrapper
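# Minimal usage sketch (never called): schedules a handler every 300 seconds under
# the timer name 'announce'. The handler signature is an assumption; it depends on
# how the timers component invokes its callbacks.
def _example_every_usage():
    @every(300, 'announce')
    def announce(irc_c, *args):
        pass  # e.g. send a scheduled message here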
class triggers_on(object):
"""Define a series of trigger words this method responds too"""
def __init__(self, *words):
self.words = words
def __call__(self, func):
triggers = _get_plugs(func, 'triggers')
triggers.extend(set([word for word in self.words
if word not in triggers]))
return func
class channel(EasyDecorator):
"""Ignore triggers not in channels, or optionally a list of channels"""
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if msg.channel:
#Did they want to restrict which channels
#Should we lookup allowed channels at run time
if dec.args and dec.kwargs.get('runtime'):
ok = False
for attr in dec.args:
if hasattr(dec._instance, attr):
channel = getattr(dec._instance, attr)
if isinstance(channel, basestring)\
and msg.channel.lower() == channel:
ok = True
elif isinstance(channel, collections.Container)\
and msg.channel.lower() in channel:
ok = True
if not ok:
return
elif dec.args and msg.channel not in dec.args:
return
elif not dec.kwargs.get('private'):
return
return dec.call(irc_c, msg, trigger, args, kargs)
class private_or_channel(channel):
"""Allow either private or specified channel"""
def __init__(dec, *args, **kwargs):
kwargs['private'] = True
super(private_or_channel, dec).__init__(*args, **kwargs)
class private(EasyDecorator):
"""Only pass if triggers is from message not in a channel"""
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if not msg.channel:
return dec.call(irc_c, msg, trigger, args, kargs)
class helponly(EasyDecorator):
"""Only provide help"""
def wrapper(dec, irc_c, msg, trigger, args, kargs):
msg.reply('%s %s' % (trigger,
irc_c.triggers._clean_doc(dec.__doc__)))
class autohelp(EasyDecorator):
"""Make --help trigger help"""
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if 'help' in kargs or (args and args[0] == 'help'):
msg.reply('%s %s' % (trigger,
irc_c.triggers._clean_doc(dec.__doc__)))
else:
dec.call(irc_c, msg, trigger, args, kargs)
class autohelp_noargs(EasyDecorator):
"""Empty args / kargs trigger help"""
#It was impossible to call autohelp to decorate this method
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if (not args and not kargs) or 'help' in kargs or (
args and args[0] == 'help'):
msg.reply('%s %s' % (trigger,
irc_c.triggers._clean_doc(dec.__doc__)))
else:
return dec.call(irc_c, msg, trigger, args, kargs)
class sub(EasyDecorator):
"""Handle only sub(words) for a given trigger"""
def __init__(dec, *words):
dec._subs = words
for word in words:
if not isinstance(word, basestring):
raise TypeError("sub word must be a string")
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if args and args[0].lower() in dec._subs:
return dec.call(irc_c, msg, '%s %s' % (trigger,
args[0].lower()),
args[1:], kargs)
subs = sub
class nosub(EasyDecorator):
"""Prevent call if argument is present"""
def wrapper(dec, irc_c, msg, trigger, args, kargs):
if (not dec.args and args) or (dec.args and args
and args[0].lower() in dec.args):
return
else:
return dec.call(irc_c, msg, trigger, args, kargs)
nosubs = nosub
keyword = keywords = trigger = triggers = triggers_on
triggers.ignore = _Ignore
triggers.channel = _Channel
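# Minimal usage sketch (never called): 'echo' is a made-up keyword. The stacked
# autohelp helper comes from the triggers_on class above, and the handler signature
# matches how trigger observers are invoked (irc_c, msg, trigger, args, kargs).
def _example_trigger_usage():
    @keyword('echo')
    @keyword.autohelp
    def echo(irc_c, msg, trigger, args, kargs):
        """echo <words> -- repeat the given words"""
        msg.reply(' '.join(args))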
class ComponentManager(object):
""" Manage and Load all pyaib Components """
_loaded_components = collections.defaultdict(AsyncResult)
def __init__(self, context, config):
""" Needs a irc context and its config """
self.context = context
self.config = config
def load(self, name):
""" Load a python module as a component """
if self.is_loaded(name):
return
#Load top level config item matching component name
basename = name.split('.').pop()
config = self.context.config.setdefault(basename, {})
print("Loading Component %s..." % name)
ns = self._process_component(name, 'pyaib', CLASS_MARKER,
self.context, config)
self._loaded_components[basename].set(ns)
def _require(self, name):
self._loaded_components[name].wait()
def load_configured(self, autoload=None):
"""
Load all configured components autoload is a list of components
to always load
"""
components = []
if isinstance(autoload, (list, tuple, set)):
components.extend(autoload)
#Don't do duplicate loads
if self.config.load:
if not isinstance(self.config.load, list):
self.config.load = self.config.load.split(' ')
[components.append(comp) for comp in self.config.load
if comp not in components]
gevent.joinall([gevent.spawn(self.load, component)
for component in components])
def is_loaded(self, name):
""" Determine by name if a component is loaded """
return self._loaded_components[name].ready()
def _install_hooks(self, context, hooked_methods):
#Add All the hooks to the right place
for method in hooked_methods:
kind, args = method.__plugs__
if kind == 'events':
for event in args:
context.events(event).observe(method)
elif kind == 'triggers':
for word in args:
context.triggers(word).observe(method)
elif kind == 'timers':
for name, seconds in args:
context.timers.set(name, method, every=seconds)
elif kind == 'parsers':
for name, chain in args:
self._add_parsers(method, name, chain)
def _add_parsers(self, method, name, chain):
""" Handle Message parser adding and chaining """
if chain:
existing = Message.get_parser(name)
def _chain_after(msg, irc_c):
existing(msg, irc_c)
method(msg, irc_c)
def _chain_before(msg, irc_c):
method(msg, irc_c)
existing(msg, irc_c)
if existing and chain == 'before':
Message.add_parser(name, _chain_before)
elif existing:
Message.add_parser(name, _chain_after)
else:
Message.add_parser(name, method)
else:
Message.add_parser(name, method)
def _find_annotated_callables(self, class_marker, component_ns, config,
context):
annotated_callables = []
for name, member in inspect.getmembers(component_ns):
#Find Classes marked for loading
if inspect.isclass(member) and hasattr(member, class_marker):
#Handle Requirements
if hasattr(member, '__requires__'):
for req in member.__requires__:
self._require(req)
obj = member(context, config)
#Save the context for this obj if the class_marker is a str
context_name = getattr(obj, class_marker)
if isinstance(context_name, basestring):
context[context_name] = obj
#Search for hooked instance methods
for name, thing in inspect.getmembers(obj):
if (isinstance(thing, collections.Callable)
and hasattr(thing, '__plugs__')):
annotated_callables.append(thing)
#Find Functions with Hooks
if (isinstance(member, collections.Callable)
and hasattr(member, '__plugs__')):
annotated_callables.append(member)
return annotated_callables
def _process_component(self, name, path, class_marker, context, config):
if name.startswith('/'):
importname = name[1:]
path = None
else:
importname = '.'.join([path, name])
try:
component_ns = import_module(importname)
except ImportError as e:
raise ImportError('pyaib failed to load (%s): %r'
% (importname, e))
annotated_calls = self._find_annotated_callables(class_marker,
component_ns, config,
context)
self._install_hooks(context, annotated_calls)
return component_ns
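# Minimal usage sketch (never called): how a bot context might drive the manager.
# The config attribute path and the component name 'plugins' are assumptions.
def _example_manager_setup(irc_context):
    manager = ComponentManager(irc_context, irc_context.config.components)
    manager.load_configured(autoload=['plugins'])
    return manager.is_loaded('plugins')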
NestedList.py | shreya-n-kumari/python | 0 | 6632021 | employes = [['shreya',22,'developer'],['dekosta',25,'security'],['alina',30,'sales']]
print(employes) #print the whole list.
for employe in employes:
print(employe) #print the sub lists.
for employee in employes:
print('Name:',(employee[0]))
print('Age: ', employee[1])
print('Department:',employee[2])
print('-' * 20)
employee = employes[1] #print the list which is at index 1.
print(employee)
print('Name:',employee[0])
print('Age: ', employee[1])
print('Department:', employee[2])
print('.'*25)
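# Appended example (not in the original script): the same report via tuple unpacking,
# which avoids indexing each inner [name, age, department] list by position.
for name, age, department in employes:
    print('Name:', name)
    print('Age: ', age)
    print('Department:', department)
    print('-' * 20)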
manage.py | willuvbb/test_fastapi_template | 128 | 6632022 | <reponame>willuvbb/test_fastapi_template
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from application.initializer import IncludeAPIRouter
from application.main.config import settings
def get_application():
_app = FastAPI(title=settings.API_NAME,
description=settings.API_DESCRIPTION,
version=settings.API_VERSION)
_app.include_router(IncludeAPIRouter())
_app.add_middleware(
CORSMiddleware,
allow_credentials=False,
allow_methods=["*"],
allow_headers=["*"],
)
return _app
app = get_application()
@app.on_event("shutdown")
async def app_shutdown():
    # On app shutdown, do cleanup work here (e.g. close connections or trigger an event).
    print("On app shutdown I will be called.")
#uvicorn.run("manage:app", host=settings.HOST, port=settings.PORT, log_level=settings.LOG_LEVEL, use_colors=True,reload=True)
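# Appended example (not part of the original manage.py): serving the app locally.
# The host/port literals are placeholders; the commented-out line above shows the
# original intent of reading them from application.main.config.settings instead.
if __name__ == "__main__":
    uvicorn.run("manage:app", host="127.0.0.1", port=8000, reload=True)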
venv/Lib/site-packages/pygame_gui/core/drawable_shapes/ellipse_drawable_shape.py | auneselva/pygame_shoot_out_zombies_ai | 0 | 6632023 | import math
from typing import Dict, List, Union, Tuple, Any
import pygame
from pygame_gui.core.interfaces import IUIManagerInterface
from pygame_gui.core.colour_gradient import ColourGradient
from pygame_gui.core.drawable_shapes.drawable_shape import DrawableShape
from pygame_gui.core.utility import apply_colour_to_surface, basic_blit
class EllipseDrawableShape(DrawableShape):
"""
A drawable ellipse shape for the UI, has theming options for a border, a shadow, colour
gradients and text.
:param containing_rect: The layout rectangle that surrounds and controls the size of this shape.
:param theming_parameters: Various styling parameters that control the final look of the shape.
:param states: The different UI states the shape can be in. Shapes have different surfaces
for each state.
:param manager: The UI manager.
"""
def __init__(self, containing_rect: pygame.Rect,
theming_parameters: Dict[str, Any],
states: List[str],
manager: IUIManagerInterface):
super().__init__(containing_rect, theming_parameters, states, manager)
self.ellipse_center = containing_rect.center
self.ellipse_half_diameters = (0.5 * containing_rect.width, 0.5 * containing_rect.height)
self.full_rebuild_on_size_change()
def full_rebuild_on_size_change(self):
"""
        Completely redraw the shape from its theming parameters and dimensions.
"""
super().full_rebuild_on_size_change()
# clamping border and shadow widths so we can't form impossible negative sized surfaces
if self.shadow_width > min(math.floor(self.containing_rect.width / 2),
math.floor(self.containing_rect.height / 2)):
self.shadow_width = min(math.floor(self.containing_rect.width / 2),
math.floor(self.containing_rect.height / 2))
if self.shadow_width < 0:
self.shadow_width = 0
if self.border_width > min(math.floor((self.containing_rect.width -
(self.shadow_width * 2)) / 2),
math.floor((self.containing_rect.height -
(self.shadow_width * 2)) / 2)):
self.border_width = min(math.floor((self.containing_rect.width -
(self.shadow_width * 2)) / 2),
math.floor((self.containing_rect.height -
(self.shadow_width * 2)) / 2))
if self.border_width < 0:
self.border_width = 0
if self.shadow_width > 0:
self.click_area_shape = pygame.Rect((self.containing_rect.x + self.shadow_width,
self.containing_rect.y + self.shadow_width),
(self.containing_rect.width -
(2 * self.shadow_width),
self.containing_rect.height -
(2 * self.shadow_width)))
self.base_surface = self.ui_manager.get_shadow(self.containing_rect.size,
self.shadow_width, 'ellipse')
else:
self.click_area_shape = self.containing_rect.copy()
self.base_surface = pygame.surface.Surface(self.containing_rect.size,
flags=pygame.SRCALPHA,
depth=32)
self.base_surface.fill(pygame.Color('#00000000'))
self.compute_aligned_text_rect()
self.border_rect = pygame.Rect((self.shadow_width,
self.shadow_width),
(self.click_area_shape.width, self.click_area_shape.height))
self.background_rect = pygame.Rect((self.border_width + self.shadow_width,
self.border_width + self.shadow_width),
(self.click_area_shape.width - (2 * self.border_width),
self.click_area_shape.height - (2 * self.border_width)))
self.redraw_all_states()
def collide_point(self, point: Union[pygame.math.Vector2,
Tuple[int, int],
Tuple[float, float]]) -> bool:
"""
Checks collision between a point and this ellipse.
:param point: The point to test against the shape.
:return: True if the point is inside the shape.
"""
collided = False
x_val = ((point[0] - self.ellipse_center[0]) ** 2) / (self.ellipse_half_diameters[0] ** 2)
y_val = ((point[1] - self.ellipse_center[1]) ** 2) / (self.ellipse_half_diameters[1] ** 2)
if (x_val + y_val) < 1:
collided = True
return collided
def set_dimensions(self, dimensions: Union[pygame.math.Vector2,
Tuple[int, int],
Tuple[float, float]]):
"""
Expensive size change of the ellipse shape.
:param dimensions: The new size to set the shape to.
"""
self.containing_rect.width = dimensions[0]
self.containing_rect.height = dimensions[1]
self.click_area_shape.width = dimensions[0] - (2 * self.shadow_width)
self.click_area_shape.height = dimensions[1] - (2 * self.shadow_width)
self.ellipse_half_diameters = (0.5 * self.containing_rect.width,
0.5 * self.containing_rect.height)
self.full_rebuild_on_size_change()
def set_position(self, point: Union[pygame.math.Vector2,
Tuple[int, int],
Tuple[float, float]]):
"""
Move the shape. Only really impacts the position of the 'click_area' hot spot.
:param point: The new position to move it to.
"""
self.containing_rect.x = point[0]
self.containing_rect.y = point[1]
self.click_area_shape.x = point[0] + self.shadow_width
self.click_area_shape.y = point[1] + self.shadow_width
self.ellipse_center = self.click_area_shape.center
def redraw_state(self, state_str: str):
"""
Redraws the shape's surface for a given UI state.
:param state_str: The ID string of the state to rebuild.
"""
border_colour_state_str = state_str + '_border'
bg_colour_state_str = state_str + '_bg'
text_colour_state_str = state_str + '_text'
image_state_str = state_str + '_image'
found_shape = None
shape_id = None
if 'filled_bar' not in self.theming and 'filled_bar_width_percentage' not in self.theming:
shape_id = self.shape_cache.build_cache_id('ellipse',
self.containing_rect.size,
self.shadow_width,
self.border_width,
self.theming[border_colour_state_str],
self.theming[bg_colour_state_str])
found_shape = self.shape_cache.find_surface_in_cache(shape_id)
if found_shape is not None:
self.states[state_str].surface = found_shape.copy()
else:
self.states[state_str].surface = self.base_surface.copy()
# Try one AA call method
aa_amount = 4
self.border_rect = pygame.Rect((self.shadow_width * aa_amount,
self.shadow_width * aa_amount),
(self.click_area_shape.width * aa_amount,
self.click_area_shape.height * aa_amount))
self.background_rect = pygame.Rect(((self.border_width +
self.shadow_width) * aa_amount,
(self.border_width +
self.shadow_width) * aa_amount),
(self.border_rect.width -
(2 * self.border_width * aa_amount),
self.border_rect.height -
(2 * self.border_width * aa_amount)))
bab_surface = pygame.surface.Surface((self.containing_rect.width * aa_amount,
self.containing_rect.height * aa_amount),
flags=pygame.SRCALPHA,
depth=32)
bab_surface.fill(pygame.Color('#00000000'))
if self.border_width > 0:
if isinstance(self.theming[border_colour_state_str], ColourGradient):
shape_surface = self.clear_and_create_shape_surface(bab_surface,
self.border_rect,
0, aa_amount=aa_amount,
clear=False)
self.theming[border_colour_state_str].apply_gradient_to_surface(shape_surface)
else:
shape_surface = self.clear_and_create_shape_surface(bab_surface,
self.border_rect,
0, aa_amount=aa_amount,
clear=False)
apply_colour_to_surface(self.theming[border_colour_state_str],
shape_surface)
basic_blit(bab_surface, shape_surface, self.border_rect)
if isinstance(self.theming[bg_colour_state_str], ColourGradient):
shape_surface = self.clear_and_create_shape_surface(bab_surface,
self.background_rect, 1,
aa_amount=aa_amount)
self.theming[bg_colour_state_str].apply_gradient_to_surface(shape_surface)
else:
shape_surface = self.clear_and_create_shape_surface(bab_surface,
self.background_rect, 1,
aa_amount=aa_amount)
apply_colour_to_surface(self.theming[bg_colour_state_str], shape_surface)
basic_blit(bab_surface, shape_surface, self.background_rect)
# apply AA to background
bab_surface = pygame.transform.smoothscale(bab_surface, self.containing_rect.size)
# cut a hole in shadow, then blit background into it
sub_surface = pygame.surface.Surface(
((self.containing_rect.width - (2 * self.shadow_width)) * aa_amount,
(self.containing_rect.height - (2 * self.shadow_width)) * aa_amount),
flags=pygame.SRCALPHA, depth=32)
sub_surface.fill(pygame.Color('#00000000'))
pygame.draw.ellipse(sub_surface, pygame.Color("#FFFFFFFF"), sub_surface.get_rect())
small_sub = pygame.transform.smoothscale(sub_surface,
(self.containing_rect.width -
(2 * self.shadow_width),
self.containing_rect.height -
(2 * self.shadow_width)))
self.states[state_str].surface.blit(small_sub, pygame.Rect((self.shadow_width,
self.shadow_width),
sub_surface.get_size()),
special_flags=pygame.BLEND_RGBA_SUB)
basic_blit(self.states[state_str].surface, bab_surface, (0, 0))
if (shape_id is not None and
self.states[state_str].surface.get_width() <= 1024 and
self.states[state_str].surface.get_height() <= 1024):
self.shape_cache.add_surface_to_cache(self.states[state_str].surface.copy(),
shape_id)
self.rebuild_images_and_text(image_state_str, state_str, text_colour_state_str)
self.states[state_str].has_fresh_surface = True
self.states[state_str].generated = True
@staticmethod
def clear_and_create_shape_surface(surface: pygame.surface.Surface,
rect: pygame.Rect,
overlap: int,
aa_amount: int,
clear: bool = True) -> pygame.surface.Surface:
"""
Clear a space for a new shape surface on the main state surface for this state. The
surface created will be plain white so that it can be easily multiplied with a colour
surface.
:param surface: The surface we are working on.
:param rect: Used to size and position the new shape.
:param overlap: The amount of overlap between this surface and the one below.
:param aa_amount: The amount of Anti Aliasing to use for this shape.
:param clear: Whether we should clear our surface.
:return: The new shape surface.
"""
# For the visible AA shape surface we only want to blend in the alpha channel
large_shape_surface = pygame.surface.Surface((rect.width, rect.height),
flags=pygame.SRCALPHA,
depth=32)
large_shape_surface.fill(pygame.Color('#00000000'))
pygame.draw.ellipse(large_shape_surface, pygame.Color("#FFFFFFFF"),
large_shape_surface.get_rect())
if clear:
# before we draw a shape we clear a space for it, to allow for transparency.
# This works best if we leave a small overlap between the old background
# and the new shape
subtract_rect = pygame.Rect(rect.x + (overlap * aa_amount),
rect.y + (overlap * aa_amount),
max(0, rect.width - 2 * (overlap * aa_amount)),
max(0, rect.height - 2 * (overlap * aa_amount)))
# for the subtract surface we want to blend in all RGBA channels to clear correctly
# for our new shape
large_sub_surface = pygame.surface.Surface((subtract_rect.width,
subtract_rect.height),
flags=pygame.SRCALPHA, depth=32)
large_sub_surface.fill(pygame.Color('#00000000'))
pygame.draw.ellipse(large_sub_surface, pygame.Color("#FFFFFFFF"),
large_sub_surface.get_rect())
surface.blit(large_sub_surface, subtract_rect, special_flags=pygame.BLEND_RGBA_SUB)
return large_shape_surface
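# Appended example (not part of pygame_gui): the normalised-distance test that
# collide_point() above is built on, shown standalone for a 100x50 ellipse centred at (50, 25).
def _point_in_ellipse(point, center, half_diameters):
    x_val = ((point[0] - center[0]) ** 2) / (half_diameters[0] ** 2)
    y_val = ((point[1] - center[1]) ** 2) / (half_diameters[1] ** 2)
    return (x_val + y_val) < 1

assert _point_in_ellipse((60, 25), (50, 25), (50.0, 25.0))       # inside
assert not _point_in_ellipse((100, 50), (50, 25), (50.0, 25.0))  # outside the curve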
exp_tools/Trial.py | StevenM1/flashtask | 0 | 6632024 | <reponame>StevenM1/flashtask<filename>exp_tools/Trial.py
#!/usr/bin/env python
# encoding: utf-8
"""
Trial.py
Created by <NAME> on 2009-11-26.
Copyright (c) 2009 TK. All rights reserved.
"""
import time as time_module
from Session import *
class Trial(object):
"""base class for Trials"""
def __init__(self, parameters={}, phase_durations=[], session=None, screen=None, tracker=None):
super(Trial, self).__init__()
self.parameters = parameters.copy()
self.phase_durations = phase_durations
self.screen = screen
self.tracker = tracker
self.session = session
self.events = []
self.phase = 0
self.phase_time = None
self.phase_times = np.cumsum(np.array(self.phase_durations))
self.stopped = False
def create_stimuli(self):
pass
def run(self):
self.start_time = self.session.clock.getTime()
if self.tracker:
self.tracker.log('trial ' + str(self.ID) + ' started at ' + str(self.start_time) )
self.tracker.send_command('record_status_message "Trial ' + str(self.ID) + '"')
self.events.append('trial ' + str(self.ID) + ' started at ' + str(self.start_time))
def stop(self):
self.stop_time = self.session.clock.getTime()
self.stopped = True
if self.tracker:
# pipe parameters to the eyelink data file in a for loop so as to limit the risk of flooding the buffer
for k in self.parameters.keys():
self.tracker.log('trial ' + str(self.ID) + ' parameter\t' + k + ' : ' + str(self.parameters[k]) )
time_module.sleep(0.0005)
self.tracker.log('trial ' + str(self.ID) + ' stopped at ' + str(self.stop_time) )
self.session.outputDict['eventArray'].append(self.events)
self.session.outputDict['parameterArray'].append(self.parameters)
def key_event(self, event):
if self.tracker:
self.tracker.log('trial ' + str(self.ID) + ' event ' + str(event) + ' at ' + str(self.session.clock.getTime()) )
self.events.append('trial ' + str(self.ID) + ' event ' + str(event) + ' at ' + str(self.session.clock.getTime()))
def feedback(self, answer, setting):
"""feedback give the subject feedback on performance"""
if setting != 0.0:
if cmp(setting, 0) == answer:
self.session.play_sound(sound_index=0)
else:
self.session.play_sound(sound_index=1)
def draw(self):
"""draw function of the Trial superclass finishes drawing by clearing, drawing the viewport and swapping buffers"""
self.screen.flip()
def phase_forward(self):
"""go one phase forward"""
self.phase += 1
self.phase_time = self.session.clock.getTime()
self.events.append('trial ' + str(self.ID) + ' phase ' + str(self.phase) + ' started at ' + str(
self.phase_time))
if self.tracker:
self.tracker.log('trial ' + str(self.ID) + ' phase ' + str(self.phase) + ' started at ' +
str(self.phase_time))
            time_module.sleep(0.0005)
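# Appended example (not part of the original module): a minimal Trial subclass sketch.
# A session object providing .clock, .outputDict and .play_sound(), plus a screen with
# .flip(), is assumed to be supplied by the surrounding experiment code.
class FixationTrial(Trial):
    def draw(self):
        # draw the stimuli for the current self.phase here, then let the base class flip
        super(FixationTrial, self).draw()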
test/datastore_test.py | truggles/pudl | 0 | 6632025 | """
Exercise the functionality in the datastore management module.
The local datastore is managed by a script that uses the datastore management
module to pull raw data from public sources, and organize it prior to ETL.
However, that process is time consuming because the data is large and far away.
Because of that, we don't pull down new data frequently, which means if the
datastore management infrastructure breaks, we often don't find out about it
for a long time. These tests are meant to help avoid that problem by
continuously exercising this functionality.
"""
import logging
import os
import pudl
import pudl.workspace.datastore as datastore
logger = logging.getLogger(__name__)
def test_datastore(pudl_settings_fixture, data_scope):
"""Download sample data for each available data source."""
sources_to_update = ['eia860', 'eia923', 'epaipm']
years_by_source = {
'eia860': data_scope['eia860_years'],
'eia923': data_scope['eia923_years'],
'epacems': [],
'epaipm': [None, ],
'ferc1': [],
}
# Sadly, FERC & EPA only provide access to their data via FTP, and it's
# not possible to use FTP from within the Travis CI environment:
if os.getenv('TRAVIS'):
states = []
else:
# Idaho has the least data of any CEMS state.
states = data_scope['epacems_states']
sources_to_update.extend(['ferc1', 'epacems'])
years_by_source['ferc1'] = data_scope['ferc1_years']
years_by_source['epacems'] = data_scope['epacems_years']
datastore.parallel_update(
sources=sources_to_update,
years_by_source=years_by_source,
states=states,
data_dir=pudl_settings_fixture['data_dir'],
)
pudl.helpers.verify_input_files(
ferc1_years=years_by_source['ferc1'],
eia923_years=years_by_source['eia923'],
eia860_years=years_by_source['eia860'],
epacems_years=years_by_source['epacems'],
epacems_states=states,
# Currently no mechanism for automatically verifying EPA IPM files...
pudl_settings=pudl_settings_fixture,
)
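# Appended example (not part of the original test): the same update run outside pytest.
# The year list and data_dir are placeholders, and running this performs real downloads.
if __name__ == "__main__":
    datastore.parallel_update(sources=['eia923'],
                              years_by_source={'eia923': [2018]},
                              states=[],
                              data_dir='/tmp/pudl-data')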
| """
Exercise the functionality in the datastore management module.
The local datastore is managed by a script that uses the datastore management
module to pull raw data from public sources, and organize it prior to ETL.
However, that process is time consuming because the data is large and far away.
Because of that, we don't pull down new data frequently, which means if the
datastore management infrastructure breaks, we often don't find out about it
for a long time. These tests are meant to help avoid that problem by
continuosly exercising this functionality.
"""
import logging
import os
import pudl
import pudl.workspace.datastore as datastore
logger = logging.getLogger(__name__)
def test_datastore(pudl_settings_fixture, data_scope):
"""Download sample data for each available data source."""
sources_to_update = ['eia860', 'eia923', 'epaipm']
years_by_source = {
'eia860': data_scope['eia860_years'],
'eia923': data_scope['eia923_years'],
'epacems': [],
'epaipm': [None, ],
'ferc1': [],
}
# Sadly, FERC & EPA only provide access to their data via FTP, and it's
# not possible to use FTP from within the Travis CI environment:
if os.getenv('TRAVIS'):
states = []
else:
# Idaho has the least data of any CEMS state.
states = data_scope['epacems_states']
sources_to_update.extend(['ferc1', 'epacems'])
years_by_source['ferc1'] = data_scope['ferc1_years']
years_by_source['epacems'] = data_scope['epacems_years']
datastore.parallel_update(
sources=sources_to_update,
years_by_source=years_by_source,
states=states,
data_dir=pudl_settings_fixture['data_dir'],
)
pudl.helpers.verify_input_files(
ferc1_years=years_by_source['ferc1'],
eia923_years=years_by_source['eia923'],
eia860_years=years_by_source['eia860'],
epacems_years=years_by_source['epacems'],
epacems_states=states,
# Currently no mechanism for automatically verifying EPA IPM files...
pudl_settings=pudl_settings_fixture,
)
| en | 0.933022 | Exercise the functionality in the datastore management module. The local datastore is managed by a script that uses the datastore management module to pull raw data from public sources, and organize it prior to ETL. However, that process is time consuming because the data is large and far away. Because of that, we don't pull down new data frequently, which means if the datastore management infrastructure breaks, we often don't find out about it for a long time. These tests are meant to help avoid that problem by continuosly exercising this functionality. Download sample data for each available data source. # Sadly, FERC & EPA only provide access to their data via FTP, and it's # not possible to use FTP from within the Travis CI environment: # Idaho has the least data of any CEMS state. # Currently no mechanism for automatically verifying EPA IPM files... | 2.102029 | 2 |
project/server/plugins/double/double_negative.py | ibrezm1/minion-go | 0 | 6632026 | <filename>project/server/plugins/double/double_negative.py
from project.server import plugin_collection
class DoubleNegative(plugin_collection.Plugin):
"""This plugin will just multiply the argument with the value -2
"""
def __init__(self,input_data):
super().__init__(input_data)
self.description = 'Negative double function'
def perform_operation(self, argument):
"""The actual implementation of this plugin is to multiple the
value of the supplied argument by -2
"""
        return argument*-2
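# Appended example (not part of the plugin): using the class directly. How
# plugin_collection discovers plugins is assumed, so the instance is built by hand
# with a placeholder input_data value.
plugin = DoubleNegative(input_data=None)
print(plugin.description)           # Negative double function
print(plugin.perform_operation(7))  # -14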
Python/Export/export_mitral.py | OpenSourceBrain/MiglioreEtAl14_OlfactoryBulb3D | 1 | 6632027 | <reponame>OpenSourceBrain/MiglioreEtAl14_OlfactoryBulb3D
import os
import sys
import neuroml
import exportHelper
#Nav to neuron folder where compiled MOD files are present
os.chdir("../../NEURON")
from neuron import h
os.chdir("../NeuroML2")
h.chdir('../NEURON')
h.load_file('mitral.hoc')
sys.path.append('../NEURON')
from mkmitral import mkmitral
from pyneuroml.neuron import export_to_neuroml2
from pyneuroml import pynml
from neuroml import SegmentGroup
def export(num_cells_to_export = 5):
cells = []
for mgid in range(num_cells_to_export):
print mgid
cells.append(mkmitral(mgid))
nml_net_file = "../NeuroML2/MitralCells/Exported/PartialBulb_%iMTCells.net.nml" % num_cells_to_export
export_to_neuroml2(None,
nml_net_file,
includeBiophysicalProperties=False,
separateCellFiles=True)
for i in range(num_cells_to_export):
print("Processing cell %i out of %i"%(i, num_cells_to_export))
nml_cell_file = "../NeuroML2/MitralCells/Exported/Mitral_0_%i.cell.nml" % i
nml_doc = pynml.read_neuroml2_file(nml_cell_file)
cell = nml_doc.cells[0]
soma_seg = next(seg for seg in cell.morphology.segments if seg.name == "Seg0_soma")
initial_seg = next(seg for seg in cell.morphology.segments if seg.name == "Seg0_initialseg")
hillock_seg = next(seg for seg in cell.morphology.segments if seg.name == "Seg0_hillock")
# Ensure hillock parent is soma
hillock_seg.parent.segments = soma_seg.id
# Fix initial and hillock segs by moving them to the soma
hillock_seg.proximal = pointMovedByOffset(hillock_seg.proximal, soma_seg.distal)
hillock_seg.distal = pointMovedByOffset(hillock_seg.distal, soma_seg.distal)
initial_seg.proximal = pointMovedByOffset(initial_seg.proximal, soma_seg.distal)
initial_seg.distal = pointMovedByOffset(initial_seg.distal, soma_seg.distal)
# Set root to id=0 and increment others
exportHelper.resetRoot(cell)
# TODO: cell.position(x,y,z) used for cell positioning in networks does not work as expected
# See: https://github.com/NeuroML/jNeuroML/issues/55
# Skipping the translation for now
# # Move everything back to the origin
# originOffset = type("", (), dict(x = -soma_seg.proximal.x, y = -soma_seg.proximal.y, z = -soma_seg.proximal.z ))()
#
# for seg in cell.morphology.segments:
# seg.proximal = pointMovedByOffset(seg.proximal, originOffset)
# seg.distal = pointMovedByOffset(seg.distal, originOffset)
# Replace ModelViewParmSubset_N groups with all, axon, soma, dendrite groups
buildStandardSegmentGroups(cell)
# Add channel placeholders
nml_doc.includes.append(neuroml.IncludeType(href="channelIncludesPLACEHOLDER"))
cell.biophysical_properties = neuroml.BiophysicalProperties(id="biophysPLACEHOLDER")
# Save the new NML
pynml.write_neuroml2_file(nml_doc, nml_cell_file)
# Replace placeholders with contents from MitralCell...xml files
replaceChannelPlaceholders(nml_cell_file)
print("COMPLETED: " + nml_cell_file)
print("DONE")
def pointMovedByOffset(point, offset):
if point is None:
return None
point.x = point.x + offset.x
point.y = point.y + offset.y
point.z = point.z + offset.z
return point
def replaceChannelPlaceholders(nml_cell_file):
with open ("../NeuroML2/MitralCells/MitralCellBiophysicalProperties.xml", "r") as bioPhysFile:
bioPhysProps=bioPhysFile.read()
with open ("../NeuroML2/MitralCells/MitralCellChannelIncludes.xml", "r") as channelIncludesFile:
channelIncludes=channelIncludesFile.read()
with open (nml_cell_file, "r") as cellFile:
cellNMLreplaced=cellFile.read()\
.replace('<include href="channelIncludesPLACEHOLDER"></include>', channelIncludes)\
            .replace('<biophysicalProperties id="biophysPLACEHOLDER"/>', bioPhysProps)
with open(nml_cell_file, "w") as cellFile:
cellFile.write(cellNMLreplaced)
def buildStandardSegmentGroups(cell):
largestGroup = None
# Delete all ModelViewParmSubset_N groups, saving the largest
for g in xrange(len(cell.morphology.segment_groups) - 1, -1, -1): # Start from the end
group = cell.morphology.segment_groups[g]
if group.id.startswith('ModelViewParmSubset'):
cell.morphology.segment_groups.remove(group)
if largestGroup is None or len(largestGroup.includes) < len(group.includes):
largestGroup = group
# Add the standard soma, dendrite, axon groups
somaGroup = SegmentGroup('GO:0043025', 'soma_group')
dendriteGroup = SegmentGroup('GO:0030425', 'dendrite_group')
axonGroup = SegmentGroup('GO:0030424', 'axon_group')
allGroup = None
# Find the group with all segments
for group in cell.morphology.segment_groups:
if group.id == 'all':
allGroup = group
# If there is no "all" group, assume it's the largest of the ModelViewP... groups
if allGroup is None and largestGroup is not None:
allGroup = largestGroup
# Create the 'all' group from the largest group
largestGroup.id = 'all'
cell.morphology.segment_groups.append(largestGroup)
if allGroup is not None:
# Classify each include of 'all' group into a standard group
for include in allGroup.includes:
if include.segment_groups.startswith(('secden', 'priden', 'tuftden')):
dendriteGroup.includes.append(include)
elif include.segment_groups == 'soma':
somaGroup.includes.append(include)
elif include.segment_groups.startswith(('hillock', 'initialseg')):
axonGroup.includes.append(include)
# Attach the standard groups to the cell
cell.morphology.segment_groups.append(somaGroup)
cell.morphology.segment_groups.append(dendriteGroup)
cell.morphology.segment_groups.append(axonGroup)
if __name__ == "__main__":
export() | import os
import sys
import neuroml
import exportHelper
#Nav to neuron folder where compiled MOD files are present
os.chdir("../../NEURON")
from neuron import h
os.chdir("../NeuroML2")
h.chdir('../NEURON')
h.load_file('mitral.hoc')
sys.path.append('../NEURON')
from mkmitral import mkmitral
from pyneuroml.neuron import export_to_neuroml2
from pyneuroml import pynml
from neuroml import SegmentGroup
def export(num_cells_to_export = 5):
cells = []
for mgid in range(num_cells_to_export):
print mgid
cells.append(mkmitral(mgid))
nml_net_file = "../NeuroML2/MitralCells/Exported/PartialBulb_%iMTCells.net.nml" % num_cells_to_export
export_to_neuroml2(None,
nml_net_file,
includeBiophysicalProperties=False,
separateCellFiles=True)
for i in range(num_cells_to_export):
print("Processing cell %i out of %i"%(i, num_cells_to_export))
nml_cell_file = "../NeuroML2/MitralCells/Exported/Mitral_0_%i.cell.nml" % i
nml_doc = pynml.read_neuroml2_file(nml_cell_file)
cell = nml_doc.cells[0]
soma_seg = next(seg for seg in cell.morphology.segments if seg.name == "Seg0_soma")
initial_seg = next(seg for seg in cell.morphology.segments if seg.name == "Seg0_initialseg")
hillock_seg = next(seg for seg in cell.morphology.segments if seg.name == "Seg0_hillock")
# Ensure hillock parent is soma
hillock_seg.parent.segments = soma_seg.id
# Fix initial and hillock segs by moving them to the soma
hillock_seg.proximal = pointMovedByOffset(hillock_seg.proximal, soma_seg.distal)
hillock_seg.distal = pointMovedByOffset(hillock_seg.distal, soma_seg.distal)
initial_seg.proximal = pointMovedByOffset(initial_seg.proximal, soma_seg.distal)
initial_seg.distal = pointMovedByOffset(initial_seg.distal, soma_seg.distal)
# Set root to id=0 and increment others
exportHelper.resetRoot(cell)
# TODO: cell.position(x,y,z) used for cell positioning in networks does not work as expected
# See: https://github.com/NeuroML/jNeuroML/issues/55
# Skipping the translation for now
# # Move everything back to the origin
# originOffset = type("", (), dict(x = -soma_seg.proximal.x, y = -soma_seg.proximal.y, z = -soma_seg.proximal.z ))()
#
# for seg in cell.morphology.segments:
# seg.proximal = pointMovedByOffset(seg.proximal, originOffset)
# seg.distal = pointMovedByOffset(seg.distal, originOffset)
# Replace ModelViewParmSubset_N groups with all, axon, soma, dendrite groups
buildStandardSegmentGroups(cell)
# Add channel placeholders
nml_doc.includes.append(neuroml.IncludeType(href="channelIncludesPLACEHOLDER"))
cell.biophysical_properties = neuroml.BiophysicalProperties(id="biophysPLACEHOLDER")
# Save the new NML
pynml.write_neuroml2_file(nml_doc, nml_cell_file)
# Replace placeholders with contents from MitralCell...xml files
replaceChannelPlaceholders(nml_cell_file)
print("COMPLETED: " + nml_cell_file)
print("DONE")
def pointMovedByOffset(point, offset):
if point is None:
return None
point.x = point.x + offset.x
point.y = point.y + offset.y
point.z = point.z + offset.z
return point
def replaceChannelPlaceholders(nml_cell_file):
with open ("../NeuroML2/MitralCells/MitralCellBiophysicalProperties.xml", "r") as bioPhysFile:
bioPhysProps=bioPhysFile.read()
with open ("../NeuroML2/MitralCells/MitralCellChannelIncludes.xml", "r") as channelIncludesFile:
channelIncludes=channelIncludesFile.read()
with open (nml_cell_file, "r") as cellFile:
cellNMLreplaced=cellFile.read()\
.replace('<include href="channelIncludesPLACEHOLDER"></include>', channelIncludes)\
.replace('<biophysicalProperties id="biophysPLACEHOLDER"/>', bioPhysProps)
with open(nml_cell_file, "w") as cellFile:
cellFile.write(cellNMLreplaced)
def buildStandardSegmentGroups(cell):
largestGroup = None
# Delete all ModelViewParmSubset_N groups, saving the largest
for g in xrange(len(cell.morphology.segment_groups) - 1, -1, -1): # Start from the end
group = cell.morphology.segment_groups[g]
if group.id.startswith('ModelViewParmSubset'):
cell.morphology.segment_groups.remove(group)
if largestGroup is None or len(largestGroup.includes) < len(group.includes):
largestGroup = group
# Add the standard soma, dendrite, axon groups
somaGroup = SegmentGroup('GO:0043025', 'soma_group')
dendriteGroup = SegmentGroup('GO:0030425', 'dendrite_group')
axonGroup = SegmentGroup('GO:0030424', 'axon_group')
allGroup = None
# Find the group with all segments
for group in cell.morphology.segment_groups:
if group.id == 'all':
allGroup = group
# If there is no "all" group, assume it's the largest of the ModelViewP... groups
if allGroup is None and largestGroup is not None:
allGroup = largestGroup
# Create the 'all' group from the largest group
largestGroup.id = 'all'
cell.morphology.segment_groups.append(largestGroup)
if allGroup is not None:
# Classify each include of 'all' group into a standard group
for include in allGroup.includes:
if include.segment_groups.startswith(('secden', 'priden', 'tuftden')):
dendriteGroup.includes.append(include)
elif include.segment_groups == 'soma':
somaGroup.includes.append(include)
elif include.segment_groups.startswith(('hillock', 'initialseg')):
axonGroup.includes.append(include)
# Attach the standard groups to the cell
cell.morphology.segment_groups.append(somaGroup)
cell.morphology.segment_groups.append(dendriteGroup)
cell.morphology.segment_groups.append(axonGroup)
if __name__ == "__main__":
export() | en | 0.711576 | #Nav to neuron folder where compiled MOD files are present # Ensure hillock parent is soma # Fix initial and hillock segs by moving them to the soma # Set root to id=0 and increment others # TODO: cell.position(x,y,z) used for cell positioning in networks does not work as expected # See: https://github.com/NeuroML/jNeuroML/issues/55 # Skipping the translation for now # # Move everything back to the origin # originOffset = type("", (), dict(x = -soma_seg.proximal.x, y = -soma_seg.proximal.y, z = -soma_seg.proximal.z ))() # # for seg in cell.morphology.segments: # seg.proximal = pointMovedByOffset(seg.proximal, originOffset) # seg.distal = pointMovedByOffset(seg.distal, originOffset) # Replace ModelViewParmSubset_N groups with all, axon, soma, dendrite groups # Add channel placeholders # Save the new NML # Replace placeholders with contents from MitralCell...xml files # Delete all ModelViewParmSubset_N groups, saving the largest # Start from the end # Add the standard soma, dendrite, axon groups # Find the group with all segments # If there is no "all" group, assume it's the largest of the ModelViewP... groups # Create the 'all' group from the largest group # Classify each include of 'all' group into a standard group # Attach the standard groups to the cell | 2.401799 | 2 |
src/hed_utils/cli/csv_search.py | Hrissimir/hed_utils | 0 | 6632028 | """usage: csv-search [-h]
[-v] [-vv] [--log-format LOG_FORMAT]
[-d DIRECTORY] [-o TEXT_REPORT] [-xl EXCEL_REPORT]
[-e ENCODING] -t TEXT [-i]
Find text in CSV files.
optional arguments:
-h, --help show this help message and exit
-d DIRECTORY path to CSV files directory (default: CWD)
-o TEXT_REPORT filepath for writing text report
-xl EXCEL_REPORT filepath for writing excel report
-e ENCODING encoding for opening the CSV files (default: utf-8)
-t TEXT the text to find
-i if passed search will ignore casing (default: False)
logging related
-v, --verbose set log level to INFO
-vv, --very-verbose set log level to DEBUG
--log-format LOG_FORMAT
set custom log format
"""
import logging
import sys
from collections import namedtuple
from io import StringIO
from os.path import abspath, basename
from pathlib import Path
from hed_utils.cli.arguments import create_parser
from hed_utils.cli.arguments import input_folder_path
from hed_utils.cli.arguments import output_file_path
from hed_utils.cli.arguments import string_value
from hed_utils.support.file_utils.csv_file import get_csv_files
from hed_utils.support.file_utils.csv_file import get_csv_files_containing
from hed_utils.support.file_utils.xlsx_file import xlsx_write_sheets_data
from hed_utils.support.text_tool import normalize
from hed_utils.support.time_tool import Timer
from tabulate import tabulate
LOG_FORMAT = "%(asctime)s | %(levelname)8s | %(message)s"
Result = namedtuple("Result", "filepath headers rows")
_log = logging.getLogger(__name__)
_log.addHandler(logging.NullHandler())
def _parse_args(args):
parser = create_parser(
name="csv-search",
description="Find text in CSV files."
)
parser.add_argument("-d",
dest="directory",
action="store",
type=input_folder_path,
default=Path.cwd(),
help="path to CSV files directory (default: CWD)")
parser.add_argument("-o",
dest="text_report",
action="store",
type=output_file_path,
default=None,
help="filepath for writing text report")
parser.add_argument("-xl",
dest="excel_report",
action="store",
type=output_file_path,
default=None,
help="filepath for writing excel report")
parser.add_argument("-e",
dest="encoding",
action="store",
default="utf-8",
type=string_value,
help="encoding for opening the CSV files (default: utf-8)")
parser.add_argument("-t",
dest="text",
action="store",
type=string_value,
required=True,
help="the text to find")
parser.add_argument("-i",
dest="ignorecase",
action="store_true",
help="if passed search will ignore casing (default: False)")
return parser.parse_args(args)
def _generate_excel_report(results: list, file: str):
if not file:
_log.warning("no excel report file was set!")
return
file = abspath(file)
_log.info("writing excel report to: '%s'", file)
sheets_data = [(basename(filepath), headers, rows)
for filepath, headers, rows
in results]
xlsx_write_sheets_data(file, sheets_data)
def _generate_text_report(results: list, file: str):
def _format_result(r):
filepath, headers, rows = r
sep = len(filepath) * "="
return f"\n\n{sep}\n{filepath}:\n\n{tabulate(tabular_data=rows, headers=headers)}\n"
report = StringIO()
for result in results:
details = _format_result(result)
print(details, file=report)
print(report.getvalue())
if file:
_log.info("writing text report to file: '%s'", file)
with open(file, mode="w") as fp:
fp.write(report.getvalue())
else:
_log.warning("No text report file was set!")
def _init_logging(level):
from hed_utils.support import log
log.init(level=level, log_format=LOG_FORMAT)
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = _parse_args(args)
print("search started with args: ", args)
# extract args
encoding = args.encoding
text = normalize(args.text.strip()) # ensure encoding consistency
directory = abspath(args.directory)
text_report_path = args.text_report
excel_report_path = args.excel_report
verbose = args.verbose
ignorecase = args.ignorecase
_init_logging(verbose)
# measure the program execution time
search_timer = Timer()
search_timer.start()
# perform the search
csv_files = [file for file in get_csv_files(directory)]
results = get_csv_files_containing(csv_files, text, ignorecase=ignorecase, encoding=encoding)
search_timer.stop()
# generate reports if needed
if results:
results.sort(key=(lambda r: len(r[-1])), reverse=True)
_generate_text_report(results, text_report_path)
_generate_excel_report(results, excel_report_path)
else:
_log.warning("No results were found!")
rows_count = sum([len(result[-1]) for result in results])
_log.info("All Done! Found [ %s ] matching rows in [ %s ] different files (took: %.3f s.)",
rows_count, len(results), search_timer.elapsed)
def run():
"""Entry point for console_scripts"""
main(sys.argv[1:])
| """usage: csv-search [-h]
[-v] [-vv] [--log-format LOG_FORMAT]
[-d DIRECTORY] [-o TEXT_REPORT] [-xl EXCEL_REPORT]
[-e ENCODING] -t TEXT [-i]
Find text in CSV files.
optional arguments:
-h, --help show this help message and exit
-d DIRECTORY path to CSV files directory (default: CWD)
-o TEXT_REPORT filepath for writing text report
-xl EXCEL_REPORT filepath for writing excel report
-e ENCODING encoding for opening the CSV files (default: utf-8)
-t TEXT the text to find
-i if passed search will ignore casing (default: False)
logging related
-v, --verbose set log level to INFO
-vv, --very-verbose set log level to DEBUG
--log-format LOG_FORMAT
set custom log format
"""
import logging
import sys
from collections import namedtuple
from io import StringIO
from os.path import abspath, basename
from pathlib import Path
from hed_utils.cli.arguments import create_parser
from hed_utils.cli.arguments import input_folder_path
from hed_utils.cli.arguments import output_file_path
from hed_utils.cli.arguments import string_value
from hed_utils.support.file_utils.csv_file import get_csv_files
from hed_utils.support.file_utils.csv_file import get_csv_files_containing
from hed_utils.support.file_utils.xlsx_file import xlsx_write_sheets_data
from hed_utils.support.text_tool import normalize
from hed_utils.support.time_tool import Timer
from tabulate import tabulate
LOG_FORMAT = "%(asctime)s | %(levelname)8s | %(message)s"
Result = namedtuple("Result", "filepath headers rows")
_log = logging.getLogger(__name__)
_log.addHandler(logging.NullHandler())
def _parse_args(args):
parser = create_parser(
name="csv-search",
description="Find text in CSV files."
)
parser.add_argument("-d",
dest="directory",
action="store",
type=input_folder_path,
default=Path.cwd(),
help="path to CSV files directory (default: CWD)")
parser.add_argument("-o",
dest="text_report",
action="store",
type=output_file_path,
default=None,
help="filepath for writing text report")
parser.add_argument("-xl",
dest="excel_report",
action="store",
type=output_file_path,
default=None,
help="filepath for writing excel report")
parser.add_argument("-e",
dest="encoding",
action="store",
default="utf-8",
type=string_value,
help="encoding for opening the CSV files (default: utf-8)")
parser.add_argument("-t",
dest="text",
action="store",
type=string_value,
required=True,
help="the text to find")
parser.add_argument("-i",
dest="ignorecase",
action="store_true",
help="if passed search will ignore casing (default: False)")
return parser.parse_args(args)
def _generate_excel_report(results: list, file: str):
if not file:
_log.warning("no excel report file was set!")
return
file = abspath(file)
_log.info("writing excel report to: '%s'", file)
sheets_data = [(basename(filepath), headers, rows)
for filepath, headers, rows
in results]
xlsx_write_sheets_data(file, sheets_data)
def _generate_text_report(results: list, file: str):
def _format_result(r):
filepath, headers, rows = r
sep = len(filepath) * "="
return f"\n\n{sep}\n{filepath}:\n\n{tabulate(tabular_data=rows, headers=headers)}\n"
report = StringIO()
for result in results:
details = _format_result(result)
print(details, file=report)
print(report.getvalue())
if file:
_log.info("writing text report to file: '%s'", file)
with open(file, mode="w") as fp:
fp.write(report.getvalue())
else:
_log.warning("No text report file was set!")
def _init_logging(level):
from hed_utils.support import log
log.init(level=level, log_format=LOG_FORMAT)
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
args = _parse_args(args)
print("search started with args: ", args)
# extract args
encoding = args.encoding
text = normalize(args.text.strip()) # ensure encoding consistency
directory = abspath(args.directory)
text_report_path = args.text_report
excel_report_path = args.excel_report
verbose = args.verbose
ignorecase = args.ignorecase
_init_logging(verbose)
# measure the program execution time
search_timer = Timer()
search_timer.start()
# perform the search
csv_files = [file for file in get_csv_files(directory)]
results = get_csv_files_containing(csv_files, text, ignorecase=ignorecase, encoding=encoding)
search_timer.stop()
# generate reports if needed
if results:
results.sort(key=(lambda r: len(r[-1])), reverse=True)
_generate_text_report(results, text_report_path)
_generate_excel_report(results, excel_report_path)
else:
_log.warning("No results were found!")
rows_count = sum([len(result[-1]) for result in results])
_log.info("All Done! Found [ %s ] matching rows in [ %s ] different files (took: %.3f s.)",
rows_count, len(results), search_timer.elapsed)
def run():
"""Entry point for console_scripts"""
main(sys.argv[1:])
| en | 0.401495 | usage: csv-search [-h] [-v] [-vv] [--log-format LOG_FORMAT] [-d DIRECTORY] [-o TEXT_REPORT] [-xl EXCEL_REPORT] [-e ENCODING] -t TEXT [-i] Find text in CSV files. optional arguments: -h, --help show this help message and exit -d DIRECTORY path to CSV files directory (default: CWD) -o TEXT_REPORT filepath for writing text report -xl EXCEL_REPORT filepath for writing excel report -e ENCODING encoding for opening the CSV files (default: utf-8) -t TEXT the text to find -i if passed search will ignore casing (default: False) logging related -v, --verbose set log level to INFO -vv, --very-verbose set log level to DEBUG --log-format LOG_FORMAT set custom log format Main entry point allowing external calls Args: args ([str]): command line parameter list # extract args # ensure encoding consistency # measure the program execution time # perform the search # generate reports if needed Entry point for console_scripts | 2.891757 | 3 |
tools/download-wheels.py | Ennosigaeon/scipy | 1 | 6632029 | <reponame>Ennosigaeon/scipy
#!/usr/bin/env python
"""
Download SciPy wheels from Anaconda staging area.
"""
import sys
import os
import re
import shutil
import argparse
import urllib3
from bs4 import BeautifulSoup
__version__ = '0.1'
# Edit these for other projects.
STAGING_URL = 'https://anaconda.org/multibuild-wheels-staging/scipy'
PREFIX = 'scipy'
def get_wheel_names(version):
""" Get wheel names from Anaconda HTML directory.
This looks in the Anaconda multibuild-wheels-staging page and
parses the HTML to get all the wheel names for a release version.
Parameters
----------
version : str
The release version. For instance, "1.5.0".
"""
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
tmpl = re.compile(rf"^.*{PREFIX}-{version}-.*\.whl$")
index_url = f"{STAGING_URL}/files"
index_html = http.request('GET', index_url)
soup = BeautifulSoup(index_html.data, 'html.parser')
return soup.findAll(text=tmpl)
def download_wheels(version, wheelhouse):
"""Download release wheels.
The release wheels for the given SciPy version are downloaded
into the given directory.
Parameters
----------
version : str
The release version. For instance, "1.5.0".
wheelhouse : str
Directory in which to download the wheels.
"""
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
wheel_names = get_wheel_names(version)
for i, wheel_name in enumerate(wheel_names):
wheel_url = f"{STAGING_URL}/{version}/download/{wheel_name}"
wheel_path = os.path.join(wheelhouse, wheel_name)
with open(wheel_path, 'wb') as f:
with http.request('GET', wheel_url, preload_content=False,) as r:
print(f"{i + 1:<4}{wheel_name}")
shutil.copyfileobj(r, f)
print(f"\nTotal files downloaded: {len(wheel_names)}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"version",
help="SciPy version to download.")
parser.add_argument(
"-w", "--wheelhouse",
default=os.path.join(os.getcwd(), "release", "installers"),
help="Directory in which to store downloaded wheels\n"
"[defaults to <cwd>/release/installers]")
args = parser.parse_args()
wheelhouse = os.path.expanduser(args.wheelhouse)
if not os.path.isdir(wheelhouse):
raise RuntimeError(
f"{wheelhouse} wheelhouse directory is not present."
" Perhaps you need to use the '-w' flag to specify one.")
download_wheels(args.version, wheelhouse)
| #!/usr/bin/env python
"""
Download SciPy wheels from Anaconda staging area.
"""
import sys
import os
import re
import shutil
import argparse
import urllib3
from bs4 import BeautifulSoup
__version__ = '0.1'
# Edit these for other projects.
STAGING_URL = 'https://anaconda.org/multibuild-wheels-staging/scipy'
PREFIX = 'scipy'
def get_wheel_names(version):
""" Get wheel names from Anaconda HTML directory.
This looks in the Anaconda multibuild-wheels-staging page and
parses the HTML to get all the wheel names for a release version.
Parameters
----------
version : str
The release version. For instance, "1.5.0".
"""
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
tmpl = re.compile(rf"^.*{PREFIX}-{version}-.*\.whl$")
index_url = f"{STAGING_URL}/files"
index_html = http.request('GET', index_url)
soup = BeautifulSoup(index_html.data, 'html.parser')
return soup.findAll(text=tmpl)
def download_wheels(version, wheelhouse):
"""Download release wheels.
The release wheels for the given SciPy version are downloaded
into the given directory.
Parameters
----------
version : str
The release version. For instance, "1.5.0".
wheelhouse : str
Directory in which to download the wheels.
"""
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
wheel_names = get_wheel_names(version)
for i, wheel_name in enumerate(wheel_names):
wheel_url = f"{STAGING_URL}/{version}/download/{wheel_name}"
wheel_path = os.path.join(wheelhouse, wheel_name)
with open(wheel_path, 'wb') as f:
with http.request('GET', wheel_url, preload_content=False,) as r:
print(f"{i + 1:<4}{wheel_name}")
shutil.copyfileobj(r, f)
print(f"\nTotal files downloaded: {len(wheel_names)}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"version",
help="SciPy version to download.")
parser.add_argument(
"-w", "--wheelhouse",
default=os.path.join(os.getcwd(), "release", "installers"),
help="Directory in which to store downloaded wheels\n"
"[defaults to <cwd>/release/installers]")
args = parser.parse_args()
wheelhouse = os.path.expanduser(args.wheelhouse)
if not os.path.isdir(wheelhouse):
raise RuntimeError(
f"{wheelhouse} wheelhouse directory is not present."
" Perhaps you need to use the '-w' flag to specify one.")
download_wheels(args.version, wheelhouse) | en | 0.781673 | #!/usr/bin/env python Download SciPy wheels from Anaconda staging area. # Edit these for other projects. Get wheel names from Anaconda HTML directory. This looks in the Anaconda multibuild-wheels-staging page and parses the HTML to get all the wheel names for a release version. Parameters ---------- version : str The release version. For instance, "1.5.0". Download release wheels. The release wheels for the given SciPy version are downloaded into the given directory. Parameters ---------- version : str The release version. For instance, "1.5.0". wheelhouse : str Directory in which to download the wheels. | 2.947379 | 3 |
companies/migrations/0001_initial.py | maxinsar/insar | 0 | 6632030 | <reponame>maxinsar/insar
# Generated by Django 2.2.5 on 2019-09-16 17:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.CharField(max_length=400, verbose_name='Сообщение')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор')),
],
options={
'verbose_name': 'Отзыв',
'verbose_name_plural': 'Отзывы',
},
),
migrations.CreateModel(
name='Vacancy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, verbose_name='Название вакансии')),
('slug', models.SlugField(max_length=30, verbose_name='URL')),
('description', models.TextField(max_length=700, verbose_name='Описание вакансии')),
('payments', models.DecimalField(decimal_places=10, max_digits=19, verbose_name='Зарплата')),
],
options={
'verbose_name': 'Вакансия',
'verbose_name_plural': 'Вакансии',
},
),
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40, verbose_name='Название компании')),
('slug', models.SlugField(max_length=30, verbose_name='URL')),
('description', models.TextField(max_length=400, verbose_name='Описание компании')),
('logo_company', models.ImageField(blank=True, null=True, upload_to='images/logo_comapny', verbose_name='Логотип компании')),
('comments', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='companies.Comments', verbose_name='Отзывы о компании')),
('vacancy_company', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='companies.Vacancy', verbose_name='Вакансии компании')),
],
options={
'verbose_name': 'Компания',
'verbose_name_plural': 'Компании',
},
),
]
| # Generated by Django 2.2.5 on 2019-09-16 17:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('message', models.CharField(max_length=400, verbose_name='Сообщение')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор')),
],
options={
'verbose_name': 'Отзыв',
'verbose_name_plural': 'Отзывы',
},
),
migrations.CreateModel(
name='Vacancy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, verbose_name='Название вакансии')),
('slug', models.SlugField(max_length=30, verbose_name='URL')),
('description', models.TextField(max_length=700, verbose_name='Описание вакансии')),
('payments', models.DecimalField(decimal_places=10, max_digits=19, verbose_name='Зарплата')),
],
options={
'verbose_name': 'Вакансия',
'verbose_name_plural': 'Вакансии',
},
),
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40, verbose_name='Название компании')),
('slug', models.SlugField(max_length=30, verbose_name='URL')),
('description', models.TextField(max_length=400, verbose_name='Описание компании')),
('logo_company', models.ImageField(blank=True, null=True, upload_to='images/logo_comapny', verbose_name='Логотип компании')),
('comments', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='companies.Comments', verbose_name='Отзывы о компании')),
('vacancy_company', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='companies.Vacancy', verbose_name='Вакансии компании')),
],
options={
'verbose_name': 'Компания',
'verbose_name_plural': 'Компании',
},
),
] | en | 0.738622 | # Generated by Django 2.2.5 on 2019-09-16 17:05 | 1.738039 | 2 |
math/uva10107 what is the median.py | windowssocket/py_leetcode | 3 | 6632031 | <filename>math/uva10107 what is the median.py
import sys
import bisect
numList = []
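# Running median: bisect.insort keeps numList sorted as each number arrives, so the
# median is the middle element (odd count) or the floor-average of the two middle
# elements (even count), matching the prints below.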
for line in sys.stdin:
# pos = bisect.bisect(numList,int(line))
bisect.insort(numList,int(line))
pos = len(numList) // 2
if len(numList) % 2 == 0:
print((numList[pos] + numList[pos-1]) // 2)
else:
print(numList[pos])
| <filename>math/uva10107 what is the median.py
import sys
import bisect
numList = []
for line in sys.stdin:
# pos = bisect.bisect(numList,int(line))
bisect.insort(numList,int(line))
pos = len(numList) // 2
if len(numList) % 2 == 0:
print((numList[pos] + numList[pos-1]) // 2)
else:
print(numList[pos])
| en | 0.496991 | # pos = bisect.bisect(numList,int(line)) | 3.390651 | 3 |
raiden_contracts/utils/sign.py | hackaugusto/raiden-contracts | 0 | 6632032 | from web3 import Web3
from .sign_utils import sign
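# Descriptive note (added for clarity): the helpers below build soliditySha3 hashes
# for Raiden message types (balance proofs, balance proof updates, cooperative
# settles, withdraws, reward proofs) and sign them with the given private key;
# v defaults to 27.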
def hash_balance_data(transferred_amount, locked_amount, locksroot):
return Web3.soliditySha3(
['uint256', 'uint256', 'bytes32'],
[transferred_amount, locked_amount, locksroot],
)
def hash_balance_proof(
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
):
return Web3.soliditySha3([
'bytes32',
'uint256',
'bytes32',
'uint256',
'address',
'uint256',
], [
balance_hash,
nonce,
additional_hash,
channel_identifier,
token_network_address,
chain_identifier,
])
def hash_balance_proof_update_message(
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
closing_signature,
):
return Web3.soliditySha3([
'bytes32',
'uint256',
'bytes32',
'uint256',
'address',
'uint256',
'bytes',
], [
balance_hash,
nonce,
additional_hash,
channel_identifier,
token_network_address,
chain_identifier,
closing_signature,
])
def hash_cooperative_settle_message(
token_network_address,
chain_identifier,
channel_identifier,
participant1_address,
participant1_balance,
participant2_address,
participant2_balance,
):
return Web3.soliditySha3([
'address',
'uint256',
'address',
'uint256',
'uint256',
'address',
'uint256',
], [
participant1_address,
participant1_balance,
participant2_address,
participant2_balance,
channel_identifier,
token_network_address,
chain_identifier,
])
def hash_withdraw_message(
token_network_address,
chain_identifier,
channel_identifier,
participant,
amount_to_withdraw,
):
return Web3.soliditySha3([
'address',
'uint256',
'uint256',
'address',
'uint256',
], [
participant,
amount_to_withdraw,
channel_identifier,
token_network_address,
chain_identifier,
])
def hash_reward_proof(
channel_identifier,
reward_amount,
token_network_address,
chain_id,
nonce):
return Web3.soliditySha3([
'uint256',
'uint256',
'address',
'uint256',
'uint256',
], [
channel_identifier,
reward_amount,
token_network_address,
chain_id,
nonce,
])
def sign_balance_proof(
privatekey,
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
v=27,
):
message_hash = hash_balance_proof(
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
)
return sign(privatekey, message_hash, v)
def sign_balance_proof_update_message(
privatekey,
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
closing_signature,
v=27,
):
message_hash = hash_balance_proof_update_message(
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
closing_signature,
)
return sign(privatekey, message_hash, v)
def sign_cooperative_settle_message(
privatekey,
token_network_address,
chain_identifier,
channel_identifier,
participant1_address,
participant1_balance,
participant2_address,
participant2_balance,
v=27,
):
message_hash = hash_cooperative_settle_message(
token_network_address,
chain_identifier,
channel_identifier,
participant1_address,
participant1_balance,
participant2_address,
participant2_balance,
)
return sign(privatekey, message_hash, v)
def sign_withdraw_message(
privatekey,
token_network_address,
chain_identifier,
channel_identifier,
participant,
amount_to_withdraw,
v=27,
):
message_hash = hash_withdraw_message(
token_network_address,
chain_identifier,
channel_identifier,
participant,
amount_to_withdraw,
)
return sign(privatekey, message_hash, v)
def sign_reward_proof(
privatekey,
channel_identifier,
reward_amount,
token_network_address,
chain_id,
nonce,
v=27):
message_hash = hash_reward_proof(
channel_identifier,
reward_amount,
token_network_address,
chain_id,
nonce,
)
return sign(privatekey, message_hash, v)
| from web3 import Web3
from .sign_utils import sign
def hash_balance_data(transferred_amount, locked_amount, locksroot):
return Web3.soliditySha3(
['uint256', 'uint256', 'bytes32'],
[transferred_amount, locked_amount, locksroot],
)
def hash_balance_proof(
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
):
return Web3.soliditySha3([
'bytes32',
'uint256',
'bytes32',
'uint256',
'address',
'uint256',
], [
balance_hash,
nonce,
additional_hash,
channel_identifier,
token_network_address,
chain_identifier,
])
def hash_balance_proof_update_message(
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
closing_signature,
):
return Web3.soliditySha3([
'bytes32',
'uint256',
'bytes32',
'uint256',
'address',
'uint256',
'bytes',
], [
balance_hash,
nonce,
additional_hash,
channel_identifier,
token_network_address,
chain_identifier,
closing_signature,
])
def hash_cooperative_settle_message(
token_network_address,
chain_identifier,
channel_identifier,
participant1_address,
participant1_balance,
participant2_address,
participant2_balance,
):
return Web3.soliditySha3([
'address',
'uint256',
'address',
'uint256',
'uint256',
'address',
'uint256',
], [
participant1_address,
participant1_balance,
participant2_address,
participant2_balance,
channel_identifier,
token_network_address,
chain_identifier,
])
def hash_withdraw_message(
token_network_address,
chain_identifier,
channel_identifier,
participant,
amount_to_withdraw,
):
return Web3.soliditySha3([
'address',
'uint256',
'uint256',
'address',
'uint256',
], [
participant,
amount_to_withdraw,
channel_identifier,
token_network_address,
chain_identifier,
])
def hash_reward_proof(
channel_identifier,
reward_amount,
token_network_address,
chain_id,
nonce):
return Web3.soliditySha3([
'uint256',
'uint256',
'address',
'uint256',
'uint256',
], [
channel_identifier,
reward_amount,
token_network_address,
chain_id,
nonce,
])
def sign_balance_proof(
privatekey,
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
v=27,
):
message_hash = hash_balance_proof(
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
)
return sign(privatekey, message_hash, v)
def sign_balance_proof_update_message(
privatekey,
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
closing_signature,
v=27,
):
message_hash = hash_balance_proof_update_message(
token_network_address,
chain_identifier,
channel_identifier,
balance_hash,
nonce,
additional_hash,
closing_signature,
)
return sign(privatekey, message_hash, v)
def sign_cooperative_settle_message(
privatekey,
token_network_address,
chain_identifier,
channel_identifier,
participant1_address,
participant1_balance,
participant2_address,
participant2_balance,
v=27,
):
message_hash = hash_cooperative_settle_message(
token_network_address,
chain_identifier,
channel_identifier,
participant1_address,
participant1_balance,
participant2_address,
participant2_balance,
)
return sign(privatekey, message_hash, v)
def sign_withdraw_message(
privatekey,
token_network_address,
chain_identifier,
channel_identifier,
participant,
amount_to_withdraw,
v=27,
):
message_hash = hash_withdraw_message(
token_network_address,
chain_identifier,
channel_identifier,
participant,
amount_to_withdraw,
)
return sign(privatekey, message_hash, v)
def sign_reward_proof(
privatekey,
channel_identifier,
reward_amount,
token_network_address,
chain_id,
nonce,
v=27):
message_hash = hash_reward_proof(
channel_identifier,
reward_amount,
token_network_address,
chain_id,
nonce,
)
return sign(privatekey, message_hash, v)
| none | 1 | 2.052083 | 2 |
|
tests/api/test_cors.py | dioptra-io/iris | 6 | 6632033 | <filename>tests/api/test_cors.py
import pytest
@pytest.mark.parametrize("origin", ["https://example.org", "http://localhost:8000"])
def test_cors_allowed_origin(make_client, make_user, origin):
# https://fastapi.tiangolo.com/advanced/testing-events/?h=startup
# https://github.com/encode/starlette/blob/master/tests/middleware/test_cors.py
client = make_client()
headers = {"Origin": origin}
with client:
r = client.options("/", headers=headers)
assert r.headers["access-control-allow-credentials"] == "true"
assert r.headers["access-control-allow-origin"] == origin
def test_cors_unallowed_origin(make_client, make_user):
client = make_client()
headers = {"Origin": "https://example.com"}
with client:
r = client.options("/", headers=headers)
assert "access-control-allow-origin" not in r.headers
| <filename>tests/api/test_cors.py
import pytest
@pytest.mark.parametrize("origin", ["https://example.org", "http://localhost:8000"])
def test_cors_allowed_origin(make_client, make_user, origin):
# https://fastapi.tiangolo.com/advanced/testing-events/?h=startup
# https://github.com/encode/starlette/blob/master/tests/middleware/test_cors.py
client = make_client()
headers = {"Origin": origin}
with client:
r = client.options("/", headers=headers)
assert r.headers["access-control-allow-credentials"] == "true"
assert r.headers["access-control-allow-origin"] == origin
def test_cors_unallowed_origin(make_client, make_user):
client = make_client()
headers = {"Origin": "https://example.com"}
with client:
r = client.options("/", headers=headers)
assert "access-control-allow-origin" not in r.headers
| en | 0.45152 | # https://fastapi.tiangolo.com/advanced/testing-events/?h=startup # https://github.com/encode/starlette/blob/master/tests/middleware/test_cors.py | 2.165366 | 2 |
apps/core/account/manager.py | GMNaim/Online-Exam-System | 0 | 6632034 | from django.contrib.auth.base_user import BaseUserManager
from django.utils.translation import ugettext_lazy as _
# ugettext is a unicode version of a translatable string.
# ugettext_lazy is a "lazy" version of that. Lazy strings are
# a Django-ism; they are string-like objects that don't
# actually turn into the real string until the last possible minute.
# Often, you can't know how to translate a string until late in the process.
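# Illustrative example (not from the original module): a lazy string defined at
# import time is only translated when it is rendered, e.g.
#   msg = _('Email must be needed')   # stays a lazy proxy object here
#   text = str(msg)                   # translation is resolved at this point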
class UserManager(BaseUserManager):
# We can optionally serialize managers into migrations and have them
# available in RunPython operations. This is done by defining a
# use_in_migrations attribute on the manager class:
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
""" Creates and saves a User with the given email and password """
if not email:
raise ValueError(_('Email must be needed'))
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
# This creates a normal (non-superuser) user
extra_fields.setdefault('is_superuser',
False) # Insert key with a value of default if key is not in the dictionary.
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
# This creates a superuser with the given email and password
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_active', True)
if extra_fields.get('is_staff') is not True:
raise ValueError(_('Superuser must have is_staff=True.'))
if extra_fields.get('is_superuser') is not True:
raise ValueError(_("Superuser must have 'is_superuser=True'."))
return self._create_user(email, password, **extra_fields)
| from django.contrib.auth.base_user import BaseUserManager
from django.utils.translation import ugettext_lazy as _
# ugettext is a unicode version of a translatable string.
# ugettext_lazy is a "lazy" version of that. Lazy strings are
# a Django-ism; they are string-like objects that don't
# actually turn into the real string until the last possible minute.
# Often, you can't know how to translate a string until late in the process.
class UserManager(BaseUserManager):
# We can optionally serialize managers into migrations and have them
# available in RunPython operations. This is done by defining a
# use_in_migrations attribute on the manager class:
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
""" Creates and saves a User with the given email and password """
if not email:
raise ValueError(_('Email must be needed'))
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
# This creates a normal (non-superuser) user
extra_fields.setdefault('is_superuser',
False) # Insert key with a value of default if key is not in the dictionary.
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
# This creates a superuser with the given email and password
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_active', True)
if extra_fields.get('is_staff') is not True:
raise ValueError(_('Superuser must have is_staff=True.'))
if extra_fields.get('is_superuser') is not True:
raise ValueError(_("Superuser must have 'is_superuser=True'."))
return self._create_user(email, password, **extra_fields)
| en | 0.923239 | # ugettext is a unicode version of a translatable string. # ugettext_lazy is a "lazy" version of that. Lazy strings are # a Django-ism; they are string-like objects that don't # actually turn into the real string until the last possible minute. # Often, you can't know how to translate a string until late in the process. # We can optionally serialize managers into migrations and have them # available in RunPython operations. This is done by defining a # use_in_migrations attribute on the manager class: Creates and saves a User with the given email and password # these user will be normal user # Insert key with a value of default if key is not in the dictionary. # These user will be superuser with the given email and password | 2.403537 | 2 |
tests/components/devolo_home_control/test_init.py | tbarbette/core | 4 | 6632035 | <gh_stars>1-10
"""Tests for the devolo Home Control integration."""
from unittest.mock import patch
from devolo_home_control_api.exceptions.gateway import GatewayOfflineError
import pytest
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_ERROR,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.core import HomeAssistant
from tests.components.devolo_home_control import configure_integration
async def test_setup_entry(hass: HomeAssistant):
"""Test setup entry."""
entry = configure_integration(hass)
with patch("homeassistant.components.devolo_home_control.HomeControl"):
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_LOADED
@pytest.mark.credentials_invalid
async def test_setup_entry_credentials_invalid(hass: HomeAssistant):
"""Test setup entry fails if credentials are invalid."""
entry = configure_integration(hass)
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_ERROR
@pytest.mark.maintenance
async def test_setup_entry_maintenance(hass: HomeAssistant):
"""Test setup entry fails if mydevolo is in maintenance mode."""
entry = configure_integration(hass)
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_setup_connection_error(hass: HomeAssistant):
"""Test setup entry fails on connection error."""
entry = configure_integration(hass)
with patch(
"homeassistant.components.devolo_home_control.HomeControl",
side_effect=ConnectionError,
):
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_setup_gateway_offline(hass: HomeAssistant):
"""Test setup entry fails on gateway offline."""
entry = configure_integration(hass)
with patch(
"homeassistant.components.devolo_home_control.HomeControl",
side_effect=GatewayOfflineError,
):
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_unload_entry(hass: HomeAssistant):
"""Test unload entry."""
entry = configure_integration(hass)
with patch("homeassistant.components.devolo_home_control.HomeControl"):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
await hass.config_entries.async_unload(entry.entry_id)
assert entry.state == ENTRY_STATE_NOT_LOADED
| """Tests for the devolo Home Control integration."""
from unittest.mock import patch
from devolo_home_control_api.exceptions.gateway import GatewayOfflineError
import pytest
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_ERROR,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.core import HomeAssistant
from tests.components.devolo_home_control import configure_integration
async def test_setup_entry(hass: HomeAssistant):
"""Test setup entry."""
entry = configure_integration(hass)
with patch("homeassistant.components.devolo_home_control.HomeControl"):
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_LOADED
@pytest.mark.credentials_invalid
async def test_setup_entry_credentials_invalid(hass: HomeAssistant):
"""Test setup entry fails if credentials are invalid."""
entry = configure_integration(hass)
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_ERROR
@pytest.mark.maintenance
async def test_setup_entry_maintenance(hass: HomeAssistant):
"""Test setup entry fails if mydevolo is in maintenance mode."""
entry = configure_integration(hass)
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_setup_connection_error(hass: HomeAssistant):
"""Test setup entry fails on connection error."""
entry = configure_integration(hass)
with patch(
"homeassistant.components.devolo_home_control.HomeControl",
side_effect=ConnectionError,
):
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_setup_gateway_offline(hass: HomeAssistant):
"""Test setup entry fails on gateway offline."""
entry = configure_integration(hass)
with patch(
"homeassistant.components.devolo_home_control.HomeControl",
side_effect=GatewayOfflineError,
):
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_unload_entry(hass: HomeAssistant):
"""Test unload entry."""
entry = configure_integration(hass)
with patch("homeassistant.components.devolo_home_control.HomeControl"):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
await hass.config_entries.async_unload(entry.entry_id)
assert entry.state == ENTRY_STATE_NOT_LOADED | en | 0.764865 | Tests for the devolo Home Control integration. Test setup entry. Test setup entry fails if credentials are invalid. Test setup entry fails if mydevolo is in maintenance mode. Test setup entry fails on connection error. Test setup entry fails on gateway offline. Test unload entry. | 2.021052 | 2 |
src/ExportCsvToInflux/csv_object.py | kganczarek-adultimagroup/dpp-elc-export-csv-to-influx | 31 | 6632036 | <filename>src/ExportCsvToInflux/csv_object.py
from collections import defaultdict
from .base_object import BaseObject
from itertools import tee
from glob import glob
import hashlib
import types
import time
import json
import csv
import sys
import os
class CSVObject(object):
"""CSV Object"""
def __init__(self, delimiter=',', lineterminator='\n'):
self.delimiter = delimiter
self.lineterminator = lineterminator
def get_csv_header(self, file_name):
"""Function: get_csv_header.
:param file_name: the file name
:return: the csv header as a list
"""
self.valid_file_exist(file_name)
with open(file_name) as f:
sniffer = csv.Sniffer()
try:
has_header = sniffer.has_header(f.read(40960))
except csv.Error:
has_header = False
f.seek(0)
csv_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
headers = csv_reader.fieldnames
is_header = not any(field.isdigit() for field in headers)
headers = headers if has_header or is_header else []
return headers
@staticmethod
def search_files_in_dir(directory, match_suffix='.csv', filter_pattern='_influx.csv'):
"""Function: search_files_in_dir
:param directory: the directory
:param match_suffix: match the file suffix, use comma to separate, only string, not support regex
:param filter_pattern: filter the files, only string, not support regex
"""
base_object = BaseObject()
match_suffix = base_object.str_to_list(match_suffix, lower=True)
filter_pattern = base_object.str_to_list(filter_pattern, lower=True)
# Is file
is_file = os.path.isfile(directory)
if is_file:
yield directory
# Search directory
for x in os.walk(directory):
for y in glob(os.path.join(x[0], '*.*')):
# Continue if directory
try:
check_directory = os.path.isdir(y)
except UnicodeEncodeError as e:
y = y.encode('utf-8', 'ignore')
print('Warning: Unicode Encode Error found when checking isdir {0}: {1}'.format(y, e))
check_directory = os.path.isdir(y)
if check_directory is True:
continue
# Filter Out
match_suffix_status = any(the_filter in y.lower() for the_filter in match_suffix)
filter_pattern_status = any(the_filter in y.lower() for the_filter in filter_pattern)
if match_suffix_status is True and filter_pattern_status is False:
yield y
@staticmethod
def valid_file_exist(file_name):
"""Function: valid_file_exist
:param file_name: the file name
"""
file_exists = os.path.exists(file_name)
if file_exists is False:
error_message = 'Error: The file does not exist: {0}'.format(file_name)
sys.exit(error_message)
def get_file_md5(self, file_name):
"""Function: get_file_md5
:param file_name: the file name
:return: the md5 hash of the file
"""
self.valid_file_exist(file_name)
hash_md5 = hashlib.md5()
with open(file_name, "rb") as f:
for chunk in iter(lambda: f.read(40960), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def get_file_modify_time(self, file_name, enable_ms=False):
"""Function: get_file_modify_time
:param file_name: the file name
:param enable_ms: enable milliseconds (default False)
:return: the human-readable modification time
"""
self.valid_file_exist(file_name)
modified = os.path.getmtime(file_name)
modified_s, modified_ms = divmod(modified * 1000, 1000)
if enable_ms is False:
modified_pretty = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(modified_s))
else:
modified_pretty = '%s.%03d' % (time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(modified_s)), modified_ms)
return modified_pretty
def get_csv_lines_count(self, file_name):
"""Function: get_csv_lines_count.
:param file_name: the file name
:return: the csv line count (the header row is not counted)
"""
has_header = self.get_csv_header(file_name)
with open(file_name) as f:
csv_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
count = 0 if has_header else 1
for row in csv_reader:
count += 1
return count
def convert_csv_data_to_int_float(self, file_name=None, csv_reader=None):
"""Function: convert_csv_data_to_int_float
:param file_name: the file name (default None)
:param csv_reader: the csv dict reader (default None)
The csv_reader can be supplied in 2 ways:
1. use csv.DictReader to get the csv_reader object
2. use a list of dicts to make up the csv_reader, in the following format
[
{'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
{'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
{'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
...
]
"""
# init
int_type = defaultdict(list)
float_type = defaultdict(list)
keys = list()
csv_reader = list() if csv_reader is None else csv_reader
csv_reader_bk = csv_reader
has_header = True
# Verify the csv_reader
csv_reader_type = type(csv_reader)
is_generator_type = isinstance(csv_reader, types.GeneratorType)
if csv_reader_type != list and csv_reader_type != csv.DictReader and not is_generator_type:
error_message = 'Error: The csv_reader type is not expected: {0}, ' \
'should list type or csv.DictReader'.format(csv_reader_type)
sys.exit(error_message)
if is_generator_type:
csv_reader, csv_reader_bk = tee(csv_reader)
# Get csv_reader from csv file
f = None
if file_name:
has_header = self.get_csv_header(file_name)
f = open(file_name)
csv_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
csv_reader, csv_reader_bk = tee(csv_reader)
# Process
for row in csv_reader:
keys = row.keys()
for key in keys:
value = row[key]
len_value = len(value)
# Continue If Value Empty
if len_value == 0:
int_type[key].append(False)
float_type[key].append(False)
continue
# Valid Int Type
try:
if float(value).is_integer():
int_type[key].append(True)
else:
int_type[key].append(False)
except ValueError:
int_type[key].append(False)
# Valid Float Type
try:
float(value)
float_type[key].append(True)
except ValueError:
float_type[key].append(False)
# Valid the key if no header
if keys and not has_header:
for key in keys:
len_key = len(key)
# Continue If Key Empty
if len_key == 0:
continue
# Valid Int Type
try:
if float(key).is_integer():
int_type[key].append(True)
else:
int_type[key].append(False)
except ValueError:
int_type[key].append(False)
# Valid Float Type
try:
float(key)
float_type[key].append(True)
except ValueError:
float_type[key].append(False)
# Finalize Type
int_type = {k: all(int_type[k]) for k in int_type}
float_type = {k: all(float_type[k]) for k in float_type}
# Yield Data
i = 1
for row in csv_reader_bk:
keys = row.keys()
for key in keys:
value = row[key]
int_status = int_type[key]
len_value = len(value)
if len_value == 0:
continue
if int_status is True:
row[key] = int(float(value)) if int_type[key] is True else value
else:
row[key] = float(value) if float_type[key] is True else value
yield row, int_type, float_type
if not has_header and i == 1:
for key in keys:
int_status = int_type[key]
len_key = len(key)
if len_key == 0:
continue
if int_status is True:
row[key] = int(float(key)) if int_type[key] is True else key
else:
row[key] = float(key) if float_type[key] is True else key
yield row, int_type, float_type
i += 1
# Close file
if file_name:
f.close()
def add_columns_to_csv(self,
file_name,
target,
data,
save_csv_file=True):
"""Function: add_columns_to_csv
:param file_name: the file name
:param target: the target file to save result
:param data: the new columns data, list type, the item is dict.
for example: [{"new_header_1": ["new_value_1", "new_value_2", "new_value_3"]},
{"new_header_2": ["new_value_1", "new_value_2", "new_value_3"]}
]
:param save_csv_file: save csv file to local (default True)
:return: the new csv rows as dicts
"""
has_header = self.get_csv_header(file_name)
# Process data
data_type = type(data)
error_message = 'Error: The data should be list type, the item should be dict. Or the json type as following ' \
'for example: [{"new_header_1": ["new_value_1", "new_value_2", "new_value_3"]}, ' \
'{"new_header_2": ["new_value_1", "new_value_2", "new_value_3"]}]'
try:
check_data_type = data_type is not list and data_type is not str and data_type is not unicode
except NameError:
check_data_type = data_type is not list and data_type is not str
if check_data_type:
sys.exit(error_message)
try:
check_data_type = data_type is str or data_type is unicode
except NameError:
check_data_type = data_type is str
if check_data_type:
try:
data = json.loads(data)
except ValueError:
sys.exit(error_message)
# Add columns
target_writer = None
target_file = None
if save_csv_file:
target_file = open(target, 'w+')
target_writer = csv.writer(target_file, delimiter=self.delimiter, lineterminator=self.lineterminator)
with open(file_name) as f:
source_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
new_headers = [list(x.keys())[0] for x in data]
row_id = 0
for row in source_reader:
values = list(row.values())
if row_id == 0:
headers = list(row.keys())
if not has_header:
continue
headers += new_headers
if save_csv_file:
target_writer.writerow(headers)
new_values = list()
for x in data:
try:
value = list(x.values())[0][row_id]
except IndexError:
print('Warning: The provided column length is less than with the source csv length. '
'Use "null" to fill the empty data')
value = 'null'
new_values.append(value)
values += new_values
row_id += 1
if save_csv_file:
target_writer.writerow(values)
yield dict(zip(headers, values))
if save_csv_file:
target_file.close()
| <filename>src/ExportCsvToInflux/csv_object.py
from collections import defaultdict
from .base_object import BaseObject
from itertools import tee
from glob import glob
import hashlib
import types
import time
import json
import csv
import sys
import os
class CSVObject(object):
"""CSV Object"""
def __init__(self, delimiter=',', lineterminator='\n'):
self.delimiter = delimiter
self.lineterminator = lineterminator
def get_csv_header(self, file_name):
"""Function: get_csv_header.
:param file_name: the file name
:return: the csv header as a list
"""
self.valid_file_exist(file_name)
with open(file_name) as f:
sniffer = csv.Sniffer()
try:
has_header = sniffer.has_header(f.read(40960))
except csv.Error:
has_header = False
f.seek(0)
csv_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
headers = csv_reader.fieldnames
is_header = not any(field.isdigit() for field in headers)
headers = headers if has_header or is_header else []
return headers
@staticmethod
def search_files_in_dir(directory, match_suffix='.csv', filter_pattern='_influx.csv'):
"""Function: search_files_in_dir
:param directory: the directory
:param match_suffix: match the file suffix, use comma to separate, only string, not support regex
:param filter_pattern: filter the files, only string, not support regex
"""
base_object = BaseObject()
match_suffix = base_object.str_to_list(match_suffix, lower=True)
filter_pattern = base_object.str_to_list(filter_pattern, lower=True)
# Is file
is_file = os.path.isfile(directory)
if is_file:
yield directory
# Search directory
for x in os.walk(directory):
for y in glob(os.path.join(x[0], '*.*')):
# Continue if directory
try:
check_directory = os.path.isdir(y)
except UnicodeEncodeError as e:
y = y.encode('utf-8', 'ignore')
print('Warning: Unicode Encode Error found when checking isdir {0}: {1}'.format(y, e))
check_directory = os.path.isdir(y)
if check_directory is True:
continue
# Filter Out
match_suffix_status = any(the_filter in y.lower() for the_filter in match_suffix)
filter_pattern_status = any(the_filter in y.lower() for the_filter in filter_pattern)
if match_suffix_status is True and filter_pattern_status is False:
yield y
@staticmethod
def valid_file_exist(file_name):
"""Function: valid_file_exist
:param file_name: the file name
"""
file_exists = os.path.exists(file_name)
if file_exists is False:
error_message = 'Error: The file does not exist: {0}'.format(file_name)
sys.exit(error_message)
def get_file_md5(self, file_name):
"""Function: get_file_md5
:param file_name: the file name
:return: the md5 hash of the file
"""
self.valid_file_exist(file_name)
hash_md5 = hashlib.md5()
with open(file_name, "rb") as f:
for chunk in iter(lambda: f.read(40960), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def get_file_modify_time(self, file_name, enable_ms=False):
"""Function: get_file_modify_time
:param file_name: the file name
:param enable_ms: enable milliseconds (default False)
:return: the human-readable modification time
"""
self.valid_file_exist(file_name)
modified = os.path.getmtime(file_name)
modified_s, modified_ms = divmod(modified * 1000, 1000)
if enable_ms is False:
modified_pretty = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(modified_s))
else:
modified_pretty = '%s.%03d' % (time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(modified_s)), modified_ms)
return modified_pretty
def get_csv_lines_count(self, file_name):
"""Function: get_csv_lines_count.
:param file_name: the file name
:return: the csv line count (the header row is not counted)
"""
has_header = self.get_csv_header(file_name)
with open(file_name) as f:
csv_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
count = 0 if has_header else 1
for row in csv_reader:
count += 1
return count
def convert_csv_data_to_int_float(self, file_name=None, csv_reader=None):
"""Function: convert_csv_data_to_int_float
:param file_name: the file name (default None)
:param csv_reader: the csv dict reader (default None)
        The csv_reader can be provided in 2 ways:
        1. use csv.DictReader to get the csv_reader object
        2. build it from a list of dicts, formatted as follows
[
{'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
{'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
{'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...},
...
]
"""
# init
int_type = defaultdict(list)
float_type = defaultdict(list)
keys = list()
csv_reader = list() if csv_reader is None else csv_reader
csv_reader_bk = csv_reader
has_header = True
# Verify the csv_reader
csv_reader_type = type(csv_reader)
is_generator_type = isinstance(csv_reader, types.GeneratorType)
if csv_reader_type != list and csv_reader_type != csv.DictReader and not is_generator_type:
error_message = 'Error: The csv_reader type is not expected: {0}, ' \
                            'should be list type or csv.DictReader'.format(csv_reader_type)
sys.exit(error_message)
if is_generator_type:
csv_reader, csv_reader_bk = tee(csv_reader)
# Get csv_reader from csv file
f = None
if file_name:
has_header = self.get_csv_header(file_name)
f = open(file_name)
csv_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
csv_reader, csv_reader_bk = tee(csv_reader)
# Process
for row in csv_reader:
keys = row.keys()
for key in keys:
value = row[key]
len_value = len(value)
# Continue If Value Empty
if len_value == 0:
int_type[key].append(False)
float_type[key].append(False)
continue
# Valid Int Type
try:
if float(value).is_integer():
int_type[key].append(True)
else:
int_type[key].append(False)
except ValueError:
int_type[key].append(False)
# Valid Float Type
try:
float(value)
float_type[key].append(True)
except ValueError:
float_type[key].append(False)
# Valid the key if no header
if keys and not has_header:
for key in keys:
len_key = len(key)
# Continue If Key Empty
if len_key == 0:
continue
# Valid Int Type
try:
if float(key).is_integer():
int_type[key].append(True)
else:
int_type[key].append(False)
except ValueError:
int_type[key].append(False)
# Valid Float Type
try:
float(key)
float_type[key].append(True)
except ValueError:
float_type[key].append(False)
# Finalize Type
int_type = {k: all(int_type[k]) for k in int_type}
float_type = {k: all(float_type[k]) for k in float_type}
# Yield Data
i = 1
for row in csv_reader_bk:
keys = row.keys()
for key in keys:
value = row[key]
int_status = int_type[key]
len_value = len(value)
if len_value == 0:
continue
if int_status is True:
row[key] = int(float(value)) if int_type[key] is True else value
else:
row[key] = float(value) if float_type[key] is True else value
yield row, int_type, float_type
if not has_header and i == 1:
for key in keys:
int_status = int_type[key]
len_key = len(key)
if len_key == 0:
continue
if int_status is True:
row[key] = int(float(key)) if int_type[key] is True else key
else:
row[key] = float(key) if float_type[key] is True else key
yield row, int_type, float_type
i += 1
# Close file
if file_name:
f.close()
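    # --- Editor's note: hedged usage sketch, not part of the original source. ---
    # convert_csv_data_to_int_float is a generator: each iteration yields a
    # (row, int_type, float_type) tuple, where int_type and float_type map each
    # column name to a bool describing the detected type. A typical call,
    # assuming an instance named csv_obj of the enclosing class (defined above
    # this excerpt) and a placeholder file name 'data.csv':
    #
    #     for row, int_type, float_type in csv_obj.convert_csv_data_to_int_float(file_name='data.csv'):
    #         print(row)  # values already cast to int/float where the whole column allows it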
def add_columns_to_csv(self,
file_name,
target,
data,
save_csv_file=True):
"""Function: add_columns_to_csv
:param file_name: the file name
:param target: the target file to save result
:param data: the new columns data, list type, the item is dict.
for example: [{"new_header_1": ["new_value_1", "new_value_2", "new_value_3"]},
{"new_header_2": ["new_value_1", "new_value_2", "new_value_3"]}
]
:param save_csv_file: save csv file to local (default True)
:return return the new csv data by dict
"""
has_header = self.get_csv_header(file_name)
# Process data
data_type = type(data)
error_message = 'Error: The data should be list type, the item should be dict. Or the json type as following ' \
'for example: [{"new_header_1": ["new_value_1", "new_value_2", "new_value_3"]}, ' \
'{"new_header_2": ["new_value_1", "new_value_2", "new_value_3"]}]'
try:
check_data_type = data_type is not list and data_type is not str and data_type is not unicode
except NameError:
check_data_type = data_type is not list and data_type is not str
if check_data_type:
sys.exit(error_message)
try:
check_data_type = data_type is str or data_type is unicode
except NameError:
check_data_type = data_type is str
if check_data_type:
try:
data = json.loads(data)
except ValueError:
sys.exit(error_message)
# Add columns
target_writer = None
target_file = None
if save_csv_file:
target_file = open(target, 'w+')
target_writer = csv.writer(target_file, delimiter=self.delimiter, lineterminator=self.lineterminator)
with open(file_name) as f:
source_reader = csv.DictReader(f, delimiter=self.delimiter, lineterminator=self.lineterminator)
new_headers = [list(x.keys())[0] for x in data]
row_id = 0
for row in source_reader:
values = list(row.values())
if row_id == 0:
headers = list(row.keys())
if not has_header:
continue
headers += new_headers
if save_csv_file:
target_writer.writerow(headers)
new_values = list()
for x in data:
try:
value = list(x.values())[0][row_id]
except IndexError:
print('Warning: The provided column length is less than with the source csv length. '
'Use "null" to fill the empty data')
value = 'null'
new_values.append(value)
values += new_values
row_id += 1
if save_csv_file:
target_writer.writerow(values)
yield dict(zip(headers, values))
if save_csv_file:
target_file.close()
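# --- Editor's note: hedged usage sketch, not part of the original source. ---
# add_columns_to_csv accepts the new columns either as a list of single-key
# dicts or as the equivalent JSON string, and it is a generator, so it must be
# iterated for the target file to be written. csv_obj, "in.csv" and
# "out_influx.csv" below are placeholders; the class providing these methods
# (with its delimiter/lineterminator attributes) is defined above this excerpt.
#
#     extra = [{"flag": ["a", "b", "c"]}]
#     for merged_row in csv_obj.add_columns_to_csv("in.csv", "out_influx.csv", extra):
#         print(merged_row)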
| en | 0.459422 | CSV Object Function: get_csv_header. :param file_name: the file name :return return csv header as list Function: search_files_in_dir :param directory: the directory :param match_suffix: match the file suffix, use comma to separate, only string, not support regex :param filter_pattern: filter the files, only string, not support regex # Is file # Search directory # Continue if directory # Filter Out Function: valid_file_exist :param file_name: the file name Function: get_file_md5 :param file_name: the file name :return return the file md5 Function: get_file_modify_time :param file_name: the file name :param enable_ms: enable milliseconds (default False) :return return the human readable time Function: get_csv_lines_count. :param file_name: the file name :return return csv line count. No count header into count Function: convert_csv_data_to_int_float :param file_name: the file name (default None) :param csv_reader: the csv dict reader (default None) The csv_reader could come from 2 ways: 1. use csv.DictReader to get the csv_reader object 2. use dict to make up the csv_reader, the dict format is as following [ {'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...}, {'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...}, {'csv_header_1': 'value', 'csv_header_2': 'value', 'csv_header_3': 'value', ...}, ... ] # init # Verify the csv_reader # Get csv_reader from csv file # Process # Continue If Value Empty # Valid Int Type # Valid Float Type # Valid the key if no header # Continue If Key Empty # Valid Int Type # Valid Float Type # Finalize Type # Yield Data # Close file Function: add_columns_to_csv :param file_name: the file name :param target: the target file to save result :param data: the new columns data, list type, the item is dict. for example: [{"new_header_1": ["new_value_1", "new_value_2", "new_value_3"]}, {"new_header_2": ["new_value_1", "new_value_2", "new_value_3"]} ] :param save_csv_file: save csv file to local (default True) :return return the new csv data by dict # Process data # Add columns | 3.080251 | 3 |
backend/uclapi/dashboard/migrations/0001_initial.py | balping/uclapi | 0 | 6632037 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-13 14:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='APICall',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ts', models.DateTimeField(auto_now_add=True)),
('raw_request', models.TextField(max_length=10000000)),
],
),
migrations.CreateModel(
name='App',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=1000)),
('api_token', models.CharField(max_length=1000)),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.CharField(max_length=100)),
('full_name', models.CharField(max_length=1000)),
('given_name', models.CharField(max_length=100)),
('cn', models.CharField(max_length=100, unique=True)),
('department', models.CharField(max_length=1000)),
('employee_id', models.CharField(max_length=100, unique=True)),
('raw_intranet_groups', models.CharField(max_length=2000)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AddField(
model_name='app',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to='dashboard.User'),
),
migrations.AddField(
model_name='apicall',
name='app',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='api_call', to='dashboard.App'),
),
migrations.AddField(
model_name='apicall',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='api_call', to='dashboard.User'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-13 14:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='APICall',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ts', models.DateTimeField(auto_now_add=True)),
('raw_request', models.TextField(max_length=10000000)),
],
),
migrations.CreateModel(
name='App',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=1000)),
('api_token', models.CharField(max_length=1000)),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.CharField(max_length=100)),
('full_name', models.CharField(max_length=1000)),
('given_name', models.CharField(max_length=100)),
('cn', models.CharField(max_length=100, unique=True)),
('department', models.CharField(max_length=1000)),
('employee_id', models.CharField(max_length=100, unique=True)),
('raw_intranet_groups', models.CharField(max_length=2000)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AddField(
model_name='app',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to='dashboard.User'),
),
migrations.AddField(
model_name='apicall',
name='app',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='api_call', to='dashboard.App'),
),
migrations.AddField(
model_name='apicall',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='api_call', to='dashboard.User'),
),
]
| en | 0.750156 | # -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-01-13 14:58 | 1.688613 | 2 |
xmodaler/modeling/meta_arch/__init__.py | cclauss/xmodaler | 830 | 6632038 | # -*- coding: utf-8 -*-
"""
From original at https://github.com/facebookresearch/detectron2/blob/master/detectron2/modeling/meta_arch/__init__.py
Original copyright of Facebook code below, modifications by <NAME>, Copyright 2021.
"""
# Copyright (c) Facebook, Inc. and its affiliates.
from .build import META_ARCH_REGISTRY, build_model, add_config
from .rnn_att_enc_dec import RnnAttEncoderDecoder
from .transformer_enc_dec import TransformerEncoderDecoder
from .tden import TDENBiTransformer, TDENPretrain, TDENCaptioner
from .uniter import UniterPretrain, UniterForMMUnderstanding
__all__ = list(globals().keys()) | # -*- coding: utf-8 -*-
"""
From original at https://github.com/facebookresearch/detectron2/blob/master/detectron2/modeling/meta_arch/__init__.py
Original copyright of Facebook code below, modifications by <NAME>, Copyright 2021.
"""
# Copyright (c) Facebook, Inc. and its affiliates.
from .build import META_ARCH_REGISTRY, build_model, add_config
from .rnn_att_enc_dec import RnnAttEncoderDecoder
from .transformer_enc_dec import TransformerEncoderDecoder
from .tden import TDENBiTransformer, TDENPretrain, TDENCaptioner
from .uniter import UniterPretrain, UniterForMMUnderstanding
__all__ = list(globals().keys()) | en | 0.743874 | # -*- coding: utf-8 -*- From original at https://github.com/facebookresearch/detectron2/blob/master/detectron2/modeling/meta_arch/__init__.py Original copyright of Facebook code below, modifications by <NAME>, Copyright 2021. # Copyright (c) Facebook, Inc. and its affiliates. | 1.184496 | 1 |
demo/socket_send.py | konflic/python_qa_socket | 0 | 6632039 | import socket
from demo.config import LOCALHOST
# Define this target port
TARGET_PORT = None
my_socket = socket.socket()
address_and_port = (LOCALHOST, TARGET_PORT)
my_socket.connect(address_and_port)
# https://docs.python.org/3/library/socket.html#socket.socket.send
data_amount = my_socket.send(b"Hello, socket!")
print("Send", data_amount, "bytes")
my_socket.close()
| import socket
from demo.config import LOCALHOST
# Define this target port
TARGET_PORT = None
my_socket = socket.socket()
address_and_port = (LOCALHOST, TARGET_PORT)
my_socket.connect(address_and_port)
# https://docs.python.org/3/library/socket.html#socket.socket.send
data_amount = my_socket.send(b"Hello, socket!")
print("Send", data_amount, "bytes")
my_socket.close()
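# --- Editor's sketch: an assumption, not part of the original demo. ---
# A minimal listener you could start first so the send() above has a peer.
# LOCALHOST comes from demo.config exactly as in the client; the port must
# match whatever TARGET_PORT is set to.
#
# import socket
# from demo.config import LOCALHOST
#
# server = socket.socket()
# server.bind((LOCALHOST, TARGET_PORT))
# server.listen(1)
# connection, address = server.accept()
# print(connection.recv(1024))  # expected: b"Hello, socket!"
# connection.close()
# server.close()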
| en | 0.559227 | # Define this target port # https://docs.python.org/3/library/socket.html#socket.socket.send | 3.263992 | 3 |
crslab/config/config.py | Xiaolong-Qi/CRSLab | 1 | 6632040 | # @Time : 2020/11/22
# @Author : <NAME>
# @Email : <EMAIL>
# UPDATE:
# @Time : 2020/11/23, 2020/12/20
# @Author : <NAME>, <NAME>
# @Email : <EMAIL>, <EMAIL>
import json
import os
import time
from pprint import pprint
import yaml
from loguru import logger
from tqdm import tqdm
class Config:
"""Configurator module that load the defined parameters."""
def __init__(self, config_file, debug=False):
"""Load parameters and set log level.
Args:
config_file (str): path to the config file, which should be in ``yaml`` format.
You can use default config provided in the `Github repo`_, or write it by yourself.
debug (bool, optional): whether to enable debug function during running. Defaults to False.
.. _Github repo:
https://github.com/RUCAIBox/CRSLab
"""
self.opt = self.load_yaml_configs(config_file)
dataset = self.opt['dataset']
tokenize = self.opt['tokenize']
if isinstance(tokenize, dict):
tokenize = ', '.join(tokenize.values())
model = self.opt.get('model', None)
rec_model = self.opt.get('rec_model', None)
conv_model = self.opt.get('conv_model', None)
policy_model = self.opt.get('policy_model', None)
if model:
model_name = model
else:
models = []
if rec_model:
models.append(rec_model)
if conv_model:
models.append(conv_model)
if policy_model:
models.append(policy_model)
model_name = '_'.join(models)
self.opt['model_name'] = model_name
log_name = self.opt.get("log_name", dataset + '_' + model_name + '_' + time.strftime("%Y-%m-%d-%H-%M-%S",
time.localtime())) + ".log"
if not os.path.exists("log"):
os.makedirs("log")
logger.remove()
if debug:
level = 'DEBUG'
else:
level = 'INFO'
logger.add(os.path.join("log", log_name), level=level)
logger.add(lambda msg: tqdm.write(msg, end=''), colorize=True, level=level)
logger.info(f"[Dataset: {dataset} tokenized in {tokenize}]")
if model:
logger.info(f'[Model: {model}]')
if rec_model:
logger.info(f'[Recommendation Model: {rec_model}]')
if conv_model:
logger.info(f'[Conversation Model: {conv_model}]')
if policy_model:
logger.info(f'[Policy Model: {policy_model}]')
logger.info("[Config]" + '\n' + json.dumps(self.opt, indent=4))
@staticmethod
def load_yaml_configs(filename):
"""This function reads ``yaml`` file to build config dictionary
Args:
filename (str): path to ``yaml`` config
Returns:
dict: config
"""
config_dict = dict()
with open(filename, 'r', encoding='utf-8') as f:
config_dict.update(yaml.safe_load(f.read()))
return config_dict
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("index must be a str.")
self.opt[key] = value
def __getitem__(self, item):
if item in self.opt:
return self.opt[item]
else:
return None
def get(self, item, default=None):
"""Get value of corrsponding item in config
Args:
item (str): key to query in config
default (optional): default value for item if not found in config. Defaults to None.
Returns:
            value of corresponding item in config
"""
if item in self.opt:
return self.opt[item]
else:
return default
def __contains__(self, key):
if not isinstance(key, str):
raise TypeError("index must be a str.")
return key in self.opt
def __str__(self):
return str(self.opt)
def __repr__(self):
return self.__str__()
if __name__ == '__main__':
opt_dict = Config('../../config/kbrd/redial.yaml')
pprint(opt_dict)
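# --- Editor's note: hedged example, not part of the original source. ---
# A minimal YAML file accepted by Config. The key names are the ones read in
# Config.__init__ above; the values are invented placeholders:
#
#     dataset: ReDial
#     tokenize: nltk
#     model: KBRD
#
# Optional keys such as rec_model, conv_model, policy_model and log_name are
# picked up the same way when present.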
| # @Time : 2020/11/22
# @Author : <NAME>
# @Email : <EMAIL>
# UPDATE:
# @Time : 2020/11/23, 2020/12/20
# @Author : <NAME>, <NAME>
# @Email : <EMAIL>, <EMAIL>
import json
import os
import time
from pprint import pprint
import yaml
from loguru import logger
from tqdm import tqdm
class Config:
"""Configurator module that load the defined parameters."""
def __init__(self, config_file, debug=False):
"""Load parameters and set log level.
Args:
config_file (str): path to the config file, which should be in ``yaml`` format.
You can use default config provided in the `Github repo`_, or write it by yourself.
debug (bool, optional): whether to enable debug function during running. Defaults to False.
.. _Github repo:
https://github.com/RUCAIBox/CRSLab
"""
self.opt = self.load_yaml_configs(config_file)
dataset = self.opt['dataset']
tokenize = self.opt['tokenize']
if isinstance(tokenize, dict):
tokenize = ', '.join(tokenize.values())
model = self.opt.get('model', None)
rec_model = self.opt.get('rec_model', None)
conv_model = self.opt.get('conv_model', None)
policy_model = self.opt.get('policy_model', None)
if model:
model_name = model
else:
models = []
if rec_model:
models.append(rec_model)
if conv_model:
models.append(conv_model)
if policy_model:
models.append(policy_model)
model_name = '_'.join(models)
self.opt['model_name'] = model_name
log_name = self.opt.get("log_name", dataset + '_' + model_name + '_' + time.strftime("%Y-%m-%d-%H-%M-%S",
time.localtime())) + ".log"
if not os.path.exists("log"):
os.makedirs("log")
logger.remove()
if debug:
level = 'DEBUG'
else:
level = 'INFO'
logger.add(os.path.join("log", log_name), level=level)
logger.add(lambda msg: tqdm.write(msg, end=''), colorize=True, level=level)
logger.info(f"[Dataset: {dataset} tokenized in {tokenize}]")
if model:
logger.info(f'[Model: {model}]')
if rec_model:
logger.info(f'[Recommendation Model: {rec_model}]')
if conv_model:
logger.info(f'[Conversation Model: {conv_model}]')
if policy_model:
logger.info(f'[Policy Model: {policy_model}]')
logger.info("[Config]" + '\n' + json.dumps(self.opt, indent=4))
@staticmethod
def load_yaml_configs(filename):
"""This function reads ``yaml`` file to build config dictionary
Args:
filename (str): path to ``yaml`` config
Returns:
dict: config
"""
config_dict = dict()
with open(filename, 'r', encoding='utf-8') as f:
config_dict.update(yaml.safe_load(f.read()))
return config_dict
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("index must be a str.")
self.opt[key] = value
def __getitem__(self, item):
if item in self.opt:
return self.opt[item]
else:
return None
def get(self, item, default=None):
"""Get value of corrsponding item in config
Args:
item (str): key to query in config
default (optional): default value for item if not found in config. Defaults to None.
Returns:
            value of corresponding item in config
"""
if item in self.opt:
return self.opt[item]
else:
return default
def __contains__(self, key):
if not isinstance(key, str):
raise TypeError("index must be a str.")
return key in self.opt
def __str__(self):
return str(self.opt)
def __repr__(self):
return self.__str__()
if __name__ == '__main__':
opt_dict = Config('../../config/kbrd/redial.yaml')
pprint(opt_dict)
| en | 0.536933 | # @Time : 2020/11/22 # @Author : <NAME> # @Email : <EMAIL> # UPDATE: # @Time : 2020/11/23, 2020/12/20 # @Author : <NAME>, <NAME> # @Email : <EMAIL>, <EMAIL> Configurator module that load the defined parameters. Load parameters and set log level. Args: config_file (str): path to the config file, which should be in ``yaml`` format. You can use default config provided in the `Github repo`_, or write it by yourself. debug (bool, optional): whether to enable debug function during running. Defaults to False. .. _Github repo: https://github.com/RUCAIBox/CRSLab This function reads ``yaml`` file to build config dictionary Args: filename (str): path to ``yaml`` config Returns: dict: config Get value of corrsponding item in config Args: item (str): key to query in config default (optional): default value for item if not found in config. Defaults to None. Returns: value of corrsponding item in config | 2.126657 | 2 |
script/Other/client.py | StevenDias33/InfoSecNotes | 0 | 6632041 | import socket # Import socket module
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 60000 # Reserve a port for your service.
s.connect((host, port))
s.send("Hello server!")
with open('received_file', 'wb') as f:
    print('file opened')
while True:
print('receiving data...')
data = s.recv(1024)
        print('data=%s' % data)
if not data:
break
# write data to a file
f.write(data)
f.close()
print('Successfully get the file')
s.close()
print('connection closed')
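# --- Editor's sketch: an assumption, not part of the original script. ---
# The client above expects a peer on port 60000 that accepts the greeting and
# then streams a file. A minimal counterpart could look like this; the file
# name 'file_to_send' is a placeholder.
#
# import socket
#
# server = socket.socket()
# server.bind((socket.gethostname(), 60000))
# server.listen(1)
# conn, addr = server.accept()
# print(conn.recv(1024))  # the "Hello server!" greeting
# with open('file_to_send', 'rb') as f:
#     for chunk in iter(lambda: f.read(1024), b''):
#         conn.sendall(chunk)
# conn.close()
# server.close()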
| import socket # Import socket module
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 60000 # Reserve a port for your service.
s.connect((host, port))
s.send("Hello server!")
with open('received_file', 'wb') as f:
    print('file opened')
while True:
print('receiving data...')
data = s.recv(1024)
        print('data=%s' % data)
if not data:
break
# write data to a file
f.write(data)
f.close()
print('Successfully get the file')
s.close()
print('connection closed')
| en | 0.641348 | # Import socket module # Create a socket object # Get local machine name # Reserve a port for your service. # write data to a file | 3.138886 | 3 |
tools/mircounts/format_fasta_hairpins.py | moskalenko/tools-artbio | 0 | 6632042 | import argparse
import gzip
def Parser():
the_parser = argparse.ArgumentParser()
the_parser.add_argument(
'--hairpins_path', action="store", type=str,
help="BASE url. ex: /pub/mirbase/22/")
the_parser.add_argument(
'--output', action="store", type=str,
help="parsed hairpin output in fasta format")
the_parser.add_argument(
'--basename', action="store", type=str,
help="genome basename of the parsed fasta")
args = the_parser.parse_args()
return args
def get_fasta_dic(gzipfile):
'''
gzipfile value example : 'mirbase/22/hairpin.fa.gz'
'''
item_dic = {}
with gzip.open(gzipfile, 'rb') as f:
current_item = ''
stringlist = []
for line in f:
line = line.decode('utf-8').strip('\n')
if (line[0] == ">"):
# dump the sequence of the previous item
if current_item and stringlist:
item_dic[current_item] = "".join(stringlist)
                # take the first word of the item
current_item = line[1:].split()[0]
stringlist = []
else:
stringlist.append(line)
item_dic[current_item] = "".join(stringlist) # for the last item
return item_dic
def convert_and_print_hairpins(gzipfile, basename, fasta_output):
raw_fasta_dict = get_fasta_dic(gzipfile)
parsed_fasta_dict = {}
trs = str.maketrans("uU", "tT")
for head in raw_fasta_dict:
if basename in head:
parsed_fasta_dict[head] = raw_fasta_dict[head].translate(trs)
with open(fasta_output, "w") as output:
for head in sorted(parsed_fasta_dict):
output.write('>%s\n%s\n' % (head, parsed_fasta_dict[head]))
def main(hairpins_path, basename, outfile):
convert_and_print_hairpins(hairpins_path, basename, outfile)
if __name__ == "__main__":
args = Parser()
main(args.hairpins_path, args.basename, args.output)
| import argparse
import gzip
def Parser():
the_parser = argparse.ArgumentParser()
the_parser.add_argument(
'--hairpins_path', action="store", type=str,
help="BASE url. ex: /pub/mirbase/22/")
the_parser.add_argument(
'--output', action="store", type=str,
help="parsed hairpin output in fasta format")
the_parser.add_argument(
'--basename', action="store", type=str,
help="genome basename of the parsed fasta")
args = the_parser.parse_args()
return args
def get_fasta_dic(gzipfile):
'''
gzipfile value example : 'mirbase/22/hairpin.fa.gz'
'''
item_dic = {}
with gzip.open(gzipfile, 'rb') as f:
current_item = ''
stringlist = []
for line in f:
line = line.decode('utf-8').strip('\n')
if (line[0] == ">"):
# dump the sequence of the previous item
if current_item and stringlist:
item_dic[current_item] = "".join(stringlist)
                # take the first word of the item
current_item = line[1:].split()[0]
stringlist = []
else:
stringlist.append(line)
item_dic[current_item] = "".join(stringlist) # for the last item
return item_dic
def convert_and_print_hairpins(gzipfile, basename, fasta_output):
raw_fasta_dict = get_fasta_dic(gzipfile)
parsed_fasta_dict = {}
trs = str.maketrans("uU", "tT")
for head in raw_fasta_dict:
if basename in head:
parsed_fasta_dict[head] = raw_fasta_dict[head].translate(trs)
with open(fasta_output, "w") as output:
for head in sorted(parsed_fasta_dict):
output.write('>%s\n%s\n' % (head, parsed_fasta_dict[head]))
def main(hairpins_path, basename, outfile):
convert_and_print_hairpins(hairpins_path, basename, outfile)
if __name__ == "__main__":
args = Parser()
main(args.hairpins_path, args.basename, args.output)
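# --- Editor's note: hedged usage example, not part of the original tool. ---
# From the command line the three argparse options above translate to a call
# such as the following; the file names and the 'dme' basename are placeholders,
# only --hairpins_path, --basename and --output are real options:
#
#     python format_fasta_hairpins.py \
#         --hairpins_path hairpin.fa.gz \
#         --basename dme \
#         --output dme_hairpins.fa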
| en | 0.495873 | gzipfile value example : 'mirbase/22/hairpin.fa.gz' # dump the sequence of the previous item # take first word of item ''' # for the last item | 3.390483 | 3 |
walky/registry.py | tallynerdy/walky | 0 | 6632043 | <reponame>tallynerdy/walky<gh_stars>0
import os
import base64
import json
import weakref
from walky.constants import *
from walky.objects import *
from walky.objects.system import *
from walky.serializer import *
def reg_object_id(obj):
""" Returns the registry encoded version of an object's id
Uses walky.objects.common.object_id so it will dig down to the
underlying object's id if required.
"""
obj_id = object_id(obj)
return hex(obj_id)[2:]
class Registry(object):
""" This should contain information required at the
connection level to allow it to operate independantly
"""
_objects_registry = None
def __init__(self):
self.reset()
def reset(self):
self._objects_registry = {}
def get(self,reg_obj_id):
return self._objects_registry.get(reg_obj_id)
def put(self,obj,reg_obj_id=None):
""" Register an object. If reg_obj_id is provided, force
the reg_obj_id to be a certain key
"""
if not reg_obj_id:
reg_obj_id = reg_object_id(obj)
self._objects_registry[reg_obj_id] = obj
return reg_obj_id
def delete(self,reg_obj_id):
if reg_obj_id in self._objects_registry:
del self._objects_registry[reg_obj_id]
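# --- Editor's sketch: hedged usage example, not part of the original module. ---
# A put/get/delete round trip against the Registry defined above. It assumes
# reg_object_id/object_id fall back to Python's id() for a plain object, as the
# docstring above implies; _Probe is a stand-in class invented here.
if __name__ == '__main__':
    class _Probe(object):
        pass

    registry = Registry()
    probe = _Probe()
    key = registry.put(probe)  # key defaults to the hex-encoded object id
    assert registry.get(key) is probe
    registry.delete(key)
    assert registry.get(key) is None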
| import os
import base64
import json
import weakref
from walky.constants import *
from walky.objects import *
from walky.objects.system import *
from walky.serializer import *
def reg_object_id(obj):
""" Returns the registry encoded version of an object's id
Uses walky.objects.common.object_id so it will dig down to the
underlying object's id if required.
"""
obj_id = object_id(obj)
return hex(obj_id)[2:]
class Registry(object):
""" This should contain information required at the
        connection level to allow it to operate independently
"""
_objects_registry = None
def __init__(self):
self.reset()
def reset(self):
self._objects_registry = {}
def get(self,reg_obj_id):
return self._objects_registry.get(reg_obj_id)
def put(self,obj,reg_obj_id=None):
""" Register an object. If reg_obj_id is provided, force
the reg_obj_id to be a certain key
"""
if not reg_obj_id:
reg_obj_id = reg_object_id(obj)
self._objects_registry[reg_obj_id] = obj
return reg_obj_id
def delete(self,reg_obj_id):
if reg_obj_id in self._objects_registry:
del self._objects_registry[reg_obj_id] | en | 0.800342 | Returns the registry encoded version of an object's id Uses walky.objects.common.object_id so it will dig down to the underlying object's id if required. This should contain information required at the connection level to allow it to operate independantly Register an object. If reg_obj_id is provided, force the reg_obj_id to be a certain key | 2.495841 | 2 |
pytorch_lightning/accelerators/cpu_backend.py | dkmiller/pytorch-lightning | 1 | 6632044 | <gh_stars>1-10
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning.utilities.exceptions import MisconfigurationException
class CPUBackend(object):
def __init__(self, trainer):
self.trainer = trainer
def setup(self, model):
# run through amp wrapper
if self.trainer.amp_backend:
raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')
# call setup after the ddp process has connected
self.trainer.call_setup_hook(model)
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
self.trainer.optimizers = optimizers
self.trainer.lr_schedulers = lr_schedulers
self.trainer.optimizer_frequencies = optimizer_frequencies
def train(self, model):
results = self.trainer.run_pretrain_routine(model)
return results
| # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning.utilities.exceptions import MisconfigurationException
class CPUBackend(object):
def __init__(self, trainer):
self.trainer = trainer
def setup(self, model):
# run through amp wrapper
if self.trainer.amp_backend:
raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')
# call setup after the ddp process has connected
self.trainer.call_setup_hook(model)
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
self.trainer.optimizers = optimizers
self.trainer.lr_schedulers = lr_schedulers
self.trainer.optimizer_frequencies = optimizer_frequencies
def train(self, model):
results = self.trainer.run_pretrain_routine(model)
return results | en | 0.875264 | # Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # run through amp wrapper # call setup after the ddp process has connected # CHOOSE OPTIMIZER # allow for lr schedulers as well | 2.105603 | 2 |
tests/modules/children/grandchildren/foo.py | Fryguy/py2rb | 124 | 6632045 | def hi():
print("Hi! I'm foo.")
| def hi():
print("Hi! I'm foo.")
| none | 1 | 1.673675 | 2 |
|
tests/__init__.py | jokaorgua/trendet | 272 | 6632046 | <reponame>jokaorgua/trendet
# Copyright 2019-2020 <NAME>
# See LICENSE for details. | # Copyright 2019-2020 <NAME>
# See LICENSE for details. | en | 0.643656 | # Copyright 2019-2020 <NAME> # See LICENSE for details. | 0.806579 | 1 |
exec_vig.py | aliasgar1978/pyVig | 0 | 6632047 | <filename>exec_vig.py
from pyVig.visio import device, VisioObject
from pyVig.static import op_file
from pyVig.stencils import *
from pyVig.database import DeviceData, CableMatrixData
# # -----------------------------------------------------------------------------------
# data_file = 'data.xlsx'
# data_file = 'data - vod.xlsx'
data_file = "data - MTP.xlsx"
DD = DeviceData(data_file)
DD.read("Devices")
DD.add_description()
CMD = CableMatrixData(data_file)
CMD.read("CableMatrix")
CMD.filter_eligible_cables_only()
CMD.calc_slop(DD)
# # # -----------------------------------------------------------------------------------
stencils = get_list_of_stencils(stencil_folder)
devices = {}
x_coordinates= []
y_coordinates= []
with VisioObject(stencils=stencils, outputFile=op_file) as V:
print("Visio Drawing Inprogress, Do not close Visio Drawing while its running...")
for i, dev in DD.df.iterrows():
if not ((dev.hostname == CMD.df.dev_a).any()
or (dev.hostname == CMD.df.dev_b).any() ):
continue
x_coordinates.append(dev.x)
y_coordinates.append(dev.y)
stencil = dev.stencil if dev.stencil else "Network and Peripherals"
devices[dev.hostname] = device( # drop device
stencil=stencil,
visObj=V,
item=stencil_icons[dev.dev_type],
x=dev.x,
y=dev.y)
devices[dev.hostname].description(dev.description) # description of device
for i, connector in CMD.df.iterrows():
if connector.dev_a and connector.dev_b:
angle = connector.angle_straight_connector if connector.conn_type_x == "straight" else connector.angle_angled_connector
devices[connector.dev_a].connect(devices[connector.dev_b], # connect these two devices
connector_type=connector.conn_type_x,
angle=angle,
aport=connector.dev_a_port_y,
color=connector.color_x,
weight=connector.weight_x,
pattern=connector.pattern_x,
)
height = max(y_coordinates) - min(y_coordinates) + 2
width = max(x_coordinates) - min(x_coordinates) + 3
V.fit_to_draw(height, width)
print("Finished with drawing, Save the file as necessary.")
# # # -----------------------------------------------------------------------------------
# y1,x1 = 10,1
# y11,x11 = 10,8
# y12,x12 = 1,8
# y13,x13 = 1,1
# y2,x2 = 5,5
# V = VisioObject(stencils=stencils, outputFile=op_file)
# d1 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y1,x1)
# d2 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y2,x2)
# d11 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y11,x11)
# d12 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y12,x12)
# d13 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y13,x13)
| <filename>exec_vig.py
from pyVig.visio import device, VisioObject
from pyVig.static import op_file
from pyVig.stencils import *
from pyVig.database import DeviceData, CableMatrixData
# # -----------------------------------------------------------------------------------
# data_file = 'data.xlsx'
# data_file = 'data - vod.xlsx'
data_file = "data - MTP.xlsx"
DD = DeviceData(data_file)
DD.read("Devices")
DD.add_description()
CMD = CableMatrixData(data_file)
CMD.read("CableMatrix")
CMD.filter_eligible_cables_only()
CMD.calc_slop(DD)
# # # -----------------------------------------------------------------------------------
stencils = get_list_of_stencils(stencil_folder)
devices = {}
x_coordinates= []
y_coordinates= []
with VisioObject(stencils=stencils, outputFile=op_file) as V:
print("Visio Drawing Inprogress, Do not close Visio Drawing while its running...")
for i, dev in DD.df.iterrows():
if not ((dev.hostname == CMD.df.dev_a).any()
or (dev.hostname == CMD.df.dev_b).any() ):
continue
x_coordinates.append(dev.x)
y_coordinates.append(dev.y)
stencil = dev.stencil if dev.stencil else "Network and Peripherals"
devices[dev.hostname] = device( # drop device
stencil=stencil,
visObj=V,
item=stencil_icons[dev.dev_type],
x=dev.x,
y=dev.y)
devices[dev.hostname].description(dev.description) # description of device
for i, connector in CMD.df.iterrows():
if connector.dev_a and connector.dev_b:
angle = connector.angle_straight_connector if connector.conn_type_x == "straight" else connector.angle_angled_connector
devices[connector.dev_a].connect(devices[connector.dev_b], # connect these two devices
connector_type=connector.conn_type_x,
angle=angle,
aport=connector.dev_a_port_y,
color=connector.color_x,
weight=connector.weight_x,
pattern=connector.pattern_x,
)
height = max(y_coordinates) - min(y_coordinates) + 2
width = max(x_coordinates) - min(x_coordinates) + 3
V.fit_to_draw(height, width)
print("Finished with drawing, Save the file as necessary.")
# # # -----------------------------------------------------------------------------------
# y1,x1 = 10,1
# y11,x11 = 10,8
# y12,x12 = 1,8
# y13,x13 = 1,1
# y2,x2 = 5,5
# V = VisioObject(stencils=stencils, outputFile=op_file)
# d1 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y1,x1)
# d2 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y2,x2)
# d11 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y11,x11)
# d12 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y12,x12)
# d13 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y13,x13)
| en | 0.332931 | # # ----------------------------------------------------------------------------------- # data_file = 'data.xlsx' # data_file = 'data - vod.xlsx' # # # ----------------------------------------------------------------------------------- # drop device # description of device # connect these two devices # # # ----------------------------------------------------------------------------------- # y1,x1 = 10,1 # y11,x11 = 10,8 # y12,x12 = 1,8 # y13,x13 = 1,1 # y2,x2 = 5,5 # V = VisioObject(stencils=stencils, outputFile=op_file) # d1 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y1,x1) # d2 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y2,x2) # d11 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y11,x11) # d12 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y12,x12) # d13 = device( "Network and Peripherals", V, stencil_icons['L3_SW'], y13,x13) | 2.352254 | 2 |
connector-packager/connector_packager/jar_jdk_packager.py | LevyForchh/connector-plugin-sdk | 0 | 6632048 | import os
import logging
import subprocess
import shutil
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import List
from .connector_file import ConnectorFile
from .helper import check_jdk_environ_variable
from .version import __min_version_tableau__
JAR_EXECUTABLE_NAME = "jar"
if os.name == 'nt':
JAR_EXECUTABLE_NAME += ".exe"
logger = logging.getLogger('packager_logger')
MANIFEST_FILE_TYPE = "manifest"
MANIFEST_FILE_NAME = MANIFEST_FILE_TYPE + ".xml"
MANIFEST_FILE_COPY_NAME = MANIFEST_FILE_TYPE + "_copy.xml"
MANIFEST_ROOT_ELEM = "connector-plugin"
MIN_TABLEAU_VERSION_ATTR = "min-version-tableau"
def stamp_min_support_version(input_dir: Path, file_list: List[ConnectorFile], jar_filename: str) -> bool:
"""
Stamp of minimum support version to the connector manifest in packaged jar file
:param input_dir: source dir of files to be packaged
:type input_dir: Path
:param file_list: files need to be packaged
:type file_list: list of ConnectorFile
:param jar_filename: filename of the created JAR
:type jar_filename: str
:return: Boolean
"""
# find manifest in connector file list
manifest_file = None
for file in file_list:
if file.file_type == MANIFEST_FILE_TYPE and file.file_name == MANIFEST_FILE_NAME:
manifest_file = file
break
if not manifest_file:
logger.info("Can not find manifest.xml in input directory while packaging")
return False
# make a copy of manifest file
shutil.copyfile(input_dir / manifest_file.file_name, input_dir / MANIFEST_FILE_COPY_NAME)
# stamp the original manifest file
manifest = ET.parse(input_dir / manifest_file.file_name)
plugin_elem = manifest.getroot()
if plugin_elem.tag != MANIFEST_ROOT_ELEM:
logger.info("Manifest's root element has been modified after xml validation")
return False
plugin_elem.set(MIN_TABLEAU_VERSION_ATTR, __min_version_tableau__)
manifest.write(input_dir / manifest_file.file_name, encoding="utf-8", xml_declaration=True)
# update the connector manifest inside taco
args = ["jar", "uf", jar_filename, manifest_file.file_name]
p = subprocess.Popen(args, cwd=os.path.abspath(input_dir))
return_code = p.wait()
# Recover manifest file from its copy
os.remove(input_dir / MANIFEST_FILE_NAME)
os.rename(input_dir / MANIFEST_FILE_COPY_NAME, input_dir / MANIFEST_FILE_NAME)
# Check Subprocess result
if return_code != 0:
logger.info("Unable to stamp minimum support version while packaging")
return False
return True
def jdk_create_jar(source_dir: Path, files: List[ConnectorFile], jar_filename: str, dest_dir: Path) -> bool:
"""
Package JAR file from given files using JAVA JDK
:param source_dir: source dir of files to be packaged
:type source_dir: str
:param files: files need to be packaged
:type files: list of ConnectorFile
:param jar_filename: filename of the created JAR
:type jar_filename: str
:param dest_dir: destination dir to create jar file
:type dest_dir: str
:return: Boolean
"""
if not check_jdk_environ_variable(JAR_EXECUTABLE_NAME):
return False
abs_source_path = source_dir.resolve()
logging.debug("Start packaging " + jar_filename + " from " + str(abs_source_path) + " using JDK")
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logging.debug("Creating destination directory " + str(dest_dir))
args = ["jar", "cf", jar_filename]
for file in files:
args.append(file.file_name)
p = subprocess.Popen(args, cwd=abs_source_path)
p.wait()
if not stamp_min_support_version(source_dir, files, jar_filename):
return False
shutil.move(abs_source_path / jar_filename, dest_dir / jar_filename)
logging.info(jar_filename + " was created in " + str(os.path.abspath(dest_dir)))
return True
| import os
import logging
import subprocess
import shutil
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import List
from .connector_file import ConnectorFile
from .helper import check_jdk_environ_variable
from .version import __min_version_tableau__
JAR_EXECUTABLE_NAME = "jar"
if os.name == 'nt':
JAR_EXECUTABLE_NAME += ".exe"
logger = logging.getLogger('packager_logger')
MANIFEST_FILE_TYPE = "manifest"
MANIFEST_FILE_NAME = MANIFEST_FILE_TYPE + ".xml"
MANIFEST_FILE_COPY_NAME = MANIFEST_FILE_TYPE + "_copy.xml"
MANIFEST_ROOT_ELEM = "connector-plugin"
MIN_TABLEAU_VERSION_ATTR = "min-version-tableau"
def stamp_min_support_version(input_dir: Path, file_list: List[ConnectorFile], jar_filename: str) -> bool:
"""
Stamp of minimum support version to the connector manifest in packaged jar file
:param input_dir: source dir of files to be packaged
:type input_dir: Path
:param file_list: files need to be packaged
:type file_list: list of ConnectorFile
:param jar_filename: filename of the created JAR
:type jar_filename: str
:return: Boolean
"""
# find manifest in connector file list
manifest_file = None
for file in file_list:
if file.file_type == MANIFEST_FILE_TYPE and file.file_name == MANIFEST_FILE_NAME:
manifest_file = file
break
if not manifest_file:
logger.info("Can not find manifest.xml in input directory while packaging")
return False
# make a copy of manifest file
shutil.copyfile(input_dir / manifest_file.file_name, input_dir / MANIFEST_FILE_COPY_NAME)
# stamp the original manifest file
manifest = ET.parse(input_dir / manifest_file.file_name)
plugin_elem = manifest.getroot()
if plugin_elem.tag != MANIFEST_ROOT_ELEM:
logger.info("Manifest's root element has been modified after xml validation")
return False
plugin_elem.set(MIN_TABLEAU_VERSION_ATTR, __min_version_tableau__)
manifest.write(input_dir / manifest_file.file_name, encoding="utf-8", xml_declaration=True)
# update the connector manifest inside taco
args = ["jar", "uf", jar_filename, manifest_file.file_name]
p = subprocess.Popen(args, cwd=os.path.abspath(input_dir))
return_code = p.wait()
# Recover manifest file from its copy
os.remove(input_dir / MANIFEST_FILE_NAME)
os.rename(input_dir / MANIFEST_FILE_COPY_NAME, input_dir / MANIFEST_FILE_NAME)
# Check Subprocess result
if return_code != 0:
logger.info("Unable to stamp minimum support version while packaging")
return False
return True
def jdk_create_jar(source_dir: Path, files: List[ConnectorFile], jar_filename: str, dest_dir: Path) -> bool:
"""
Package JAR file from given files using JAVA JDK
:param source_dir: source dir of files to be packaged
:type source_dir: str
:param files: files need to be packaged
:type files: list of ConnectorFile
:param jar_filename: filename of the created JAR
:type jar_filename: str
:param dest_dir: destination dir to create jar file
:type dest_dir: str
:return: Boolean
"""
if not check_jdk_environ_variable(JAR_EXECUTABLE_NAME):
return False
abs_source_path = source_dir.resolve()
logging.debug("Start packaging " + jar_filename + " from " + str(abs_source_path) + " using JDK")
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
logging.debug("Creating destination directory " + str(dest_dir))
args = ["jar", "cf", jar_filename]
for file in files:
args.append(file.file_name)
p = subprocess.Popen(args, cwd=abs_source_path)
p.wait()
if not stamp_min_support_version(source_dir, files, jar_filename):
return False
shutil.move(abs_source_path / jar_filename, dest_dir / jar_filename)
logging.info(jar_filename + " was created in " + str(os.path.abspath(dest_dir)))
return True
| en | 0.703752 | Stamp of minimum support version to the connector manifest in packaged jar file :param input_dir: source dir of files to be packaged :type input_dir: Path :param file_list: files need to be packaged :type file_list: list of ConnectorFile :param jar_filename: filename of the created JAR :type jar_filename: str :return: Boolean # find manifest in connector file list # make a copy of manifest file # stamp the original manifest file # update the connector manifest inside taco # Recover manifest file from its copy # Check Subprocess result Package JAR file from given files using JAVA JDK :param source_dir: source dir of files to be packaged :type source_dir: str :param files: files need to be packaged :type files: list of ConnectorFile :param jar_filename: filename of the created JAR :type jar_filename: str :param dest_dir: destination dir to create jar file :type dest_dir: str :return: Boolean | 2.372282 | 2 |
aws_quota/check/rds.py | yanbinren/aws-quota-checker | 0 | 6632049 | from .quota_check import QuotaCheck, QuotaScope
class RDSDBInstanceCountCheck(QuotaCheck):
key = "rds_instances"
description = "RDS instances per region"
service_code = "rds"
scope = QuotaScope.REGION
quota_code = "L-7B6409FD"
@property
def current(self) -> int:
return self.count_paginated_results("rds", "describe_db_instances", "DBInstances")
class RDSDBParameterGroupsCountCheck(QuotaCheck):
key = "rds_parameter_groups"
description = "RDS parameter groups per region"
service_code = "rds"
scope = QuotaScope.REGION
quota_code = "L-DE55804A"
@property
def current(self) -> int:
return self.count_paginated_results("rds", "describe_db_parameter_groups", "DBParameterGroups")
class RDSDBClusterParameterGroupCountCheck(QuotaCheck):
key = "rds_cluster_parameter_groups"
description = "RDS cluster parameter groups per region"
service_code = "rds"
scope = QuotaScope.REGION
quota_code = "L-E4C808A8"
@property
def current(self) -> int:
return self.count_paginated_results("rds", "describe_db_cluster_parameter_groups", "DBClusterParameterGroups")
class RDSEventSubscriptions(QuotaCheck):
key = "rds_event_subscriptions"
description = "RDS event subscriptions per region"
service_code = "rds"
scope = QuotaScope.REGION
quota_code = "L-A59F4C87"
@property
def current(self) -> int:
return self.count_paginated_results("rds", "describe_event_subscriptions", "EventSubscriptionsList")
| from .quota_check import QuotaCheck, QuotaScope
class RDSDBInstanceCountCheck(QuotaCheck):
key = "rds_instances"
description = "RDS instances per region"
service_code = "rds"
scope = QuotaScope.REGION
quota_code = "L-7B6409FD"
@property
def current(self) -> int:
return self.count_paginated_results("rds", "describe_db_instances", "DBInstances")
class RDSDBParameterGroupsCountCheck(QuotaCheck):
key = "rds_parameter_groups"
description = "RDS parameter groups per region"
service_code = "rds"
scope = QuotaScope.REGION
quota_code = "L-DE55804A"
@property
def current(self) -> int:
return self.count_paginated_results("rds", "describe_db_parameter_groups", "DBParameterGroups")
class RDSDBClusterParameterGroupCountCheck(QuotaCheck):
key = "rds_cluster_parameter_groups"
description = "RDS cluster parameter groups per region"
service_code = "rds"
scope = QuotaScope.REGION
quota_code = "L-E4C808A8"
@property
def current(self) -> int:
return self.count_paginated_results("rds", "describe_db_cluster_parameter_groups", "DBClusterParameterGroups")
class RDSEventSubscriptions(QuotaCheck):
key = "rds_event_subscriptions"
description = "RDS event subscriptions per region"
service_code = "rds"
scope = QuotaScope.REGION
quota_code = "L-A59F4C87"
@property
def current(self) -> int:
return self.count_paginated_results("rds", "describe_event_subscriptions", "EventSubscriptionsList")
| none | 1 | 2.218845 | 2 |
|
label_studio/io_storages/functions.py | pachyderm/label-studio | 0 | 6632050 | from .s3.api import S3ImportStorageListAPI, S3ExportStorageListAPI
from .gcs.api import GCSImportStorageListAPI, GCSExportStorageListAPI
from .azure_blob.api import AzureBlobImportStorageListAPI, AzureBlobExportStorageListAPI
from .redis.api import RedisImportStorageListAPI, RedisExportStorageListAPI
from .pachyderm.api import PachydermImportStorageListAPI, PachydermExportStorageListAPI
def get_storage_list():
return [
{'name': 's3', 'title': 'AWS S3', 'import_list_api': S3ImportStorageListAPI, 'export_list_api': S3ExportStorageListAPI},
{'name': 'gcs', 'title': 'Google Cloud Storage', 'import_list_api': GCSImportStorageListAPI, 'export_list_api': GCSExportStorageListAPI},
{'name': 'azure', 'title': 'Microsoft Azure', 'import_list_api': AzureBlobImportStorageListAPI, 'export_list_api': AzureBlobExportStorageListAPI},
{'name': 'redis', 'title': 'Redis', 'import_list_api': RedisImportStorageListAPI, 'export_list_api': RedisExportStorageListAPI},
{'name': 'pachyderm', 'title': 'Pachyderm', 'import_list_api': PachydermImportStorageListAPI, 'export_list_api': PachydermExportStorageListAPI},
] | from .s3.api import S3ImportStorageListAPI, S3ExportStorageListAPI
from .gcs.api import GCSImportStorageListAPI, GCSExportStorageListAPI
from .azure_blob.api import AzureBlobImportStorageListAPI, AzureBlobExportStorageListAPI
from .redis.api import RedisImportStorageListAPI, RedisExportStorageListAPI
from .pachyderm.api import PachydermImportStorageListAPI, PachydermExportStorageListAPI
def get_storage_list():
return [
{'name': 's3', 'title': 'AWS S3', 'import_list_api': S3ImportStorageListAPI, 'export_list_api': S3ExportStorageListAPI},
{'name': 'gcs', 'title': 'Google Cloud Storage', 'import_list_api': GCSImportStorageListAPI, 'export_list_api': GCSExportStorageListAPI},
{'name': 'azure', 'title': 'Microsoft Azure', 'import_list_api': AzureBlobImportStorageListAPI, 'export_list_api': AzureBlobExportStorageListAPI},
{'name': 'redis', 'title': 'Redis', 'import_list_api': RedisImportStorageListAPI, 'export_list_api': RedisExportStorageListAPI},
{'name': 'pachyderm', 'title': 'Pachyderm', 'import_list_api': PachydermImportStorageListAPI, 'export_list_api': PachydermExportStorageListAPI},
] | none | 1 | 1.64356 | 2 |