| column | dtype | lengths / values |
|---|---|---|
| blob_id | string | lengths 40 – 40 |
| directory_id | string | lengths 40 – 40 |
| path | string | lengths 3 – 616 |
| content_id | string | lengths 40 – 40 |
| detected_licenses | list | lengths 0 – 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 – 115 |
| snapshot_id | string | lengths 40 – 40 |
| revision_id | string | lengths 40 – 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 – 10.2M |
| authors | list | lengths 1 – 1 |
| author_id | string | lengths 1 – 132 |
blob_id: 4f08dc744434feff1d32cee783de86333bb13fb9 | directory_id: 76787cd4117d71377bd27d251b6d30b41922ff67 | content_id: 34f80377ee2c79759a4d9a4a17217fb723874f4e
path: /tests/integration/response/schema/test_countries.py
detected_licenses: ["MIT"] | license_type: permissive
repo_name: jaebradley/draftkings_client | branch_name: refs/heads/v3
snapshot_id: 50ba0f25e38b78f75d92a57660bfb110e3a27e69 | revision_id: 2184e2e3cf66bfe9e4cc6f6d577c80602ab7121a
visit_date: 2022-12-09T14:35:50.263181 | revision_date: 2022-01-19T06:36:24 | committer_date: 2022-01-19T06:36:24
github_id: 73,451,976 | star_events_count: 138 | fork_events_count: 47
gha_license_id: MIT | gha_event_created_at: 2022-12-08T01:23:13 | gha_created_at: 2016-11-11T06:29:44 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,712 | extension: py
content:
import os
from unittest import TestCase

from draft_kings.response.objects.countries import Country, Countries
from draft_kings.response.schema.countries import CountriesSchema
from tests.config import ROOT_DIRECTORY


class TestCountries(TestCase):
    def setUp(self) -> None:
        with open(os.path.join(ROOT_DIRECTORY, 'tests/files/countries.json'), encoding="utf-8") as data_file:
            self.schema = CountriesSchema()
            self.data = self.schema.loads(data_file.read())

    def test_deserialization(self) -> None:
        self.assertIsNotNone(self.data)

    def test_countries_data(self) -> None:
        self.assertEqual(
            Countries(countries=[
                Country(country_id=1, country_code="US", name="United States", is_licensed=True),
                Country(country_id=14, country_code="AU", name="Australia", is_licensed=True),
                Country(country_id=15, country_code="AT", name="Austria", is_licensed=True),
                Country(country_id=2, country_code="CA", name="Canada", is_licensed=True),
                Country(country_id=4, country_code="DE", name="Germany", is_licensed=True),
                Country(country_id=89, country_code="IE", name="Ireland", is_licensed=True),
                Country(country_id=117, country_code="MT", name="Malta", is_licensed=True),
                Country(country_id=132, country_code="NL", name="Netherlands", is_licensed=True),
                Country(country_id=3, country_code="GB", name="United Kingdom", is_licensed=True)
            ]),
            self.data
        )
authors: ["[email protected]"] | author_id: ⌀

---

blob_id: ba4c4198b6f4565bd1b715c26f548292f2287c5f | directory_id: cda43bf6a84f7e55fab26aa70cda934683a51fe5 | content_id: b211dab5f7e76b0f94b3aaae0281197026274108
path: /residualflows/train_toy_dioDionelis.py
detected_licenses: [] | license_type: no_license
repo_name: nikolaosdionelis/NeuralNetworksNNs | branch_name: refs/heads/master
snapshot_id: abb55622882e31c8d130a8986868b3d19ede186f | revision_id: 8a217490ad5bb3f7fccf4002c6b43a06c1e562fc
visit_date: 2022-11-13T00:50:23.578197 | revision_date: 2020-07-12T18:52:20 | committer_date: 2020-07-12T18:52:20
github_id: 279,042,013 | star_events_count: 5 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 33,216 | extension: py
content:
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

import time
import argparse
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import math
import torch
import numpy as np

import lib.optimizers as optim
import lib.layers.base as base_layers
import lib.utils as utils
import lib.layers as layers
import lib.toy_data as toy_data
from lib.visualize_flow import visualize_transform

ACTIVATION_FNS = {
    'relu': torch.nn.ReLU,
    'tanh': torch.nn.Tanh,
    'elu': torch.nn.ELU,
    'selu': torch.nn.SELU,
    'fullsort': base_layers.FullSort,
    'maxmin': base_layers.MaxMin,
    'swish': base_layers.Swish,
    'lcube': base_layers.LipschitzCube,
}

parser = argparse.ArgumentParser()
# Use the 8-Gaussians toy dataset.
parser.add_argument(
    '--data', choices=['swissroll', '8gaussians', 'pinwheel', 'circles', 'moons', '2spirals', 'checkerboard', 'rings'],
    type=str, default='8gaussians')
parser.add_argument('--arch', choices=['iresnet', 'realnvp'], default='iresnet')
parser.add_argument('--coeff', type=float, default=0.9)
parser.add_argument('--vnorms', type=str, default='222222')
parser.add_argument('--n-lipschitz-iters', type=int, default=5)
parser.add_argument('--atol', type=float, default=None)
parser.add_argument('--rtol', type=float, default=None)
parser.add_argument('--learn-p', type=eval, choices=[True, False], default=False)
parser.add_argument('--mixed', type=eval, choices=[True, False], default=True)
parser.add_argument('--act', type=str, choices=ACTIVATION_FNS.keys(), default='swish')
parser.add_argument('--dims', type=str, default='32-32')
parser.add_argument('--brute-force', type=eval, choices=[True, False], default=False)
parser.add_argument('--nblocks', type=int, default=50)
parser.add_argument('--actnorm', type=eval, choices=[True, False], default=False)
parser.add_argument('--batchnorm', type=eval, choices=[True, False], default=False)
parser.add_argument('--exact-trace', type=eval, choices=[True, False], default=False)
parser.add_argument('--n-power-series', type=int, default=None)
parser.add_argument('--n-samples', type=int, default=1)
parser.add_argument('--n-dist', choices=['geometric', 'poisson'], default='geometric')
parser.add_argument('--niters', type=int, default=7000)
parser.add_argument('--niters2', type=int, default=100)
parser.add_argument('--batch_size', type=int, default=500)
parser.add_argument('--lr', type=float, default=1e-1)
parser.add_argument('--test_batch_size', type=int, default=500)
parser.add_argument('--weight-decay', type=float, default=1e-5)
parser.add_argument('--annealing-iters', type=int, default=0)
parser.add_argument('--save', type=str, default='experiments/iresnet_toy')
parser.add_argument('--viz_freq', type=int, default=1000)
parser.add_argument('--val_freq', type=int, default=1000)
parser.add_argument('--log_freq', type=int, default=1000)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()

# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__))
logger.info(args)

device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
print('')
print(device)
print(device.type)
print('')

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if device.type == 'cuda':
    torch.cuda.manual_seed(args.seed)


def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


def standard_normal_sample(size):
    return torch.randn(size)


def standard_normal_logprob(z):
    logZ = -0.5 * math.log(2 * math.pi)
    return logZ - z.pow(2) / 2


def compute_loss(args, model, batch_size=None, beta=1.):
    if batch_size is None:
        batch_size = args.batch_size

    # load data
    x = toy_data.inf_train_gen(args.data, batch_size=batch_size)
    x = torch.from_numpy(x).type(torch.float32).to(device)
    zero = torch.zeros(x.shape[0], 1).to(x)

    # transform to z
    z, delta_logp = model(x, zero)

    # compute log p(z), then log p(x) via the change of variables
    logpz = standard_normal_logprob(z).sum(1, keepdim=True)
    logpx = logpz - beta * delta_logp
    loss = -torch.mean(logpx)
    return loss, torch.mean(logpz), torch.mean(-delta_logp)


# x is a Tensor of shape batch_size x 2, distributed like args.data
def compute_loss2(x, args, model, batch_size=None, beta=1.):
    if batch_size is None:
        batch_size = args.batch_size
    zero = torch.zeros(x.shape[0], 1).to(x)

    # transform to z
    z, delta_logp = model(x, zero)

    # compute log p(z) and return the mean log-likelihood of x
    logpz = standard_normal_logprob(z).sum(1, keepdim=True)
    logpx = logpz - beta * delta_logp
    return torch.mean(logpx)


import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim  # note: from here on, `optim` is torch.optim, not lib.optimizers
from torch.autograd import Variable


# The GAN generator model
class Generator(nn.Module):
    def __init__(self, nhidden):
        super(Generator, self).__init__()
        self.lin1 = nn.Linear(2, nhidden)
        self.lin2 = nn.Linear(nhidden, 2)

    def forward(self, z):
        h = F.relu(self.lin1(z))
        x = self.lin2(h)
        return x


def loss_fn(first_term_loss, second_term_loss, third_term_loss):
    return first_term_loss


def loss_fn2(genFGen2, args, model):
    genFGen3 = torch.randn((args.batch_size, 2)).to(device)

    first_term_loss = compute_loss2(genFGen2, args, model, beta=beta)
    print('')
    print(first_term_loss)

    # First term: mean log-likelihood of the generated samples under a
    # Gaussian fitted to the target mode.
    mu = torch.from_numpy(np.array([2.805741, -0.00889241], dtype="float32")).to(device)
    S = torch.from_numpy(np.array([[pow(0.3442525, 2), 0.0], [0.0, pow(0.35358343, 2)]], dtype="float32")).to(device)

    storeAll = torch.from_numpy(np.array(0.0, dtype="float32")).to(device)
    for loopIndex_i in range(genFGen2.size()[0]):
        toUse_storeAll = torch.distributions.MultivariateNormal(loc=mu, covariance_matrix=S)
        storeAll += toUse_storeAll.log_prob(genFGen2[loopIndex_i:1 + loopIndex_i, :].squeeze(0))
    storeAll /= genFGen2.size()[0]
    print(storeAll)
    first_term_loss = storeAll

    xData = toy_data.inf_train_gen(args.data, batch_size=args.batch_size)
    xData = torch.from_numpy(xData).type(torch.float32).to(device)

    # Second term: minimum pairwise distance between generated and real samples.
    var1 = []
    for i in genFGen2:
        for j in xData:
            new_stuff = torch.dist(i, j, 2)  # this is a tensor
            var1.append(new_stuff.unsqueeze(0))
    var1_tensor = torch.cat(var1)
    second_term_loss = torch.min(var1_tensor) / args.batch_size
    second_term_loss *= 10000.0

    print('')
    print(first_term_loss)
    print(second_term_loss)

    # Third term: mode-collapse penalty, the ratio of latent-space distances
    # to generated-sample distances over all pairs.
    third_term_loss = torch.from_numpy(np.array(0.0, dtype='float32')).to(device)
    for i in range(args.batch_size):
        for j in range(args.batch_size):
            if i != j:
                third_term_loss += ((torch.dist(genFGen3[i, :], genFGen3[j, :], 2)) / (torch.dist(genFGen2[i, :], genFGen2[j, :], 2)))
    third_term_loss /= (args.batch_size - 1)
    third_term_loss /= args.batch_size
    third_term_loss *= 100.0

    print(third_term_loss)
    print('')

    return first_term_loss + second_term_loss + third_term_loss


def parse_vnorms():
    ps = []
    for p in args.vnorms:
        if p == 'f':
            ps.append(float('inf'))
        else:
            ps.append(float(p))
    return ps[:-1], ps[1:]


def compute_p_grads(model):
    scales = 0.
    nlayers = 0
    for m in model.modules():
        if isinstance(m, base_layers.InducedNormConv2d) or isinstance(m, base_layers.InducedNormLinear):
            scales = scales + m.compute_one_iter()
            nlayers += 1
    scales.mul(1 / nlayers).mul(0.01).backward()
    for m in model.modules():
        if isinstance(m, base_layers.InducedNormConv2d) or isinstance(m, base_layers.InducedNormLinear):
            if m.domain.grad is not None and torch.isnan(m.domain.grad):
                m.domain.grad = None


def build_nnet(dims, activation_fn=torch.nn.ReLU):
    nnet = []
    domains, codomains = parse_vnorms()
    if args.learn_p:
        if args.mixed:
            domains = [torch.nn.Parameter(torch.tensor(0.)) for _ in domains]
        else:
            domains = [torch.nn.Parameter(torch.tensor(0.))] * len(domains)
        codomains = domains[1:] + [domains[0]]
    for i, (in_dim, out_dim, domain, codomain) in enumerate(zip(dims[:-1], dims[1:], domains, codomains)):
        nnet.append(activation_fn())
        nnet.append(
            base_layers.get_linear(
                in_dim,
                out_dim,
                coeff=args.coeff,
                n_iterations=args.n_lipschitz_iters,
                atol=args.atol,
                rtol=args.rtol,
                domain=domain,
                codomain=codomain,
                zero_init=(out_dim == 2),
            )
        )
    return torch.nn.Sequential(*nnet)


def update_lipschitz(model, n_iterations):
    for m in model.modules():
        if isinstance(m, base_layers.SpectralNormConv2d) or isinstance(m, base_layers.SpectralNormLinear):
            m.compute_weight(update=True, n_iterations=n_iterations)
        if isinstance(m, base_layers.InducedNormConv2d) or isinstance(m, base_layers.InducedNormLinear):
            m.compute_weight(update=True, n_iterations=n_iterations)


def get_ords(model):
    ords = []
    for m in model.modules():
        if isinstance(m, base_layers.InducedNormConv2d) or isinstance(m, base_layers.InducedNormLinear):
            domain, codomain = m.compute_domain_codomain()
            if torch.is_tensor(domain):
                domain = domain.item()
            if torch.is_tensor(codomain):
                codomain = codomain.item()
            ords.append(domain)
            ords.append(codomain)
    return ords


def pretty_repr(a):
    return '[[' + ','.join(list(map(lambda i: '{}'.format(i), a))) + ']]'


if __name__ == '__main__':
    activation_fn = ACTIVATION_FNS[args.act]

    if args.arch == 'iresnet':
        dims = [2] + list(map(int, args.dims.split('-'))) + [2]
        blocks = []
        if args.actnorm: blocks.append(layers.ActNorm1d(2))
        for _ in range(args.nblocks):
            blocks.append(
                layers.iResBlock(
                    build_nnet(dims, activation_fn),
                    n_dist=args.n_dist,
                    n_power_series=args.n_power_series,
                    exact_trace=args.exact_trace,
                    brute_force=args.brute_force,
                    n_samples=args.n_samples,
                    neumann_grad=False,
                    grad_in_forward=False,
                )
            )
            if args.actnorm: blocks.append(layers.ActNorm1d(2))
            if args.batchnorm: blocks.append(layers.MovingBatchNorm1d(2))
        model = layers.SequentialFlow(blocks).to(device)
    elif args.arch == 'realnvp':
        blocks = []
        for _ in range(args.nblocks):
            blocks.append(layers.CouplingLayer(2, swap=False))
            blocks.append(layers.CouplingLayer(2, swap=True))
            if args.actnorm: blocks.append(layers.ActNorm1d(2))
            if args.batchnorm: blocks.append(layers.MovingBatchNorm1d(2))
        model = layers.SequentialFlow(blocks).to(device)

    logger.info(model)
    logger.info("Number of trainable parameters: {}".format(count_parameters(model)))

    genGen = Generator(16)

    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    optimizerGen = optim.Adam(genGen.parameters(), lr=args.lr)

    time_meter = utils.RunningAverageMeter(0.93)
    loss_meter = utils.RunningAverageMeter(0.93)
    logpz_meter = utils.RunningAverageMeter(0.93)
    delta_logp_meter = utils.RunningAverageMeter(0.93)

    end = time.time()
    best_loss = float('inf')
    model.train()

    # Stage 1: train the flow (reduced here to a single iteration; the full
    # run used range(1, args.niters + 1)).
    for itr in range(1, 2):
        optimizer.zero_grad()

        beta = min(1, itr / args.annealing_iters) if args.annealing_iters > 0 else 1.
        loss, logpz, delta_logp = compute_loss(args, model, beta=beta)

        loss_meter.update(loss.item())
        logpz_meter.update(logpz.item())
        delta_logp_meter.update(delta_logp.item())

        loss.backward()

        if args.learn_p and itr > args.annealing_iters:
            compute_p_grads(model)

        optimizer.step()
        update_lipschitz(model, args.n_lipschitz_iters)

        time_meter.update(time.time() - end)

        logger.info(
            'Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f})'
            ' | Logp(z) {:.6f}({:.6f}) | DeltaLogp {:.6f}({:.6f})'.format(
                itr, time_meter.val, time_meter.avg, loss_meter.val, loss_meter.avg, logpz_meter.val, logpz_meter.avg,
                delta_logp_meter.val, delta_logp_meter.avg
            )
        )

        if itr % args.val_freq == 0 or itr == args.niters:
            update_lipschitz(model, 200)
            with torch.no_grad():
                model.eval()
                test_loss, test_logpz, test_delta_logp = compute_loss(args, model, batch_size=args.test_batch_size)
                log_message = (
                    '[TEST] Iter {:04d} | Test Loss {:.6f} '
                    '| Test Logp(z) {:.6f} | Test DeltaLogp {:.6f}'.format(
                        itr, test_loss.item(), test_logpz.item(), test_delta_logp.item()
                    )
                )
                logger.info(log_message)
                logger.info('Ords: {}'.format(pretty_repr(get_ords(model))))

                if test_loss.item() < best_loss:
                    best_loss = test_loss.item()
                    utils.makedirs(args.save)
                    torch.save({
                        'args': args,
                        'state_dict': model.state_dict(),
                    }, os.path.join(args.save, 'checkpt.pth'))
                model.train()

        if itr % args.viz_freq == 0:
            with torch.no_grad():
                model.eval()
                p_samples = toy_data.inf_train_gen(args.data, batch_size=20000)
                sample_fn, density_fn = model.inverse, model.forward

                plt.figure(figsize=(9, 3))
                visualize_transform(p_samples, torch.randn, standard_normal_logprob, transform=sample_fn,
                                    inverse_transform=density_fn, samples=True, npts=400, device=device)
                fig_filename = os.path.join(args.save, 'figs', '{:04d}.jpg'.format(itr))
                print('')
                print(fig_filename)
                print('')
                utils.makedirs(os.path.dirname(fig_filename))
                plt.savefig(fig_filename)
                plt.show()
                plt.pause(0.1)
                plt.close()
                model.train()

        end = time.time()

    logger.info('Training 1 has finished.')

    # Load the flow checkpoint produced by a previous full training run.
    checkpoint = torch.load(os.path.join(os.path.join(args.save, 'models'), 'checkpt-%04d.pth' % args.niters))
    model.load_state_dict(checkpoint['state_dict'])

    time2_meter = utils.RunningAverageMeter(0.93)
    loss2_meter = utils.RunningAverageMeter(0.93)

    end = time.time()
    best_loss = float('inf')

    # Stage 2: train the generator against the (frozen) flow.
    for itr in range(1, args.niters2 + 1):
        optimizerGen.zero_grad()

        genFGen2 = genGen.forward(torch.randn((args.batch_size, 2)))
        genFGen2 = genFGen2.type(torch.float32).to(device)
        lossGen = loss_fn2(genFGen2, args, model)

        if itr == 1:
            xData = toy_data.inf_train_gen(args.data, batch_size=args.batch_size)
            xData = torch.from_numpy(xData).type(torch.float32).to(device)

            plt.figure()
            plt.plot(xData[:, 0].cpu().squeeze().numpy(), xData[:, 1].cpu().squeeze().numpy(), 'ob')
            plt.grid()
            plt.show()
            plt.pause(0.1)

            plt.figure()
            plt.plot(genFGen2[:, 0].cpu().detach().numpy(), genFGen2[:, 1].cpu().detach().numpy(), 'ob')
            plt.plot(xData[:, 0].cpu().squeeze().numpy(), xData[:, 1].cpu().squeeze().numpy(), '+r')
            plt.grid()
            plt.show()
            plt.pause(0.1)

        loss2_meter.update(lossGen.item())
        lossGen.backward()
        optimizerGen.step()

        time2_meter.update(time.time() - end)
        logger.info('Iter {:04d} | Time {:.4f}({:.4f}) | Loss {:.6f}({:.6f})'.format(
            itr, time2_meter.val, time2_meter.avg, loss2_meter.val, loss2_meter.avg))

    logger.info('Training 2 has finished.')
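For reference, a minimal numeric check (not part of the file) of the base-density term used by compute_loss and compute_loss2 above: the flow is trained by maximum likelihood via the change-of-variables identity log p(x) = log p(z) - delta_logp, with z = f(x) and p(z) a standard normal.

import math
import torch

def standard_normal_logprob(z):  # same helper as in the script above
    return -0.5 * math.log(2 * math.pi) - z.pow(2) / 2

z = torch.zeros(1, 2)
print(standard_normal_logprob(z).sum(1, keepdim=True))  # tensor([[-1.8379]]) == -log(2*pi)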
authors: ["[email protected]"] | author_id: ⌀

---

blob_id: 6a7d1bfe97a9a6af626cfbca8a9591a1abd35904 | directory_id: c88cd09f52ba3b0549ae223ba8a96dff256b71bc | content_id: ccc4a525cd89bb2248849140873b90175b88c24c
path: /Codechef/ANUWTA.py
detected_licenses: [] | license_type: no_license
repo_name: Ishita-Tiwari/competitive-programming | branch_name: refs/heads/master
snapshot_id: cadd3007fffc89e1b6905bbdb3fbdd0250e29cdc | revision_id: c54659081de2ca87f00a7a399cc15d6aa919e772
visit_date: 2021-08-17T11:47:12.890012 | revision_date: 2021-07-22T13:56:02 | committer_date: 2021-07-22T13:56:02
github_id: 188,955,292 | star_events_count: 3 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 97 | extension: py
content:
t = int(input())
for T in range(t):
    n = int(input())
    print(((n * (n + 1)) // 2) + n)
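The closed form printed above is n(n+1)/2 + n, the n-th triangular number plus n; a quick check (not in the file):

for n in (1, 2, 3):
    assert ((n * (n + 1)) // 2) + n == sum(range(1, n + 1)) + n  # e.g. n=3 -> 9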
authors: ["[email protected]"] | author_id: ⌀

---

blob_id: 92f0d23f55c9015d1f365d9e204979a391d1b0e4 | directory_id: 5a8214b3a452c574e6c883bf5d90ba58ba87c461 | content_id: db4b09988b60fd7639cb9e4e1bf241fac3a2f0ad
path: /leetcode/48.rotate-image.py
detected_licenses: [] | license_type: no_license
repo_name: phlalx/algorithms | branch_name: refs/heads/master
snapshot_id: 69a3c8519687816e3c6333ec12b40659d3e3167f | revision_id: f4da5a5dbda640b9bcbe14cb60a72c422b5d6240
visit_date: 2023-02-03T10:30:30.181735 | revision_date: 2020-12-26T09:47:38 | committer_date: 2020-12-26T09:47:38
github_id: 129,254,618 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,563 | extension: py
content:
# TAGS implem
# make a drawing
#
# @lc app=leetcode id=48 lang=python3
#
# [48] Rotate Image
#
# https://leetcode.com/problems/rotate-image/description/
#
# algorithms
# Medium (50.40%)
# Likes: 2053
# Dislikes: 179
# Total Accepted: 307.7K
# Total Submissions: 596.5K
# Testcase Example: '[[1,2,3],[4,5,6],[7,8,9]]'
#
# You are given an n x n 2D matrix representing an image.
#
# Rotate the image by 90 degrees (clockwise).
#
# Note:
#
# You have to rotate the image in-place, which means you have to modify the
# input 2D matrix directly. DO NOT allocate another 2D matrix and do the
# rotation.
#
# Example 1:
#
#
# Given input matrix =
# [
# [1,2,3],
# [4,5,6],
# [7,8,9]
# ],
#
# rotate the input matrix in-place such that it becomes:
# [
# [7,4,1],
# [8,5,2],
# [9,6,3]
# ]
#
#
# Example 2:
#
#
# Given input matrix =
# [
# [ 5, 1, 9,11],
# [ 2, 4, 8,10],
# [13, 3, 6, 7],
# [15,14,12,16]
# ],
#
# rotate the input matrix in-place such that it becomes:
# [
# [15,13, 2, 5],
# [14, 3, 4, 1],
# [12, 6, 8, 9],
# [16, 7,10,11]
# ]
#
#
#
# @lc code=start
from typing import List  # needed for the type annotation below

class Solution:
    def rotate(self, t: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        if not t or not t[0]:
            return t
        n = len(t[0])
        # Rotate ring by ring, cycling four cells at a time.
        for a in range(n // 2):
            b = n - a - 1
            for i in range(b - a):
                t[a][a + i], t[a + i][b], t[b][b - i], t[b - i][a] = \
                    t[b - i][a], t[a][a + i], t[a + i][b], t[b][b - i]
# @lc code=end
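A small sanity check (not in the file) against the 3x3 example from the problem statement:

m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
Solution().rotate(m)
assert m == [[7, 4, 1], [8, 5, 2], [9, 6, 3]]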
authors: ["[email protected]"] | author_id: ⌀

---

blob_id: e2611d1cd34427015fc98b5c6c870673d634990c | directory_id: b2cfcacbd898f758a56d095f2140681934205d89 | content_id: e33e89d0dc71607f5a2ff803da6531ffe1ed0eda
path: /GeekShop_mentor/src_lesson_7/step_1(own_admin_start)/geekshop/adminapp/views.py
detected_licenses: [] | license_type: no_license
repo_name: AndreySperansky/Django_1 | branch_name: refs/heads/master
snapshot_id: 7d3be3ea2ede8e46d932fdae146ce4a7c4e300b4 | revision_id: 0fec0a9a02b887fd8b45a5b763b7da5dc6377208
visit_date: 2022-12-15T19:56:23.611288 | revision_date: 2020-09-21T17:40:40 | committer_date: 2020-09-21T17:40:40
github_id: 284,131,625 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,625 | extension: py
content:
from django.shortcuts import render, get_object_or_404

from authapp.models import ShopUser
from mainapp.models import ProductCategory, Product


def users(request):
    title = 'админка/пользователи'  # "admin / users"
    users_list = ShopUser.objects.all().order_by('-is_active', '-is_superuser', '-is_staff', 'username')
    content = {
        'title': title,
        'objects': users_list
    }
    return render(request, 'adminapp/users.html', content)


def user_create(request):
    pass


def user_update(request, pk):
    pass


def user_delete(request, pk):
    pass


def categories(request):
    title = 'админка/категории'  # "admin / categories"
    categories_list = ProductCategory.objects.all()
    content = {
        'title': title,
        'objects': categories_list
    }
    return render(request, 'adminapp/categories.html', content)


def category_create(request):
    pass


def category_update(request, pk):
    pass


def category_delete(request, pk):
    pass


def products(request, pk):
    title = 'админка/продукт'  # "admin / product"
    category = get_object_or_404(ProductCategory, pk=pk)
    products_list = Product.objects.filter(category__pk=pk).order_by('name')
    content = {
        'title': title,
        'category': category,
        'objects': products_list,
    }
    return render(request, 'adminapp/products.html', content)


def product_create(request, pk):
    pass


def product_read(request, pk):
    pass


def product_update(request, pk):
    pass


def product_delete(request, pk):
    pass
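For context, a hypothetical urls.py that would route to these views (not in the file; the route names and prefixes are assumptions):

from django.urls import path
import adminapp.views as adminapp

app_name = 'adminapp'

urlpatterns = [
    path('users/', adminapp.users, name='users'),
    path('categories/', adminapp.categories, name='categories'),
    path('products/<int:pk>/', adminapp.products, name='products'),
]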
authors: ["[email protected]"] | author_id: ⌀

---

blob_id: ab2dd271acd9d5e775479f648fceeef73b690fb5 | directory_id: 210e1cffcd8a705c2a8a1485ed5532b9169f5d10 | content_id: c3208c1dbfa2e90b7ce3774562626a3d18b27bbe
path: /whoville/cloudbreak/models/security_group_response.py
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: mikchaos/whoville | branch_name: refs/heads/master
snapshot_id: 2a45bc6636d448733d8d2368ac88a980cf6954ea | revision_id: 6eabaea4b74ac0b632c03db8252590131c6ce63b
visit_date: 2020-04-19T08:53:04.430990 | revision_date: 2019-01-29T05:01:57 | committer_date: 2019-01-29T05:01:57
github_id: 168,092,002 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2019-01-29T05:00:06 | gha_created_at: 2019-01-29T05:00:06 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 11,025 | extension: py
content:
# coding: utf-8

"""
    Cloudbreak API

    Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>

    OpenAPI spec version: 2.7.1

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from pprint import pformat
from six import iteritems
import re


class SecurityGroupResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'description': 'str',
        'security_group_id': 'str',
        'cloud_platform': 'str',
        'name': 'str',
        'id': 'int',
        'owner': 'str',
        'account': 'str',
        'security_rules': 'list[SecurityRuleResponse]',
        'public_in_account': 'bool'
    }

    attribute_map = {
        'description': 'description',
        'security_group_id': 'securityGroupId',
        'cloud_platform': 'cloudPlatform',
        'name': 'name',
        'id': 'id',
        'owner': 'owner',
        'account': 'account',
        'security_rules': 'securityRules',
        'public_in_account': 'publicInAccount'
    }

    def __init__(self, description=None, security_group_id=None, cloud_platform=None, name=None, id=None, owner=None, account=None, security_rules=None, public_in_account=False):
        """
        SecurityGroupResponse - a model defined in Swagger
        """
        self._description = None
        self._security_group_id = None
        self._cloud_platform = None
        self._name = None
        self._id = None
        self._owner = None
        self._account = None
        self._security_rules = None
        self._public_in_account = None

        if description is not None:
            self.description = description
        if security_group_id is not None:
            self.security_group_id = security_group_id
        self.cloud_platform = cloud_platform
        if name is not None:
            self.name = name
        if id is not None:
            self.id = id
        if owner is not None:
            self.owner = owner
        if account is not None:
            self.account = account
        if security_rules is not None:
            self.security_rules = security_rules
        self.public_in_account = public_in_account

    @property
    def description(self):
        """
        Gets the description of this SecurityGroupResponse.
        description of the resource

        :return: The description of this SecurityGroupResponse.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """
        Sets the description of this SecurityGroupResponse.
        description of the resource

        :param description: The description of this SecurityGroupResponse.
        :type: str
        """
        if description is not None and len(description) > 1000:
            raise ValueError("Invalid value for `description`, length must be less than or equal to `1000`")
        if description is not None and len(description) < 0:
            raise ValueError("Invalid value for `description`, length must be greater than or equal to `0`")

        self._description = description

    @property
    def security_group_id(self):
        """
        Gets the security_group_id of this SecurityGroupResponse.
        Existing security group id

        :return: The security_group_id of this SecurityGroupResponse.
        :rtype: str
        """
        return self._security_group_id

    @security_group_id.setter
    def security_group_id(self, security_group_id):
        """
        Sets the security_group_id of this SecurityGroupResponse.
        Existing security group id

        :param security_group_id: The security_group_id of this SecurityGroupResponse.
        :type: str
        """
        self._security_group_id = security_group_id

    @property
    def cloud_platform(self):
        """
        Gets the cloud_platform of this SecurityGroupResponse.
        type of cloud provider

        :return: The cloud_platform of this SecurityGroupResponse.
        :rtype: str
        """
        return self._cloud_platform

    @cloud_platform.setter
    def cloud_platform(self, cloud_platform):
        """
        Sets the cloud_platform of this SecurityGroupResponse.
        type of cloud provider

        :param cloud_platform: The cloud_platform of this SecurityGroupResponse.
        :type: str
        """
        if cloud_platform is None:
            raise ValueError("Invalid value for `cloud_platform`, must not be `None`")

        self._cloud_platform = cloud_platform

    @property
    def name(self):
        """
        Gets the name of this SecurityGroupResponse.
        name of the resource

        :return: The name of this SecurityGroupResponse.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this SecurityGroupResponse.
        name of the resource

        :param name: The name of this SecurityGroupResponse.
        :type: str
        """
        self._name = name

    @property
    def id(self):
        """
        Gets the id of this SecurityGroupResponse.
        id of the resource

        :return: The id of this SecurityGroupResponse.
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this SecurityGroupResponse.
        id of the resource

        :param id: The id of this SecurityGroupResponse.
        :type: int
        """
        self._id = id

    @property
    def owner(self):
        """
        Gets the owner of this SecurityGroupResponse.
        id of the resource owner that is provided by OAuth provider

        :return: The owner of this SecurityGroupResponse.
        :rtype: str
        """
        return self._owner

    @owner.setter
    def owner(self, owner):
        """
        Sets the owner of this SecurityGroupResponse.
        id of the resource owner that is provided by OAuth provider

        :param owner: The owner of this SecurityGroupResponse.
        :type: str
        """
        self._owner = owner

    @property
    def account(self):
        """
        Gets the account of this SecurityGroupResponse.
        account id of the resource owner that is provided by OAuth provider

        :return: The account of this SecurityGroupResponse.
        :rtype: str
        """
        return self._account

    @account.setter
    def account(self, account):
        """
        Sets the account of this SecurityGroupResponse.
        account id of the resource owner that is provided by OAuth provider

        :param account: The account of this SecurityGroupResponse.
        :type: str
        """
        self._account = account

    @property
    def security_rules(self):
        """
        Gets the security_rules of this SecurityGroupResponse.
        list of security rules that relates to the security group

        :return: The security_rules of this SecurityGroupResponse.
        :rtype: list[SecurityRuleResponse]
        """
        return self._security_rules

    @security_rules.setter
    def security_rules(self, security_rules):
        """
        Sets the security_rules of this SecurityGroupResponse.
        list of security rules that relates to the security group

        :param security_rules: The security_rules of this SecurityGroupResponse.
        :type: list[SecurityRuleResponse]
        """
        self._security_rules = security_rules

    @property
    def public_in_account(self):
        """
        Gets the public_in_account of this SecurityGroupResponse.
        resource is visible in account

        :return: The public_in_account of this SecurityGroupResponse.
        :rtype: bool
        """
        return self._public_in_account

    @public_in_account.setter
    def public_in_account(self, public_in_account):
        """
        Sets the public_in_account of this SecurityGroupResponse.
        resource is visible in account

        :param public_in_account: The public_in_account of this SecurityGroupResponse.
        :type: bool
        """
        if public_in_account is None:
            raise ValueError("Invalid value for `public_in_account`, must not be `None`")

        self._public_in_account = public_in_account

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, SecurityGroupResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
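A brief usage sketch (not in the repository); note that cloud_platform and public_in_account are the only attributes validated against None:

group = SecurityGroupResponse(cloud_platform='AWS', name='default-sg')
print(group.to_dict())  # {'cloud_platform': 'AWS', 'name': 'default-sg', 'description': None, ...}
SecurityGroupResponse(cloud_platform=None)  # raises ValueError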
authors: ["[email protected]"] | author_id: ⌀

---

blob_id: 91af10910e07ae524e190b9ebf9a91f72007e792 | directory_id: 32150af04590afe11f5c1229faf840e2e8c2a9ab | content_id: b21ea8103f32726716f239ee33861966908b788a
path: /Assignments/AS03/Week 5 HMEQ CART.py
detected_licenses: [] | license_type: no_license
repo_name: nsbgit/IIT-S21-CS-484 | branch_name: refs/heads/main
snapshot_id: f595f67dd72e0c0f65f9cbaafe581ab41ea4cab3 | revision_id: 1850f0e571d0c72d875baedf87aae3d6943af382
visit_date: 2023-05-06T23:26:30.687201 | revision_date: 2021-05-31T23:40:22 | committer_date: 2021-05-31T23:40:22
github_id: 332,584,584 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,725 | extension: py
content:
# Load the necessary libraries
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy
import pandas


# Define a function to visualize the percent of a particular target category by a nominal predictor
def TargetPercentByNominal(
        targetVar,   # target variable
        targetCat,   # target category
        predictor,   # nominal predictor
        val4na):     # imputed value for NaN

    crossTable = pandas.crosstab(index=predictor.fillna(val4na), columns=targetVar, margins=True, dropna=True)
    crossTable['Percent'] = 100 * (crossTable[targetCat] / crossTable['All'])
    print(crossTable)

    plotTable = crossTable[crossTable.index != 'All']
    plt.bar(plotTable.index, plotTable['Percent'])
    plt.xlabel(predictor.name)
    plt.ylabel('Percent of ' + targetVar.name + ' = ' + str(targetCat))
    plt.gca().xaxis.set_major_locator(mticker.MultipleLocator(1))
    plt.grid(True, axis='y')
    plt.show()

    return crossTable


# Define a function to visualize the percent of a particular target category by an interval predictor
def TargetPercentByInterval(
        targetVar,   # target variable
        targetCat,   # target category
        predictor,   # interval predictor
        val4na):     # imputed value for NaN

    crossTable = pandas.crosstab(index=predictor.fillna(val4na), columns=targetVar, margins=True, dropna=True)
    crossTable['Percent'] = 100 * (crossTable[targetCat] / crossTable['All'])
    print(crossTable)

    plotTable = crossTable[crossTable.index != 'All']
    plt.scatter(plotTable.index, plotTable['Percent'])
    plt.xlabel(predictor.name)
    plt.ylabel('Percent of ' + targetVar.name + ' = ' + str(targetCat))
    plt.grid(True, axis='both')
    plt.show()

    return crossTable


hmeq = pandas.read_csv('hmeq.csv', delimiter=',')
nTotal = len(hmeq)

# Generate the frequency table and the bar chart for the BAD target variable
crossTable = pandas.crosstab(index=hmeq['BAD'], columns=["Count"], margins=True, dropna=False)
crossTable['Percent'] = 100 * (crossTable['Count'] / nTotal)
crossTable = crossTable.drop(columns=['All'])
print(crossTable)

plotTable = crossTable[crossTable.index != 'All']
plt.bar(plotTable.index, plotTable['Percent'])
plt.xticks([0, 1])  # the original passed [[0], [1]]; the ticks are the scalar values 0 and 1
plt.xlabel('BAD')
plt.ylabel('Percent')
plt.grid(True, axis='y')
plt.show()

# Cross-tabulate BAD by DELINQ
resultTable = TargetPercentByNominal(hmeq['BAD'], 1, hmeq['DELINQ'], val4na=-1)

# Cross-tabulate BAD by DEROG
resultTable = TargetPercentByNominal(hmeq['BAD'], 1, hmeq['DEROG'], val4na=-1)

# Cross-tabulate BAD by JOB
resultTable = TargetPercentByNominal(hmeq['BAD'], 1, hmeq['JOB'], val4na='Unknown')

# Cross-tabulate BAD by NINQ
resultTable = TargetPercentByNominal(hmeq['BAD'], 1, hmeq['NINQ'], val4na=-1)

# Cross-tabulate BAD by REASON
resultTable = TargetPercentByNominal(hmeq['BAD'], 1, hmeq['REASON'], val4na='Unknown')

# Cross-tabulate BAD by DEBTINC
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['DEBTINC'], val4na=-10)

# Cross-tabulate BAD by CLAGE
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['CLAGE'], val4na=-10)

# Cross-tabulate BAD by CLNO
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['CLNO'], val4na=-10)

# Cross-tabulate BAD by LOAN
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['LOAN'], val4na=-10)

# Cross-tabulate BAD by MORTDUE
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['MORTDUE'], val4na=-10)

# Cross-tabulate BAD by VALUE
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['VALUE'], val4na=-10)

# Cross-tabulate BAD by YOJ
resultTable = TargetPercentByInterval(hmeq['BAD'], 1, hmeq['YOJ'], val4na=-10)

# Specify the target and the predictor variables
X_name = ['DEBTINC', 'DELINQ']
Y_name = 'BAD'

trainData = hmeq[['DEBTINC', 'DELINQ', 'BAD']].dropna()
X_inputs = trainData[X_name]
Y_target = trainData[Y_name]

# How many missing values are there?
print('Number of Missing Observations:')
print(X_inputs.isnull().sum())
print(Y_target.isnull().sum())

# Load the TREE library from SKLEARN
from sklearn import tree

classTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=2, random_state=60616)
hmeq_DT = classTree.fit(X_inputs, Y_target)
print('Accuracy of Decision Tree classifier on training set: {:.6f}'.format(classTree.score(X_inputs, Y_target)))

import graphviz
dot_data = tree.export_graphviz(hmeq_DT,
                                out_file=None,
                                impurity=True, filled=True,
                                feature_names=X_name,
                                class_names=['0', '1'])

graph = graphviz.Source(dot_data)
print(graph)
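print(graph) only prints the DOT source. To write an image, the standard graphviz API offers render; a sketch (the output filename is an assumption):

graph.render('hmeq_tree', format='png', cleanup=True)  # writes hmeq_tree.png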
authors: ["[email protected]"] | author_id: ⌀

---

blob_id: 3676f283e395d049f59998b475e78308256c0321 | directory_id: db0fe327ae984b7a461c02be74c439fe7d41c220 | content_id: 47c31cbbfeb20ee14e0a507f5b9489b3cc15dd93
path: /Question_repo/libs/repo_data.py
detected_licenses: [] | license_type: no_license
repo_name: xingzhe1998/T_QUE | branch_name: refs/heads/master
snapshot_id: 4da80366bc721cc0cd222f7f7fde8331c6df85ee | revision_id: 4c0d39402659b7c8fc448165c784ab125c700b41
visit_date: 2020-06-24T16:29:22.652765 | revision_date: 2019-07-26T12:55:45 | committer_date: 2019-07-26T12:55:45
github_id: 199,016,012 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,309 | extension: py
content:
from apps.repo.models import Answers, User, Questions
from django.db.models import Count


def check_rank(data):
    return data["id__count"]


def user_answer_data(user):
    # Number of answers by this user, and the total number of questions
    answer_num = Answers.objects.filter(user=user).count()
    question_all = Questions.objects.all().__len__()
    # Total number of users
    user_sum = User.objects.all().__len__()

    # Answer statistics: count answers per user
    rank = Answers.objects.values('user').annotate(Count('id'))
    # <QuerySet [{'user': 1, 'id__count': 1}, {'user': 2, 'id__count': 1}, {'user': 3, 'id__count': 2}]>
    # Sort by answer count, descending
    rank = sorted(rank, key=check_rank, reverse=True)

    # Compute each user's rank (could be written to memcache for efficiency)
    rank_dict = {}
    cur_rank = 0
    cur_count = 0
    for index, item in enumerate(rank, start=1):
        if cur_count != item["id__count"]:
            cur_rank = index
            cur_count = item["id__count"]
        rank_dict[item["user"]] = dict(item, **{"rank": cur_rank})
    # print(rank_dict)

    kwgs = {
        "answer_num": answer_num,
        "question_all": question_all,
        "user_sum": user_sum,
        "rank": rank_dict[user.id] if answer_num else {"rank": 0},
    }
    return kwgs
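The rank loop above implements standard competition ranking: tied answer counts share a rank, and the next distinct count jumps to its 1-based position. A self-contained illustration with made-up rows:

rows = [{"user": 3, "id__count": 5}, {"user": 1, "id__count": 5}, {"user": 2, "id__count": 2}]
rank_dict, cur_rank, cur_count = {}, 0, 0
for index, item in enumerate(rows, start=1):
    if cur_count != item["id__count"]:
        cur_rank = index
        cur_count = item["id__count"]
    rank_dict[item["user"]] = dict(item, **{"rank": cur_rank})
print(rank_dict)  # users 3 and 1 share rank 1; user 2 gets rank 3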
authors: ["[email protected]"] | author_id: ⌀

---

blob_id: 8560eab3f2fc20fb2784b79adb1f3d5c9ff9d7c8 | directory_id: d1742451b25705fc128acc245524659628ab3e7d | content_id: 2c6201ceabaf2ce509db9d2239a70e8e6527bab1
path: /Data Structure & Algorithm/Segment Tree/Segment Tree Query.py
detected_licenses: [] | license_type: no_license
repo_name: Shovon588/Programming | branch_name: refs/heads/master
snapshot_id: ebab793a3c97aedddfcad5ea06e7e22f5c54a86e | revision_id: e4922c9138998358eed09a1be7598f9b060c685f
visit_date: 2022-12-23T18:29:10.141117 | revision_date: 2020-10-04T17:29:32 | committer_date: 2020-10-04T17:29:32
github_id: 256,915,133 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 663 | extension: py
content:
# Build a segment tree for range-sum queries.
def func(node, low, high):
    # node covers a[low-1 .. high-1] (bounds are 1-based)
    if low == high:
        tree[node] = a[low - 1]
        return
    left = 2 * node
    right = (2 * node) + 1
    mid = (low + high) // 2
    func(left, low, mid)
    func(right, mid + 1, high)
    tree[node] = tree[left] + tree[right]


def query(node, b, e, i, j):
    # Sum of a[i..j] (1-based), where the node covers [b, e].
    if b >= i and e <= j:
        return tree[node]  # node range fully inside the query range
    if i > e or j < b:
        return 0           # node range fully outside the query range
    left = 2 * node
    right = (2 * node) + 1
    mid = (b + e) // 2
    p1 = query(left, b, mid, i, j)
    p2 = query(right, mid + 1, e, i, j)
    return p1 + p2


n = int(input())
a = list(map(int, input().split()))
tree = [0] * (3 * n)  # note: 4 * n is the safe bound for arbitrary n
func(1, 1, n)
for i in range(int(input())):
    l, m = map(int, input().split())
    b = query(1, 1, n, l, m)
    print(b)
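A worked example (not in the file): querying the sum over positions 2..4 of [1, 2, 3, 4, 5].

# Hypothetical session:
#   stdin:  5
#           1 2 3 4 5
#           1
#           2 4
#   stdout: 9        (2 + 3 + 4)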
authors: ["[email protected]"] | author_id: ⌀

---

blob_id: af2538c02b261ca582200a1d3a1e2fe9f4d58da2 | directory_id: 8b54951abdc4a8c119b057c5231adf65fdd5a915 | content_id: fef29effd8931853c5369ee4e80d6ecf89fc05d7
path: /lock_signal.py
detected_licenses: [] | license_type: no_license
repo_name: larago/gevent | branch_name: refs/heads/master
snapshot_id: 0a645fbc97ec1f7f85c6a3a961b82739d1e64c7a | revision_id: b41360ca8ebf6fbad8463d5128bb7b4bf837a7b9
visit_date: 2021-01-11T14:58:55.860636 | revision_date: 2017-01-28T09:34:38 | committer_date: 2017-01-28T09:34:38
github_id: 80,270,162 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 516 | extension: py
content:
# encoding=utf8
from gevent import sleep
from gevent.pool import Pool
from gevent.lock import BoundedSemaphore

# Allow at most two greenlets to hold the semaphore at once.
sem = BoundedSemaphore(2)


def worker1(n):
    sem.acquire()
    print 'Worker %s acquired semaphore' % n
    sleep(0)
    sem.release()
    print 'Worker %s released semaphore' % n


def worker2(n):
    with sem:
        print 'Worker %i acquired semaphore' % n
        sleep(0)
    print 'Worker %i released semaphore' % n


pool = Pool()
pool.map(worker1, xrange(0, 2))
pool.map(worker2, xrange(3, 6))
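With BoundedSemaphore(2), at most two greenlets hold the lock at a time; one plausible interleaving (illustrative, not captured from a real run):

# Worker 0 acquired semaphore
# Worker 1 acquired semaphore
# Worker 0 released semaphore
# Worker 1 released semaphore
# Worker 3 acquired semaphore
# Worker 4 acquired semaphore
# ...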
authors: ["[email protected]"] | author_id: ⌀

---

blob_id: 20ec50dbb59c877aabc53633286aa894e4a4907c | directory_id: f9d564f1aa83eca45872dab7fbaa26dd48210d08 | content_id: c351c82d2c877b2af1bf58f524e51d1749af18c2
path: /huaweicloud-sdk-codecraft/huaweicloudsdkcodecraft/v5/model/register_competition_info_response.py
detected_licenses: ["Apache-2.0"] | license_type: permissive
repo_name: huaweicloud/huaweicloud-sdk-python-v3 | branch_name: refs/heads/master
snapshot_id: cde6d849ce5b1de05ac5ebfd6153f27803837d84 | revision_id: f69344c1dadb79067746ddf9bfde4bddc18d5ecf
visit_date: 2023-09-01T19:29:43.013318 | revision_date: 2023-08-31T08:28:59 | committer_date: 2023-08-31T08:28:59
github_id: 262,207,814 | star_events_count: 103 | fork_events_count: 44
gha_license_id: NOASSERTION | gha_event_created_at: 2023-06-22T14:50:48 | gha_created_at: 2020-05-08T02:28:43 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,282 | extension: py
content:
# coding: utf-8

import six

from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization


class RegisterCompetitionInfoResponse(SdkResponse):
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'is_permitted': 'bool',
        'team_id': 'str'
    }

    attribute_map = {
        'is_permitted': 'is_permitted',
        'team_id': 'team_id'
    }

    def __init__(self, is_permitted=None, team_id=None):
        """RegisterCompetitionInfoResponse

        The model defined in huaweicloud sdk

        :param is_permitted: whether submitting an entry is allowed (true = allowed, false = not allowed)
        :type is_permitted: bool
        :param team_id: team ID
        :type team_id: str
        """
        super(RegisterCompetitionInfoResponse, self).__init__()

        self._is_permitted = None
        self._team_id = None
        self.discriminator = None

        if is_permitted is not None:
            self.is_permitted = is_permitted
        if team_id is not None:
            self.team_id = team_id

    @property
    def is_permitted(self):
        """Gets the is_permitted of this RegisterCompetitionInfoResponse.

        Whether submitting an entry is allowed (true = allowed, false = not allowed)

        :return: The is_permitted of this RegisterCompetitionInfoResponse.
        :rtype: bool
        """
        return self._is_permitted

    @is_permitted.setter
    def is_permitted(self, is_permitted):
        """Sets the is_permitted of this RegisterCompetitionInfoResponse.

        Whether submitting an entry is allowed (true = allowed, false = not allowed)

        :param is_permitted: The is_permitted of this RegisterCompetitionInfoResponse.
        :type is_permitted: bool
        """
        self._is_permitted = is_permitted

    @property
    def team_id(self):
        """Gets the team_id of this RegisterCompetitionInfoResponse.

        Team ID

        :return: The team_id of this RegisterCompetitionInfoResponse.
        :rtype: str
        """
        return self._team_id

    @team_id.setter
    def team_id(self, team_id):
        """Sets the team_id of this RegisterCompetitionInfoResponse.

        Team ID

        :param team_id: The team_id of this RegisterCompetitionInfoResponse.
        :type team_id: str
        """
        self._team_id = team_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RegisterCompetitionInfoResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"[email protected]"
] | |
3043c42d97729db02e9e2dfc1c55597e1cf93c21
|
80afa26ba73b53f38e3fc21bf395030762fe8981
|
/200. Number of Islands.py
|
ae34d2da127bdf85debe74d5795951cefd3b567d
|
[] |
no_license
|
iamshivamgoswami/Random-DSA-Questions
|
45b402063dbd2e31da2eee7590b6991aa624637d
|
e36250d08cf0de59cd0a59b4f3293e55793b1a6f
|
refs/heads/main
| 2023-07-15T15:48:36.363321 | 2021-08-26T03:40:47 | 2021-08-26T03:40:47 | 392,702,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 535 |
py
|
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
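# Depth-first flood fill: sink every land cell ("1") of the current island to "0" so each island is counted exactly once.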
def dfs(i, j):
grid[i][j] = "0"
for x, y in [(i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)]:
if 0 <= x < len(grid) and 0 <= y < len(grid[0]) and grid[x][y] == "1":
dfs(x, y)
count = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == "1":
count += 1
dfs(i, j)
return count
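# e.g. numIslands([["1","1","0"],["0","1","0"],["0","0","1"]]) returns 2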
|
[
"[email protected]"
] | |
5e8b05b53016ffca5bed18a5013345269a00b769
|
7549c5f2e2b8ecc6d8f7ccc8030e005ffdf15018
|
/modules/python_base/09/9.1.3/finally.py
|
6c930a0a22b024c92df588cea78ced2a5f4298f0
|
[] |
no_license
|
gao634209276/myPython
|
d16bbf53645531e03cd2da4f211e783d0b93f703
|
40b4e8dcd329c34a73808a51743131d554832ab6
|
refs/heads/master
| 2020-06-20T22:21:55.521914 | 2017-09-01T14:27:31 | 2017-09-01T14:27:31 | 74,817,170 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 522 |
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# incorrect usage of finally
# try:
# f = file("hello2.txt", "r")
# print "reading the file"
# except IOError: # catch the IOError exception
# print "file does not exist"
# finally: # other exception cases
# f.close()
# try...except...finally
try:
f = open("hello.txt", "r")
try:
print f.read(5)
except:
print "读取文件错误"
finally: # finally子句一般用于释放资源
print "释放资源"
f.close()
except IOError:
print "文件不存在"
|
[
"[email protected]"
] | |
7c49980f44f9b0929a02ad6e53a286947278a7c5
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_167/ch16_2019_03_01_13_58_05_885182.py
|
74ce338f954775fb384946b2469a654fe753e495
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 83 |
py
|
def distancia_euclidiana(x1,y1,x2,y2):
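# Euclidean distance between the points (x1, y1) and (x2, y2).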
d = ((x1-x2)**2 + (y1-y2)**2) ** 0.5
return d
|
[
"[email protected]"
] | |
e71dabbd598b0e442f49c9a54a5edd5d114097bf
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_118/ch117_2020_03_31_17_46_06_284632.py
|
7d0d3bd8c16e279f5555404740242af0f74d1a9b
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 109 |
py
|
import math
def snell_descartes(n1,n2,o1):
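# Snell's law: n1*sin(o1) = n2*sin(o2); o1 is the incidence angle in degrees, the returned o2 is in radians.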
o2=math.asin((n1*math.sin(math.radians(o1)))/n2)
return o2
|
[
"[email protected]"
] | |
e7545cd95e31563f8e17fbe8a574b2b3b02f8418
|
7dfabdddeb5b8f1628e445cdb6d536958c8bc85b
|
/pcdet/models/backbones_3d/spconv_unet_fpn_nom.py
|
58ecf873afd22147eaecebc11a861dc296d6efa2
|
[
"Apache-2.0"
] |
permissive
|
vehxianfish/SRDAN_Open
|
d6ba16ebc201c9651fac16bc30f57dc3a740041f
|
47c1bd9d2369d8e486b18a7aea220af7324c9011
|
refs/heads/master
| 2023-08-15T10:36:56.483018 | 2021-09-25T03:35:53 | 2021-09-25T03:35:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 18,243 |
py
|
from functools import partial
import spconv
import torch
import torch.nn as nn
from ...utils import common_utils
from .spconv_backbone import post_act_block
class SparseBasicBlock(spconv.SparseModule):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None):
super(SparseBasicBlock, self).__init__()
self.conv1 = spconv.SubMConv3d(
inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, indice_key=indice_key
)
self.bn1 = norm_fn(planes)
self.relu = nn.ReLU()
self.conv2 = spconv.SubMConv3d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key
)
self.bn2 = norm_fn(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x.features
assert x.features.dim() == 2, 'x.features.dim()=%d' % x.features.dim()
out = self.conv1(x)
out.features = self.bn1(out.features)
out.features = self.relu(out.features)
out = self.conv2(out)
out.features = self.bn2(out.features)
if self.downsample is not None:
identity = self.downsample(x)
out.features += identity
out.features = self.relu(out.features)
return out
class UNetV2FPN(nn.Module):
"""
Sparse Convolution based UNet for point-wise feature learning.
Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi et al.)
From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
"""
def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range, num_fpn_up=0, num_fpn_downup=0, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.sparse_shape = grid_size[::-1] + [1, 0, 0]
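# Reverse the grid to (z, y, x) order and pad one voxel along z for the sparse convolutions.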
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
self.num_fpn_up = num_fpn_up
self.num_fpn_downup = num_fpn_downup
norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
self.conv_input = spconv.SparseSequential(
spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
norm_fn(16),
nn.ReLU(),
)
block = post_act_block
self.downup_up = self.model_cfg.get('DOWNUP_UP_MODULE', False)
self.conv1 = spconv.SparseSequential(
block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
)
self.conv2 = spconv.SparseSequential(
# [1600, 1408, 41] <- [800, 704, 21]
block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='spconv2_1'),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='spconv2_2'),
)
self.conv3 = spconv.SparseSequential(
# [800, 704, 21] <- [400, 352, 11]
block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='spconv3_1', conv_type='spconv'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='spconv3_2', conv_type='spconv'),
)
self.conv4 = spconv.SparseSequential(
# [400, 352, 11] <- [200, 176, 5]
block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='spconv4_1'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='spconv4_2'),
)
self.share_up_module = self.model_cfg.get('SHARE_UP_MODULE', False)
if self.model_cfg.get('RETURN_ENCODED_TENSOR', True):
last_pad = self.model_cfg.get('last_pad', 0)
self.conv_out = spconv.SparseSequential(
# conv 4: 1, 64, 5, 126, 126 to 128, 2, 126, 126
spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
bias=False, indice_key='spconv_down2'),
# 1, 128, 2, 126, 126 -> 256, 126, 126
norm_fn(128),
nn.ReLU(),
)
else:
self.conv_out = None
if self.num_fpn_up + self.num_fpn_downup > 0:
self.FPN = True
last_pad = self.model_cfg.get('last_pad', 0)
######### can change different receptive field #########
# fpn conv: GPU memory does not allow too many fpn conv layers
if self.num_fpn_up > 0:
self.conv3_ident = spconv.SparseSequential(
spconv.SparseConv3d(64, 64, (1, 1, 1), stride=(1, 1, 1), padding=last_pad,
bias=False, indice_key='spconv_ident_3'),
# 128, 5, 252, 252 -> 640, 252, 252
norm_fn(64),
nn.ReLU(),
)
if not self.share_up_module:
self.conv_up_t4_to_3 = SparseBasicBlock(64, 64, indice_key='subm4', norm_fn=norm_fn)
self.conv_up_m4_to_3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4')
self.conv4_to_3 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv')
# conv3: 1, 64, 11, 252, 252 to 128, 5, 252, 252
self.conv3_out = spconv.SparseSequential(
spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
bias=False, indice_key='spconv_down2_3'),
# 128, 5, 252, 252 -> 640, 252, 252
norm_fn(128),
nn.ReLU(),
)
if self.num_fpn_up > 1:
self.conv2_ident = spconv.SparseSequential(
spconv.SparseConv3d(32, 32, (1, 1, 1), stride=(1, 1, 1), padding=last_pad,
bias=False, indice_key='spconv_ident_3'),
# 128, 5, 252, 252 -> 640, 252, 252
norm_fn(32),
nn.ReLU(),
)
# conv2: 1, 32, 21, 504, 504 to 64, 10, 504, 504
self.conv2_out = spconv.SparseSequential(
spconv.SparseConv3d(32, 64, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
bias=False, indice_key='spconv_down2_2'),
# 64, 10, 504, 504 -> 640, 504, 504
norm_fn(64),
nn.ReLU(),
)
# fpn deconv
if self.num_fpn_downup > 0:
# conv2: 64, 10, 504, 504 to 1, 32, 21, 504, 504 to
self.conv5 = spconv.SparseSequential(
# [800, 704, 21] <- [400, 352, 11]
block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv5', conv_type='spconv'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm5'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm5'),
)
self.conv5_out = spconv.SparseSequential(
spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
bias=False, indice_key='spconv_down2_5'),
# 64, 10, 504, 504 -> 640, 504, 504
norm_fn(128),
nn.ReLU(),
)
self.conv_up_t5_to_4 = SparseBasicBlock(64, 64, indice_key='subm5', norm_fn=norm_fn)
self.conv_up_m5_to_4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm5')
self.conv5_to_4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv5', conv_type='inverseconv')
self.conv4_ident = spconv.SparseSequential(
spconv.SparseConv3d(64, 64, (1, 1, 1), stride=(1, 1, 1), padding=last_pad,
bias=False, indice_key='spconv_ident_4'),
# 128, 11, 126, 126
norm_fn(64),
nn.ReLU(),
)
self.conv4_out = spconv.SparseSequential(
spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
bias=False, indice_key='spconv_down2_4'),
# 64, 10, 504, 504 -> 640, 504, 504
norm_fn(128),
nn.ReLU(),
)
# if self.num_fpn_downup > 1:
else:
self.FPN = False
# decoder
# if self.num_fpn_downup > 0:
# self.conv_up_t5 = SparseBasicBlock(64, 64, indice_key='subm5', norm_fn=norm_fn)
# self.conv_up_m5 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm5')
# self.inv_conv5 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv5', conv_type='inverseconv')
# [400, 352, 11] <- [200, 176, 5]
self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key='subm4', norm_fn=norm_fn)
self.conv_up_m4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4')
self.inv_conv4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv')
# [800, 704, 21] <- [400, 352, 11]
self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key='subm3', norm_fn=norm_fn)
self.conv_up_m3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3')
self.inv_conv3 = block(64, 32, 3, norm_fn=norm_fn, indice_key='spconv3', conv_type='inverseconv')
# [1600, 1408, 41] <- [800, 704, 21]
self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key='subm2', norm_fn=norm_fn)
self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key='subm2')
self.inv_conv2 = block(32, 16, 3, norm_fn=norm_fn, indice_key='spconv2', conv_type='inverseconv')
# [1600, 1408, 41] <- [1600, 1408, 41]
self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key='subm1', norm_fn=norm_fn)
self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key='subm1')
self.conv0 = spconv.SparseSequential(
block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1')
)
self.num_point_features = 16
def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
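# UNet decoder block: transform the lateral (skip) tensor, concatenate with the bottom-up features, fuse with a residual, and upsample via the inverse sparse conv.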
x_trans = conv_t(x_lateral)
x = x_trans
x.features = torch.cat((x_bottom.features, x_trans.features), dim=1)
x_m = conv_m(x)
x = self.channel_reduction(x, x_m.features.shape[1])
x.features = x_m.features + x.features
x = conv_inv(x)
return x
def FPN_block_forward(self, x_lateral, x_bottom, conv_ident, conv_inv, conv_t=None, conv_m=None):
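# FPN fusion: project the lateral tensor with a 1x1 identity conv, upsample the bottom tensor through the inverse sparse conv, then add the two.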
#conv_t, conv_m,
x_ident = conv_ident(x_lateral)
# x = x_ident
# x.features = torch.cat((x_bottom.features, x_trans.features), dim=1)
if conv_t is not None:
x_bottom = conv_t(x_bottom)
if conv_m is not None:
x_bottom = conv_m(x_bottom)
x = conv_inv(x_bottom)
# x = self.channel_reduction(x, x_m.features.shape[1])
# print("channel_reduction", x.dense().shape)
# x.features = x_m.features + x.features
# print("x.features", x.dense().shape)
# x = conv_inv(x)
# print("xfi", x.dense().shape)
x.features = x.features + x_ident.features
# print("xfi", x.dense().shape)
return x
@staticmethod
def channel_reduction(x, out_channels):
"""
Args:
x: x.features (N, C1)
out_channels: C2
Returns:
x: the same sparse tensor with features reduced to (N, C2) by summing groups of C1 // C2 channels
"""
features = x.features
n, in_channels = features.shape
assert (in_channels % out_channels == 0) and (in_channels >= out_channels)
x.features = features.view(n, out_channels, -1).sum(dim=2)
return x
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size: int
vfe_features: (num_voxels, C)
voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
Returns:
batch_dict:
encoded_spconv_tensor: sparse tensor
point_features: (N, C)
"""
voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
batch_size = batch_dict['batch_size']
input_sp_tensor = spconv.SparseConvTensor(
features=voxel_features,
indices=voxel_coords.int(),
spatial_shape=self.sparse_shape,
batch_size=batch_size
)
x = self.conv_input(input_sp_tensor)
# print("x", x.dense().shape)
# , 16, 41, 1008, 1008
x_conv1 = self.conv1(x)
# print("x_conv1", x_conv1.dense().shape)
# 1, 16, 41, 1008, 1008
x_conv2 = self.conv2(x_conv1)
# print("x_conv2", x_conv2.dense().shape)
# 1, 32, 21, 504, 504
x_conv3 = self.conv3(x_conv2)
# print("x_conv3", x_conv3.dense().shape)
# 1, 64, 11, 252, 252
x_conv4 = self.conv4(x_conv3)
# print("x_conv4", x_conv4.dense().shape)
# 1, 64, 5, 126, 126
if self.conv_out is not None:
# for detection head
# [200, 176, 5] -> [200, 176, 2]
out = self.conv_out(x_conv4)
batch_dict['encoded_spconv_tensor'] = out
batch_dict['encoded_spconv_tensor_stride'] = 8
if self.FPN:
if self.num_fpn_up > 0 and not self.downup_up:
if self.share_up_module:
x_fpn3 = self.FPN_block_forward(x_conv3, x_conv4, self.conv3_ident, self.inv_conv4, conv_t=self.conv_up_t4, conv_m=self.conv_up_m4)
else:
x_fpn3 = self.FPN_block_forward(x_conv3, x_conv4, self.conv3_ident, self.conv4_to_3,
conv_t=self.conv_up_t4_to_3, conv_m=self.conv_up_m4_to_3)
out_3 = self.conv3_out(x_fpn3)
batch_dict['encoded_spconv_tensor_fpn3'] = out_3
batch_dict['encoded_spconv_tensor_stride_fpn3'] = 8#16
# if self.num_fpn_up > 1:
# out_2 = self.conv2_out(x_conv2)
# batch_dict['encoded_spconv_tensor_fpn2'] = out_2
# batch_dict['encoded_spconv_tensor_stride_fpn2'] = 32
if self.num_fpn_downup > 0:
# print("x_conv4", x_conv4.dense().shape)
# 64, 5, 126, 126
x_conv5 = self.conv5(x_conv4)
# print("conv5", x_conv5.dense().shape)
# 64, 3, 63, 63
out_5 = self.conv5_out(x_conv5)
# print("out_5", out_5.dense().shape)
# 128, 1, 63, 63
batch_dict['encoded_spconv_tensor_fpn5'] = out_5
batch_dict['encoded_spconv_tensor_stride_fpn5'] = 8#16
x_fpn4 = self.FPN_block_forward(x_conv4, x_conv5, self.conv4_ident, self.conv5_to_4,
conv_t=self.conv_up_t5_to_4, conv_m=self.conv_up_m5_to_4)
# print("x_fpn4", x_fpn4.dense().shape)
# 64, 5, 126, 126
out_4 = self.conv4_out(x_fpn4)
# print("out_4", out_4.dense().shape)
# 128, 2, 126, 126
batch_dict['encoded_spconv_tensor_fpn4'] = out_4
batch_dict['encoded_spconv_tensor_stride_fpn4'] = 8#16
if self.num_fpn_up > 0 and self.downup_up:
if self.share_up_module:
x_fpn3 = self.FPN_block_forward(x_conv3, x_fpn4, self.conv3_ident, self.inv_conv4, conv_t=self.conv_up_t4, conv_m=self.conv_up_m4)
else:
x_fpn3 = self.FPN_block_forward(x_conv3, x_fpn4, self.conv3_ident, self.conv4_to_3,
conv_t=self.conv_up_t4_to_3, conv_m=self.conv_up_m4_to_3)
out_3 = self.conv3_out(x_fpn3)
batch_dict['encoded_spconv_tensor_fpn3'] = out_3
batch_dict['encoded_spconv_tensor_stride_fpn3'] = 8#16
# print("cv out", out.dense().shape)
# 1, 128, 2, 126, 126 -> 256, 126, 126
# for segmentation head
# [400, 352, 11] <- [200, 176, 5]
x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4)
# print("x_up4 out", x_up4.dense().shape)
# 1, 64, 11, 252, 252
# [800, 704, 21] <- [400, 352, 11]
x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3)
# print("x_up3 out", x_up3.dense().shape)
# 1, 32, 21, 504, 504
# [1600, 1408, 41] <- [800, 704, 21]
x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2)
# print("x_up2 out", x_up2.dense().shape)
# 1, 16, 41, 1008, 1008
# [1600, 1408, 41] <- [1600, 1408, 41]
x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv0)
# print("x_up1 out", x_up1.dense().shape)
# 16, 41, 1008, 1008
batch_dict['point_features'] = x_up1.features
point_coords = common_utils.get_voxel_centers(
x_up1.indices[:, 1:], downsample_times=1, voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_dict['point_coords'] = torch.cat((x_up1.indices[:, 0:1].float(), point_coords), dim=1)
return batch_dict
|
[
"[email protected]"
] | |
535ad5ea0cc874c6d7e51ccadc8482f2f6343596
|
2309fbe9f9b86685f533706e6877ac3cfae99632
|
/tests/src/Regression_Testing/Test_Scripts/Click_on_TAR.py
|
099c02daa09fc1944c2154963b58662a39772477
|
[
"MIT"
] |
permissive
|
komathi1607/cQube
|
909a8834608ce19989347863be538022bfaacd84
|
6cc629a600075a1e5332f84f8ffa940a3eebfcd0
|
refs/heads/master
| 2022-11-15T07:46:19.314371 | 2020-06-05T00:55:17 | 2020-06-05T00:55:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 779 |
py
|
import time
import unittest
from selenium import webdriver
from Data.parameters import Data
from TS.reuse_func import cqube
from get_dir import pwd
class test_TAR(unittest.TestCase):
def setUp(self):
driver_path = pwd()
self.driver = webdriver.Chrome(executable_path=driver_path.get_driver_path())
driver = cqube(self.driver)
driver.open_cqube_appln()
driver = cqube(self.driver)
driver.login_cqube()
def test_TAR_Page(self):
self.driver.find_element_by_xpath(Data.Dashboard).click()
time.sleep(2)
self.driver.find_element_by_xpath(Data.TAR).click()
time.sleep(2)
def tearDown(self):
time.sleep(5)
self.driver.close()
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
81ed140b023ae1c2e0ee981bb65a3c175e059052
|
3356ecffb180dd617a8ded3bca89a067122e9d65
|
/lab1/task_7.py
|
0c781a6c6f9d656ecab036074ee523467f7bbfbe
|
[] |
no_license
|
python-practice-b02-927/TodorovRV
|
bae93783b15e6e0397c7dfae018dfac58b719a03
|
d6765f8b65ae88b2d0ca021340ff1848d4b3605a
|
refs/heads/master
| 2020-07-22T20:18:45.296515 | 2019-11-19T11:46:03 | 2019-11-19T11:46:03 | 207,315,122 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 293 |
py
|
#!/usr/bin/python3
from pyrob.api import *
@task
def task_5_4():
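# Descend until a wall is beneath, slide right past its edge, step down, then walk back left along the underside of the wall.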
while not wall_is_beneath():
move_down()
while wall_is_beneath():
move_right()
move_down()
move_left()
while wall_is_above() and (not wall_is_on_the_left()):
move_left()
if __name__ == '__main__':
run_tasks()
|
[
"[email protected]"
] | |
38613ed23f71373e77774283f60636d3ef9b8b70
|
e87c04d6c2bbba383f9c75620b16f02358039ab5
|
/200826프로/Re최소비용구하기_G5.py
|
d8fd01ea1428cd215e57f2eb2c491840f6e2f32f
|
[] |
no_license
|
yoonwoo123/Algorithm
|
2bf6e103009572cbcf3abfd783f6c28762529340
|
5d1e76f1bf6c2fc6acb25dc5296d62b2ca453ec6
|
refs/heads/master
| 2022-06-18T14:06:06.248906 | 2022-05-29T10:41:15 | 2022-05-29T10:41:15 | 221,483,283 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,240 |
py
|
# Retry of the Baekjoon "Find Minimum Cost" (G5) problem I couldn't solve on 06/28
import sys, heapq
sys.stdin = open("최소비용_input.txt")
input = sys.stdin.readline
def dijkstra(graph, start, end):
distances = {node : float('inf') for node in graph}
distances[start] = 0
queue = []
heapq.heappush(queue, [distances[start], start])
while queue:
currentDistance, currentNode = heapq.heappop(queue)
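# Lazy deletion: skip stale heap entries for which a shorter path was already found.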
if currentDistance > distances[currentNode]: continue
for adjacent, weight in graph[currentNode].items():
distance = currentDistance + weight
if distance < distances[adjacent]:
distances[adjacent] = distance
heapq.heappush(queue, [distance, adjacent])
return distances[end]
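# Example (hypothetical graph): dijkstra({1: {2: 4, 3: 1}, 2: {}, 3: {2: 1}}, 1, 2) returns 2 via 1 -> 3 -> 2.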
N = int(input())
M = int(input())
graph = {i : {} for i in range(1, N+1)}
for _ in range(M):
start, end, toll = map(int, input().split())
# Important: if a larger toll arrives for the same bus (same key), it must be ignored
# so that the minimum cost is computed correctly.
if end in graph[start] and toll >= graph[start][end]: continue
graph[start][end] = toll
start, end = map(int, input().split())
print(dijkstra(graph, start, end))
|
[
"[email protected]"
] | |
39c80ed7609de255a1d4095a62be36b57429a380
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/XjgoXNmnz59txiQp3_20.py
|
5a5eb8ffec5c428b019e2cbaa644d2f512144599
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 176 |
py
|
def split(number):
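# Maximum product from splitting number into positive summands: use as many 3s as possible, keeping a 2 or 4 for the remainder (numbers below 5 are returned unchanged).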
if (number < 5):
return number
rem = number % 3
div3 = number//3
if (rem == 2):
return 3 ** (div3) * 2
return 3 ** (div3-1) * (3 + rem)
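# e.g. split(10) returns 36 (10 = 3 + 3 + 4, and 3 * 3 * 4 = 36)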
|
[
"[email protected]"
] | |
86e9fa6a8c3ed0a7e6035870f5d3efbab277cffc
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_35/202.py
|
85cf40d5f4d6c4e8f2519c5092793cfc5a85d792
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,956 |
py
|
#!/usr/bin/python
MAX_ALT = 11000
def is_valid_index(i, j, alts):
return i >= 0 and j >= 0 and i < len(alts) and j < len(alts[i])
def find_flow_dir(i, j, alts):
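# Choose the lowest cell among the current cell and its N, W, E, S neighbours; ties break in that order (the Code Jam "Watersheds" rule).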
dirs = [(alts[i][j], (i, j)),
(alts[i - 1][j] if is_valid_index(i - 1, j, alts) else MAX_ALT, (i - 1, j)),
(alts[i][j - 1] if is_valid_index(i, j - 1, alts) else MAX_ALT, (i, j - 1)),
(alts[i][j + 1] if is_valid_index(i, j + 1, alts) else MAX_ALT, (i, j + 1)),
(alts[i + 1][j] if is_valid_index(i + 1, j, alts) else MAX_ALT, (i + 1, j))]
return min(dirs, key=lambda x: x[0])[1]
def flow(i, j, n, ans, alts, eq):
while ans[i][j] == -1:
ans[i][j] = n
ni, nj = find_flow_dir(i, j, alts)
if (ni, nj) == (i, j): # found a sink
break
i, j = ni, nj
eq[n] = ans[i][j]
def print_basin(basin, f):
for row in basin:
f.write(' '.join(map(str, row)) + '\n')
def parent(num, eq):
if eq[num] != num:
eq[num] = parent(eq[num], eq)
return eq[num]
def to_alph(basin, eq):
nums_seen = {}
count = 0
for i in xrange(H):
for j in xrange(W):
p = parent(basin[i][j], eq)
if p not in nums_seen:
nums_seen[p] = count
count += 1
basin[i][j] = chr(ord('a') + nums_seen[p])
f = open('B-large.in', 'r')
lines = [line.strip() for line in f.readlines()]
f.close()
N = int(lines[0])
f = open('B-large.out', 'w')
line_num = 1
for case in xrange(1, N + 1):
H, W = map(int, lines[line_num].split(' '))
alts = [0]*H
for i in xrange(H):
alts[i] = map(int, lines[line_num + i + 1].split(' '))
assert len(alts[i]) == W
ans = [0] * H
for i in xrange(H):
ans[i] = [-1] * W
equivalencies = {}
count = 0
for i in xrange(H):
for j in xrange(W):
if ans[i][j] == -1:
flow(i, j, count, ans, alts, equivalencies)
count += 1
to_alph(ans, equivalencies)
f.write('Case #%d:\n' % case)
print_basin(ans, f)
line_num += 1 + H
f.close()
|
[
"[email protected]"
] | |
c891b8067ded9d4dbe6bf8eef18d752dd94634f4
|
78137d5e4e688749399bbb386b26536e4ac6d9fa
|
/pytorch3d/renderer/camera_conversions.py
|
9fb73e5fcc8140e159b9f0ae21645212077e7ee4
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
bruinxiong/pytorch3d
|
4235681c6356f7e69fa506d8474a3c7cf83d9fe6
|
18a3c5cbb9055bcda44590d39db65bb0c74db799
|
refs/heads/master
| 2022-06-18T16:28:39.589229 | 2022-05-18T20:11:36 | 2022-05-18T20:11:36 | 238,892,798 | 0 | 0 |
NOASSERTION
| 2022-05-18T20:11:37 | 2020-02-07T10:04:39 |
Python
|
UTF-8
|
Python
| false | false | 6,893 |
py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Tuple
import torch
from ..transforms import matrix_to_rotation_6d
from .cameras import PerspectiveCameras
LOGGER = logging.getLogger(__name__)
def _cameras_from_opencv_projection(
R: torch.Tensor,
tvec: torch.Tensor,
camera_matrix: torch.Tensor,
image_size: torch.Tensor,
) -> PerspectiveCameras:
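# Convert OpenCV extrinsics (R, tvec) and intrinsics (camera_matrix) into a PyTorch3D PerspectiveCameras, accounting for the differing axis conventions.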
focal_length = torch.stack([camera_matrix[:, 0, 0], camera_matrix[:, 1, 1]], dim=-1)
principal_point = camera_matrix[:, :2, 2]
# Retype the image_size correctly and flip to width, height.
image_size_wh = image_size.to(R).flip(dims=(1,))
# Screen to NDC conversion:
# For non square images, we scale the points such that smallest side
# has range [-1, 1] and the largest side has range [-u, u], with u > 1.
# This convention is consistent with the PyTorch3D renderer, as well as
# the transformation function `get_ndc_to_screen_transform`.
scale = image_size_wh.to(R).min(dim=1, keepdim=True)[0] / 2.0
scale = scale.expand(-1, 2)
c0 = image_size_wh / 2.0
# Get the PyTorch3D focal length and principal point.
focal_pytorch3d = focal_length / scale
p0_pytorch3d = -(principal_point - c0) / scale
# For R, T we flip x, y axes (opencv screen space has an opposite
# orientation of screen axes).
# We also transpose R (opencv multiplies points from the opposite=left side).
R_pytorch3d = R.clone().permute(0, 2, 1)
T_pytorch3d = tvec.clone()
R_pytorch3d[:, :, :2] *= -1
T_pytorch3d[:, :2] *= -1
return PerspectiveCameras(
R=R_pytorch3d,
T=T_pytorch3d,
focal_length=focal_pytorch3d,
principal_point=p0_pytorch3d,
image_size=image_size,
device=R.device,
)
def _opencv_from_cameras_projection(
cameras: PerspectiveCameras,
image_size: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
R_pytorch3d = cameras.R.clone() # pyre-ignore
T_pytorch3d = cameras.T.clone() # pyre-ignore
focal_pytorch3d = cameras.focal_length
p0_pytorch3d = cameras.principal_point
T_pytorch3d[:, :2] *= -1
R_pytorch3d[:, :, :2] *= -1
tvec = T_pytorch3d
R = R_pytorch3d.permute(0, 2, 1)
# Retype the image_size correctly and flip to width, height.
image_size_wh = image_size.to(R).flip(dims=(1,))
# NDC to screen conversion.
scale = image_size_wh.to(R).min(dim=1, keepdim=True)[0] / 2.0
scale = scale.expand(-1, 2)
c0 = image_size_wh / 2.0
# pyre-fixme[29]: `Union[BoundMethod[typing.Callable(torch.Tensor.__neg__)[[Named...
principal_point = -p0_pytorch3d * scale + c0
focal_length = focal_pytorch3d * scale
camera_matrix = torch.zeros_like(R)
camera_matrix[:, :2, 2] = principal_point
camera_matrix[:, 2, 2] = 1.0
camera_matrix[:, 0, 0] = focal_length[:, 0]
camera_matrix[:, 1, 1] = focal_length[:, 1]
return R, tvec, camera_matrix
def _pulsar_from_opencv_projection(
R: torch.Tensor,
tvec: torch.Tensor,
camera_matrix: torch.Tensor,
image_size: torch.Tensor,
znear: float = 0.1,
) -> torch.Tensor:
assert len(camera_matrix.size()) == 3, "This function requires batched inputs!"
assert len(R.size()) == 3, "This function requires batched inputs!"
assert len(tvec.size()) in (2, 3), "This function requires batched inputs!"
# Validate parameters.
image_size_wh = image_size.to(R).flip(dims=(1,))
assert torch.all(
image_size_wh > 0
), "height and width must be positive but min is: %s" % (
str(image_size_wh.min().item())
)
assert (
camera_matrix.size(1) == 3 and camera_matrix.size(2) == 3
), "Incorrect camera matrix shape: expected 3x3 but got %dx%d" % (
camera_matrix.size(1),
camera_matrix.size(2),
)
assert (
R.size(1) == 3 and R.size(2) == 3
), "Incorrect R shape: expected 3x3 but got %dx%d" % (
R.size(1),
R.size(2),
)
if len(tvec.size()) == 2:
tvec = tvec.unsqueeze(2)
assert (
tvec.size(1) == 3 and tvec.size(2) == 1
), "Incorrect tvec shape: expected 3x1 but got %dx%d" % (
tvec.size(1),
tvec.size(2),
)
# Check batch size.
batch_size = camera_matrix.size(0)
assert R.size(0) == batch_size, "Expected R to have batch size %d. Has size %d." % (
batch_size,
R.size(0),
)
assert (
tvec.size(0) == batch_size
), "Expected tvec to have batch size %d. Has size %d." % (
batch_size,
tvec.size(0),
)
# Check image sizes.
image_w = image_size_wh[0, 0]
image_h = image_size_wh[0, 1]
assert torch.all(
image_size_wh[:, 0] == image_w
), "All images in a batch must have the same width!"
assert torch.all(
image_size_wh[:, 1] == image_h
), "All images in a batch must have the same height!"
# Focal length.
fx = camera_matrix[:, 0, 0].unsqueeze(1)
fy = camera_matrix[:, 1, 1].unsqueeze(1)
# Check that we introduce less than 1% error by averaging the focal lengths.
fx_y = fx / fy
if torch.any(fx_y > 1.01) or torch.any(fx_y < 0.99):
LOGGER.warning(
"Pulsar only supports a single focal lengths. For converting OpenCV "
"focal lengths, we average them for x and y directions. "
"The focal lengths for x and y you provided differ by more than 1%, "
"which means this could introduce a noticeable error."
)
f = (fx + fy) / 2
# Normalize f into normalized device coordinates.
focal_length_px = f / image_w
# Transfer into focal_length and sensor_width.
focal_length = torch.tensor([znear - 1e-5], dtype=torch.float32, device=R.device)
focal_length = focal_length[None, :].repeat(batch_size, 1)
sensor_width = focal_length / focal_length_px
# Principal point.
cx = camera_matrix[:, 0, 2].unsqueeze(1)
cy = camera_matrix[:, 1, 2].unsqueeze(1)
# Transfer principal point offset into centered offset.
cx = -(cx - image_w / 2)
cy = cy - image_h / 2
# Concatenate to final vector.
param = torch.cat([focal_length, sensor_width, cx, cy], dim=1)
R_trans = R.permute(0, 2, 1)
cam_pos = -torch.bmm(R_trans, tvec).squeeze(2)
cam_rot = matrix_to_rotation_6d(R_trans)
cam_params = torch.cat([cam_pos, cam_rot, param], dim=1)
return cam_params
def _pulsar_from_cameras_projection(
cameras: PerspectiveCameras,
image_size: torch.Tensor,
) -> torch.Tensor:
opencv_R, opencv_T, opencv_K = _opencv_from_cameras_projection(cameras, image_size)
return _pulsar_from_opencv_projection(opencv_R, opencv_T, opencv_K, image_size)
|
[
"[email protected]"
] | |
24a344875f69d03d0e0a3a6a855fce30a6b147d7
|
6b0f007ca1d3426c71b2298adac853ddce996b49
|
/Schoolwebsite/schoolapp/migrations/0030_auto_20201223_1852.py
|
ff5d81f44974039c5d2f45fb26b161bd10c597f6
|
[] |
no_license
|
AbdurRahman111/Full_School_Management_System
|
49b37e8615b94bc20aeabc3ef41e468cf2dd9b47
|
0508fa6ba7b529429c4dae2feeb19a991547457e
|
refs/heads/master
| 2023-02-27T04:24:13.272126 | 2021-02-05T16:17:44 | 2021-02-05T16:17:44 | 336,323,426 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,037 |
py
|
# Generated by Django 3.1.4 on 2020-12-23 12:52
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('schoolapp', '0029_auto_20201223_1031'),
]
operations = [
migrations.AlterField(
model_name='assignment_comments_all',
name='time_comment',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 12, 23, 18, 51, 59, 873142)),
),
migrations.AlterField(
model_name='teacher_assignment_upload_file',
name='date',
field=models.DateField(blank=True, default=datetime.datetime(2020, 12, 23, 18, 51, 59, 872143)),
),
migrations.AlterField(
model_name='teacher_assignment_upload_file',
name='due_date',
field=models.DateField(blank=True, default=datetime.datetime(2020, 12, 23, 18, 51, 59, 872143)),
),
migrations.AlterField(
model_name='timeoff_staffs_teachers',
name='date',
field=models.DateField(blank=True, default=datetime.datetime(2020, 12, 23, 18, 51, 59, 874141)),
),
migrations.CreateModel(
name='Dean_login_information',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Dean_ID', models.CharField(max_length=50)),
('Dean_Name', models.CharField(max_length=255)),
('Dean_pass', models.CharField(max_length=255)),
('phone', models.CharField(max_length=255)),
('address', models.CharField(max_length=255)),
('dob', models.CharField(max_length=255)),
('major', models.CharField(max_length=255)),
('IT_Service_info', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schoolapp.it_service_login_information')),
],
),
]
|
[
"[email protected]"
] | |
b6c3ca36933ac9dc94c8c51052fd4e4dc4c105c6
|
d35167f7ab29813d926bd702fa652090556befdf
|
/generated/test_frotms_PaddleOCR2Pytorch.py
|
9d914b969c0b5beed4af3cacfde687594af223c5
|
[] |
no_license
|
jansel/pytorch-jit-paritybench
|
65e5311d43daf5065beac52a68488ce188199fa8
|
7e55a422588c1d1e00f35a3d3a3ff896cce59e18
|
refs/heads/master
| 2023-08-12T04:43:16.669114 | 2023-06-08T17:36:32 | 2023-06-08T17:36:32 | 270,464,378 | 35 | 15 | null | 2023-06-08T17:36:34 | 2020-06-07T23:42:50 |
Python
|
UTF-8
|
Python
| false | false | 313,216 |
py
|
import sys
_module = sys.modules[__name__]
del sys
main = _module
generate_multi_language_configs = _module
ch_ppocr_v2_det_converter = _module
ch_ppocr_v2_rec_converter = _module
ch_ppocr_v3_det_converter = _module
ch_ppocr_v3_rec_converter = _module
ch_ppocr_v3_rec_converter_nodistill = _module
det_converter = _module
det_fcenet_converter = _module
e2e_converter = _module
layoutxlm_re_converter = _module
layoutxlm_ser_converter = _module
multilingual_ppocr_v3_rec_converter = _module
ppstructure_table_det_converter = _module
ppstructure_table_rec_converter = _module
ppstructure_table_structure_converter = _module
rec_converter = _module
rec_nrtr_mtb_converter = _module
rec_sar_converter = _module
rec_svtr_converter = _module
rec_vitstr_converter = _module
srn_converter = _module
attention_grucell = _module
attention_head = _module
common = _module
conv = _module
diff = _module
fc = _module
gelu = _module
gru_cell = _module
hard_swish = _module
hs = _module
layernorm = _module
lstm = _module
pp_ocr = _module
pp_rec_resnet_fpn = _module
pp_rec_srn_head = _module
pp_self_attention = _module
pp_table_att_head = _module
pp_table_fpn = _module
pp_table_mobilenet_v3 = _module
pt_rec_resnet_fpn = _module
pt_rec_srn_head = _module
pt_self_attention = _module
pt_table_att_head = _module
pt_table_fpn = _module
pt_table_mobilenet_v3 = _module
rec_resnet_fpn = _module
rec_srn = _module
rec_srn_head = _module
table_att_head = _module
table_det = _module
table_mobile = _module
table_mobilenet_v3 = _module
onnx_optimizer = _module
ptstructure = _module
ptppyolov2 = _module
ppyolo_utils = _module
ppyolov2 = _module
ppyolov2_base = _module
ppyolov2_darknet = _module
ppyolov2_layout = _module
ppyolov2_pt = _module
ppyolov2_resnet = _module
ppyolov2_yolo_fpn = _module
ppyolov2_yolo_head = _module
pt_utils = _module
utils = _module
predict_system = _module
table = _module
matcher = _module
predict_structure = _module
predict_table = _module
tablepyxl = _module
style = _module
utility = _module
infer_ser_e2e = _module
infer_ser_re_e2e = _module
data = _module
vocab = _module
transformers = _module
bert = _module
tokenizer = _module
layoutlm = _module
modeling = _module
layoutxlm = _module
modeling = _module
visual_backbone = _module
model_utils = _module
tokenizer_utils = _module
utils = _module
vqa_utils = _module
pytorchocr = _module
base_ocr_v20 = _module
imaug = _module
gen_table_mask = _module
operators = _module
architectures = _module
base_model = _module
backbones = _module
det_mobilenet_v3 = _module
det_resnet = _module
det_resnet_vd = _module
det_resnet_vd_sast = _module
e2e_resnet_vd_pg = _module
rec_mobilenet_v3 = _module
rec_mv1_enhance = _module
rec_nrtr_mtb = _module
rec_resnet_31 = _module
rec_resnet_fpn = _module
rec_resnet_vd = _module
rec_svtrnet = _module
rec_vitstr = _module
table_mobilenet_v3 = _module
table_resnet_vd = _module
common = _module
heads = _module
cls_head = _module
det_db_head = _module
det_east_head = _module
det_fce_head = _module
det_pse_head = _module
det_sast_head = _module
e2e_pg_head = _module
multiheadAttention = _module
rec_att_head = _module
rec_ctc_head = _module
rec_nrtr_head = _module
rec_sar_head = _module
rec_srn_head = _module
self_attention = _module
table_att_head = _module
necks = _module
db_fpn = _module
east_fpn = _module
fce_fpn = _module
fpn = _module
pg_fpn = _module
rnn = _module
sast_fpn = _module
table_fpn = _module
transforms = _module
stn = _module
tps = _module
tps_spatial_transformer = _module
postprocess = _module
cls_postprocess = _module
db_postprocess = _module
east_postprocess = _module
fce_postprocess = _module
locality_aware_nms = _module
pg_postprocess = _module
pse_postprocess = _module
pse = _module
setup = _module
pse_postprocess = _module
rec_postprocess = _module
sast_postprocess = _module
extract_batchsize = _module
extract_textpoint_fast = _module
extract_textpoint_slow = _module
pgnet_pp_utils = _module
visual = _module
logging = _module
poly_nms = _module
predict_cls = _module
predict_det = _module
predict_e2e = _module
predict_rec = _module
pytorchocr_utility = _module
from _paritybench_helpers import _mock_config, patch_functional
from unittest.mock import mock_open, MagicMock
from torch.autograd import Function
from torch.nn import Module
import abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings
import numpy as np
from torch import Tensor
patch_functional()
open = mock_open()
yaml = logging = sys = argparse = MagicMock()
ArgumentParser = argparse.ArgumentParser
_global_config = args = argv = cfg = config = params = _mock_config()
argparse.ArgumentParser.return_value.parse_args.return_value = _global_config
yaml.load.return_value = _global_config
sys.argv = _global_config
__version__ = '1.0.0'
xrange = range
wraps = functools.wraps
from collections import OrderedDict
import numpy as np
import torch
import copy
import torch.nn as nn
import torch.nn.functional as F
import math
import torchvision
from numbers import Integral
import time
from copy import deepcopy
from torch.nn import Module as Layer
from abc import abstractmethod
from collections import namedtuple
from torch.nn import Module
import logging
import inspect
import functools
import random
from torch import nn
from functools import partial
from torch.nn import Linear
from torch.nn.init import xavier_uniform_
from torch.nn import ModuleList as LayerList
from torch.nn import Dropout
from torch.nn import LayerNorm
from torch.nn import Conv2d
from torch.nn.init import xavier_normal_
from torch.nn import functional as F
import itertools
from numpy.fft import ifft
import string
import torch.distributed as dist
class PTAttentionGRUCell(nn.Module):
def __init__(self, input_size, hidden_size, num_embeddings, use_gru=False):
super(PTAttentionGRUCell, self).__init__()
self.i2h = nn.Linear(input_size, hidden_size, bias=False)
self.h2h = nn.Linear(hidden_size, hidden_size)
self.score = nn.Linear(hidden_size, 1, bias=False)
self.rnn = nn.GRUCell(input_size=input_size + num_embeddings, hidden_size=hidden_size, bias=True)
self.hidden_size = hidden_size
def forward(self, prev_hidden, batch_H, char_onehots):
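# Additive (Bahdanau-style) attention: score each encoder step against the previous hidden state, then feed the context into the GRU cell.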
batch_H_proj = self.i2h(batch_H)
prev_hidden_proj = torch.unsqueeze(self.h2h(prev_hidden), dim=1)
res = torch.add(batch_H_proj, prev_hidden_proj)
res = torch.tanh(res)
e = self.score(res)
alpha = F.softmax(e, dim=1)
alpha = alpha.permute(0, 2, 1)
context = torch.squeeze(torch.matmul(alpha, batch_H), dim=1)
concat_context = torch.cat([context, char_onehots.float()], 1)
cur_hidden = self.rnn(concat_context, prev_hidden)
return (cur_hidden, cur_hidden), alpha
class PTAttentionHead(nn.Module):
def __init__(self, in_channels, out_channels, hidden_size, **kwargs):
super(PTAttentionHead, self).__init__()
self.input_size = in_channels
self.hidden_size = hidden_size
self.num_classes = out_channels
self.attention_cell = PTAttentionGRUCell(in_channels, hidden_size, out_channels, use_gru=False)
self.generator = nn.Linear(hidden_size, out_channels)
def _char_to_onehot(self, input_char, onehot_dim):
input_ont_hot = F.one_hot(input_char.type(torch.int64), onehot_dim)
return input_ont_hot
def forward(self, inputs, targets=None, batch_max_length=25):
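# With targets (training): teacher forcing over num_steps; without targets (inference): greedy decoding, feeding back the argmax at each step.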
batch_size = inputs.size()[0]
num_steps = batch_max_length
hidden = torch.zeros((batch_size, self.hidden_size))
output_hiddens = []
if targets is not None:
for i in range(num_steps):
char_onehots = self._char_to_onehot(targets[:, i], onehot_dim=self.num_classes)
(outputs, hidden), alpha = self.attention_cell(hidden, inputs, char_onehots)
output_hiddens.append(torch.unsqueeze(outputs, dim=1))
output = torch.cat(output_hiddens, dim=1)
probs = self.generator(output)
else:
targets = torch.zeros([batch_size], dtype=torch.int32)
probs = None
char_onehots = None
outputs = None
alpha = None
for i in range(num_steps):
char_onehots = self._char_to_onehot(targets, onehot_dim=self.num_classes)
(outputs, hidden), alpha = self.attention_cell(hidden, inputs, char_onehots)
probs_step = self.generator(outputs)
if probs is None:
probs = torch.unsqueeze(probs_step, dim=1)
else:
probs = torch.cat([probs, torch.unsqueeze(probs_step, dim=1)], dim=1)
next_input = probs_step.argmax(dim=1)
targets = next_input
return probs
class Hswish(nn.Module):
def __init__(self, inplace=True):
super(Hswish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(1.2 * x + 3.0, inplace=self.inplace) / 6.0
class GELU(nn.Module):
def __init__(self, inplace=True):
super(GELU, self).__init__()
self.inplace = inplace
def forward(self, x):
return torch.nn.functional.gelu(x)
class Swish(nn.Module):
def __init__(self, inplace=True):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
if self.inplace:
x.mul_(torch.sigmoid(x))
return x
else:
return x * torch.sigmoid(x)
class Activation(nn.Module):
def __init__(self, act_type, inplace=True):
super(Activation, self).__init__()
act_type = act_type.lower()
if act_type == 'relu':
self.act = nn.ReLU(inplace=inplace)
elif act_type == 'relu6':
self.act = nn.ReLU6(inplace=inplace)
elif act_type == 'sigmoid':
raise NotImplementedError
elif act_type == 'hard_sigmoid':
self.act = Hsigmoid(inplace)
elif act_type == 'hard_swish':
self.act = Hswish(inplace=inplace)
elif act_type == 'leakyrelu':
self.act = nn.LeakyReLU(inplace=inplace)
elif act_type == 'gelu':
self.act = GELU(inplace=inplace)
elif act_type == 'swish':
self.act = Swish(inplace=inplace)
else:
raise NotImplementedError
def forward(self, inputs):
return self.act(inputs)
class ConvBNLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, groups=1, act=None, name=None):
super(ConvBNLayer, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=(kernel_size - 1) // 2, groups=groups, bias=False)
bn_name = 'bn_' + name
self.bn = nn.BatchNorm2d(out_channels)
self.act = act
if act is not None:
self._act = Activation(act)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.act is not None:
x = self._act(x)
return x
class BasicBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride, shortcut=True, if_first=False, name=None):
super(BasicBlock, self).__init__()
self.stride = stride
self.conv0 = ConvBNLayer(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, act='relu', name=name + '_branch2a')
self.conv1 = ConvBNLayer(in_channels=out_channels, out_channels=out_channels, kernel_size=3, act=None, name=name + '_branch2b')
if not shortcut:
self.short = ConvBNLayer(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, name=name + '_branch1') # this ConvBNLayer has no is_vd_mode; use a strided 1x1 conv for the shortcut
self.shortcut = shortcut
def forward(self, inputs):
y = self.conv0(inputs)
conv1 = self.conv1(y)
if self.shortcut:
short = inputs
else:
short = self.short(inputs)
y = torch.add(short, conv1)
y = F.relu(y)
return y
class BottleneckBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride, shortcut=True, if_first=False, name=None):
super(BottleneckBlock, self).__init__()
self.conv0 = ConvBNLayer(in_channels=in_channels, out_channels=out_channels, kernel_size=1, act='relu', name=name + '_branch2a')
self.conv1 = ConvBNLayer(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=stride, act='relu', name=name + '_branch2b')
self.conv2 = ConvBNLayer(in_channels=out_channels, out_channels=out_channels * 4, kernel_size=1, act=None, name=name + '_branch2c')
if not shortcut:
self.short = ConvBNLayer(in_channels=in_channels, out_channels=out_channels * 4, kernel_size=1, stride=stride, name=name + '_branch1') # this ConvBNLayer has no is_vd_mode; use a strided 1x1 conv for the shortcut
self.shortcut = shortcut
def forward(self, inputs):
y = self.conv0(inputs)
conv1 = self.conv1(y)
conv2 = self.conv2(conv1)
if self.shortcut:
short = inputs
else:
short = self.short(inputs)
y = torch.add(short, conv2)
y = F.relu(y)
return y
class ResNetFPN(nn.Module):
def __init__(self, in_channels=1, layers=50, **kwargs):
super(ResNetFPN, self).__init__()
supported_layers = {(18): {'depth': [2, 2, 2, 2], 'block_class': BasicBlock}, (34): {'depth': [3, 4, 6, 3], 'block_class': BasicBlock}, (50): {'depth': [3, 4, 6, 3], 'block_class': BottleneckBlock}, (101): {'depth': [3, 4, 23, 3], 'block_class': BottleneckBlock}, (152): {'depth': [3, 8, 36, 3], 'block_class': BottleneckBlock}}
stride_list = [(2, 2), (2, 2), (1, 1), (1, 1)]
num_filters = [64, 128, 256, 512]
self.depth = supported_layers[layers]['depth']
self.conv = ConvBNLayer(in_channels=in_channels, out_channels=64, kernel_size=7, stride=2, act='relu', name='conv1')
self.block_list = nn.ModuleList()
in_ch = 64
if layers >= 50:
for block in range(len(self.depth)):
for i in range(self.depth[block]):
if layers in [101, 152] and block == 2:
if i == 0:
conv_name = 'res' + str(block + 2) + 'a'
else:
conv_name = 'res' + str(block + 2) + 'b' + str(i)
else:
conv_name = 'res' + str(block + 2) + chr(97 + i)
bottleneckBlock = BottleneckBlock(in_channels=in_ch, out_channels=num_filters[block], stride=stride_list[block] if i == 0 else 1, name=conv_name)
in_ch = num_filters[block] * 4
self.block_list.add_module('bottleneckBlock_{}_{}'.format(block, i), bottleneckBlock)
else:
for block in range(len(self.depth)):
for i in range(self.depth[block]):
conv_name = 'res' + str(block + 2) + chr(97 + i)
if i == 0 and block != 0:
stride = 2, 1
else:
stride = 1, 1
basicBlock = BasicBlock(in_channels=in_ch, out_channels=num_filters[block], stride=stride_list[block] if i == 0 else 1, if_first=block == i == 0, name=conv_name)
in_ch = num_filters[block] # BasicBlock stores no out_channels attribute; track the channel count here
self.block_list.add_module(conv_name, basicBlock)
out_ch_list = [in_ch // 4, in_ch // 2, in_ch]
self.base_block = nn.ModuleList()
self.conv_trans = []
self.bn_block = []
for i in [-2, -3]:
in_channels = out_ch_list[i + 1] + out_ch_list[i]
bb_0 = nn.Conv2d(in_channels=in_channels, out_channels=out_ch_list[i], kernel_size=1, bias=True)
self.base_block.add_module('F_{}_base_block_0'.format(i), bb_0)
bb_1 = nn.Conv2d(in_channels=out_ch_list[i], out_channels=out_ch_list[i], kernel_size=3, padding=1, bias=True)
self.base_block.add_module('F_{}_base_block_1'.format(i), bb_1)
bb_2 = nn.Sequential(nn.BatchNorm2d(out_ch_list[i]), Activation('relu'))
self.base_block.add_module('F_{}_base_block_2'.format(i), bb_2)
bb_3 = nn.Conv2d(in_channels=out_ch_list[i], out_channels=512, kernel_size=1, bias=True)
self.base_block.add_module('F_{}_base_block_3'.format(i), bb_3)
self.out_channels = 512
def __call__(self, x):
x = self.conv(x)
fpn_list = []
F = []
for i in range(len(self.depth)):
fpn_list.append(np.sum(self.depth[:i + 1]))
for i, block in enumerate(self.block_list):
x = block(x)
for number in fpn_list:
if i + 1 == number:
F.append(x)
base = F[-1]
j = 0
for i, block in enumerate(self.base_block):
if i % 3 == 0 and i < 6:
j = j + 1
b, c, w, h = F[-j - 1].shape
if [w, h] == list(base.shape[2:]):
base = base
else:
base = self.conv_trans[j - 1](base)
base = self.bn_block[j - 1](base)
base = torch.cat([base, F[-j - 1]], dim=1)
base = block(base)
return base
class ShortCut(nn.Module):
def __init__(self, in_channels, out_channels, stride, name, is_first=False):
super(ShortCut, self).__init__()
self.use_conv = True
if in_channels != out_channels or stride != 1 or is_first == True:
if stride == (1, 1):
self.conv = ConvBNLayer(in_channels, out_channels, 1, 1, name=name)
else:
self.conv = ConvBNLayer(in_channels, out_channels, 1, stride, name=name)
else:
self.use_conv = False
def forward(self, x):
if self.use_conv:
x = self.conv(x)
return x
class Lambda(nn.Module):
"""An easy way to create a pytorch layer for a simple `func`."""
def __init__(self, func):
"""create a layer that simply calls `func` with `x`"""
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
class FFN(nn.Module):
"""
Feed-Forward Network
"""
def __init__(self, d_inner_hid, d_model, dropout_rate):
super(FFN, self).__init__()
self.dropout_rate = dropout_rate
self.fc1 = torch.nn.Linear(in_features=d_model, out_features=d_inner_hid)
self.fc2 = torch.nn.Linear(in_features=d_inner_hid, out_features=d_model)
def forward(self, x):
hidden = self.fc1(x)
hidden = F.relu(hidden)
if self.dropout_rate:
hidden = F.dropout(hidden, p=self.dropout_rate)
out = self.fc2(hidden)
return out
class MultiHeadAttention(nn.Module):
"""
Multi-Head Attention
"""
def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.0):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_key = d_key
self.d_value = d_value
self.d_model = d_model
self.dropout_rate = dropout_rate
self.q_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False)
self.k_fc = torch.nn.Linear(in_features=d_model, out_features=d_key * n_head, bias=False)
self.v_fc = torch.nn.Linear(in_features=d_model, out_features=d_value * n_head, bias=False)
self.proj_fc = torch.nn.Linear(in_features=d_value * n_head, out_features=d_model, bias=False)
def _prepare_qkv(self, queries, keys, values, cache=None):
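# Self-attention when keys is None (q, k, v all derive from queries); otherwise cross-attention, with an optional cache for incremental decoding.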
if keys is None:
keys, values = queries, queries
static_kv = False
else:
static_kv = True
q = self.q_fc(queries)
q = torch.reshape(q, shape=[q.size(0), q.size(1), self.n_head, self.d_key])
q = q.permute(0, 2, 1, 3)
if cache is not None and static_kv and 'static_k' in cache:
k = cache['static_k']
v = cache['static_v']
else:
k = self.k_fc(keys)
v = self.v_fc(values)
k = torch.reshape(k, shape=[k.size(0), k.size(1), self.n_head, self.d_key])
k = k.permute(0, 2, 1, 3)
v = torch.reshape(v, shape=[v.size(0), v.size(1), self.n_head, self.d_value])
v = v.permute(0, 2, 1, 3)
if cache is not None:
if static_kv and 'static_k' not in cache:
cache['static_k'], cache['static_v'] = k, v
elif not static_kv:
cache_k, cache_v = cache['k'], cache['v']
k = torch.cat([cache_k, k], dim=2)
v = torch.cat([cache_v, v], dim=2)
cache['k'], cache['v'] = k, v
return q, k, v
def forward(self, queries, keys, values, attn_bias, cache=None):
keys = queries if keys is None else keys
values = keys if values is None else values
q, k, v = self._prepare_qkv(queries, keys, values, cache)
product = torch.matmul(q, k.transpose(2, 3))
product = product * self.d_model ** -0.5
if attn_bias is not None:
product += attn_bias
weights = F.softmax(product, dim=-1)
if self.dropout_rate:
weights = F.dropout(weights, p=self.dropout_rate)
out = torch.matmul(weights, v)
out = out.permute(0, 2, 1, 3)
out = torch.reshape(out, shape=[out.size(0), out.size(1), out.shape[2] * out.shape[3]])
out = self.proj_fc(out)
return out
class LambdaXY(nn.Module):
"""An easy way to create a pytorch layer for a simple `func`."""
def __init__(self, func):
"""create a layer that simply calls `func` with `x`"""
super().__init__()
self.func = func
def forward(self, x, y):
return self.func(x, y)
class PrePostProcessLayer(nn.Module):
"""
PrePostProcessLayer
"""
def __init__(self, process_cmd, d_model, dropout_rate):
super(PrePostProcessLayer, self).__init__()
self.process_cmd = process_cmd
self.functors = nn.ModuleList()
cur_a_len = 0
cur_n_len = 0
cur_d_len = 0
for cmd in self.process_cmd:
if cmd == 'a':
self.functors.add_module('add_res_connect_{}'.format(cur_a_len), LambdaXY(lambda x, y: x + y if y is not None else x))
cur_a_len += 1
elif cmd == 'n':
layerNorm = torch.nn.LayerNorm(normalized_shape=d_model, elementwise_affine=True, eps=1e-05)
self.functors.add_module('layer_norm_%d' % cur_n_len, layerNorm)
cur_n_len += 1
elif cmd == 'd':
self.functors.add_module('add_drop_{}'.format(cur_d_len), Lambda(lambda x: F.dropout(x, p=dropout_rate) if dropout_rate else x))
cur_d_len += 1
def forward(self, x, residual=None):
for i, (cmd, functor) in enumerate(zip(self.process_cmd, self.functors)):
if cmd == 'a':
x = functor(x, residual)
else:
x = functor(x)
return x
class EncoderLayer(nn.Module):
"""
EncoderLayer
"""
def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd='n', postprocess_cmd='da'):
super(EncoderLayer, self).__init__()
self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout)
self.self_attn = MultiHeadAttention(d_key, d_value, d_model, n_head, attention_dropout)
self.postprocesser1 = PrePostProcessLayer(postprocess_cmd, d_model, prepostprocess_dropout)
self.preprocesser2 = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout)
self.ffn = FFN(d_inner_hid, d_model, relu_dropout)
self.postprocesser2 = PrePostProcessLayer(postprocess_cmd, d_model, prepostprocess_dropout)
def forward(self, enc_input, attn_bias):
attn_output = self.self_attn(self.preprocesser1(enc_input), None, None, attn_bias)
attn_output = self.postprocesser1(attn_output, enc_input)
ffn_output = self.ffn(self.preprocesser2(attn_output))
ffn_output = self.postprocesser2(ffn_output, attn_output)
return ffn_output
class Encoder(nn.Module):
"""
encoder
"""
def __init__(self, n_layer, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd='n', postprocess_cmd='da'):
super(Encoder, self).__init__()
self.encoder_layers = nn.ModuleList()
for i in range(n_layer):
encoderLayer = EncoderLayer(n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd, postprocess_cmd)
self.encoder_layers.add_module('layer_%d' % i, encoderLayer)
self.processer = PrePostProcessLayer(preprocess_cmd, d_model, prepostprocess_dropout)
def forward(self, enc_input, attn_bias):
for encoder_layer in self.encoder_layers:
enc_output = encoder_layer(enc_input, attn_bias)
enc_input = enc_output
enc_output = self.processer(enc_output)
return enc_output
class PrepareEncoder(nn.Module):
def __init__(self, src_vocab_size, src_emb_dim, src_max_len, dropout_rate=0, bos_idx=0, word_emb_param_name=None, pos_enc_param_name=None):
super(PrepareEncoder, self).__init__()
self.src_emb_dim = src_emb_dim
self.src_max_len = src_max_len
self.emb = torch.nn.Embedding(num_embeddings=self.src_max_len, embedding_dim=self.src_emb_dim, sparse=True)
self.dropout_rate = dropout_rate
def forward(self, src_word, src_pos):
src_word_emb = src_word.type(torch.float32)
src_word_emb = self.src_emb_dim ** 0.5 * src_word_emb
src_pos = torch.squeeze(src_pos, dim=-1)
src_pos_enc = self.emb(src_pos.type(torch.int64))
        # Paddle's stop_gradient has no effect on torch tensors; detach instead
        src_pos_enc = src_pos_enc.detach()
enc_input = src_word_emb + src_pos_enc
if self.dropout_rate:
out = F.dropout(enc_input, p=self.dropout_rate)
else:
out = enc_input
return out
class WrapEncoderForFeature(nn.Module):
def __init__(self, src_vocab_size, max_length, n_layer, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd, postprocess_cmd, weight_sharing, bos_idx=0):
super(WrapEncoderForFeature, self).__init__()
self.prepare_encoder = PrepareEncoder(src_vocab_size, d_model, max_length, prepostprocess_dropout, bos_idx=bos_idx, word_emb_param_name='src_word_emb_table')
self.encoder = Encoder(n_layer, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd, postprocess_cmd)
def forward(self, enc_inputs):
conv_features, src_pos, src_slf_attn_bias = enc_inputs
enc_input = self.prepare_encoder(conv_features, src_pos)
enc_output = self.encoder(enc_input, src_slf_attn_bias)
return enc_output
class PVAM(nn.Module):
def __init__(self, in_channels, char_num, max_text_length, num_heads, num_encoder_tus, hidden_dims):
super(PVAM, self).__init__()
self.char_num = char_num
self.max_length = max_text_length
self.num_heads = num_heads
self.num_encoder_TUs = num_encoder_tus
self.hidden_dims = hidden_dims
t = 256
c = 512
self.wrap_encoder_for_feature = WrapEncoderForFeature(src_vocab_size=1, max_length=t, n_layer=self.num_encoder_TUs, n_head=self.num_heads, d_key=int(self.hidden_dims / self.num_heads), d_value=int(self.hidden_dims / self.num_heads), d_model=self.hidden_dims, d_inner_hid=self.hidden_dims, prepostprocess_dropout=0.0, attention_dropout=0.0, relu_dropout=0.0, preprocess_cmd='n', postprocess_cmd='da', weight_sharing=True)
self.flatten0 = Lambda(lambda x: torch.flatten(x, start_dim=0, end_dim=1))
self.fc0 = torch.nn.Linear(in_features=in_channels, out_features=in_channels)
self.emb = torch.nn.Embedding(num_embeddings=self.max_length, embedding_dim=in_channels)
self.flatten1 = Lambda(lambda x: torch.flatten(x, start_dim=0, end_dim=2))
self.fc1 = torch.nn.Linear(in_features=in_channels, out_features=1, bias=False)
def forward(self, inputs, encoder_word_pos, gsrm_word_pos):
b, c, h, w = inputs.shape
conv_features = torch.reshape(inputs, shape=[-1, c, h * w])
conv_features = conv_features.permute(0, 2, 1)
b, t, c = conv_features.shape
enc_inputs = [conv_features, encoder_word_pos, None]
word_features = self.wrap_encoder_for_feature(enc_inputs)
b, t, c = word_features.shape
word_features = self.fc0(word_features)
word_features_ = torch.reshape(word_features, [-1, 1, t, c])
word_features_ = word_features_.repeat([1, self.max_length, 1, 1])
word_pos_feature = self.emb(gsrm_word_pos)
word_pos_feature_ = torch.reshape(word_pos_feature, [-1, self.max_length, 1, c])
word_pos_feature_ = word_pos_feature_.repeat([1, 1, t, 1])
y = word_pos_feature_ + word_features_
y = torch.tanh(y)
attention_weight = self.fc1(y)
attention_weight = torch.reshape(attention_weight, shape=[-1, self.max_length, t])
attention_weight = F.softmax(attention_weight, dim=-1)
pvam_features = torch.matmul(attention_weight, word_features)
return pvam_features
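# Illustrative shape sketch (added): PVAM converts a conv feature map
# [b, c, h, w] into reading-order character features [b, max_len, c] via
# learned positional attention. h * w must not exceed the hard-coded 256
# positions, and the MultiHeadAttention defined earlier is assumed to
# treat keys=None as self-attention and attn_bias=None as "no bias",
# which PVAM itself already relies on.
def _demo_pvam():
    b, c, h, w, max_len = 2, 512, 8, 32, 25
    pvam = PVAM(in_channels=c, char_num=38, max_text_length=max_len,
                num_heads=8, num_encoder_tus=2, hidden_dims=c)
    feats = torch.randn(b, c, h, w)
    enc_pos = torch.arange(h * w).reshape(1, h * w, 1).repeat(b, 1, 1)
    word_pos = torch.arange(max_len).reshape(1, max_len).repeat(b, 1)
    out = pvam(feats, enc_pos, word_pos)
    return out.shape                       # torch.Size([2, 25, 512])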
class PrepareDecoder(nn.Module):
def __init__(self, src_vocab_size, src_emb_dim, src_max_len, dropout_rate=0, bos_idx=0, word_emb_param_name=None, pos_enc_param_name=None):
super(PrepareDecoder, self).__init__()
self.src_emb_dim = src_emb_dim
"""
self.emb0 = Embedding(num_embeddings=src_vocab_size,
embedding_dim=src_emb_dim)
"""
self.emb0 = torch.nn.Embedding(num_embeddings=src_vocab_size, embedding_dim=self.src_emb_dim, padding_idx=bos_idx)
self.emb1 = torch.nn.Embedding(num_embeddings=src_max_len, embedding_dim=self.src_emb_dim)
self.dropout_rate = dropout_rate
def forward(self, src_word, src_pos):
src_word = torch.squeeze(src_word.type(torch.int64), dim=-1)
src_word_emb = self.emb0(src_word)
src_word_emb = self.src_emb_dim ** 0.5 * src_word_emb
src_pos = torch.squeeze(src_pos, dim=-1)
src_pos_enc = self.emb1(src_pos)
        src_pos_enc = src_pos_enc.detach()
enc_input = src_word_emb + src_pos_enc
if self.dropout_rate:
out = F.dropout(enc_input, p=self.dropout_rate)
else:
out = enc_input
return out
class WrapEncoder(nn.Module):
"""
embedder + encoder
"""
def __init__(self, src_vocab_size, max_length, n_layer, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd, postprocess_cmd, weight_sharing, bos_idx=0):
super(WrapEncoder, self).__init__()
self.prepare_decoder = PrepareDecoder(src_vocab_size, d_model, max_length, prepostprocess_dropout, bos_idx=bos_idx)
self.encoder = Encoder(n_layer, n_head, d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, attention_dropout, relu_dropout, preprocess_cmd, postprocess_cmd)
def forward(self, enc_inputs):
src_word, src_pos, src_slf_attn_bias = enc_inputs
enc_input = self.prepare_decoder(src_word, src_pos)
enc_output = self.encoder(enc_input, src_slf_attn_bias)
return enc_output
class GSRM(nn.Module):
def __init__(self, in_channels, char_num, max_text_length, num_heads, num_encoder_tus, num_decoder_tus, hidden_dims):
super(GSRM, self).__init__()
self.char_num = char_num
self.max_length = max_text_length
self.num_heads = num_heads
self.num_encoder_TUs = num_encoder_tus
self.num_decoder_TUs = num_decoder_tus
self.hidden_dims = hidden_dims
self.fc0 = torch.nn.Linear(in_features=in_channels, out_features=self.char_num)
self.wrap_encoder0 = WrapEncoder(src_vocab_size=self.char_num + 1, max_length=self.max_length, n_layer=self.num_decoder_TUs, n_head=self.num_heads, d_key=int(self.hidden_dims / self.num_heads), d_value=int(self.hidden_dims / self.num_heads), d_model=self.hidden_dims, d_inner_hid=self.hidden_dims, prepostprocess_dropout=0.0, attention_dropout=0.0, relu_dropout=0.0, preprocess_cmd='n', postprocess_cmd='da', weight_sharing=True)
self.wrap_encoder1 = WrapEncoder(src_vocab_size=self.char_num + 1, max_length=self.max_length, n_layer=self.num_decoder_TUs, n_head=self.num_heads, d_key=int(self.hidden_dims / self.num_heads), d_value=int(self.hidden_dims / self.num_heads), d_model=self.hidden_dims, d_inner_hid=self.hidden_dims, prepostprocess_dropout=0.0, attention_dropout=0.0, relu_dropout=0.0, preprocess_cmd='n', postprocess_cmd='da', weight_sharing=True)
self.mul = lambda x: torch.matmul(x, self.wrap_encoder0.prepare_decoder.emb0.weight.t())
def forward(self, inputs, gsrm_word_pos, gsrm_slf_attn_bias1, gsrm_slf_attn_bias2):
b, t, c = inputs.shape
pvam_features = torch.reshape(inputs, [-1, c])
word_out = self.fc0(pvam_features)
word_ids = torch.argmax(F.softmax(word_out, dim=-1), dim=1)
word_ids = torch.reshape(word_ids, shape=[-1, t, 1])
"""
This module is achieved through bi-transformers,
ngram_feature1 is the froward one, ngram_fetaure2 is the backward one
"""
pad_idx = self.char_num
word1 = F.pad(word_ids.type(torch.float32), [0, 0, 1, 0, 0, 0], value=1.0 * pad_idx)
word1 = word1.type(torch.int64)
word1 = word1[:, :-1, :]
word2 = word_ids
enc_inputs_1 = [word1, gsrm_word_pos, gsrm_slf_attn_bias1]
enc_inputs_2 = [word2, gsrm_word_pos, gsrm_slf_attn_bias2]
gsrm_feature1 = self.wrap_encoder0(enc_inputs_1)
gsrm_feature2 = self.wrap_encoder1(enc_inputs_2)
gsrm_feature2 = F.pad(gsrm_feature2, [0, 0, 0, 1, 0, 0], value=0.0)
gsrm_feature2 = gsrm_feature2[:, 1:]
gsrm_features = gsrm_feature1 + gsrm_feature2
gsrm_out = self.mul(gsrm_features)
b, t, c = gsrm_out.shape
gsrm_out = torch.reshape(gsrm_out, [-1, c])
return gsrm_features, word_out, gsrm_out
class VSFD(nn.Module):
def __init__(self, in_channels=512, pvam_ch=512, char_num=38):
super(VSFD, self).__init__()
self.char_num = char_num
self.fc0 = torch.nn.Linear(in_features=in_channels * 2, out_features=pvam_ch)
self.fc1 = torch.nn.Linear(in_features=pvam_ch, out_features=self.char_num)
def forward(self, pvam_feature, gsrm_feature):
b, t, c1 = pvam_feature.shape
b, t, c2 = gsrm_feature.shape
combine_feature_ = torch.cat([pvam_feature, gsrm_feature], dim=2)
img_comb_feature_ = torch.reshape(combine_feature_, shape=[-1, c1 + c2])
img_comb_feature_map = self.fc0(img_comb_feature_)
img_comb_feature_map = torch.sigmoid(img_comb_feature_map)
img_comb_feature_map = torch.reshape(img_comb_feature_map, shape=[-1, t, c1])
combine_feature = img_comb_feature_map * pvam_feature + (1.0 - img_comb_feature_map) * gsrm_feature
img_comb_feature = torch.reshape(combine_feature, shape=[-1, c1])
out = self.fc1(img_comb_feature)
return out
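# Illustrative sketch (added): VSFD fuses the visual (PVAM) and semantic
# (GSRM) streams with a learned sigmoid gate, then classifies each of the
# t character slots; note the result is flattened to [b * t, char_num].
def _demo_vsfd():
    b, t, c = 2, 25, 512
    vsfd = VSFD(in_channels=c, pvam_ch=c, char_num=38)
    out = vsfd(torch.randn(b, t, c), torch.randn(b, t, c))
    return out.shape                       # torch.Size([50, 38])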
class SRNHead(nn.Module):
def __init__(self, in_channels, out_channels, max_text_length, num_heads, num_encoder_TUs, num_decoder_TUs, hidden_dims, **kwargs):
super(SRNHead, self).__init__()
self.char_num = out_channels
self.max_length = max_text_length
self.num_heads = num_heads
self.num_encoder_TUs = num_encoder_TUs
self.num_decoder_TUs = num_decoder_TUs
self.hidden_dims = hidden_dims
self.pvam = PVAM(in_channels=in_channels, char_num=self.char_num, max_text_length=self.max_length, num_heads=self.num_heads, num_encoder_tus=self.num_encoder_TUs, hidden_dims=self.hidden_dims)
self.gsrm = GSRM(in_channels=in_channels, char_num=self.char_num, max_text_length=self.max_length, num_heads=self.num_heads, num_encoder_tus=self.num_encoder_TUs, num_decoder_tus=self.num_decoder_TUs, hidden_dims=self.hidden_dims)
self.vsfd = VSFD(in_channels=in_channels, char_num=self.char_num)
self.gsrm.wrap_encoder1.prepare_decoder.emb0 = self.gsrm.wrap_encoder0.prepare_decoder.emb0
def forward(self, inputs, others):
encoder_word_pos = others[0]
gsrm_word_pos = others[1].type(torch.long)
gsrm_slf_attn_bias1 = others[2]
gsrm_slf_attn_bias2 = others[3]
pvam_feature = self.pvam(inputs, encoder_word_pos, gsrm_word_pos)
gsrm_feature, word_out, gsrm_out = self.gsrm(pvam_feature, gsrm_word_pos, gsrm_slf_attn_bias1, gsrm_slf_attn_bias2)
final_out = self.vsfd(pvam_feature, gsrm_feature)
        if not self.training:
            final_out = F.softmax(final_out, dim=1)
        # torch.topk must run in both modes; otherwise decoded_out is
        # undefined during training when building the predicts dict
        _, decoded_out = torch.topk(final_out, k=1)
        predicts = OrderedDict([('predict', final_out), ('pvam_feature', pvam_feature), ('decoded_out', decoded_out), ('word_out', word_out), ('gsrm_out', gsrm_out)])
return predicts
class AttentionGRUCell(nn.Module):
def __init__(self, input_size, hidden_size, num_embeddings, use_gru=False):
super(AttentionGRUCell, self).__init__()
self.i2h = nn.Linear(input_size, hidden_size, bias=False)
self.h2h = nn.Linear(hidden_size, hidden_size)
self.score = nn.Linear(hidden_size, 1, bias=False)
self.rnn = nn.GRUCell(input_size=input_size + num_embeddings, hidden_size=hidden_size)
self.hidden_size = hidden_size
def forward(self, prev_hidden, batch_H, char_onehots):
batch_H_proj = self.i2h(batch_H)
prev_hidden_proj = torch.unsqueeze(self.h2h(prev_hidden), dim=1)
res = torch.add(batch_H_proj, prev_hidden_proj)
res = torch.tanh(res)
e = self.score(res)
alpha = F.softmax(e, dim=1)
alpha = alpha.permute(0, 2, 1)
context = torch.squeeze(torch.matmul(alpha, batch_H), dim=1)
concat_context = torch.cat([context, char_onehots.float()], 1)
cur_hidden = self.rnn(concat_context, prev_hidden)
return (cur_hidden, cur_hidden), alpha
class TableAttentionHead(nn.Module):
def __init__(self, in_channels, hidden_size, loc_type, in_max_len=488, **kwargs):
super(TableAttentionHead, self).__init__()
self.input_size = in_channels[-1]
self.hidden_size = hidden_size
self.elem_num = 30
self.max_text_length = 100
self.max_elem_length = kwargs.get('max_elem_length', 500)
self.max_cell_num = 500
self.structure_attention_cell = AttentionGRUCell(self.input_size, hidden_size, self.elem_num, use_gru=False)
self.structure_generator = nn.Linear(hidden_size, self.elem_num)
self.loc_type = loc_type
self.in_max_len = in_max_len
if self.loc_type == 1:
self.loc_generator = nn.Linear(hidden_size, 4)
else:
if self.in_max_len == 640:
self.loc_fea_trans = nn.Linear(400, self.max_elem_length + 1)
elif self.in_max_len == 800:
self.loc_fea_trans = nn.Linear(625, self.max_elem_length + 1)
else:
self.loc_fea_trans = nn.Linear(256, self.max_elem_length + 1)
self.loc_generator = nn.Linear(self.input_size + hidden_size, 4)
def _char_to_onehot(self, input_char, onehot_dim):
        one_hot = F.one_hot(input_char.type(torch.int64), onehot_dim)
        return one_hot
def forward(self, inputs, targets=None):
fea = inputs[-1]
if len(fea.shape) == 3:
pass
else:
last_shape = int(np.prod(fea.shape[2:]))
fea = torch.reshape(fea, [fea.shape[0], fea.shape[1], last_shape])
fea = fea.permute(0, 2, 1)
batch_size = fea.shape[0]
        hidden = torch.zeros((batch_size, self.hidden_size), device=fea.device)
output_hiddens = []
if self.training and targets is not None:
raise NotImplementedError
else:
            temp_elem = torch.zeros([batch_size], dtype=torch.int32, device=fea.device)
structure_probs = None
loc_preds = None
elem_onehots = None
outputs = None
alpha = None
max_elem_length = torch.as_tensor(self.max_elem_length)
i = 0
while i < max_elem_length + 1:
elem_onehots = self._char_to_onehot(temp_elem, onehot_dim=self.elem_num)
(outputs, hidden), alpha = self.structure_attention_cell(hidden, fea, elem_onehots)
output_hiddens.append(torch.unsqueeze(outputs, dim=1))
structure_probs_step = self.structure_generator(outputs)
temp_elem = structure_probs_step.argmax(dim=1, keepdim=False)
i += 1
output = torch.cat(output_hiddens, dim=1)
structure_probs = self.structure_generator(output)
structure_probs = F.softmax(structure_probs, dim=-1)
if self.loc_type == 1:
loc_preds = self.loc_generator(output)
loc_preds = F.sigmoid(loc_preds)
else:
loc_fea = fea.permute(0, 2, 1)
loc_fea = self.loc_fea_trans(loc_fea)
loc_fea = loc_fea.permute(0, 2, 1)
loc_concat = torch.cat([output, loc_fea], dim=2)
loc_preds = self.loc_generator(loc_concat)
loc_preds = F.sigmoid(loc_preds)
return {'structure_probs': structure_probs, 'loc_preds': loc_preds}
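# Illustrative sketch (added): at inference the head greedily unrolls the
# attention-GRU over the flattened feature map, emitting one structure
# token plus (with loc_type=1) four box coordinates per step;
# max_elem_length is shrunk here so the loop stays short.
def _demo_table_attention_head():
    b, c, h, w = 1, 96, 4, 4
    head = TableAttentionHead(in_channels=[c], hidden_size=64, loc_type=1,
                              max_elem_length=8)
    head.eval()
    out = head([torch.randn(b, c, h, w)])
    # structure_probs: [1, 9, 30], loc_preds: [1, 9, 4]
    return out['structure_probs'].shape, out['loc_preds'].shape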
class AttentionLSTMCell(nn.Module):
def __init__(self, input_size, hidden_size, num_embeddings, use_gru=False):
super(AttentionLSTMCell, self).__init__()
self.i2h = nn.Linear(input_size, hidden_size, bias=False)
self.h2h = nn.Linear(hidden_size, hidden_size)
self.score = nn.Linear(hidden_size, 1, bias=False)
if not use_gru:
self.rnn = nn.LSTMCell(input_size=input_size + num_embeddings, hidden_size=hidden_size)
else:
self.rnn = nn.GRUCell(input_size=input_size + num_embeddings, hidden_size=hidden_size)
self.hidden_size = hidden_size
def forward(self, prev_hidden, batch_H, char_onehots):
batch_H_proj = self.i2h(batch_H)
prev_hidden_proj = torch.unsqueeze(self.h2h(prev_hidden[0]), dim=1)
res = torch.add(batch_H_proj, prev_hidden_proj)
res = torch.tanh(res)
e = self.score(res)
alpha = F.softmax(e, dim=1)
alpha = alpha.permute(0, 2, 1)
context = torch.squeeze(torch.matmul(alpha, batch_H), dim=1)
concat_context = torch.cat([context, char_onehots.float()], 1)
cur_hidden = self.rnn(concat_context, prev_hidden)
        # torch's LSTMCell returns (h, c); callers expect (output, (h, c)) as
        # in the original Paddle cell, so expose h as the output slot
        return (cur_hidden[0], cur_hidden), alpha
class AttentionLSTM(nn.Module):
def __init__(self, in_channels, out_channels, hidden_size, **kwargs):
super(AttentionLSTM, self).__init__()
self.input_size = in_channels
self.hidden_size = hidden_size
self.num_classes = out_channels
self.attention_cell = AttentionLSTMCell(in_channels, hidden_size, out_channels, use_gru=False)
self.generator = nn.Linear(hidden_size, out_channels)
def _char_to_onehot(self, input_char, onehot_dim):
        # cast to int64: F.one_hot only accepts LongTensor indices
        one_hot = F.one_hot(input_char.type(torch.int64), onehot_dim)
        return one_hot
def forward(self, inputs, targets=None, batch_max_length=25):
batch_size = inputs.shape[0]
num_steps = batch_max_length
        hidden = (torch.zeros((batch_size, self.hidden_size), device=inputs.device), torch.zeros((batch_size, self.hidden_size), device=inputs.device))
output_hiddens = []
if targets is not None:
for i in range(num_steps):
char_onehots = self._char_to_onehot(targets[:, i], onehot_dim=self.num_classes)
hidden, alpha = self.attention_cell(hidden, inputs, char_onehots)
hidden = hidden[1][0], hidden[1][1]
output_hiddens.append(torch.unsqueeze(hidden[0], dim=1))
output = torch.cat(output_hiddens, dim=1)
probs = self.generator(output)
else:
            targets = torch.zeros([batch_size], dtype=torch.int64, device=inputs.device)
probs = None
for i in range(num_steps):
char_onehots = self._char_to_onehot(targets, onehot_dim=self.num_classes)
hidden, alpha = self.attention_cell(hidden, inputs, char_onehots)
probs_step = self.generator(hidden[0])
hidden = hidden[1][0], hidden[1][1]
if probs is None:
probs = torch.unsqueeze(probs_step, dim=1)
else:
probs = torch.cat([probs, torch.unsqueeze(probs_step, dim=1)], dim=1)
next_input = probs_step.argmax(dim=1)
targets = next_input
return probs
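# Illustrative sketch (added): with targets the cell is teacher-forced on
# ground-truth characters; without targets it feeds its own argmax back
# in (greedy decoding) for batch_max_length steps.
def _demo_attention_lstm():
    b, T, c, n_cls = 2, 16, 96, 38
    dec = AttentionLSTM(in_channels=c, out_channels=n_cls, hidden_size=64)
    feats = torch.randn(b, T, c)
    probs = dec(feats, targets=None, batch_max_length=5)
    return probs.shape                     # torch.Size([2, 5, 38])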
class TableFPN(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(TableFPN, self).__init__()
self.out_channels = 512
self.in2_conv = nn.Conv2d(in_channels=in_channels[0], out_channels=self.out_channels, kernel_size=1, bias=False)
self.in3_conv = nn.Conv2d(in_channels=in_channels[1], out_channels=self.out_channels, kernel_size=1, stride=1, bias=False)
self.in4_conv = nn.Conv2d(in_channels=in_channels[2], out_channels=self.out_channels, kernel_size=1, bias=False)
self.in5_conv = nn.Conv2d(in_channels=in_channels[3], out_channels=self.out_channels, kernel_size=1, bias=False)
self.p5_conv = nn.Conv2d(in_channels=self.out_channels, out_channels=self.out_channels // 4, kernel_size=3, padding=1, bias=False)
self.p4_conv = nn.Conv2d(in_channels=self.out_channels, out_channels=self.out_channels // 4, kernel_size=3, padding=1, bias=False)
self.p3_conv = nn.Conv2d(in_channels=self.out_channels, out_channels=self.out_channels // 4, kernel_size=3, padding=1, bias=False)
self.p2_conv = nn.Conv2d(in_channels=self.out_channels, out_channels=self.out_channels // 4, kernel_size=3, padding=1, bias=False)
self.fuse_conv = nn.Conv2d(in_channels=self.out_channels * 4, out_channels=512, kernel_size=3, padding=1, bias=False)
def forward(self, x):
c2, c3, c4, c5 = x
in5 = self.in5_conv(c5)
in4 = self.in4_conv(c4)
in3 = self.in3_conv(c3)
in2 = self.in2_conv(c2)
out4 = in4 + F.interpolate(in5, size=in4.shape[2:4], mode='nearest')
out3 = in3 + F.interpolate(out4, size=in3.shape[2:4], mode='nearest')
out2 = in2 + F.interpolate(out3, size=in2.shape[2:4], mode='nearest')
p4 = F.interpolate(out4, size=in5.shape[2:4], mode='nearest')
p3 = F.interpolate(out3, size=in5.shape[2:4], mode='nearest')
p2 = F.interpolate(out2, size=in5.shape[2:4], mode='nearest')
fuse = torch.cat([in5, p4, p3, p2], dim=1)
fuse_conv = self.fuse_conv(fuse) * 0.005
return [c5 + fuse_conv]
def hard_sigmoid(x, slope=0.1666667, offset=0.5):
return torch.clamp(slope * x + offset, 0.0, 1.0)
class SEModule(nn.Module):
def __init__(self, in_channels, reduction=4, name=''):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels // reduction, kernel_size=1, stride=1, padding=0, bias=True)
self.conv2 = nn.Conv2d(in_channels=in_channels // reduction, out_channels=in_channels, kernel_size=1, stride=1, padding=0, bias=True)
def forward(self, inputs):
outputs = self.avg_pool(inputs)
outputs = self.conv1(outputs)
outputs = F.relu(outputs)
outputs = self.conv2(outputs)
outputs = hard_sigmoid(outputs, slope=0.2, offset=0.5)
return inputs * outputs
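# Illustrative sketch (added): hard_sigmoid is the piecewise-linear
# surrogate clamp(slope * x + offset, 0, 1); with slope=0.2, offset=0.5
# (as SEModule uses) inputs in [-2.5, 2.5] map linearly onto [0, 1], and
# the SE block only rescales channels, preserving the input shape.
def _demo_se_module():
    se = SEModule(in_channels=16, reduction=4)
    x = torch.randn(2, 16, 8, 8)
    return se(x).shape                     # torch.Size([2, 16, 8, 8])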
class ResidualUnit(nn.Module):
def __init__(self, in_channels, mid_channels, out_channels, kernel_size, stride, use_se, act=None, name=''):
super(ResidualUnit, self).__init__()
self.if_shortcut = stride == 1 and in_channels == out_channels
self.if_se = use_se
self.expand_conv = ConvBNLayer(in_channels=in_channels, out_channels=mid_channels, kernel_size=1, stride=1, padding=0, if_act=True, act=act, name=name + '_expand')
self.bottleneck_conv = ConvBNLayer(in_channels=mid_channels, out_channels=mid_channels, kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2), groups=mid_channels, if_act=True, act=act, name=name + '_depthwise')
if self.if_se:
self.mid_se = SEModule(mid_channels, name=name + '_se')
self.linear_conv = ConvBNLayer(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, if_act=False, act=None, name=name + '_linear')
def forward(self, inputs):
x = self.expand_conv(inputs)
x = self.bottleneck_conv(x)
if self.if_se:
x = self.mid_se(x)
x = self.linear_conv(x)
if self.if_shortcut:
x = torch.add(inputs, x)
return x
def make_divisible(v, divisor=8, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
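# Illustrative sketch (added): make_divisible rounds a (possibly scaled)
# channel count to the nearest multiple of divisor (default 8), bumping
# up whenever rounding would lose more than 10% of the requested value.
def _demo_make_divisible():
    assert make_divisible(16 * 0.5) == 8    # 8.0  -> 8
    assert make_divisible(40 * 0.35) == 16  # 14.0 -> 16
    assert make_divisible(22) == 24         # 22   -> 24
    return True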
class MobileNetV3(nn.Module):
def __init__(self, in_channels=3, model_name='large', scale=0.5, disable_se=False, **kwargs):
"""
the MobilenetV3 backbone network for detection module.
Args:
params(dict): the super parameters for build network
"""
super(MobileNetV3, self).__init__()
self.disable_se = disable_se
if model_name == 'large':
cfg = [[3, 16, 16, False, 'relu', 1], [3, 64, 24, False, 'relu', 2], [3, 72, 24, False, 'relu', 1], [5, 72, 40, True, 'relu', 2], [5, 120, 40, True, 'relu', 1], [5, 120, 40, True, 'relu', 1], [3, 240, 80, False, 'hardswish', 2], [3, 200, 80, False, 'hardswish', 1], [3, 184, 80, False, 'hardswish', 1], [3, 184, 80, False, 'hardswish', 1], [3, 480, 112, True, 'hardswish', 1], [3, 672, 112, True, 'hardswish', 1], [5, 672, 160, True, 'hardswish', 2], [5, 960, 160, True, 'hardswish', 1], [5, 960, 160, True, 'hardswish', 1]]
cls_ch_squeeze = 960
elif model_name == 'small':
cfg = [[3, 16, 16, True, 'relu', 2], [3, 72, 24, False, 'relu', 2], [3, 88, 24, False, 'relu', 1], [5, 96, 40, True, 'hardswish', 2], [5, 240, 40, True, 'hardswish', 1], [5, 240, 40, True, 'hardswish', 1], [5, 120, 48, True, 'hardswish', 1], [5, 144, 48, True, 'hardswish', 1], [5, 288, 96, True, 'hardswish', 2], [5, 576, 96, True, 'hardswish', 1], [5, 576, 96, True, 'hardswish', 1]]
cls_ch_squeeze = 576
else:
raise NotImplementedError('mode[' + model_name + '_model] is not implemented!')
supported_scale = [0.35, 0.5, 0.75, 1.0, 1.25]
assert scale in supported_scale, 'supported scale are {} but input scale is {}'.format(supported_scale, scale)
inplanes = 16
self.conv = ConvBNLayer(in_channels=in_channels, out_channels=make_divisible(inplanes * scale), kernel_size=3, stride=2, padding=1, groups=1, if_act=True, act='hardswish', name='conv1')
self.stages = nn.ModuleList()
self.out_channels = []
block_list = []
i = 0
inplanes = make_divisible(inplanes * scale)
for k, exp, c, se, nl, s in cfg:
se = se and not self.disable_se
start_idx = 2 if model_name == 'large' else 0
if s == 2 and i > start_idx:
self.out_channels.append(inplanes)
self.stages.append(nn.Sequential(*block_list))
block_list = []
block_list.append(ResidualUnit(in_channels=inplanes, mid_channels=make_divisible(scale * exp), out_channels=make_divisible(scale * c), kernel_size=k, stride=s, use_se=se, act=nl, name='conv' + str(i + 2)))
inplanes = make_divisible(scale * c)
i += 1
block_list.append(ConvBNLayer(in_channels=inplanes, out_channels=make_divisible(scale * cls_ch_squeeze), kernel_size=1, stride=1, padding=0, groups=1, if_act=True, act='hardswish', name='conv_last'))
self.stages.append(nn.Sequential(*block_list))
self.out_channels.append(make_divisible(scale * cls_ch_squeeze))
def forward(self, x):
x = self.conv(x)
out_list = []
for stage in self.stages:
x = stage(x)
out_list.append(x)
return out_list
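# Illustrative sketch (added, pure python): the 'large' config is split
# into stages wherever a block has stride 2 after index 2 (before rows 3,
# 6 and 12), so the backbone yields four feature maps at strides
# 4/8/16/32 for FPN-style necks.
def _demo_mobilenet_v3_stage_split():
    strides = [1, 2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1]  # s column of the 'large' cfg
    return [i for i, s in enumerate(strides) if s == 2 and i > 2]  # [3, 6, 12]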
DISABLE_SE = True
INPUT_SIZE = 1, 3, 488, 488
IN_CHANNELS = INPUT_SIZE[1]
MODEL_NAME = 'large'
SCALE = 1.0
class PTNet(torch.nn.Module):
def __init__(self, **kwargs):
super(PTNet, self).__init__()
        # the MobileNetV3 defined above in this file (originally imported
        # as pt_table_mobilenet_v3.MobileNetV3)
        self.backbone = MobileNetV3(in_channels=IN_CHANNELS, model_name=MODEL_NAME, scale=SCALE, disable_se=DISABLE_SE)
head_in_channels = self.backbone.out_channels
def forward(self, x, **kwargs):
x = self.backbone(x)
return x
class DeformableConvV2(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, weight_attr=None, bias_attr=None, lr_scale=1, regularizer=None, skip_quant=False, dcn_bias_regularizer=None, dcn_bias_lr_scale=2.0):
super(DeformableConvV2, self).__init__()
self.offset_channel = 2 * kernel_size ** 2 * groups
self.mask_channel = kernel_size ** 2 * groups
if bias_attr:
dcn_bias_attr = True
else:
dcn_bias_attr = False
self.conv_dcn = torchvision.ops.DeformConv2d(in_channels, out_channels, kernel_size, stride=stride, padding=(kernel_size - 1) // 2 * dilation, dilation=dilation, groups=groups // 2 if groups > 1 else 1, bias=dcn_bias_attr)
self.conv_offset = nn.Conv2d(in_channels, groups * 3 * kernel_size ** 2, kernel_size, stride=stride, padding=(kernel_size - 1) // 2, bias=True)
if skip_quant:
self.conv_offset.skip_quant = True
def forward(self, x):
offset_mask = self.conv_offset(x)
offset, mask = torch.split(offset_mask, split_size_or_sections=[self.offset_channel, self.mask_channel], dim=1)
mask = torch.sigmoid(mask)
y = self.conv_dcn(x, offset, mask=mask)
return y
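# Illustrative sketch (added): conv_offset predicts 3 * k * k maps per
# group, split into 2 * k * k (x, y) offsets and k * k modulation masks;
# the masks are squashed with sigmoid before torchvision's DeformConv2d
# consumes them, so spatial dims are preserved for stride 1.
def _demo_deformable_conv_v2():
    dcn = DeformableConvV2(in_channels=16, out_channels=32, kernel_size=3, stride=1)
    y = dcn(torch.randn(1, 16, 20, 20))
    return y.shape                         # torch.Size([1, 32, 20, 20])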
class DropBlock(nn.Module):
def __init__(self, block_size, keep_prob, name, data_format='NCHW'):
"""
DropBlock layer, see https://arxiv.org/abs/1810.12890
Args:
block_size (int): block size
keep_prob (int): keep probability
name (str): layer name
data_format (str): data format, NCHW or NHWC
"""
super(DropBlock, self).__init__()
self.block_size = block_size
self.keep_prob = keep_prob
self.name = name
self.data_format = data_format
    def forward(self, x):
        if not self.training or self.keep_prob == 1:
            return x
        else:
            gamma = (1.0 - self.keep_prob) / self.block_size ** 2
            if self.data_format == 'NCHW':
                shape = x.shape[2:]
            else:
                shape = x.shape[1:3]
            for s in shape:
                gamma *= s / (s - self.block_size + 1)
            # sample seed points on the same device/dtype as the input;
            # max_pool2d expands each seed to a block_size x block_size region
            # (F.max_pool2d has no data_format argument, so NCHW is assumed)
            matrix = (torch.rand(x.shape, dtype=x.dtype, device=x.device) < gamma).to(x.dtype)
            mask_inv = F.max_pool2d(matrix, self.block_size, stride=1, padding=self.block_size // 2)
            mask = 1.0 - mask_inv
            y = x * mask * (mask.numel() / mask.sum())
            return y
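# Illustrative sketch (added): during training whole block_size x
# block_size regions are zeroed and the survivors rescaled by
# numel / sum so the expected activation magnitude is preserved; in eval
# mode the layer is the identity.
def _demo_drop_block():
    db = DropBlock(block_size=3, keep_prob=0.9, name='demo')
    db.train()
    y = db(torch.randn(2, 8, 16, 16))      # randomly blocked + rescaled
    db.eval()
    x = torch.randn(2, 8, 16, 16)
    assert torch.equal(db(x), x)           # identity at inference
    return y.shape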
class PPYOLODetBlockCSP(nn.Module):
def __init__(self, cfg, ch_in, ch_out, act, norm_type, name, data_format='NCHW'):
"""
PPYOLODetBlockCSP layer
Args:
cfg (list): layer configs for this block
ch_in (int): input channel
ch_out (int): output channel
act (str): default mish
name (str): block name
data_format (str): data format, NCHW or NHWC
"""
super(PPYOLODetBlockCSP, self).__init__()
self.data_format = data_format
self.conv1 = ConvBNLayer(ch_in, ch_out, 1, padding=0, act=act, norm_type=norm_type, name=name + '_left', data_format=data_format)
self.conv2 = ConvBNLayer(ch_in, ch_out, 1, padding=0, act=act, norm_type=norm_type, name=name + '_right', data_format=data_format)
self.conv3 = ConvBNLayer(ch_out * 2, ch_out * 2, 1, padding=0, act=act, norm_type=norm_type, name=name, data_format=data_format)
self.conv_module = nn.Sequential()
for idx, (layer_name, layer, args, kwargs) in enumerate(cfg):
name = name.replace('.', '_')
layer_name = layer_name.replace('.', '_')
kwargs.update(name=name + '_' + layer_name, data_format=data_format)
_layer = layer(*args, **kwargs)
self.conv_module.add_module(layer_name, _layer)
def forward(self, inputs):
conv_left = self.conv1(inputs)
conv_right = self.conv2(inputs)
conv_left = self.conv_module(conv_left)
if self.data_format == 'NCHW':
conv = torch.cat([conv_left, conv_right], dim=1)
else:
conv = torch.cat([conv_left, conv_right], dim=-1)
conv = self.conv3(conv)
return conv, conv
class SPP(nn.Module):
def __init__(self, ch_in, ch_out, k, pool_size, norm_type, freeze_norm=False, name='', act='leaky', data_format='NCHW'):
"""
SPP layer, which consist of four pooling layer follwed by conv layer
Args:
ch_in (int): input channel of conv layer
ch_out (int): output channel of conv layer
k (int): kernel size of conv layer
norm_type (str): batch norm type
freeze_norm (bool): whether to freeze norm, default False
name (str): layer name
act (str): activation function
data_format (str): data format, NCHW or NHWC
"""
super(SPP, self).__init__()
self.pool = nn.ModuleList()
self.data_format = data_format
for i, size in enumerate(pool_size):
self.pool.add_module('{}_pool_{}'.format(name, i), nn.MaxPool2d(kernel_size=size, stride=1, padding=size // 2, ceil_mode=False))
self.conv = ConvBNLayer(ch_in, ch_out, k, padding=k // 2, norm_type=norm_type, freeze_norm=freeze_norm, name=name, act=act, data_format=data_format)
def forward(self, x):
outs = [x]
for i, pool in enumerate(self.pool):
outs.append(pool(x))
if self.data_format == 'NCHW':
y = torch.cat(outs, dim=1)
else:
y = torch.cat(outs, dim=-1)
y = self.conv(y)
return y
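# Illustrative sketch of the SPP channel math (added, pure torch): each
# pool uses stride 1 and padding size // 2, so spatial dims are kept and
# the concat has ch * (1 + len(pool_size)) channels -- which is why
# PPYOLOPAN constructs SPP with ch_in = channel * 4.
def _demo_spp_concat():
    x = torch.randn(1, 512, 13, 13)
    outs = [x] + [F.max_pool2d(x, k, stride=1, padding=k // 2) for k in (5, 9, 13)]
    return torch.cat(outs, dim=1).shape    # torch.Size([1, 2048, 13, 13])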
class PPYOLOPAN(nn.Module):
__shared__ = ['norm_type', 'data_format']
def __init__(self, in_channels=[512, 1024, 2048], norm_type='bn', data_format='NCHW', act='mish', conv_block_num=3, drop_block=False, block_size=3, keep_prob=1.0, spp=False):
"""
PPYOLOPAN layer with SPP, DropBlock and CSP connection.
Args:
in_channels (list): input channels for fpn
norm_type (str): batch norm type, default bn
data_format (str): data format, NCHW or NHWC
act (str): activation function, default mish
conv_block_num (int): conv block num of each pan block
drop_block (bool): whether use DropBlock or not
block_size (int): block size of DropBlock
keep_prob (float): keep probability of DropBlock
spp (bool): whether use spp or not
"""
super(PPYOLOPAN, self).__init__()
assert len(in_channels) > 0, 'in_channels length should > 0'
self.in_channels = in_channels
self.num_blocks = len(in_channels)
self.drop_block = drop_block
self.block_size = block_size
self.keep_prob = keep_prob
self.spp = spp
self.conv_block_num = conv_block_num
self.data_format = data_format
if self.drop_block:
dropblock_cfg = [['dropblock', DropBlock, [self.block_size, self.keep_prob], dict()]]
else:
dropblock_cfg = []
self.fpn_blocks = nn.ModuleList()
self.fpn_routes = nn.ModuleDict()
self.fpn_routes_names = []
fpn_channels = []
for i, ch_in in enumerate(self.in_channels[::-1]):
if i > 0:
ch_in += 512 // 2 ** (i - 1)
channel = 512 // 2 ** i
base_cfg = []
for j in range(self.conv_block_num):
base_cfg += [['{}_0'.format(j), ConvBNLayer, [channel, channel, 1], dict(padding=0, act=act, norm_type=norm_type)], ['{}_1'.format(j), ConvBNLayer, [channel, channel, 3], dict(padding=1, act=act, norm_type=norm_type)]]
if i == 0 and self.spp:
base_cfg[3] = ['spp', SPP, [channel * 4, channel, 1], dict(pool_size=[5, 9, 13], act=act, norm_type=norm_type)]
cfg = base_cfg[:4] + dropblock_cfg + base_cfg[4:]
name = 'fpn_{}'.format(i)
self.fpn_blocks.add_module(name, PPYOLODetBlockCSP(cfg, ch_in, channel, act, norm_type, name, data_format))
fpn_channels.append(channel * 2)
if i < self.num_blocks - 1:
name = 'fpn_transition_{}'.format(i)
self.fpn_routes.add_module(name, ConvBNLayer(ch_in=channel * 2, ch_out=channel, filter_size=1, stride=1, padding=0, act=act, norm_type=norm_type, data_format=data_format, name=name))
self.fpn_routes_names.append(name)
self.pan_blocks = nn.ModuleDict()
self.pan_blocks_names = []
self.pan_routes = nn.ModuleDict()
self.pan_routes_names = []
self._out_channels = [512 // 2 ** (self.num_blocks - 2)]
for i in reversed(range(self.num_blocks - 1)):
name = 'pan_transition_{}'.format(i)
self.pan_routes.add_module(name, ConvBNLayer(ch_in=fpn_channels[i + 1], ch_out=fpn_channels[i + 1], filter_size=3, stride=2, padding=1, act=act, norm_type=norm_type, data_format=data_format, name=name))
route_name = [name] + self.pan_routes_names
self.pan_routes_names = route_name
base_cfg = []
ch_in = fpn_channels[i] + fpn_channels[i + 1]
channel = 512 // 2 ** i
for j in range(self.conv_block_num):
base_cfg += [['{}_0'.format(j), ConvBNLayer, [channel, channel, 1], dict(padding=0, act=act, norm_type=norm_type)], ['{}_1'.format(j), ConvBNLayer, [channel, channel, 3], dict(padding=1, act=act, norm_type=norm_type)]]
cfg = base_cfg[:4] + dropblock_cfg + base_cfg[4:]
name = 'pan_{}'.format(i)
self.pan_blocks.add_module(name, PPYOLODetBlockCSP(cfg, ch_in, channel, act, norm_type, name, data_format))
pan_block_name = [name] + self.pan_blocks_names
self.pan_blocks_names = pan_block_name
self._out_channels.append(channel * 2)
self._out_channels = self._out_channels[::-1]
def forward(self, blocks, for_mot=False):
assert len(blocks) == self.num_blocks
blocks = blocks[::-1]
fpn_feats = []
if for_mot:
emb_feats = []
for i, (block, fpn_block) in enumerate(zip(blocks, self.fpn_blocks)):
if i > 0:
if self.data_format == 'NCHW':
block = torch.cat([route, block], dim=1)
else:
block = torch.cat([route, block], dim=-1)
route, tip = fpn_block(block)
fpn_feats.append(tip)
if for_mot:
emb_feats.append(route)
if i < self.num_blocks - 1:
route = self.fpn_routes[self.fpn_routes_names[i]](route)
route = F.interpolate(route, scale_factor=2.0)
pan_feats = [fpn_feats[-1]]
route = fpn_feats[self.num_blocks - 1]
for i, pan_route_name, pan_block_name in zip(reversed(range(self.num_blocks - 1)), reversed(self.pan_routes_names), reversed(self.pan_blocks_names)):
block = fpn_feats[i]
route = self.pan_routes[pan_route_name](route)
if self.data_format == 'NCHW':
block = torch.cat([route, block], dim=1)
else:
block = torch.cat([route, block], dim=-1)
route, tip = self.pan_blocks[pan_block_name](block)
pan_feats.append(tip)
if for_mot:
return {'yolo_feats': pan_feats[::-1], 'emb_feats': emb_feats}
else:
return pan_feats[::-1]
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape]}
class ResNet(nn.Module):
def __init__(self, in_channels=3, layers=50, **kwargs):
super(ResNet, self).__init__()
self.layers = layers
supported_layers = [18, 34, 50, 101, 152, 200]
assert layers in supported_layers, 'supported layers are {} but input layer is {}'.format(supported_layers, layers)
if layers == 18:
depth = [2, 2, 2, 2]
elif layers == 34 or layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
elif layers == 200:
depth = [3, 12, 48, 3]
num_channels = [64, 256, 512, 1024] if layers >= 50 else [64, 64, 128, 256]
num_filters = [64, 128, 256, 512]
self.conv1_1 = ConvBNLayer(in_channels=in_channels, out_channels=32, kernel_size=3, stride=2, act='relu', name='conv1_1')
self.conv1_2 = ConvBNLayer(in_channels=32, out_channels=32, kernel_size=3, stride=1, act='relu', name='conv1_2')
self.conv1_3 = ConvBNLayer(in_channels=32, out_channels=64, kernel_size=3, stride=1, act='relu', name='conv1_3')
self.pool2d_max = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.stages = nn.ModuleList()
self.out_channels = []
if layers >= 50:
for block in range(len(depth)):
block_list = nn.Sequential()
shortcut = False
for i in range(depth[block]):
if layers in [101, 152] and block == 2:
if i == 0:
conv_name = 'res' + str(block + 2) + 'a'
else:
conv_name = 'res' + str(block + 2) + 'b' + str(i)
else:
conv_name = 'res' + str(block + 2) + chr(97 + i)
bottleneck_block = BottleneckBlock(in_channels=num_channels[block] if i == 0 else num_filters[block] * 4, out_channels=num_filters[block], stride=2 if i == 0 and block != 0 else 1, shortcut=shortcut, if_first=block == i == 0, name=conv_name)
shortcut = True
block_list.add_module('bb_%d_%d' % (block, i), bottleneck_block)
self.out_channels.append(num_filters[block] * 4)
self.stages.append(block_list)
else:
for block in range(len(depth)):
block_list = nn.Sequential()
shortcut = False
for i in range(depth[block]):
conv_name = 'res' + str(block + 2) + chr(97 + i)
basic_block = BasicBlock(in_channels=num_channels[block] if i == 0 else num_filters[block], out_channels=num_filters[block], stride=2 if i == 0 and block != 0 else 1, shortcut=shortcut, if_first=block == i == 0, name=conv_name)
shortcut = True
block_list.add_module('bb_%d_%d' % (block, i), basic_block)
self.out_channels.append(num_filters[block])
self.stages.append(block_list)
def forward(self, inputs):
y = self.conv1_1(inputs)
y = self.conv1_2(y)
y = self.conv1_3(y)
y = self.pool2d_max(y)
out = []
for block in self.stages:
y = block(y)
out.append(y)
return out
def _de_sigmoid(x, eps=1e-07):
x = torch.clip(x, eps, 1.0 / eps)
x = torch.clip(1.0 / x - 1.0, eps, 1.0 / eps)
x = -torch.log(x)
return x
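# Illustrative sketch (added): _de_sigmoid is the clipped logit, i.e. the
# inverse of sigmoid; the IoU-aware head uses it to fold the fused
# objectness probability back into raw logits.
def _demo_de_sigmoid():
    p = torch.tensor([0.1, 0.5, 0.9])
    logits = _de_sigmoid(p)
    assert torch.allclose(torch.sigmoid(logits), p, atol=1e-05)
    return logits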
class YOLOv3Head(nn.Module):
__shared__ = ['num_classes', 'data_format']
__inject__ = ['loss']
def __init__(self, in_channels=[1024, 512, 256], anchors=[[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]], anchor_masks=[[6, 7, 8], [3, 4, 5], [0, 1, 2]], num_classes=80, loss='YOLOv3Loss', iou_aware=False, iou_aware_factor=0.4, data_format='NCHW'):
"""
Head for YOLOv3 network
Args:
num_classes (int): number of foreground classes
anchors (list): anchors
anchor_masks (list): anchor masks
loss (object): YOLOv3Loss instance
iou_aware (bool): whether to use iou_aware
iou_aware_factor (float): iou aware factor
data_format (str): data format, NCHW or NHWC
"""
super(YOLOv3Head, self).__init__()
assert len(in_channels) > 0, 'in_channels length should > 0'
self.in_channels = in_channels
self.num_classes = num_classes
self.loss = loss
self.iou_aware = iou_aware
self.iou_aware_factor = iou_aware_factor
self.parse_anchor(anchors, anchor_masks)
self.num_outputs = len(self.anchors)
self.data_format = data_format
self.yolo_outputs = nn.ModuleList()
self.yolo_outputs_names = []
for i in range(len(self.anchors)):
if self.iou_aware:
num_filters = len(self.anchors[i]) * (self.num_classes + 6)
else:
num_filters = len(self.anchors[i]) * (self.num_classes + 5)
name = 'yolo_output_{}'.format(i)
conv = nn.Conv2d(in_channels=self.in_channels[i], out_channels=num_filters, kernel_size=1, stride=1, padding=0)
conv.skip_quant = True
self.yolo_outputs.add_module(name, conv)
self.yolo_outputs_names.append(name)
def parse_anchor(self, anchors, anchor_masks):
self.anchors = [[anchors[i] for i in mask] for mask in anchor_masks]
self.mask_anchors = []
anchor_num = len(anchors)
for masks in anchor_masks:
self.mask_anchors.append([])
for mask in masks:
assert mask < anchor_num, 'anchor mask index overflow'
self.mask_anchors[-1].extend(anchors[mask])
def forward(self, feats, targets=None):
assert len(feats) == len(self.anchors)
yolo_outputs = []
for i, (feat, yolo_output) in enumerate(zip(feats, self.yolo_outputs)):
yolo_output = yolo_output(feat)
if self.data_format == 'NHWC':
yolo_output = yolo_output.permute(0, 3, 1, 2)
yolo_outputs.append(yolo_output)
if self.training:
return self.loss(yolo_outputs, targets, self.anchors)
elif self.iou_aware:
y = []
for i, out in enumerate(yolo_outputs):
na = len(self.anchors[i])
ioup, x = out[:, 0:na, :, :], out[:, na:, :, :]
b, c, h, w = x.shape
no = c // na
x = x.reshape((b, na, no, h * w))
ioup = ioup.reshape((b, na, 1, h * w))
obj = x[:, :, 4:5, :]
ioup = F.sigmoid(ioup)
obj = F.sigmoid(obj)
obj_t = obj ** (1 - self.iou_aware_factor) * ioup ** self.iou_aware_factor
obj_t = _de_sigmoid(obj_t)
loc_t = x[:, :, :4, :]
cls_t = x[:, :, 5:, :]
y_t = torch.cat([loc_t, obj_t, cls_t], dim=2)
y_t = y_t.reshape((b, c, h, w))
y.append(y_t)
return y
else:
return yolo_outputs
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape]}
class PPYOLOv2Base(nn.Module):
def __init__(self, **kwargs):
super(PPYOLOv2Base, self).__init__()
self._init_params(**kwargs)
self._init_network()
self._initialize_weights()
def _init_params(self, **kwargs):
self.num_classes = kwargs.get('INIT_num_classes', 80)
self.arch = kwargs.get('INIT_arch', 50)
self.scale_x_y = kwargs.get('INIT_scale_x_y', 1.05)
self.downsample_ratio = kwargs.get('INIT_downsample_ratio', 32)
self.anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]]
self.anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
def _init_network(self):
if self.arch == 50:
self._init_network_resnet50()
elif self.arch == 101:
self._init_network_resnet101()
else:
raise ValueError('INIT_arch must be [50, 101], but got {}'.format(self.arch))
def _init_network_resnet50(self):
self.backbone = ResNet(depth=50, ch_in=64, variant='d', lr_mult_list=[1.0, 1.0, 1.0, 1.0], groups=1, base_width=64, norm_type='bn', norm_decay=0, freeze_norm=False, freeze_at=-1, return_idx=[1, 2, 3], dcn_v2_stages=[3], num_stages=4, std_senet=False)
self.neck = PPYOLOPAN(in_channels=[512, 1024, 2048], norm_type='bn', data_format='NCHW', act='mish', conv_block_num=3, drop_block=True, block_size=3, keep_prob=1.0, spp=True)
self.head = YOLOv3Head(in_channels=[1024, 512, 256], anchors=[[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]], anchor_masks=[[6, 7, 8], [3, 4, 5], [0, 1, 2]], num_classes=self.num_classes, loss='YOLOv3Loss', iou_aware=True, iou_aware_factor=0.5, data_format='NCHW')
def _init_network_resnet101(self):
self.backbone = ResNet(depth=101, ch_in=64, variant='d', lr_mult_list=[1.0, 1.0, 1.0, 1.0], groups=1, base_width=64, norm_type='bn', norm_decay=0, freeze_norm=False, freeze_at=-1, return_idx=[1, 2, 3], dcn_v2_stages=[3], num_stages=4, std_senet=False)
self.neck = PPYOLOPAN(in_channels=[512, 1024, 2048], norm_type='bn', data_format='NCHW', act='mish', conv_block_num=3, drop_block=False, block_size=3, keep_prob=1.0, spp=True)
self.head = YOLOv3Head(in_channels=[1024, 512, 256], anchors=[[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]], anchor_masks=[[6, 7, 8], [3, 4, 5], [0, 1, 2]], num_classes=self.num_classes, loss='YOLOv3Loss', iou_aware=True, iou_aware_factor=0.5, data_format='NCHW')
def forward(self, x):
x = self.backbone(x)
x = self.neck(x)
x = self.head(x)
return x
    def load_paddle_weights(self, weights_path):
        # requires PaddlePaddle: read the checkpoint via fluid, then copy
        # each tensor into this module's state_dict under the mapped name
        with fluid.dygraph.guard():
            para_state_dict, opti_state_dict = fluid.load_dygraph(weights_path)
        sd = para_state_dict
for key, value in self.state_dict().items():
if key.endswith('num_batches_tracked'):
continue
ppname = key
ppname = ppname.replace('.running_mean', '._mean')
ppname = ppname.replace('.running_var', '._variance')
if key.startswith('backbone.conv'):
pass
if key.startswith('backbone.res_layers'):
ppname = ppname.replace('.res_layers', '')
ppname = ppname.replace('.blocks', '')
if key.startswith('neck.fpn_blocks'):
ppname = ppname.replace('.fpn_blocks', '')
ppname = ppname.replace('.fpn_', '.fpn.')
ppname = ppname.replace('.conv_module.0_0', '.conv_module.0.0')
ppname = ppname.replace('.conv_module.0_1', '.conv_module.0.1')
ppname = ppname.replace('.conv_module.1_0', '.conv_module.1.0')
ppname = ppname.replace('.conv_module.1_1', '.conv_module.1.1')
ppname = ppname.replace('.conv_module.2_0', '.conv_module.2.0')
ppname = ppname.replace('.conv_module.2_1', '.conv_module.2.1')
if key.startswith('neck.fpn_routes'):
ppname = ppname.replace('.fpn_routes', '')
ppname = ppname.replace('.fpn_transition_', '.fpn_transition.')
if key.startswith('neck.pan_blocks'):
ppname = ppname.replace('.pan_blocks', '')
ppname = ppname.replace('.pan_', '.pan.')
ppname = ppname.replace('.conv_module.0_0', '.conv_module.0.0')
ppname = ppname.replace('.conv_module.0_1', '.conv_module.0.1')
ppname = ppname.replace('.conv_module.1_0', '.conv_module.1.0')
ppname = ppname.replace('.conv_module.1_1', '.conv_module.1.1')
ppname = ppname.replace('.conv_module.2_0', '.conv_module.2.0')
ppname = ppname.replace('.conv_module.2_1', '.conv_module.2.1')
if key.startswith('neck.pan_routes'):
ppname = ppname.replace('.pan_routes', '')
ppname = ppname.replace('.pan_transition_', '.pan_transition.')
if key.startswith('head.yolo_outputs'):
ppname = ppname.replace('head.yolo_outputs.yolo_output_', 'yolo_head.yolo_output.')
            try:
                weights = sd[ppname]
                self.state_dict()[key].copy_(torch.Tensor(weights))
            except Exception as e:
                # surface which torch/paddle name pair failed to map
                print('failed to copy {} <- {}'.format(key, ppname))
                raise e
    def load_pytorch_weights(self, weights_path):
        self.load_state_dict(torch.load(weights_path))
    def save_pytorch_weights(self, weights_path):
        try:
            torch.save(self.state_dict(), weights_path, _use_new_zipfile_serialization=False)
        except TypeError:
            # older torch versions do not accept _use_new_zipfile_serialization
            torch.save(self.state_dict(), weights_path)
class DownSample(nn.Module):
def __init__(self, ch_in, ch_out, filter_size=3, stride=2, padding=1, norm_type='bn', norm_decay=0.0, freeze_norm=False, data_format='NCHW'):
"""
downsample layer
Args:
ch_in (int): input channel
ch_out (int): output channel
filter_size (int): filter size, default 3
stride (int): stride, default 2
padding (int): padding size, default 1
norm_type (str): batch norm type, default bn
norm_decay (str): decay for weight and bias of batch norm layer, default 0.
freeze_norm (bool): whether to freeze norm, default False
data_format (str): data format, NCHW or NHWC
"""
super(DownSample, self).__init__()
self.conv_bn_layer = ConvBNLayer(ch_in=ch_in, ch_out=ch_out, filter_size=filter_size, stride=stride, padding=padding, norm_type=norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, data_format=data_format)
self.ch_out = ch_out
def forward(self, inputs):
out = self.conv_bn_layer(inputs)
return out
class Blocks(nn.Module):
def __init__(self, block, ch_in, ch_out, count, name_adapter, stage_num, variant='b', groups=1, base_width=64, lr=1.0, norm_type='bn', norm_decay=0.0, freeze_norm=True, dcn_v2=False, std_senet=False):
super(Blocks, self).__init__()
self.blocks = nn.Sequential()
for i in range(count):
conv_name = name_adapter.fix_layer_warp_name(stage_num, count, i)
layer = block(ch_in=ch_in, ch_out=ch_out, stride=2 if i == 0 and stage_num != 2 else 1, shortcut=False if i == 0 else True, variant=variant, groups=groups, base_width=base_width, lr=lr, norm_type=norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, dcn_v2=dcn_v2, std_senet=std_senet)
self.blocks.add_module(conv_name, layer)
if i == 0:
ch_in = ch_out * block.expansion
def forward(self, inputs):
block_out = self.blocks(inputs)
return block_out
DarkNet_cfg = {53: [1, 2, 8, 8, 4]}
class DarkNet(nn.Module):
__shared__ = ['norm_type', 'data_format']
def __init__(self, depth=53, freeze_at=-1, return_idx=[2, 3, 4], num_stages=5, norm_type='bn', norm_decay=0.0, freeze_norm=False, data_format='NCHW'):
"""
Darknet, see https://pjreddie.com/darknet/yolo/
Args:
depth (int): depth of network
freeze_at (int): freeze the backbone at which stage
filter_size (int): filter size, default 3
return_idx (list): index of stages whose feature maps are returned
norm_type (str): batch norm type, default bn
norm_decay (str): decay for weight and bias of batch norm layer, default 0.
data_format (str): data format, NCHW or NHWC
"""
super(DarkNet, self).__init__()
self.depth = depth
self.freeze_at = freeze_at
self.return_idx = return_idx
self.num_stages = num_stages
self.stages = DarkNet_cfg[self.depth][0:num_stages]
self.conv0 = ConvBNLayer(ch_in=3, ch_out=32, filter_size=3, stride=1, padding=1, norm_type=norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, data_format=data_format)
self.downsample0 = DownSample(ch_in=32, ch_out=32 * 2, norm_type=norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, data_format=data_format)
self._out_channels = []
self.darknet_conv_block_list = nn.ModuleList()
self.downsample_list = nn.ModuleDict()
self.downsample_list_names = []
ch_in = [64, 128, 256, 512, 1024]
for i, stage in enumerate(self.stages):
name = 'stage_{}'.format(i)
conv_block = Blocks(int(ch_in[i]), 32 * 2 ** i, stage, norm_type=norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, data_format=data_format, name=name)
self.darknet_conv_block_list.add_module(name, conv_block)
if i in return_idx:
self._out_channels.append(64 * 2 ** i)
for i in range(num_stages - 1):
down_name = 'stage_{}_downsample'.format(i)
downsample = DownSample(ch_in=32 * 2 ** (i + 1), ch_out=32 * 2 ** (i + 2), norm_type=norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, data_format=data_format)
self.downsample_list.add_module(down_name, downsample)
self.downsample_list_names.append(down_name)
def forward(self, inputs):
x = inputs
out = self.conv0(x)
out = self.downsample0(out)
blocks = []
for i, conv_block_i in enumerate(self.darknet_conv_block_list):
out = conv_block_i(out)
if i in self.return_idx:
blocks.append(out)
if i < self.num_stages - 1:
out = self.downsample_list[self.downsample_list_names[i]](out)
return blocks
class ConvNormLayer(nn.Module):
def __init__(self, ch_in, ch_out, filter_size, stride, groups=1, norm_type='bn', norm_decay=0.0, norm_groups=32, lr_scale=1.0, freeze_norm=False, initializer=None):
super(ConvNormLayer, self).__init__()
assert norm_type in ['bn', 'sync_bn', 'gn']
bias_attr = False
self.conv = nn.Conv2d(in_channels=ch_in, out_channels=ch_out, kernel_size=filter_size, stride=stride, padding=(filter_size - 1) // 2, groups=groups, bias=bias_attr)
norm_lr = 0.0 if freeze_norm else 1.0
if norm_type == 'bn':
self.norm = nn.BatchNorm2d(ch_out)
elif norm_type == 'sync_bn':
self.norm = nn.SyncBatchNorm(ch_out)
elif norm_type == 'gn':
self.norm = nn.GroupNorm(num_groups=norm_groups, num_channels=ch_out, affine=bias_attr)
def forward(self, inputs):
out = self.conv(inputs)
out = self.norm(out)
return out
class SELayer(nn.Module):
def __init__(self, ch, reduction_ratio=16):
super(SELayer, self).__init__()
self.pool = nn.AdaptiveAvgPool2d(1)
stdv = 1.0 / math.sqrt(ch)
c_ = ch // reduction_ratio
self.squeeze = nn.Linear(in_features=ch, out_features=c_, bias=True)
stdv = 1.0 / math.sqrt(c_)
self.extract = nn.Linear(in_features=c_, out_features=ch, bias=True)
def forward(self, inputs):
out = self.pool(inputs)
out = out.squeeze(dim=3).squeeze(dim=2)
out = self.squeeze(out)
out = F.relu(out)
out = self.extract(out)
out = F.sigmoid(out)
out = out.unsqueeze(dim=2).unsqueeze(dim=3)
scale = out * inputs
return scale
class BottleNeck(nn.Module):
expansion = 4
def __init__(self, ch_in, ch_out, stride, shortcut, variant='b', groups=1, base_width=4, lr=1.0, norm_type='bn', norm_decay=0.0, freeze_norm=True, dcn_v2=False, std_senet=False):
super(BottleNeck, self).__init__()
if variant == 'a':
stride1, stride2 = stride, 1
else:
stride1, stride2 = 1, stride
width = int(ch_out * (base_width / 64.0)) * groups
self.shortcut = shortcut
if not shortcut:
if variant == 'd' and stride == 2:
self.short = nn.Sequential()
self.short.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True))
self.short.add_module('conv', ConvNormLayer(ch_in=ch_in, ch_out=ch_out * self.expansion, filter_size=1, stride=1, norm_type=norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, lr=lr))
else:
self.short = ConvNormLayer(ch_in=ch_in, ch_out=ch_out * self.expansion, filter_size=1, stride=stride, norm_type=norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, lr=lr)
self.branch2a = ConvNormLayer(ch_in=ch_in, ch_out=width, filter_size=1, stride=stride1, groups=1, act='relu', norm_type=norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, lr=lr)
self.branch2b = ConvNormLayer(ch_in=width, ch_out=width, filter_size=3, stride=stride2, groups=groups, act='relu', norm_type=norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, lr=lr, dcn_v2=dcn_v2)
self.branch2c = ConvNormLayer(ch_in=width, ch_out=ch_out * self.expansion, filter_size=1, stride=1, groups=1, norm_type=norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, lr=lr)
self.std_senet = std_senet
if self.std_senet:
self.se = SELayer(ch_out * self.expansion)
def forward(self, inputs):
out = self.branch2a(inputs)
out = self.branch2b(out)
out = self.branch2c(out)
if self.std_senet:
out = self.se(out)
if self.shortcut:
short = inputs
else:
short = self.short(inputs)
out = torch.add(out, short)
out = F.relu(out)
return out
class NameAdapter(object):
"""Fix the backbones variable names for pretrained weight"""
def __init__(self, model):
super(NameAdapter, self).__init__()
self.model = model
@property
def model_type(self):
return getattr(self.model, '_model_type', '')
@property
def variant(self):
return getattr(self.model, 'variant', '')
def fix_conv_norm_name(self, name):
if name == 'conv1':
bn_name = 'bn_' + name
else:
bn_name = 'bn' + name[3:]
if self.model_type == 'SEResNeXt':
bn_name = name + '_bn'
return bn_name
def fix_shortcut_name(self, name):
if self.model_type == 'SEResNeXt':
name = 'conv' + name + '_prj'
return name
def fix_bottleneck_name(self, name):
if self.model_type == 'SEResNeXt':
conv_name1 = 'conv' + name + '_x1'
conv_name2 = 'conv' + name + '_x2'
conv_name3 = 'conv' + name + '_x3'
shortcut_name = name
else:
conv_name1 = name + '_branch2a'
conv_name2 = name + '_branch2b'
conv_name3 = name + '_branch2c'
shortcut_name = name + '_branch1'
return conv_name1, conv_name2, conv_name3, shortcut_name
def fix_basicblock_name(self, name):
if self.model_type == 'SEResNeXt':
conv_name1 = 'conv' + name + '_x1'
conv_name2 = 'conv' + name + '_x2'
shortcut_name = name
else:
conv_name1 = name + '_branch2a'
conv_name2 = name + '_branch2b'
shortcut_name = name + '_branch1'
return conv_name1, conv_name2, shortcut_name
def fix_layer_warp_name(self, stage_num, count, i):
name = 'res' + str(stage_num)
if count > 10 and stage_num == 4:
if i == 0:
conv_name = name + 'a'
else:
conv_name = name + 'b' + str(i)
else:
conv_name = name + chr(ord('a') + i)
if self.model_type == 'SEResNeXt':
conv_name = str(stage_num + 2) + '_' + str(i + 1)
return conv_name
def fix_c1_stage_name(self):
return 'res_conv1' if self.model_type == 'ResNeXt' else 'conv1'
class Res5Head(nn.Module):
def __init__(self, depth=50):
super(Res5Head, self).__init__()
feat_in, feat_out = [1024, 512]
if depth < 50:
feat_in = 256
na = NameAdapter(self)
block = BottleNeck if depth >= 50 else BasicBlock
self.res5 = Blocks(block, feat_in, feat_out, count=3, name_adapter=na, stage_num=5)
self.feat_out = feat_out if depth < 50 else feat_out * 4
def forward(self, roi_feat, stage=0):
y = self.res5(roi_feat)
return y
class YoloDetBlock(nn.Module):
def __init__(self, ch_in, channel, norm_type, freeze_norm=False, name='', data_format='NCHW'):
"""
YOLODetBlock layer for yolov3, see https://arxiv.org/abs/1804.02767
Args:
ch_in (int): input channel
channel (int): base channel
norm_type (str): batch norm type
freeze_norm (bool): whether to freeze norm, default False
name (str): layer name
data_format (str): data format, NCHW or NHWC
"""
super(YoloDetBlock, self).__init__()
self.ch_in = ch_in
self.channel = channel
assert channel % 2 == 0, 'channel {} cannot be divided by 2'.format(channel)
conv_def = [['conv0', ch_in, channel, 1, '_0_0'], ['conv1', channel, channel * 2, 3, '_0_1'], ['conv2', channel * 2, channel, 1, '_1_0'], ['conv3', channel, channel * 2, 3, '_1_1'], ['route', channel * 2, channel, 1, '_2']]
self.conv_module = nn.Sequential()
for idx, (conv_name, ch_in, ch_out, filter_size, post_name) in enumerate(conv_def):
self.conv_module.add_module(conv_name, ConvBNLayer(ch_in=ch_in, ch_out=ch_out, filter_size=filter_size, padding=(filter_size - 1) // 2, norm_type=norm_type, freeze_norm=freeze_norm, data_format=data_format, name=name + post_name))
self.tip = ConvBNLayer(ch_in=channel, ch_out=channel * 2, filter_size=3, padding=1, norm_type=norm_type, freeze_norm=freeze_norm, data_format=data_format, name=name + '.tip')
def forward(self, inputs):
route = self.conv_module(inputs)
tip = self.tip(route)
return route, tip
def add_coord(x, data_format):
b = x.size()[0]
if data_format == 'NCHW':
h, w = x.shape[2], x.shape[3]
else:
h, w = x.shape[1], x.shape[2]
gx = torch.arange(w, dtype=x.dtype) / ((w - 1.0) * 2.0) - 1.0
gy = torch.arange(h, dtype=x.dtype) / ((h - 1.0) * 2.0) - 1.0
if data_format == 'NCHW':
gx = gx.reshape([1, 1, 1, w]).expand([b, 1, h, w])
gy = gy.reshape([1, 1, h, 1]).expand([b, 1, h, w])
else:
gx = gx.reshape([1, 1, w, 1]).expand([b, h, w, 1])
gy = gy.reshape([1, h, 1, 1]).expand([b, h, w, 1])
    # the coordinate ramps are constants (Paddle's stop_gradient); detach them
    gx = gx.detach()
    gy = gy.detach()
return gx, gy
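# Illustrative sketch (added): add_coord builds two constant coordinate
# ramps (linear in x and in y) broadcast over the batch; CoordConv below
# concatenates them onto the feature map as two extra input channels.
def _demo_add_coord():
    x = torch.randn(2, 8, 4, 6)
    gx, gy = add_coord(x, 'NCHW')
    return gx.shape, gy.shape              # each torch.Size([2, 1, 4, 6])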
class CoordConv(nn.Module):
def __init__(self, ch_in, ch_out, filter_size, padding, norm_type, freeze_norm=False, name='', data_format='NCHW'):
"""
CoordConv layer
Args:
ch_in (int): input channel
ch_out (int): output channel
filter_size (int): filter size, default 3
padding (int): padding size, default 0
norm_type (str): batch norm type, default bn
name (str): layer name
data_format (str): data format, NCHW or NHWC
"""
super(CoordConv, self).__init__()
self.conv = ConvBNLayer(ch_in + 2, ch_out, filter_size=filter_size, padding=padding, norm_type=norm_type, freeze_norm=freeze_norm, data_format=data_format, name=name)
self.data_format = data_format
def forward(self, x):
gx, gy = add_coord(x, self.data_format)
if self.data_format == 'NCHW':
y = torch.cat([x, gx, gy], dim=1)
else:
y = torch.cat([x, gx, gy], dim=-1)
y = self.conv(y)
return y
class PPYOLODetBlock(nn.Module):
def __init__(self, cfg, name, data_format='NCHW'):
"""
PPYOLODetBlock layer
Args:
cfg (list): layer configs for this block
name (str): block name
data_format (str): data format, NCHW or NHWC
"""
super(PPYOLODetBlock, self).__init__()
self.conv_module = nn.Sequential()
for idx, (conv_name, layer, args, kwargs) in enumerate(cfg[:-1]):
kwargs.update(name='{}_{}'.format(name, conv_name), data_format=data_format)
self.conv_module.add_module(conv_name, layer(*args, **kwargs))
conv_name, layer, args, kwargs = cfg[-1]
kwargs.update(name='{}_{}'.format(name, conv_name), data_format=data_format)
self.tip = layer(*args, **kwargs)
def forward(self, inputs):
route = self.conv_module(inputs)
tip = self.tip(route)
return route, tip
class PPYOLOTinyDetBlock(nn.Module):
def __init__(self, ch_in, ch_out, name, drop_block=False, block_size=3, keep_prob=1.0, data_format='NCHW'):
"""
PPYOLO Tiny DetBlock layer
Args:
ch_in (list): input channel number
ch_out (list): output channel number
name (str): block name
            drop_block: whether to use DropBlock
block_size: drop block size
keep_prob: probability to keep block in DropBlock
data_format (str): data format, NCHW or NHWC
"""
super(PPYOLOTinyDetBlock, self).__init__()
self.drop_block_ = drop_block
self.conv_module = nn.Sequential()
cfgs = [['_0', ch_in, ch_out, 1, 1, 0, 1], ['_1', ch_out, ch_out, 5, 1, 2, ch_out], ['_', ch_out, ch_out, 1, 1, 0, 1], ['_route', ch_out, ch_out, 5, 1, 2, ch_out]]
for cfg in cfgs:
conv_name, conv_ch_in, conv_ch_out, filter_size, stride, padding, groups = cfg
self.conv_module.add_module(name + conv_name, ConvBNLayer(ch_in=conv_ch_in, ch_out=conv_ch_out, filter_size=filter_size, stride=stride, padding=padding, groups=groups, name=name + conv_name))
self.tip = ConvBNLayer(ch_in=ch_out, ch_out=ch_out, filter_size=1, stride=1, padding=0, groups=1, name=name + conv_name)
if self.drop_block_:
self.drop_block = DropBlock(block_size=block_size, keep_prob=keep_prob, data_format=data_format, name=name + '_dropblock')
def forward(self, inputs):
if self.drop_block_:
inputs = self.drop_block(inputs)
route = self.conv_module(inputs)
tip = self.tip(route)
return route, tip
class YOLOv3FPN(nn.Module):
__shared__ = ['norm_type', 'data_format']
def __init__(self, in_channels=[256, 512, 1024], norm_type='bn', freeze_norm=False, data_format='NCHW'):
"""
YOLOv3FPN layer
Args:
in_channels (list): input channels for fpn
norm_type (str): batch norm type, default bn
data_format (str): data format, NCHW or NHWC
"""
super(YOLOv3FPN, self).__init__()
        assert len(in_channels) > 0, 'in_channels length should be > 0'
self.in_channels = in_channels
self.num_blocks = len(in_channels)
self._out_channels = []
self.yolo_blocks = nn.ModuleDict()
self.yolo_blocks_names = []
self.routes = nn.ModuleDict()
self.routes_names = []
self.data_format = data_format
for i in range(self.num_blocks):
name = 'yolo_block_{}'.format(i)
in_channel = in_channels[-i - 1]
if i > 0:
in_channel += 512 // 2 ** i
yolo_block = YoloDetBlock(in_channel, channel=512 // 2 ** i, norm_type=norm_type, freeze_norm=freeze_norm, data_format=data_format, name=name)
self.yolo_blocks.add_module(name, yolo_block)
self.yolo_blocks_names.append(name)
self._out_channels.append(1024 // 2 ** i)
if i < self.num_blocks - 1:
name = 'yolo_transition_{}'.format(i)
route = ConvBNLayer(ch_in=512 // 2 ** i, ch_out=256 // 2 ** i, filter_size=1, stride=1, padding=0, norm_type=norm_type, freeze_norm=freeze_norm, data_format=data_format, name=name)
self.routes.add_module(name, route)
self.routes_names.append(name)
def forward(self, blocks, for_mot=False):
assert len(blocks) == self.num_blocks
blocks = blocks[::-1]
yolo_feats = []
if for_mot:
emb_feats = []
for i, block in enumerate(blocks):
if i > 0:
if self.data_format == 'NCHW':
block = torch.cat([route, block], dim=1)
else:
block = torch.cat([route, block], dim=-1)
route, tip = self.yolo_blocks[self.yolo_blocks_names[i]](block)
yolo_feats.append(tip)
if for_mot:
emb_feats.append(route)
if i < self.num_blocks - 1:
route = self.routes[self.routes_names[i]](route)
route = F.interpolate(route, scale_factor=2.0)
if for_mot:
return {'yolo_feats': yolo_feats, 'emb_feats': emb_feats}
else:
return yolo_feats
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape]}
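# Hedged usage sketch: the FPN takes backbone features ordered shallow-to-deep
# and yields detection features deep-to-shallow (channels 1024/512/256 with the
# default in_channels); the shapes below are illustrative only.
#   fpn = YOLOv3FPN(in_channels=[256, 512, 1024])
#   c3, c4, c5 = (torch.randn(1, c, s, s) for c, s in [(256, 52), (512, 26), (1024, 13)])
#   p5, p4, p3 = fpn([c3, c4, c5])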
class PPYOLOFPN(nn.Module):
__shared__ = ['norm_type', 'data_format']
def __init__(self, in_channels=[512, 1024, 2048], norm_type='bn', freeze_norm=False, data_format='NCHW', coord_conv=False, conv_block_num=2, drop_block=False, block_size=3, keep_prob=1.0, spp=False):
"""
PPYOLOFPN layer
Args:
in_channels (list): input channels for fpn
norm_type (str): batch norm type, default bn
data_format (str): data format, NCHW or NHWC
            coord_conv (bool): whether to use CoordConv or not
            conv_block_num (int): number of conv blocks in each pan block
            drop_block (bool): whether to use DropBlock or not
            block_size (int): block size of DropBlock
            keep_prob (float): keep probability of DropBlock
            spp (bool): whether to use SPP or not
"""
super(PPYOLOFPN, self).__init__()
        assert len(in_channels) > 0, 'in_channels length should be > 0'
self.in_channels = in_channels
self.num_blocks = len(in_channels)
self.coord_conv = coord_conv
self.drop_block = drop_block
self.block_size = block_size
self.keep_prob = keep_prob
self.spp = spp
self.conv_block_num = conv_block_num
self.data_format = data_format
if self.coord_conv:
ConvLayer = CoordConv
else:
ConvLayer = ConvBNLayer
if self.drop_block:
dropblock_cfg = [['dropblock', DropBlock, [self.block_size, self.keep_prob], dict()]]
else:
dropblock_cfg = []
self._out_channels = []
self.yolo_blocks = nn.ModuleDict()
self.yolo_blocks_names = []
self.routes = nn.ModuleDict()
self.routes_names = []
for i, ch_in in enumerate(self.in_channels[::-1]):
if i > 0:
ch_in += 512 // 2 ** i
channel = 64 * 2 ** self.num_blocks // 2 ** i
base_cfg = []
c_in, c_out = ch_in, channel
for j in range(self.conv_block_num):
base_cfg += [['conv{}'.format(2 * j), ConvLayer, [c_in, c_out, 1], dict(padding=0, norm_type=norm_type, freeze_norm=freeze_norm)], ['conv{}'.format(2 * j + 1), ConvBNLayer, [c_out, c_out * 2, 3], dict(padding=1, norm_type=norm_type, freeze_norm=freeze_norm)]]
c_in, c_out = c_out * 2, c_out
base_cfg += [['route', ConvLayer, [c_in, c_out, 1], dict(padding=0, norm_type=norm_type, freeze_norm=freeze_norm)], ['tip', ConvLayer, [c_out, c_out * 2, 3], dict(padding=1, norm_type=norm_type, freeze_norm=freeze_norm)]]
if self.conv_block_num == 2:
if i == 0:
if self.spp:
spp_cfg = [['spp', SPP, [channel * 4, channel, 1], dict(pool_size=[5, 9, 13], norm_type=norm_type, freeze_norm=freeze_norm)]]
else:
spp_cfg = []
cfg = base_cfg[0:3] + spp_cfg + base_cfg[3:4] + dropblock_cfg + base_cfg[4:6]
else:
cfg = base_cfg[0:2] + dropblock_cfg + base_cfg[2:6]
elif self.conv_block_num == 0:
if self.spp and i == 0:
spp_cfg = [['spp', SPP, [c_in * 4, c_in, 1], dict(pool_size=[5, 9, 13], norm_type=norm_type, freeze_norm=freeze_norm)]]
else:
spp_cfg = []
cfg = spp_cfg + dropblock_cfg + base_cfg
name = 'yolo_block_{}'.format(i)
yolo_block = PPYOLODetBlock(cfg, name)
self.yolo_blocks.add_module(name, yolo_block)
self.yolo_blocks_names.append(name)
self._out_channels.append(channel * 2)
if i < self.num_blocks - 1:
name = 'yolo_transition_{}'.format(i)
route = ConvBNLayer(ch_in=channel, ch_out=256 // 2 ** i, filter_size=1, stride=1, padding=0, norm_type=norm_type, freeze_norm=freeze_norm, data_format=data_format, name=name)
self.routes.add_module(name, route)
self.routes_names.append(name)
def forward(self, blocks, for_mot=False):
assert len(blocks) == self.num_blocks
blocks = blocks[::-1]
yolo_feats = []
if for_mot:
emb_feats = []
for i, block in enumerate(blocks):
if i > 0:
if self.data_format == 'NCHW':
block = torch.cat([route, block], dim=1)
else:
block = torch.cat([route, block], dim=-1)
route, tip = self.yolo_blocks[self.yolo_blocks_names[i]](block)
yolo_feats.append(tip)
if for_mot:
emb_feats.append(route)
if i < self.num_blocks - 1:
route = self.routes[self.routes_names[i]](route)
                # torch's F.interpolate has no data_format argument; features here are NCHW.
                route = F.interpolate(route, scale_factor=2.0)
if for_mot:
return {'yolo_feats': yolo_feats, 'emb_feats': emb_feats}
else:
return yolo_feats
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape]}
class PPYOLOTinyFPN(nn.Module):
__shared__ = ['norm_type', 'data_format']
def __init__(self, in_channels=[80, 56, 34], detection_block_channels=[160, 128, 96], norm_type='bn', data_format='NCHW', **kwargs):
"""
PPYOLO Tiny FPN layer
Args:
in_channels (list): input channels for fpn
detection_block_channels (list): channels in fpn
norm_type (str): batch norm type, default bn
data_format (str): data format, NCHW or NHWC
            kwargs: extra key-value pairs, such as the DropBlock and SPP parameters
"""
super(PPYOLOTinyFPN, self).__init__()
        assert len(in_channels) > 0, 'in_channels length should be > 0'
self.in_channels = in_channels[::-1]
        assert len(detection_block_channels) > 0, 'detection_block_channels length should be > 0'
self.detection_block_channels = detection_block_channels
self.data_format = data_format
self.num_blocks = len(in_channels)
self.drop_block = kwargs.get('drop_block', False)
self.block_size = kwargs.get('block_size', 3)
self.keep_prob = kwargs.get('keep_prob', 1.0)
self.spp_ = kwargs.get('spp', False)
if self.spp_:
self.spp = SPP(self.in_channels[0] * 4, self.in_channels[0], k=1, pool_size=[5, 9, 13], norm_type=norm_type, name='spp')
self._out_channels = []
self.yolo_blocks = nn.ModuleDict()
self.yolo_blocks_names = []
self.routes = nn.ModuleDict()
self.routes_names = []
for i, (ch_in, ch_out) in enumerate(zip(self.in_channels, self.detection_block_channels)):
name = 'yolo_block_{}'.format(i)
if i > 0:
ch_in += self.detection_block_channels[i - 1]
yolo_block = PPYOLOTinyDetBlock(ch_in, ch_out, name, drop_block=self.drop_block, block_size=self.block_size, keep_prob=self.keep_prob)
self.yolo_blocks.add_module(name, yolo_block)
self.yolo_blocks_names.append(name)
self._out_channels.append(ch_out)
if i < self.num_blocks - 1:
name = 'yolo_transition_{}'.format(i)
route = ConvBNLayer(ch_in=ch_out, ch_out=ch_out, filter_size=1, stride=1, padding=0, norm_type=norm_type, data_format=data_format, name=name)
self.routes.add_module(name, route)
self.routes_names.append(name)
def forward(self, blocks, for_mot=False):
assert len(blocks) == self.num_blocks
blocks = blocks[::-1]
yolo_feats = []
if for_mot:
emb_feats = []
for i, block in enumerate(blocks):
if i == 0 and self.spp_:
block = self.spp(block)
if i > 0:
if self.data_format == 'NCHW':
block = torch.cat([route, block], dim=1)
else:
block = torch.cat([route, block], dim=-1)
route, tip = self.yolo_blocks[self.yolo_blocks_names[i]](block)
yolo_feats.append(tip)
if for_mot:
emb_feats.append(route)
if i < self.num_blocks - 1:
route = self.routes[self.routes_names[i]](route)
route = F.interpolate(route, scale_factor=2.0)
if for_mot:
return {'yolo_feats': yolo_feats, 'emb_feats': emb_feats}
else:
return yolo_feats
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape]}
class LayoutLMPooler(Layer):
def __init__(self, hidden_size, pool_act='tanh'):
super(LayoutLMPooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
self.pool_act = pool_act
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
if self.pool_act == 'tanh':
pooled_output = self.activation(pooled_output)
return pooled_output
class LayoutLMEmbeddings(Layer):
"""
    Combine word, 1D position, 2D (bbox) position, and token_type embeddings.
"""
def __init__(self, vocab_size, hidden_size=768, hidden_dropout_prob=0.1, max_position_embeddings=512, max_2d_position_embeddings=1024, layer_norm_eps=1e-12, pad_token_id=0, type_vocab_size=16):
super(LayoutLMEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(vocab_size, hidden_size, padding_idx=pad_token_id)
self.position_embeddings = nn.Embedding(max_position_embeddings, hidden_size)
self.x_position_embeddings = nn.Embedding(max_2d_position_embeddings, hidden_size)
self.y_position_embeddings = nn.Embedding(max_2d_position_embeddings, hidden_size)
self.h_position_embeddings = nn.Embedding(max_2d_position_embeddings, hidden_size)
self.w_position_embeddings = nn.Embedding(max_2d_position_embeddings, hidden_size)
self.token_type_embeddings = nn.Embedding(type_vocab_size, hidden_size)
self.layer_norm = nn.LayerNorm(hidden_size, eps=layer_norm_eps)
self.dropout = nn.Dropout(hidden_dropout_prob)
self.register_buffer('position_ids', torch.arange(max_position_embeddings).expand((1, -1)))
def forward(self, input_ids, bbox=None, token_type_ids=None, position_ids=None):
if position_ids is None:
ones = torch.ones_like(input_ids, dtype=torch.long)
seq_length = torch.cumsum(ones, dim=-1)
position_ids = seq_length - ones
            position_ids = position_ids.detach()  # torch counterpart of paddle's stop_gradient
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids, dtype=torch.long)
word_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
            raise IndexError('The :obj:`bbox` coordinate values should be within 0-1000 range.') from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = word_embeddings + position_embeddings + left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
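# Note (an assumption consistent with the IndexError raised above): `bbox` is
# expected to be an integer tensor of shape [batch, seq_len, 4] holding
# (x0, y0, x1, y1) box coordinates normalized to the 0-1000 range.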
class LayoutXLMPooler(Layer):
def __init__(self, hidden_size, with_pool):
super(LayoutXLMPooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
self.with_pool = with_pool
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
if self.with_pool == 'tanh':
pooled_output = self.activation(pooled_output)
return pooled_output
class LayoutXLMEmbeddings(Layer):
"""
    Combine word, 1D position, 2D (bbox) position, and token_type embeddings.
"""
def __init__(self, config):
super(LayoutXLMEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config['vocab_size'], config['hidden_size'], padding_idx=0)
self.position_embeddings = nn.Embedding(config['max_position_embeddings'], config['hidden_size'])
self.x_position_embeddings = nn.Embedding(config['max_2d_position_embeddings'], config['coordinate_size'])
self.y_position_embeddings = nn.Embedding(config['max_2d_position_embeddings'], config['coordinate_size'])
self.h_position_embeddings = nn.Embedding(config['max_2d_position_embeddings'], config['coordinate_size'])
self.w_position_embeddings = nn.Embedding(config['max_2d_position_embeddings'], config['coordinate_size'])
self.token_type_embeddings = nn.Embedding(config['type_vocab_size'], config['hidden_size'])
self.LayerNorm = nn.LayerNorm(config['hidden_size'], eps=config['layer_norm_eps'])
self.dropout = nn.Dropout(config['hidden_dropout_prob'])
self.register_buffer('position_ids', torch.arange(config['max_position_embeddings']).expand((1, -1)))
def _cal_spatial_position_embeddings(self, bbox):
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
            raise IndexError('The :obj:`bbox` coordinate values should be within 0-1000 range.') from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
spatial_position_embeddings = torch.cat([left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings], dim=-1)
return spatial_position_embeddings
def forward(self, input_ids, bbox=None, token_type_ids=None, position_ids=None):
if position_ids is None:
ones = torch.ones_like(input_ids, dtype=torch.long)
seq_length = torch.cumsum(ones, dim=-1)
position_ids = seq_length - ones
            position_ids = position_ids.detach()  # torch counterpart of paddle's stop_gradient
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids, dtype=torch.long)
input_embedings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
            raise IndexError('The :obj:`bbox` coordinate values should be within 0-1000 range.') from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = input_embedings + position_embeddings + left_position_embeddings + upper_position_embeddings + right_position_embeddings + lower_position_embeddings + h_position_embeddings + w_position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class LayoutXLMSelfOutput(Layer):
def __init__(self, config):
super(LayoutXLMSelfOutput, self).__init__()
self.dense = nn.Linear(config['hidden_size'], config['hidden_size'])
self.LayerNorm = nn.LayerNorm(config['hidden_size'], eps=config['layer_norm_eps'])
self.dropout = nn.Dropout(config['hidden_dropout_prob'])
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LayoutXLMSelfAttention(Layer):
def __init__(self, config):
super(LayoutXLMSelfAttention, self).__init__()
if config['hidden_size'] % config['num_attention_heads'] != 0 and not hasattr(config, 'embedding_size'):
raise ValueError('The hidden size {} is not a multiple of the number of attention heads {}'.format(config['hidden_size'], config['num_attention_heads']))
self.fast_qkv = config['fast_qkv']
self.num_attention_heads = config['num_attention_heads']
self.attention_head_size = int(config['hidden_size'] / config['num_attention_heads'])
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.has_relative_attention_bias = config['has_relative_attention_bias']
self.has_spatial_attention_bias = config['has_spatial_attention_bias']
if config['fast_qkv']:
self.qkv_linear = nn.Linear(config['hidden_size'], 3 * self.all_head_size, bias=False)
self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
else:
self.query = nn.Linear(config['hidden_size'], self.all_head_size)
self.key = nn.Linear(config['hidden_size'], self.all_head_size)
self.value = nn.Linear(config['hidden_size'], self.all_head_size)
self.dropout = nn.Dropout(config['attention_probs_dropout_prob'])
def transpose_for_scores(self, x):
new_x_shape = x.shape[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.reshape(new_x_shape)
return x.permute(0, 2, 1, 3)
def compute_qkv(self, hidden_states):
if self.fast_qkv:
qkv = self.qkv_linear(hidden_states)
q, k, v = torch.chunk(qkv, 3, dim=-1)
if q.ndimension() == self.q_bias.ndimension():
q = q + self.q_bias
v = v + self.v_bias
else:
_sz = (1,) * (q.ndimension() - 1) + (-1,)
q = q + self.q_bias.reshape(_sz)
v = v + self.v_bias.reshape(_sz)
else:
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
return q, k, v
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None):
q, k, v = self.compute_qkv(hidden_states)
query_layer = self.transpose_for_scores(q)
key_layer = self.transpose_for_scores(k)
value_layer = self.transpose_for_scores(v)
query_layer = query_layer / math.sqrt(self.attention_head_size)
attention_scores = torch.matmul(query_layer, key_layer.permute(0, 1, 3, 2))
if self.has_relative_attention_bias:
attention_scores += rel_pos
if self.has_spatial_attention_bias:
attention_scores += rel_2d_pos
attention_scores = attention_scores.float().masked_fill_(attention_mask, float('-inf'))
attention_probs = F.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.shape[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class LayoutXLMAttention(Layer):
def __init__(self, config):
super(LayoutXLMAttention, self).__init__()
self.self = LayoutXLMSelfAttention(config)
self.output = LayoutXLMSelfOutput(config)
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None):
self_outputs = self.self(hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
class LayoutXLMIntermediate(Layer):
def __init__(self, config):
super(LayoutXLMIntermediate, self).__init__()
self.dense = nn.Linear(config['hidden_size'], config['intermediate_size'])
if config['hidden_act'] == 'gelu':
self.intermediate_act_fn = nn.GELU()
else:
            assert False, 'hidden_act is set as: {}, please check it.'.format(config['hidden_act'])
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class LayoutXLMOutput(Layer):
def __init__(self, config):
super(LayoutXLMOutput, self).__init__()
self.dense = nn.Linear(config['intermediate_size'], config['hidden_size'])
self.LayerNorm = nn.LayerNorm(config['hidden_size'], eps=config['layer_norm_eps'])
self.dropout = nn.Dropout(config['hidden_dropout_prob'])
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LayoutXLMLayer(Layer):
def __init__(self, config):
super(LayoutXLMLayer, self).__init__()
self.seq_len_dim = 1
self.attention = LayoutXLMAttention(config)
self.add_cross_attention = False
self.intermediate = LayoutXLMIntermediate(config)
self.output = LayoutXLMOutput(config)
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, rel_pos=None, rel_2d_pos=None):
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
layer_output = self.feed_forward_chunk(attention_output)
outputs = (layer_output,) + outputs
return outputs
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
ret = 0
if bidirectional:
num_buckets //= 2
ret += (relative_position > 0).long() * num_buckets
n = torch.abs(relative_position)
else:
n = torch.max(-relative_position, torch.zeros_like(relative_position))
max_exact = num_buckets // 2
is_small = n < max_exact
    # Cast the log-spaced indices to long so torch.where can mix them with the
    # exact integer buckets in `n`.
    val_if_large = max_exact + (torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)).long()
    val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
    ret += torch.where(is_small, n, val_if_large)
return ret
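# Hedged numeric sketch with the defaults (bidirectional=True, num_buckets=32,
# max_distance=128): the sign claims half the buckets (positive offsets add 16),
# offsets with |n| < 8 keep their exact bucket, and larger offsets are
# log-spaced, clamping at bucket 15, e.g.
#   relative_position_bucket(torch.tensor([-1, 0, 1, 200]))  # -> [1, 0, 17, 31]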
class LayoutXLMEncoder(Layer):
def __init__(self, config):
super(LayoutXLMEncoder, self).__init__()
self.config = config
self.layer = nn.ModuleList([LayoutXLMLayer(config) for _ in range(config['num_hidden_layers'])])
self.has_relative_attention_bias = config['has_relative_attention_bias']
self.has_spatial_attention_bias = config['has_spatial_attention_bias']
if self.has_relative_attention_bias:
self.rel_pos_bins = config['rel_pos_bins']
self.max_rel_pos = config['max_rel_pos']
self.rel_pos_onehot_size = config['rel_pos_bins']
self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config['num_attention_heads'], bias=False)
if self.has_spatial_attention_bias:
self.max_rel_2d_pos = config['max_rel_2d_pos']
self.rel_2d_pos_bins = config['rel_2d_pos_bins']
self.rel_2d_pos_onehot_size = config['rel_2d_pos_bins']
self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config['num_attention_heads'], bias=False)
self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config['num_attention_heads'], bias=False)
def _cal_1d_pos_emb(self, hidden_states, position_ids):
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(rel_pos_mat, num_buckets=self.rel_pos_bins, max_distance=self.max_rel_pos)
        rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).to(hidden_states.dtype)
rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2).contiguous()
return rel_pos
def _cal_2d_pos_emb(self, hidden_states, bbox):
position_coord_x = bbox[:, :, 0]
position_coord_y = bbox[:, :, 3]
rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
rel_pos_x = relative_position_bucket(rel_pos_x_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos)
rel_pos_y = relative_position_bucket(rel_pos_y_2d_mat, num_buckets=self.rel_2d_pos_bins, max_distance=self.max_rel_2d_pos)
        rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).to(hidden_states.dtype)
        rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).to(hidden_states.dtype)
rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2).contiguous()
rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2).contiguous()
rel_2d_pos = rel_pos_x + rel_pos_y
return rel_2d_pos
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, output_attentions=False, output_hidden_states=False, bbox=None, position_ids=None):
all_hidden_states = () if output_hidden_states else None
rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None
rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None
hidden_save = dict()
hidden_save['input_hidden_states'] = hidden_states
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
hidden_save['input_attention_mask'] = attention_mask
hidden_save['input_layer_head_mask'] = layer_head_mask
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, rel_pos=rel_pos, rel_2d_pos=rel_2d_pos)
hidden_states = layer_outputs[0]
hidden_save['{}_data'.format(i)] = hidden_states
return hidden_states,
class Conv_BN_ReLU(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size=1, stride=1, padding=0):
super(Conv_BN_ReLU, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)
self.bn = nn.BatchNorm2d(out_planes, momentum=0.1)
self.relu = nn.ReLU()
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def forward(self, x):
return self.relu(self.bn(self.conv(x)))
class FPN(nn.Module):
def __init__(self, in_channels, out_channels):
super(FPN, self).__init__()
self.toplayer_ = Conv_BN_ReLU(in_channels[3], out_channels, kernel_size=1, stride=1, padding=0)
self.latlayer1_ = Conv_BN_ReLU(in_channels[2], out_channels, kernel_size=1, stride=1, padding=0)
self.latlayer2_ = Conv_BN_ReLU(in_channels[1], out_channels, kernel_size=1, stride=1, padding=0)
self.latlayer3_ = Conv_BN_ReLU(in_channels[0], out_channels, kernel_size=1, stride=1, padding=0)
self.smooth1_ = Conv_BN_ReLU(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.smooth2_ = Conv_BN_ReLU(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.smooth3_ = Conv_BN_ReLU(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.out_channels = out_channels * 4
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
    def _upsample(self, x, scale=1):
        # F.upsample is deprecated in torch; F.interpolate is the current equivalent.
        return F.interpolate(x, scale_factor=scale, mode='bilinear')
    def _upsample_add(self, x, y, scale=1):
        return F.interpolate(x, scale_factor=scale, mode='bilinear') + y
def forward(self, x):
f2, f3, f4, f5 = x
p5 = self.toplayer_(f5)
f4 = self.latlayer1_(f4)
p4 = self._upsample_add(p5, f4, 2)
p4 = self.smooth1_(p4)
f3 = self.latlayer2_(f3)
p3 = self._upsample_add(p4, f3, 2)
p3 = self.smooth2_(p3)
f2 = self.latlayer3_(f2)
p2 = self._upsample_add(p3, f2, 2)
p2 = self.smooth3_(p2)
p3 = self._upsample(p3, 2)
p4 = self._upsample(p4, 4)
p5 = self._upsample(p5, 8)
fuse = torch.cat([p2, p3, p4, p5], dim=1)
return fuse
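# Hedged shape sketch: for PSENet-style inputs (f2..f5 at strides 4/8/16/32),
# every pyramid level is upsampled back to f2's stride-4 resolution before the
# concat, so `fuse` carries out_channels * 4 channels at f2's spatial size.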
class LastLevelMaxPool(nn.Module):
"""
This module is used in the original FPN to generate a downsampled
P6 feature from P5.
"""
def __init__(self):
super(LastLevelMaxPool, self).__init__()
self.num_levels = 1
self.in_feature = 'p5'
def forward(self, x):
return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
class CNNBlockBase(Module):
def __init__(self, in_channels, out_channels, stride):
"""
The `__init__` method of any subclass should also contain these arguments.
Args:
in_channels (int):
out_channels (int):
stride (int):
"""
super(CNNBlockBase, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = stride
    def freeze(self):
        # torch counterpart of paddle's `stop_gradient`: exclude all parameters from autograd.
        for p in self.parameters():
            p.requires_grad = False
class FrozenBatchNorm(nn.BatchNorm2d):
def __init__(self, num_channels):
super(FrozenBatchNorm, self).__init__(num_channels)
def get_norm(norm, out_channels):
"""
Args:
        norm (str or callable): either one of BN, SyncBN, FrozenBN;
or a callable that takes a channel number and returns
the normalization layer as a nn.Layer.
out_channels (int): out_channels
Returns:
nn.Layer or None: the normalization layer
"""
if norm is None:
return None
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {'BN': nn.BatchNorm2d, 'SyncBN': nn.SyncBatchNorm, 'FrozenBN': FrozenBatchNorm}[norm]
return norm(out_channels)
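# Hedged usage sketch:
#   bn = get_norm('BN', 64)           # -> nn.BatchNorm2d(64)
#   assert get_norm('', 64) is None   # empty string disables normalization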
class BasicStem(CNNBlockBase):
"""
The standard ResNet stem (layers before the first residual block),
with a conv, relu and max_pool.
"""
def __init__(self, in_channels=3, out_channels=64, norm='BN'):
"""
Args:
norm (str or callable): norm after the first conv layer.
See :func:`layers.get_norm` for supported format.
"""
super(BasicStem, self).__init__(in_channels, out_channels, 4)
self.in_channels = in_channels
self.conv1 = Conv2d(in_channels, out_channels, kernel_size=7, stride=2, padding=3, bias=False, norm=get_norm(norm, out_channels))
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
class DeformBottleneckBlock(CNNBlockBase):
"""
Similar to :class:`BottleneckBlock`, but with :paper:`deformable conv <deformconv>`
in the 3x3 convolution.
"""
def __init__(self, in_channels, out_channels, *, bottleneck_channels, stride=1, num_groups=1, norm='BN', stride_in_1x1=False, dilation=1, deform_modulated=False, deform_num_groups=1):
raise NotImplementedError
def build_resnet_backbone(cfg, input_shape=None):
"""
Create a ResNet instance from config.
Returns:
ResNet: a :class:`ResNet` instance.
"""
if input_shape is None:
ch = 3
else:
ch = input_shape.channels
norm = cfg.MODEL.RESNETS.NORM
stem = BasicStem(in_channels=ch, out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS, norm=norm)
freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
out_features = cfg.MODEL.RESNETS.OUT_FEATURES
depth = cfg.MODEL.RESNETS.DEPTH
num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
bottleneck_channels = num_groups * width_per_group
in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
assert res5_dilation in {1, 2}, 'res5_dilation cannot be {}.'.format(res5_dilation)
    num_blocks_per_stage = {18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]
if depth in [18, 34]:
assert out_channels == 64, 'Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34'
assert not any(deform_on_per_stage), 'MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34'
assert res5_dilation == 1, 'Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34'
assert num_groups == 1, 'Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34'
stages = []
for idx, stage_idx in enumerate(range(2, 6)):
dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
stage_kargs = {'num_blocks': num_blocks_per_stage[idx], 'stride_per_block': [first_stride] + [1] * (num_blocks_per_stage[idx] - 1), 'in_channels': in_channels, 'out_channels': out_channels, 'norm': norm}
if depth in [18, 34]:
stage_kargs['block_class'] = BasicBlock
else:
stage_kargs['bottleneck_channels'] = bottleneck_channels
stage_kargs['stride_in_1x1'] = stride_in_1x1
stage_kargs['dilation'] = dilation
stage_kargs['num_groups'] = num_groups
if deform_on_per_stage[idx]:
stage_kargs['block_class'] = DeformBottleneckBlock
stage_kargs['deform_modulated'] = deform_modulated
stage_kargs['deform_num_groups'] = deform_num_groups
else:
stage_kargs['block_class'] = BottleneckBlock
blocks = ResNet.make_stage(**stage_kargs)
in_channels = out_channels
out_channels *= 2
bottleneck_channels *= 2
stages.append(blocks)
return ResNet(stem, stages, out_features=out_features, freeze_at=freeze_at)
def build_resnet_fpn_backbone(cfg, input_shape=None):
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, norm=cfg.MODEL.FPN.NORM, top_block=LastLevelMaxPool(), fuse_type=cfg.MODEL.FPN.FUSE_TYPE)
return backbone
def read_config(fp=None):
if fp is None:
dir_name = os.path.dirname(os.path.abspath(__file__))
fp = os.path.join(dir_name, 'visual_backbone.yaml')
with open(fp, 'r') as fin:
yacs_config = _yacs_config
cfg = yacs_config.CfgNode().load_cfg(fin)
cfg.freeze()
return cfg
class VisualBackbone(Module):
def __init__(self, config):
super(VisualBackbone, self).__init__()
self.cfg = read_config()
self.backbone = build_resnet_fpn_backbone(self.cfg)
assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
self.register_buffer('pixel_mean', torch.as_tensor(self.cfg.MODEL.PIXEL_MEAN).reshape([num_channels, 1, 1]))
self.register_buffer('pixel_std', torch.as_tensor(self.cfg.MODEL.PIXEL_STD).reshape([num_channels, 1, 1]))
self.out_feature_key = 'p2'
self.pool = nn.AdaptiveAvgPool2d(config['image_feature_pool_shape'][:2])
if len(config['image_feature_pool_shape']) == 2:
config['image_feature_pool_shape'].append(self.backbone.output_shape()[self.out_feature_key].channels)
assert self.backbone.output_shape()[self.out_feature_key].channels == config['image_feature_pool_shape'][2]
def forward(self, images):
images_input = (torch.as_tensor(images) - self.pixel_mean) / self.pixel_std
features = self.backbone(images_input)
features = features[self.out_feature_key]
        features = self.pool(features).flatten(start_dim=2).permute(0, 2, 1)  # [B, C, H*W] -> [B, H*W, C]
return features
class BiaffineAttention(Layer):
"""Implements a biaffine attention operator for binary relation classification."""
def __init__(self, in_features, out_features):
super(BiaffineAttention, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bilinear = nn.Bilinear(in_features, in_features, out_features, bias=False)
self.linear = nn.Linear(2 * in_features, out_features)
def forward(self, x_1, x_2):
return self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2), dim=-1))
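# Hedged usage sketch: scores pairs of entity representations for binary
# relation classification, e.g.
#   att = BiaffineAttention(in_features=384, out_features=2)
#   logits = att(torch.randn(8, 384), torch.randn(8, 384))  # -> [8, 2]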
class REDecoder(Layer):
def __init__(self, hidden_size=768, hidden_dropout_prob=0.1):
super(REDecoder, self).__init__()
self.entity_emb = nn.Embedding(3, hidden_size)
projection = nn.Sequential(nn.Linear(hidden_size * 2, hidden_size), nn.ReLU(), nn.Dropout(hidden_dropout_prob), nn.Linear(hidden_size, hidden_size // 2), nn.ReLU(), nn.Dropout(hidden_dropout_prob))
self.ffnn_head = copy.deepcopy(projection)
self.ffnn_tail = copy.deepcopy(projection)
self.rel_classifier = BiaffineAttention(hidden_size // 2, 2)
def build_relation(self, relations, entities):
batch_size = len(relations)
new_relations = []
for b in range(batch_size):
if len(entities[b]['start']) <= 2:
entities[b] = {'end': [1, 1], 'label': [0, 0], 'start': [0, 0]}
all_possible_relations = set([(i, j) for i in range(len(entities[b]['label'])) for j in range(len(entities[b]['label'])) if entities[b]['label'][i] == 1 and entities[b]['label'][j] == 2])
if len(all_possible_relations) == 0:
all_possible_relations = {(0, 1)}
positive_relations = set(list(zip(relations[b]['head'], relations[b]['tail'])))
negative_relations = all_possible_relations - positive_relations
positive_relations = set([i for i in positive_relations if i in all_possible_relations])
reordered_relations = list(positive_relations) + list(negative_relations)
relation_per_doc = {'head': [i[0] for i in reordered_relations], 'tail': [i[1] for i in reordered_relations], 'label': [1] * len(positive_relations) + [0] * (len(reordered_relations) - len(positive_relations))}
assert len(relation_per_doc['head']) != 0
new_relations.append(relation_per_doc)
return new_relations, entities
def get_predicted_relations(self, logits, relations, entities):
pred_relations = []
for i, pred_label in enumerate(logits.argmax(-1)):
if pred_label != 1:
continue
rel = {}
rel['head_id'] = relations['head'][i]
rel['head'] = entities['start'][rel['head_id']], entities['end'][rel['head_id']]
rel['head_type'] = entities['label'][rel['head_id']]
rel['tail_id'] = relations['tail'][i]
rel['tail'] = entities['start'][rel['tail_id']], entities['end'][rel['tail_id']]
rel['tail_type'] = entities['label'][rel['tail_id']]
rel['type'] = 1
pred_relations.append(rel)
return pred_relations
def forward(self, hidden_states, entities, relations):
batch_size, max_n_words, context_dim = hidden_states.shape
relations, entities = self.build_relation(relations, entities)
loss = 0
all_pred_relations = []
for b in range(batch_size):
head_entities = torch.as_tensor(relations[b]['head'])
tail_entities = torch.as_tensor(relations[b]['tail'])
relation_labels = torch.as_tensor(relations[b]['label'], dtype=torch.long)
entities_start_index = torch.as_tensor(entities[b]['start'])
entities_labels = torch.as_tensor(entities[b]['label'])
head_index = entities_start_index[head_entities]
head_label = entities_labels[head_entities]
head_label_repr = self.entity_emb(head_label)
tail_index = entities_start_index[tail_entities]
tail_label = entities_labels[tail_entities]
tail_label_repr = self.entity_emb(tail_label)
tmp_hidden_states = hidden_states[b][head_index]
if len(tmp_hidden_states.shape) == 1:
tmp_hidden_states = torch.unsqueeze(tmp_hidden_states, dim=0)
head_repr = torch.cat((tmp_hidden_states, head_label_repr), dim=-1)
tmp_hidden_states = hidden_states[b][tail_index]
if len(tmp_hidden_states.shape) == 1:
tmp_hidden_states = torch.unsqueeze(tmp_hidden_states, dim=0)
tail_repr = torch.cat((tmp_hidden_states, tail_label_repr), dim=-1)
heads = self.ffnn_head(head_repr)
tails = self.ffnn_tail(tail_repr)
logits = self.rel_classifier(heads, tails)
loss = None
pred_relations = self.get_predicted_relations(logits, relations[b], entities[b])
all_pred_relations.append(pred_relations)
return loss, all_pred_relations
class Conv2d(nn.Conv2d):
def __init__(self, *args, **kwargs):
norm = kwargs.pop('norm', None)
activation = kwargs.pop('activation', None)
super(Conv2d, self).__init__(*args, **kwargs)
self.norm = norm
self.activation = activation
def forward(self, x):
x = super(Conv2d, self).forward(x)
if self.norm is not None:
x = self.norm(x)
if self.activation is not None:
x = self.activation(x)
return x
class ShapeSpec(namedtuple('_ShapeSpec', ['channels', 'height', 'width', 'stride'])):
def __new__(cls, channels=None, height=None, width=None, stride=None):
return super().__new__(cls, channels, height, width, stride)
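# Hedged usage sketch: ShapeSpec is a lightweight record whose fields default
# to None when unknown, e.g.
#   spec = ShapeSpec(channels=256, stride=4)
#   spec.channels, spec.height  # -> (256, None)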
class Backbone(nn.Module):
def __init__(self):
super(Backbone, self).__init__()
@abstractmethod
def forward(self, *args):
pass
@property
    def size_divisibility(self) -> int:
return 0
def output_shape(self):
return {name: ShapeSpec(channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]) for name in self._out_features}
COMMUNITY_MODEL_PREFIX = 'https://bj.bcebos.com/paddlenlp/models/transformers/community/'
class InitTrackerMeta(type(Layer)):
"""
This metaclass wraps the `__init__` method of a class to add `init_config`
attribute for instances of that class, and `init_config` use a dict to track
the initial configuration. If the class has `_wrap_init` method, it would be
hooked after `__init__` and called as `_wrap_init(self, init_fn, init_args)`.
    Since InitTrackerMeta is used as the metaclass for pretrained model classes,
    which are always Layer subclasses and `type(Layer)` is not `type`, it derives
    from `type(Layer)` rather than `type` to avoid metaclass inheritance
    conflicts.
"""
def __init__(cls, name, bases, attrs):
init_func = cls.__init__
help_func = getattr(cls, '_wrap_init', None) if '__init__' in attrs else None
cls.__init__ = InitTrackerMeta.init_and_track_conf(init_func, help_func)
super(InitTrackerMeta, cls).__init__(name, bases, attrs)
@staticmethod
def init_and_track_conf(init_func, help_func=None):
"""
wraps `init_func` which is `__init__` method of a class to add `init_config`
attribute for instances of that class.
Args:
init_func (callable): It should be the `__init__` method of a class.
help_func (callable, optional): If provided, it would be hooked after
                `init_func` and called as `_wrap_init(self, init_func, *init_args, **init_kwargs)`.
Default None.
Returns:
function: the wrapped function
"""
@functools.wraps(init_func)
def __impl__(self, *args, **kwargs):
init_func(self, *args, **kwargs)
if help_func:
help_func(self, init_func, *args, **kwargs)
self.init_config = kwargs
if args:
kwargs['init_args'] = args
kwargs['init_class'] = self.__class__.__name__
return __impl__
MODEL_HOME = '/root/.paddlenlp/models'
def fn_args_to_dict(func, *args, **kwargs):
"""
Inspect function `func` and its arguments for running, and extract a
dict mapping between argument names and keys.
"""
if hasattr(inspect, 'getfullargspec'):
spec_args, spec_varargs, spec_varkw, spec_defaults, _, _, _ = inspect.getfullargspec(func)
else:
spec_args, spec_varargs, spec_varkw, spec_defaults = inspect.getargspec(func)
init_dict = dict(zip(spec_args, args))
kwargs_dict = dict(zip(spec_args[-len(spec_defaults):], spec_defaults)) if spec_defaults else {}
kwargs_dict.update(kwargs)
init_dict.update(kwargs_dict)
return init_dict
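# Hedged usage sketch:
#   def f(a, b=2, c=3): ...
#   fn_args_to_dict(f, 1, c=5)  # -> {'a': 1, 'b': 2, 'c': 5}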
def _load_paddle_layoutxlm_weights(torch_model, weights_path):
with paddle.fluid.dygraph.guard():
load_layer_state_dict, opti_state_dict = paddle.fluid.load_dygraph(weights_path)
load_layer = []
not_load_layer = []
torch_state_dict = torch_model.state_dict()
for k, v in load_layer_state_dict.items():
ppname = name = k
if ppname.endswith('._mean'):
name = ppname.replace('._mean', '.running_mean')
if ppname.endswith('._variance'):
name = ppname.replace('._variance', '.running_var')
load_layer.append(name)
cur_weights = torch_state_dict[name]
cur_w_shape = cur_weights.shape
if ppname.endswith('.weight'):
if len(v.shape) == len(cur_w_shape) == 2 and v.shape[0] == cur_w_shape[1] and v.shape[1] == cur_w_shape[0]:
if ppname.startswith('layoutxlm.embeddings.'):
torch_state_dict[name].copy_(torch.Tensor(v))
else:
torch_state_dict[name].copy_(torch.Tensor(v.T))
else:
torch_state_dict[name].copy_(torch.Tensor(v))
else:
torch_state_dict[name].copy_(torch.Tensor(v))
def _load_torch_weights(torch_model, weights_path):
torch_model.load_state_dict(torch.load(weights_path))
def load_layoutxlm_weights(torch_model, weights_path):
if weights_path.endswith('.pdparams'):
_load_paddle_layoutxlm_weights(torch_model, weights_path)
else:
_load_torch_weights(torch_model, weights_path)
logger_initialized = {}
@functools.lru_cache()
def get_logger(name='root', log_file=None, log_level=logging.DEBUG):
"""Initialize and get a logger by name.
If the logger has not been initialized, this method will initialize the
logger by adding one or two handlers, otherwise the initialized logger will
be directly returned. During initialization, a StreamHandler will always be
    added. If `log_file` is specified, a FileHandler will also be added.
Args:
name (str): Logger name.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the logger.
log_level (int): The logger level. Note that only the process of
rank 0 is affected, and other processes will set the level to
"Error" thus be silent most of the time.
Returns:
logging.Logger: The expected logger.
"""
logger = logging.getLogger(name)
if name in logger_initialized:
return logger
for logger_name in logger_initialized:
if name.startswith(logger_name):
return logger
formatter = logging.Formatter('[%(asctime)s] %(name)s %(levelname)s: %(message)s', datefmt='%Y/%m/%d %H:%M:%S')
stream_handler = logging.StreamHandler(stream=sys.stdout)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
if log_file is not None and dist.get_rank() == 0:
log_file_folder = os.path.split(log_file)[0]
os.makedirs(log_file_folder, exist_ok=True)
file_handler = logging.FileHandler(log_file, 'a')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger_initialized[name] = True
return logger
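# Hedged usage sketch: loggers are cached by name (and by initialized prefix),
# so repeated calls are cheap and return the same configured logger:
#   log = get_logger('ppocr')
#   log.info('model built')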
logger = get_logger()
def build_backbone(config, model_type):
if model_type == 'det':
support_dict = ['MobileNetV3', 'ResNet', 'ResNet_vd', 'ResNet_SAST']
elif model_type == 'rec' or model_type == 'cls':
support_dict = ['MobileNetV1Enhance', 'MobileNetV3', 'ResNet', 'ResNetFPN', 'MTB', 'ResNet31', 'SVTRNet', 'ViTSTR']
elif model_type == 'e2e':
support_dict = ['ResNet']
elif model_type == 'table':
support_dict = ['ResNet', 'MobileNetV3']
else:
raise NotImplementedError
module_name = config.pop('name')
    assert module_name in support_dict, Exception('when model type is {}, backbone only supports {}'.format(model_type, support_dict))
module_class = eval(module_name)(**config)
return module_class
def build_head(config, **kwargs):
support_dict = ['DBHead', 'PSEHead', 'EASTHead', 'SASTHead', 'CTCHead', 'ClsHead', 'AttentionHead', 'SRNHead', 'PGHead', 'Transformer', 'TableAttentionHead', 'SARHead', 'FCEHead']
module_name = config.pop('name')
    assert module_name in support_dict, Exception('head only supports {}'.format(support_dict))
module_class = eval(module_name)(**config, **kwargs)
return module_class
def build_neck(config):
support_dict = ['FPN', 'DBFPN', 'EASTFPN', 'SASTFPN', 'SequenceEncoder', 'PGFPN', 'TableFPN', 'RSEFPN', 'LKPAN', 'FCEFPN']
module_name = config.pop('name')
    assert module_name in support_dict, Exception('neck only supports {}'.format(support_dict))
module_class = eval(module_name)(**config)
return module_class
def build_transform(config):
support_dict = ['TPS', 'STN_ON']
module_name = config.pop('name')
    assert module_name in support_dict, Exception('transform only supports {}'.format(support_dict))
module_class = eval(module_name)(**config)
return module_class
class BaseModel(nn.Module):
def __init__(self, config, **kwargs):
"""
        the base module for OCR models.
        args:
            config (dict): hyperparameters for the module.
"""
super(BaseModel, self).__init__()
in_channels = config.get('in_channels', 3)
model_type = config['model_type']
if 'Transform' not in config or config['Transform'] is None:
self.use_transform = False
else:
self.use_transform = True
config['Transform']['in_channels'] = in_channels
self.transform = build_transform(config['Transform'])
in_channels = self.transform.out_channels
config['Backbone']['in_channels'] = in_channels
self.backbone = build_backbone(config['Backbone'], model_type)
in_channels = self.backbone.out_channels
if 'Neck' not in config or config['Neck'] is None:
self.use_neck = False
else:
self.use_neck = True
config['Neck']['in_channels'] = in_channels
self.neck = build_neck(config['Neck'])
in_channels = self.neck.out_channels
if 'Head' not in config or config['Head'] is None:
self.use_head = False
else:
self.use_head = True
config['Head']['in_channels'] = in_channels
self.head = build_head(config['Head'], **kwargs)
self.return_all_feats = config.get('return_all_feats', False)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
def forward(self, x):
y = dict()
if self.use_transform:
x = self.transform(x)
x = self.backbone(x)
y['backbone_out'] = x
if self.use_neck:
x = self.neck(x)
y['neck_out'] = x
if self.use_head:
x = self.head(x)
            if isinstance(x, dict) and 'ctc_neck' in x:
                y['neck_out'] = x['ctc_neck']
y['head_out'] = x
elif isinstance(x, dict):
y.update(x)
else:
y['head_out'] = x
if self.return_all_feats:
if self.training:
return y
else:
return {'head_out': y['head_out']}
else:
return x
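# Hedged config sketch (keys mirror the PaddleOCR-style dicts consumed above;
# the module names and parameter values are illustrative assumptions, not taken
# from the original source):
#   cfg = {
#       'model_type': 'rec',
#       'Transform': None,
#       'Backbone': {'name': 'MobileNetV1Enhance', 'scale': 0.5},
#       'Neck': {'name': 'SequenceEncoder', 'encoder_type': 'rnn', 'hidden_size': 64},
#       'Head': {'name': 'CTCHead', 'out_channels': 6625},
#   }
#   model = BaseModel(cfg)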
class ResNet_vd(nn.Module):
def __init__(self, in_channels=3, layers=50, dcn_stage=None, out_indices=None, **kwargs):
super(ResNet_vd, self).__init__()
self.layers = layers
supported_layers = [18, 34, 50, 101, 152, 200]
assert layers in supported_layers, 'supported layers are {} but input layer is {}'.format(supported_layers, layers)
if layers == 18:
depth = [2, 2, 2, 2]
elif layers == 34 or layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
elif layers == 200:
depth = [3, 12, 48, 3]
num_channels = [64, 256, 512, 1024] if layers >= 50 else [64, 64, 128, 256]
num_filters = [64, 128, 256, 512]
self.dcn_stage = dcn_stage if dcn_stage is not None else [False, False, False, False]
self.out_indices = out_indices if out_indices is not None else [0, 1, 2, 3]
self.conv1_1 = ConvBNLayer(in_channels=in_channels, out_channels=32, kernel_size=3, stride=2, act='relu', name='conv1_1')
self.conv1_2 = ConvBNLayer(in_channels=32, out_channels=32, kernel_size=3, stride=1, act='relu', name='conv1_2')
self.conv1_3 = ConvBNLayer(in_channels=32, out_channels=64, kernel_size=3, stride=1, act='relu', name='conv1_3')
self.pool2d_max = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.stages = nn.ModuleList()
self.out_channels = []
if layers >= 50:
for block in range(len(depth)):
block_list = nn.Sequential()
shortcut = False
is_dcn = self.dcn_stage[block]
for i in range(depth[block]):
if layers in [101, 152] and block == 2:
if i == 0:
conv_name = 'res' + str(block + 2) + 'a'
else:
conv_name = 'res' + str(block + 2) + 'b' + str(i)
else:
conv_name = 'res' + str(block + 2) + chr(97 + i)
bottleneck_block = BottleneckBlock(in_channels=num_channels[block] if i == 0 else num_filters[block] * 4, out_channels=num_filters[block], stride=2 if i == 0 and block != 0 else 1, shortcut=shortcut, if_first=block == i == 0, name=conv_name, is_dcn=is_dcn)
shortcut = True
block_list.add_module('bb_%d_%d' % (block, i), bottleneck_block)
if block in self.out_indices:
self.out_channels.append(num_filters[block] * 4)
self.stages.append(block_list)
else:
for block in range(len(depth)):
block_list = nn.Sequential()
shortcut = False
for i in range(depth[block]):
conv_name = 'res' + str(block + 2) + chr(97 + i)
basic_block = BasicBlock(in_channels=num_channels[block] if i == 0 else num_filters[block], out_channels=num_filters[block], stride=2 if i == 0 and block != 0 else 1, shortcut=shortcut, if_first=block == i == 0, name=conv_name)
shortcut = True
block_list.add_module('bb_%d_%d' % (block, i), basic_block)
if block in self.out_indices:
self.out_channels.append(num_filters[block])
self.stages.append(block_list)
def forward(self, inputs):
y = self.conv1_1(inputs)
y = self.conv1_2(y)
y = self.conv1_3(y)
y = self.pool2d_max(y)
out = []
for i, block in enumerate(self.stages):
y = block(y)
if i in self.out_indices:
out.append(y)
return out
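# Hedged usage sketch: out_indices selects which stage outputs are returned;
# with the defaults all four maps come back at strides 4/8/16/32, e.g.
#   net = ResNet_vd(in_channels=3, layers=50)
#   feats = net(torch.randn(1, 3, 640, 640))  # 4 maps, strides 4/8/16/32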
class ResNet_SAST(nn.Module):
def __init__(self, in_channels=3, layers=50, **kwargs):
super(ResNet_SAST, self).__init__()
self.layers = layers
supported_layers = [18, 34, 50, 101, 152, 200]
assert layers in supported_layers, 'supported layers are {} but input layer is {}'.format(supported_layers, layers)
if layers == 18:
depth = [2, 2, 2, 2]
elif layers == 34 or layers == 50:
depth = [3, 4, 6, 3, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
elif layers == 200:
depth = [3, 12, 48, 3]
num_channels = [64, 256, 512, 1024, 2048] if layers >= 50 else [64, 64, 128, 256]
num_filters = [64, 128, 256, 512, 512]
self.conv1_1 = ConvBNLayer(in_channels=in_channels, out_channels=32, kernel_size=3, stride=2, act='relu', name='conv1_1')
self.conv1_2 = ConvBNLayer(in_channels=32, out_channels=32, kernel_size=3, stride=1, act='relu', name='conv1_2')
self.conv1_3 = ConvBNLayer(in_channels=32, out_channels=64, kernel_size=3, stride=1, act='relu', name='conv1_3')
self.pool2d_max = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.stages = nn.ModuleList()
self.out_channels = [3, 64]
if layers >= 50:
for block in range(len(depth)):
block_list = nn.Sequential()
shortcut = False
for i in range(depth[block]):
if layers in [101, 152] and block == 2:
if i == 0:
conv_name = 'res' + str(block + 2) + 'a'
else:
conv_name = 'res' + str(block + 2) + 'b' + str(i)
else:
conv_name = 'res' + str(block + 2) + chr(97 + i)
bottleneck_block = BottleneckBlock(in_channels=num_channels[block] if i == 0 else num_filters[block] * 4, out_channels=num_filters[block], stride=2 if i == 0 and block != 0 else 1, shortcut=shortcut, if_first=block == i == 0, name=conv_name)
shortcut = True
block_list.add_module('bb_%d_%d' % (block, i), bottleneck_block)
self.out_channels.append(num_filters[block] * 4)
self.stages.append(block_list)
else:
for block in range(len(depth)):
block_list = nn.Sequential()
shortcut = False
for i in range(depth[block]):
conv_name = 'res' + str(block + 2) + chr(97 + i)
basic_block = BasicBlock(in_channels=num_channels[block] if i == 0 else num_filters[block], out_channels=num_filters[block], stride=2 if i == 0 and block != 0 else 1, shortcut=shortcut, if_first=block == i == 0, name=conv_name)
shortcut = True
block_list.add_module('bb_%d_%d' % (block, i), basic_block)
self.out_channels.append(num_filters[block])
self.stages.append(block_list)
def forward(self, inputs):
out = [inputs]
y = self.conv1_1(inputs)
y = self.conv1_2(y)
y = self.conv1_3(y)
out.append(y)
y = self.pool2d_max(y)
for block in self.stages:
y = block(y)
out.append(y)
return out
class DepthwiseSeparable(nn.Module):
def __init__(self, num_channels, num_filters1, num_filters2, num_groups, stride, scale, dw_size=3, padding=1, use_se=False):
super(DepthwiseSeparable, self).__init__()
self.use_se = use_se
self._depthwise_conv = ConvBNLayer(num_channels=num_channels, num_filters=int(num_filters1 * scale), filter_size=dw_size, stride=stride, padding=padding, num_groups=int(num_groups * scale))
if use_se:
self._se = SEModule(int(num_filters1 * scale))
self._pointwise_conv = ConvBNLayer(num_channels=int(num_filters1 * scale), filter_size=1, num_filters=int(num_filters2 * scale), stride=1, padding=0)
def forward(self, inputs):
y = self._depthwise_conv(inputs)
if self.use_se:
y = self._se(y)
y = self._pointwise_conv(y)
return y
class MobileNetV1Enhance(nn.Module):
def __init__(self, in_channels=3, scale=0.5, last_conv_stride=1, last_pool_type='max', **kwargs):
super().__init__()
self.scale = scale
self.block_list = []
self.conv1 = ConvBNLayer(num_channels=in_channels, filter_size=3, channels=3, num_filters=int(32 * scale), stride=2, padding=1)
conv2_1 = DepthwiseSeparable(num_channels=int(32 * scale), num_filters1=32, num_filters2=64, num_groups=32, stride=1, scale=scale)
self.block_list.append(conv2_1)
conv2_2 = DepthwiseSeparable(num_channels=int(64 * scale), num_filters1=64, num_filters2=128, num_groups=64, stride=1, scale=scale)
self.block_list.append(conv2_2)
conv3_1 = DepthwiseSeparable(num_channels=int(128 * scale), num_filters1=128, num_filters2=128, num_groups=128, stride=1, scale=scale)
self.block_list.append(conv3_1)
conv3_2 = DepthwiseSeparable(num_channels=int(128 * scale), num_filters1=128, num_filters2=256, num_groups=128, stride=(2, 1), scale=scale)
self.block_list.append(conv3_2)
conv4_1 = DepthwiseSeparable(num_channels=int(256 * scale), num_filters1=256, num_filters2=256, num_groups=256, stride=1, scale=scale)
self.block_list.append(conv4_1)
conv4_2 = DepthwiseSeparable(num_channels=int(256 * scale), num_filters1=256, num_filters2=512, num_groups=256, stride=(2, 1), scale=scale)
self.block_list.append(conv4_2)
for _ in range(5):
conv5 = DepthwiseSeparable(num_channels=int(512 * scale), num_filters1=512, num_filters2=512, num_groups=512, stride=1, dw_size=5, padding=2, scale=scale, use_se=False)
self.block_list.append(conv5)
conv5_6 = DepthwiseSeparable(num_channels=int(512 * scale), num_filters1=512, num_filters2=1024, num_groups=512, stride=(2, 1), dw_size=5, padding=2, scale=scale, use_se=True)
self.block_list.append(conv5_6)
conv6 = DepthwiseSeparable(num_channels=int(1024 * scale), num_filters1=1024, num_filters2=1024, num_groups=1024, stride=last_conv_stride, dw_size=5, padding=2, use_se=True, scale=scale)
self.block_list.append(conv6)
self.block_list = nn.Sequential(*self.block_list)
if last_pool_type == 'avg':
self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
else:
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.out_channels = int(1024 * scale)
def forward(self, inputs):
y = self.conv1(inputs)
y = self.block_list(y)
y = self.pool(y)
return y
class MTB(nn.Module):
def __init__(self, cnn_num, in_channels):
super(MTB, self).__init__()
self.block = nn.Sequential()
self.out_channels = in_channels
self.cnn_num = cnn_num
if self.cnn_num == 2:
for i in range(self.cnn_num):
self.block.add_module('conv_{}'.format(i), nn.Conv2d(in_channels=in_channels if i == 0 else 32 * 2 ** (i - 1), out_channels=32 * 2 ** i, kernel_size=3, stride=2, padding=1))
self.block.add_module('relu_{}'.format(i), nn.ReLU())
self.block.add_module('bn_{}'.format(i), nn.BatchNorm2d(32 * 2 ** i))
def forward(self, images):
x = self.block(images)
if self.cnn_num == 2:
x = x.permute(0, 3, 2, 1)
x_shape = x.shape
x = torch.reshape(x, (x_shape[0], x_shape[1], x_shape[2] * x_shape[3]))
return x
class ResNet31(nn.Module):
"""
Args:
in_channels (int): Number of channels of input image tensor.
layers (list[int]): List of BasicBlock number for each stage.
channels (list[int]): List of out_channels of Conv2d layer.
out_indices (None | Sequence[int]): Indices of output stages.
last_stage_pool (bool): If True, add `MaxPool2d` layer to last stage.
"""
def __init__(self, in_channels=3, layers=[1, 2, 5, 3], channels=[64, 128, 256, 256, 512, 512, 512], out_indices=None, last_stage_pool=False):
super(ResNet31, self).__init__()
assert isinstance(in_channels, int)
assert isinstance(last_stage_pool, bool)
self.out_indices = out_indices
self.last_stage_pool = last_stage_pool
self.conv1_1 = nn.Conv2d(in_channels, channels[0], kernel_size=3, stride=1, padding=1)
self.bn1_1 = nn.BatchNorm2d(channels[0])
self.relu1_1 = nn.ReLU(inplace=True)
self.conv1_2 = nn.Conv2d(channels[0], channels[1], kernel_size=3, stride=1, padding=1)
self.bn1_2 = nn.BatchNorm2d(channels[1])
self.relu1_2 = nn.ReLU(inplace=True)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True)
self.block2 = self._make_layer(channels[1], channels[2], layers[0])
self.conv2 = nn.Conv2d(channels[2], channels[2], kernel_size=3, stride=1, padding=1)
self.bn2 = nn.BatchNorm2d(channels[2])
self.relu2 = nn.ReLU(inplace=True)
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True)
self.block3 = self._make_layer(channels[2], channels[3], layers[1])
self.conv3 = nn.Conv2d(channels[3], channels[3], kernel_size=3, stride=1, padding=1)
self.bn3 = nn.BatchNorm2d(channels[3])
self.relu3 = nn.ReLU(inplace=True)
self.pool4 = nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1), padding=0, ceil_mode=True)
self.block4 = self._make_layer(channels[3], channels[4], layers[2])
self.conv4 = nn.Conv2d(channels[4], channels[4], kernel_size=3, stride=1, padding=1)
self.bn4 = nn.BatchNorm2d(channels[4])
self.relu4 = nn.ReLU(inplace=True)
self.pool5 = None
if self.last_stage_pool:
self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True)
self.block5 = self._make_layer(channels[4], channels[5], layers[3])
self.conv5 = nn.Conv2d(channels[5], channels[5], kernel_size=3, stride=1, padding=1)
self.bn5 = nn.BatchNorm2d(channels[5])
self.relu5 = nn.ReLU(inplace=True)
self.out_channels = channels[-1]
def _make_layer(self, input_channels, output_channels, blocks):
layers = []
for _ in range(blocks):
downsample = None
if input_channels != output_channels:
downsample = nn.Sequential(nn.Conv2d(input_channels, output_channels, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(output_channels))
layers.append(BasicBlock(input_channels, output_channels, downsample=downsample))
input_channels = output_channels
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1_1(x)
x = self.bn1_1(x)
x = self.relu1_1(x)
x = self.conv1_2(x)
x = self.bn1_2(x)
x = self.relu1_2(x)
outs = []
for i in range(4):
layer_index = i + 2
pool_layer = getattr(self, 'pool{}'.format(layer_index))
block_layer = getattr(self, 'block{}'.format(layer_index))
conv_layer = getattr(self, 'conv{}'.format(layer_index))
bn_layer = getattr(self, 'bn{}'.format(layer_index))
relu_layer = getattr(self, 'relu{}'.format(layer_index))
if pool_layer is not None:
x = pool_layer(x)
x = block_layer(x)
x = conv_layer(x)
x = bn_layer(x)
x = relu_layer(x)
outs.append(x)
if self.out_indices is not None:
return tuple([outs[i] for i in self.out_indices])
return x
def drop_path(x, drop_prob=0.0, training=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is often called 'DropConnect', but that name is misleading: Drop Connect is a different form of dropout from a separate paper.
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1)
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor = torch.floor(random_tensor)  # 1 = keep the whole sample, 0 = drop it
output = x.divide(keep_prob) * random_tensor  # rescale survivors so E[output] == x
return output
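# Hedged usage sketch (added for illustration, not part of the original code):
# drop_path zeroes entire samples with probability drop_prob and rescales the
# survivors by 1 / keep_prob, so the expected value of the output matches the
# input. `_drop_path_demo` is a hypothetical helper name; assumes torch is
# imported at the top of this file, as the surrounding code does.
def _drop_path_demo():
    x = torch.ones(1000, 8)
    y = drop_path(x, drop_prob=0.2, training=True)
    # roughly 20% of rows are all-zero; surviving rows equal 1 / 0.8 = 1.25
    return y.mean().item()  # close to 1.0 in expectation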
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, input):
return input
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer='gelu', drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = Activation(act_type=act_layer, inplace=True)
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class ConvMixer(nn.Module):
def __init__(self, dim, num_heads=8, HW=[8, 25], local_k=[3, 3]):
super().__init__()
self.HW = HW
self.dim = dim
self.local_mixer = nn.Conv2d(dim, dim, local_k, 1, [local_k[0] // 2, local_k[1] // 2], groups=num_heads)
def forward(self, x):
h = self.HW[0]
w = self.HW[1]
x = x.permute(0, 2, 1).reshape(-1, self.dim, h, w)  # [N, C, H, W] for the depthwise conv; torch has no list-style transpose
x = self.local_mixer(x)
x = x.flatten(2).permute(0, 2, 1)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, mixer='Global', HW=[8, 25], local_k=[7, 11], qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.HW = HW
if HW is not None:
H = HW[0]
W = HW[1]
self.N = H * W
self.C = dim
if mixer == 'Local' and HW is not None:
hk = local_k[0]
wk = local_k[1]
mask = torch.ones(H * W, H + hk - 1, W + wk - 1, dtype=torch.float32)
for h in range(0, H):
for w in range(0, W):
mask[h * W + w, h:h + hk, w:w + wk] = 0.0
mask_paddle = mask[:, hk // 2:H + hk // 2, wk // 2:W + wk // 2].flatten(1)
mask_inf = torch.full([H * W, H * W], fill_value=float('-Inf'), dtype=torch.float32)
mask = torch.where(mask_paddle < 1, mask_paddle, mask_inf)
self.register_buffer('mask', mask.unsqueeze(0).unsqueeze(1), persistent=False)  # buffer so the mask follows the module's device
self.mixer = mixer
def forward(self, x):
if self.HW is not None:
N = self.N
C = self.C
else:
_, N, C = x.shape
qkv = self.qkv(x)
qkv = qkv.reshape((-1, N, 3, self.num_heads, C // self.num_heads)).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]
attn = q.matmul(k.permute(0, 1, 3, 2))
if self.mixer == 'Local':
attn += self.mask
attn = nn.functional.softmax(attn, dim=-1)
attn = self.attn_drop(attn)
x = attn.matmul(v).permute(0, 2, 1, 3).reshape((-1, N, C))
x = self.proj(x)
x = self.proj_drop(x)
return x
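# Hedged sketch (illustration only): how the 'Local' mixer restricts attention.
# The mask built above is 0 inside each query's hk x wk neighborhood and -Inf
# outside, so softmax assigns zero weight to out-of-window positions.
# `_local_mask_demo` is a hypothetical helper name.
def _local_mask_demo():
    attn = Attention(dim=8, num_heads=2, mixer='Local', HW=[2, 3], local_k=[3, 3])
    return attn.mask.shape  # [1, 1, H * W, H * W] == [1, 1, 6, 6]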
class Block(nn.Module):
def __init__(self, dim, num_heads, mixer='Global', local_mixer=[7, 11], HW=None, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer='gelu', norm_layer='nn.LayerNorm', epsilon=1e-06, prenorm=True):
super().__init__()
if isinstance(norm_layer, str):
self.norm1 = eval(norm_layer)(dim, eps=epsilon)
else:
self.norm1 = norm_layer(dim)
if mixer == 'Global' or mixer == 'Local':
self.mixer = Attention(dim, num_heads=num_heads, mixer=mixer, HW=HW, local_k=local_mixer, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
elif mixer == 'Conv':
self.mixer = ConvMixer(dim, num_heads=num_heads, HW=HW, local_k=local_mixer)
else:
raise TypeError('The mixer must be one of [Global, Local, Conv]')
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else Identity()
if isinstance(norm_layer, str):
self.norm2 = eval(norm_layer)(dim, eps=epsilon)
else:
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp_ratio = mlp_ratio
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.prenorm = prenorm
def forward(self, x):
if self.prenorm:
x = self.norm1(x + self.drop_path(self.mixer(x)))
x = self.norm2(x + self.drop_path(self.mlp(x)))
else:
x = x + self.drop_path(self.mixer(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=[32, 100], in_channels=3, embed_dim=768, sub_num=2, patch_size=[4, 4], mode='pope'):
super().__init__()
num_patches = img_size[1] // 2 ** sub_num * (img_size[0] // 2 ** sub_num)
self.img_size = img_size
self.num_patches = num_patches
self.embed_dim = embed_dim
self.norm = None
if mode == 'pope':
if sub_num == 2:
self.proj = nn.Sequential(ConvBNLayer(in_channels=in_channels, out_channels=embed_dim // 2, kernel_size=3, stride=2, padding=1, act='gelu', bias_attr=True), ConvBNLayer(in_channels=embed_dim // 2, out_channels=embed_dim, kernel_size=3, stride=2, padding=1, act='gelu', bias_attr=True))
if sub_num == 3:
self.proj = nn.Sequential(ConvBNLayer(in_channels=in_channels, out_channels=embed_dim // 4, kernel_size=3, stride=2, padding=1, act='gelu', bias_attr=True), ConvBNLayer(in_channels=embed_dim // 4, out_channels=embed_dim // 2, kernel_size=3, stride=2, padding=1, act='gelu', bias_attr=True), ConvBNLayer(in_channels=embed_dim // 2, out_channels=embed_dim, kernel_size=3, stride=2, padding=1, act='gelu', bias_attr=True))
elif mode == 'linear':
self.proj = nn.Conv2d(1, embed_dim, kernel_size=patch_size, stride=patch_size)
self.num_patches = img_size[0] // patch_size[0] * img_size[1] // patch_size[1]
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], "Input image size ({}*{}) doesn't match model ({}*{}).".format(H, W, self.img_size[0], self.img_size[1])
x = self.proj(x).flatten(2).permute(0, 2, 1)
return x
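# Hedged sketch (illustration only): with the default 'pope' mode and
# sub_num=2, two stride-2 convs downsample by 4 per axis, so a 32x100 image
# yields (32 // 4) * (100 // 4) = 8 * 25 = 200 patch tokens. Assumes the
# ConvBNLayer defined earlier in this file accepts the keyword arguments used
# by PatchEmbed above.
def _patch_embed_demo():
    pe = PatchEmbed(img_size=[32, 100], in_channels=3, embed_dim=64, sub_num=2)
    x = torch.randn(1, 3, 32, 100)
    return pe(x).shape  # [1, 200, 64]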
class SubSample(nn.Module):
def __init__(self, in_channels, out_channels, types='Pool', stride=[2, 1], sub_norm='nn.LayerNorm', act=None):
super().__init__()
self.types = types
if types == 'Pool':
self.avgpool = nn.AvgPool2d(kernel_size=[3, 5], stride=stride, padding=[1, 2])
self.maxpool = nn.MaxPool2d(kernel_size=[3, 5], stride=stride, padding=[1, 2])
self.proj = nn.Linear(in_channels, out_channels)
else:
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
self.norm = eval(sub_norm)(out_channels)
if act is not None:
self.act = act()
else:
self.act = None
def forward(self, x):
if self.types == 'Pool':
x1 = self.avgpool(x)
x2 = self.maxpool(x)
x = (x1 + x2) * 0.5
out = self.proj(x.flatten(2).permute(0, 2, 1))
else:
x = self.conv(x)
out = x.flatten(2).permute(0, 2, 1)
out = self.norm(out)
if self.act is not None:
out = self.act(out)
return out
class SVTRNet(nn.Module):
def __init__(self, img_size=[32, 100], in_channels=3, embed_dim=[64, 128, 256], depth=[3, 6, 3], num_heads=[2, 4, 8], mixer=['Local'] * 6 + ['Global'] * 6, local_mixer=[[7, 11], [7, 11], [7, 11]], patch_merging='Conv', mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0.0, last_drop=0.0, attn_drop_rate=0.0, drop_path_rate=0.1, norm_layer='nn.LayerNorm', sub_norm='nn.LayerNorm', epsilon=1e-06, out_channels=192, out_char_num=25, block_unit='Block', act='gelu', last_stage=True, sub_num=2, prenorm=True, use_lenhead=False, **kwargs):
super().__init__()
self.img_size = img_size
self.embed_dim = embed_dim
self.out_channels = out_channels
self.prenorm = prenorm
patch_merging = None if patch_merging != 'Conv' and patch_merging != 'Pool' else patch_merging
self.patch_embed = PatchEmbed(img_size=img_size, in_channels=in_channels, embed_dim=embed_dim[0], sub_num=sub_num)
num_patches = self.patch_embed.num_patches
self.HW = [img_size[0] // 2 ** sub_num, img_size[1] // 2 ** sub_num]
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim[0]))
self.pos_drop = nn.Dropout(p=drop_rate)
Block_unit = eval(block_unit)
dpr = np.linspace(0, drop_path_rate, sum(depth))
self.blocks1 = nn.ModuleList([Block_unit(dim=embed_dim[0], num_heads=num_heads[0], mixer=mixer[0:depth[0]][i], HW=self.HW, local_mixer=local_mixer[0], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, act_layer=act, attn_drop=attn_drop_rate, drop_path=dpr[0:depth[0]][i], norm_layer=norm_layer, epsilon=epsilon, prenorm=prenorm) for i in range(depth[0])])
if patch_merging is not None:
self.sub_sample1 = SubSample(embed_dim[0], embed_dim[1], sub_norm=sub_norm, stride=[2, 1], types=patch_merging)
HW = [self.HW[0] // 2, self.HW[1]]
else:
HW = self.HW
self.patch_merging = patch_merging
self.blocks2 = nn.ModuleList([Block_unit(dim=embed_dim[1], num_heads=num_heads[1], mixer=mixer[depth[0]:depth[0] + depth[1]][i], HW=HW, local_mixer=local_mixer[1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, act_layer=act, attn_drop=attn_drop_rate, drop_path=dpr[depth[0]:depth[0] + depth[1]][i], norm_layer=norm_layer, epsilon=epsilon, prenorm=prenorm) for i in range(depth[1])])
if patch_merging is not None:
self.sub_sample2 = SubSample(embed_dim[1], embed_dim[2], sub_norm=sub_norm, stride=[2, 1], types=patch_merging)
HW = [self.HW[0] // 4, self.HW[1]]
else:
HW = self.HW
self.blocks3 = nn.ModuleList([Block_unit(dim=embed_dim[2], num_heads=num_heads[2], mixer=mixer[depth[0] + depth[1]:][i], HW=HW, local_mixer=local_mixer[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, act_layer=act, attn_drop=attn_drop_rate, drop_path=dpr[depth[0] + depth[1]:][i], norm_layer=norm_layer, epsilon=epsilon, prenorm=prenorm) for i in range(depth[2])])
self.last_stage = last_stage
if last_stage:
self.avg_pool = nn.AdaptiveAvgPool2d([1, out_char_num])
self.last_conv = nn.Conv2d(in_channels=embed_dim[2], out_channels=self.out_channels, kernel_size=1, stride=1, padding=0, bias=False)
self.hardswish = Activation('hard_swish', inplace=True)
self.dropout = nn.Dropout(p=last_drop)
if not prenorm:
self.norm = eval(norm_layer)(embed_dim[-1], eps=epsilon)
self.use_lenhead = use_lenhead
if use_lenhead:
self.len_conv = nn.Linear(embed_dim[2], self.out_channels)
self.hardswish_len = Activation('hard_swish', inplace=True)
self.dropout_len = nn.Dropout(p=last_drop)
torch.nn.init.xavier_normal_(self.pos_embed)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def forward_features(self, x):
x = self.patch_embed(x)
x = x + self.pos_embed
x = self.pos_drop(x)
for blk in self.blocks1:
x = blk(x)
if self.patch_merging is not None:
x = self.sub_sample1(x.permute(0, 2, 1).reshape([-1, self.embed_dim[0], self.HW[0], self.HW[1]]))
for blk in self.blocks2:
x = blk(x)
if self.patch_merging is not None:
x = self.sub_sample2(x.permute(0, 2, 1).reshape([-1, self.embed_dim[1], self.HW[0] // 2, self.HW[1]]))
for blk in self.blocks3:
x = blk(x)
if not self.prenorm:
x = self.norm(x)
return x
def forward(self, x):
x = self.forward_features(x)
if self.use_lenhead:
len_x = self.len_conv(x.mean(1))
len_x = self.dropout_len(self.hardswish_len(len_x))
if self.last_stage:
if self.patch_merging is not None:
h = self.HW[0] // 4
else:
h = self.HW[0]
x = self.avg_pool(x.permute(0, 2, 1).reshape([-1, self.embed_dim[2], h, self.HW[1]]))
x = self.last_conv(x)
x = self.hardswish(x)
x = self.dropout(x)
if self.use_lenhead:
return x, len_x
return x
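# Hedged usage sketch (illustration only): a dummy forward pass through
# SVTRNet with its default configuration. The output is
# [N, out_channels, 1, out_char_num], i.e. one feature column per decoded
# character slot.
def _svtrnet_demo():
    net = SVTRNet()
    x = torch.randn(1, 3, 32, 100)
    return net(x).shape  # [1, 192, 1, 25]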
scale_dim_heads = {'tiny': [192, 3], 'small': [384, 6], 'base': [768, 12]}
class ViTSTR(nn.Module):
def __init__(self, img_size=[224, 224], in_channels=1, scale='tiny', seqlen=27, patch_size=[16, 16], embed_dim=None, depth=12, num_heads=None, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_path_rate=0.0, drop_rate=0.0, attn_drop_rate=0.0, norm_layer='nn.LayerNorm', act_layer='gelu', epsilon=1e-06, out_channels=None, **kwargs):
super().__init__()
self.seqlen = seqlen
embed_dim = embed_dim if embed_dim is not None else scale_dim_heads[scale][0]
num_heads = num_heads if num_heads is not None else scale_dim_heads[scale][1]
out_channels = out_channels if out_channels is not None else embed_dim
self.patch_embed = PatchEmbed(img_size=img_size, in_channels=in_channels, embed_dim=embed_dim, patch_size=patch_size, mode='linear')
num_patches = self.patch_embed.num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = np.linspace(0, drop_path_rate, depth)
self.blocks = nn.ModuleList([Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, epsilon=epsilon, prenorm=False) for i in range(depth)])
self.norm = eval(norm_layer)(embed_dim, eps=epsilon)
self.out_channels = out_channels
torch.nn.init.xavier_normal_(self.pos_embed)
torch.nn.init.xavier_normal_(self.cls_token)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.repeat(B, 1, 1)
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = x[:, :self.seqlen]
return x.permute(0, 2, 1).unsqueeze(2)
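# Hedged usage sketch (illustration only): ViTSTR keeps the first `seqlen`
# tokens (CLS plus 26 character slots by default) and returns them
# channel-first with a singleton height axis, CNN-feature-map style.
def _vitstr_demo():
    net = ViTSTR(img_size=[224, 224], in_channels=1, scale='tiny', seqlen=27)
    x = torch.randn(1, 1, 224, 224)
    return net(x).shape  # [1, 192, 1, 27]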
class ClsHead(nn.Module):
"""
Text-orientation classification head.
Args:
in_channels (int): number of input feature channels.
class_dim (int): number of orientation classes.
"""
def __init__(self, in_channels, class_dim, **kwargs):
super(ClsHead, self).__init__()
self.training = False  # default to inference behavior (softmax applied) until .train() is called
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(in_channels, class_dim, bias=True)
def forward(self, x):
x = self.pool(x)
x = torch.reshape(x, shape=[x.shape[0], x.shape[1]])
x = self.fc(x)
if not self.training:
x = F.softmax(x, dim=1)
return x
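# Hedged usage sketch (illustration only): the head pools the backbone feature
# map to 1x1 and, with self.training left at False, returns softmax
# probabilities over the orientation classes.
def _cls_head_demo():
    head = ClsHead(in_channels=200, class_dim=2)
    feats = torch.randn(4, 200, 7, 7)
    return head(feats).shape  # [4, 2]; each row sums to 1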
class Head(nn.Module):
def __init__(self, in_channels, **kwargs):
super(Head, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels // 4, kernel_size=3, padding=1, bias=False)
self.conv_bn1 = nn.BatchNorm2d(in_channels // 4)
self.relu1 = Activation(act_type='relu')
self.conv2 = nn.ConvTranspose2d(in_channels=in_channels // 4, out_channels=in_channels // 4, kernel_size=2, stride=2)
self.conv_bn2 = nn.BatchNorm2d(in_channels // 4)
self.relu2 = Activation(act_type='relu')
self.conv3 = nn.ConvTranspose2d(in_channels=in_channels // 4, out_channels=1, kernel_size=2, stride=2)
def forward(self, x):
x = self.conv1(x)
x = self.conv_bn1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.conv_bn2(x)
x = self.relu2(x)
x = self.conv3(x)
x = torch.sigmoid(x)
return x
class DBHead(nn.Module):
"""
Differentiable Binarization (DB) for text detection:
see https://arxiv.org/abs/1911.08947
args:
in_channels (int): number of input feature channels.
k (int): amplification factor of the differentiable binarization step function.
"""
def __init__(self, in_channels, k=50, **kwargs):
super(DBHead, self).__init__()
self.k = k
binarize_name_list = ['conv2d_56', 'batch_norm_47', 'conv2d_transpose_0', 'batch_norm_48', 'conv2d_transpose_1', 'binarize']  # legacy Paddle layer names, unused in this port
thresh_name_list = ['conv2d_57', 'batch_norm_49', 'conv2d_transpose_2', 'batch_norm_50', 'conv2d_transpose_3', 'thresh']  # legacy Paddle layer names, unused in this port
self.binarize = Head(in_channels)
self.thresh = Head(in_channels)
def step_function(self, x, y):
return torch.reciprocal(1 + torch.exp(-self.k * (x - y)))
def forward(self, x):
shrink_maps = self.binarize(x)
if not self.training:
return {'maps': shrink_maps}
threshold_maps = self.thresh(x)
binary_maps = self.step_function(shrink_maps, threshold_maps)
y = torch.cat([shrink_maps, threshold_maps, binary_maps], dim=1)
return {'maps': y}
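# Hedged sketch (illustration only): the DB step function is a sigmoid
# sharpened by k=50, a differentiable surrogate for the hard threshold
# (shrink_map > threshold_map). `_db_step_demo` is a hypothetical helper name.
def _db_step_demo():
    head = DBHead(in_channels=256)
    shrink = torch.tensor([0.2, 0.5, 0.8])
    thresh = torch.tensor([0.5, 0.5, 0.5])
    return head.step_function(shrink, thresh)  # approximately [0.0, 0.5, 1.0]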
class EASTHead(nn.Module):
"""
"""
def __init__(self, in_channels, model_name, **kwargs):
super(EASTHead, self).__init__()
self.model_name = model_name
if self.model_name == 'large':
num_outputs = [128, 64, 1, 8]
else:
num_outputs = [64, 32, 1, 8]
self.det_conv1 = ConvBNLayer(in_channels=in_channels, out_channels=num_outputs[0], kernel_size=3, stride=1, padding=1, if_act=True, act='relu', name='det_head1')
self.det_conv2 = ConvBNLayer(in_channels=num_outputs[0], out_channels=num_outputs[1], kernel_size=3, stride=1, padding=1, if_act=True, act='relu', name='det_head2')
self.score_conv = ConvBNLayer(in_channels=num_outputs[1], out_channels=num_outputs[2], kernel_size=1, stride=1, padding=0, if_act=False, act=None, name='f_score')
self.geo_conv = ConvBNLayer(in_channels=num_outputs[1], out_channels=num_outputs[3], kernel_size=1, stride=1, padding=0, if_act=False, act=None, name='f_geo')
def forward(self, x):
f_det = self.det_conv1(x)
f_det = self.det_conv2(f_det)
f_score = self.score_conv(f_det)
f_score = torch.sigmoid(f_score)
f_geo = self.geo_conv(f_det)
f_geo = (torch.sigmoid(f_geo) - 0.5) * 2 * 800
pred = {'f_score': f_score, 'f_geo': f_geo}
return pred
def multi_apply(func, *args, **kwargs):
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
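# Hedged sketch (illustration only): multi_apply maps `func` over parallel
# argument lists and transposes the per-call result tuples into per-output
# lists; FCEHead below uses it to collect (cls, reg) predictions per level.
def _multi_apply_demo():
    def square_and_cube(v):
        return v * v, v * v * v
    squares, cubes = multi_apply(square_and_cube, [1, 2, 3])
    return squares, cubes  # ([1, 4, 9], [1, 8, 27])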
class FCEHead(nn.Module):
"""The class for implementing FCENet head.
FCENet(CVPR2021): Fourier Contour Embedding for Arbitrary-shaped Text
Detection.
[https://arxiv.org/abs/2104.10442]
Args:
in_channels (int): The number of input channels.
fourier_degree (int) : The maximum Fourier transform degree k.
"""
def __init__(self, in_channels, fourier_degree=5):
super().__init__()
assert isinstance(in_channels, int)
self.downsample_ratio = 1.0
self.in_channels = in_channels
self.fourier_degree = fourier_degree
self.out_channels_cls = 4
self.out_channels_reg = (2 * self.fourier_degree + 1) * 2
self.out_conv_cls = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels_cls, kernel_size=3, stride=1, padding=1, groups=1, bias=True)
self.out_conv_reg = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels_reg, kernel_size=3, stride=1, padding=1, groups=1, bias=True)
def forward(self, feats, targets=None):
cls_res, reg_res = multi_apply(self.forward_single, feats)
level_num = len(cls_res)
outs = {}
if not self.training:
for i in range(level_num):
tr_pred = F.softmax(cls_res[i][:, 0:2, :, :], dim=1)
tcl_pred = F.softmax(cls_res[i][:, 2:, :, :], dim=1)
outs['level_{}'.format(i)] = torch.cat([tr_pred, tcl_pred, reg_res[i]], dim=1)
else:
preds = [[cls_res[i], reg_res[i]] for i in range(level_num)]
outs['levels'] = preds
return outs
def forward_single(self, x):
cls_predict = self.out_conv_cls(x)
reg_predict = self.out_conv_reg(x)
return cls_predict, reg_predict
class PSEHead(nn.Module):
def __init__(self, in_channels, hidden_dim=256, out_channels=7, **kwargs):
super(PSEHead, self).__init__()
self.conv1 = nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(hidden_dim)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(hidden_dim, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, x, **kwargs):
out = self.conv1(x)
out = self.relu1(self.bn1(out))
out = self.conv2(out)
return {'maps': out}
class SAST_Header1(nn.Module):
def __init__(self, in_channels, **kwargs):
super(SAST_Header1, self).__init__()
out_channels = [64, 64, 128]
self.score_conv = nn.Sequential(ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_score1'), ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_score2'), ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_score3'), ConvBNLayer(out_channels[2], 1, 3, 1, act=None, name='f_score4'))
self.border_conv = nn.Sequential(ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_border1'), ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_border2'), ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_border3'), ConvBNLayer(out_channels[2], 4, 3, 1, act=None, name='f_border4'))
def forward(self, x):
f_score = self.score_conv(x)
f_score = torch.sigmoid(f_score)
f_border = self.border_conv(x)
return f_score, f_border
class SAST_Header2(nn.Module):
def __init__(self, in_channels, **kwargs):
super(SAST_Header2, self).__init__()
out_channels = [64, 64, 128]
self.tvo_conv = nn.Sequential(ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_tvo1'), ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_tvo2'), ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_tvo3'), ConvBNLayer(out_channels[2], 8, 3, 1, act=None, name='f_tvo4'))
self.tco_conv = nn.Sequential(ConvBNLayer(in_channels, out_channels[0], 1, 1, act='relu', name='f_tco1'), ConvBNLayer(out_channels[0], out_channels[1], 3, 1, act='relu', name='f_tco2'), ConvBNLayer(out_channels[1], out_channels[2], 1, 1, act='relu', name='f_tco3'), ConvBNLayer(out_channels[2], 2, 3, 1, act=None, name='f_tco4'))
def forward(self, x):
f_tvo = self.tvo_conv(x)
f_tco = self.tco_conv(x)
return f_tvo, f_tco
class SASTHead(nn.Module):
"""
"""
def __init__(self, in_channels, **kwargs):
super(SASTHead, self).__init__()
self.head1 = SAST_Header1(in_channels)
self.head2 = SAST_Header2(in_channels)
def forward(self, x):
f_score, f_border = self.head1(x)
f_tvo, f_tco = self.head2(x)
predicts = {}
predicts['f_score'] = f_score
predicts['f_border'] = f_border
predicts['f_tvo'] = f_tvo
predicts['f_tco'] = f_tco
return predicts
class PGHead(nn.Module):
"""
"""
def __init__(self, in_channels, **kwargs):
super(PGHead, self).__init__()
self.conv_f_score1 = ConvBNLayer(in_channels=in_channels, out_channels=64, kernel_size=1, stride=1, padding=0, act='relu', name='conv_f_score{}'.format(1))
self.conv_f_score2 = ConvBNLayer(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, act='relu', name='conv_f_score{}'.format(2))
self.conv_f_score3 = ConvBNLayer(in_channels=64, out_channels=128, kernel_size=1, stride=1, padding=0, act='relu', name='conv_f_score{}'.format(3))
self.conv1 = nn.Conv2d(in_channels=128, out_channels=1, kernel_size=3, stride=1, padding=1, groups=1, bias=False)
self.conv_f_boder1 = ConvBNLayer(in_channels=in_channels, out_channels=64, kernel_size=1, stride=1, padding=0, act='relu', name='conv_f_boder{}'.format(1))
self.conv_f_boder2 = ConvBNLayer(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, act='relu', name='conv_f_boder{}'.format(2))
self.conv_f_boder3 = ConvBNLayer(in_channels=64, out_channels=128, kernel_size=1, stride=1, padding=0, act='relu', name='conv_f_boder{}'.format(3))
self.conv2 = nn.Conv2d(in_channels=128, out_channels=4, kernel_size=3, stride=1, padding=1, groups=1, bias=False)
self.conv_f_char1 = ConvBNLayer(in_channels=in_channels, out_channels=128, kernel_size=1, stride=1, padding=0, act='relu', name='conv_f_char{}'.format(1))
self.conv_f_char2 = ConvBNLayer(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1, act='relu', name='conv_f_char{}'.format(2))
self.conv_f_char3 = ConvBNLayer(in_channels=128, out_channels=256, kernel_size=1, stride=1, padding=0, act='relu', name='conv_f_char{}'.format(3))
self.conv_f_char4 = ConvBNLayer(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1, act='relu', name='conv_f_char{}'.format(4))
self.conv_f_char5 = ConvBNLayer(in_channels=256, out_channels=256, kernel_size=1, stride=1, padding=0, act='relu', name='conv_f_char{}'.format(5))
self.conv3 = nn.Conv2d(in_channels=256, out_channels=37, kernel_size=3, stride=1, padding=1, groups=1, bias=False)
self.conv_f_direc1 = ConvBNLayer(in_channels=in_channels, out_channels=64, kernel_size=1, stride=1, padding=0, act='relu', name='conv_f_direc{}'.format(1))
self.conv_f_direc2 = ConvBNLayer(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, act='relu', name='conv_f_direc{}'.format(2))
self.conv_f_direc3 = ConvBNLayer(in_channels=64, out_channels=128, kernel_size=1, stride=1, padding=0, act='relu', name='conv_f_direc{}'.format(3))
self.conv4 = nn.Conv2d(in_channels=128, out_channels=2, kernel_size=3, stride=1, padding=1, groups=1, bias=False)
def forward(self, x):
f_score = self.conv_f_score1(x)
f_score = self.conv_f_score2(f_score)
f_score = self.conv_f_score3(f_score)
f_score = self.conv1(f_score)
f_score = torch.sigmoid(f_score)
f_border = self.conv_f_boder1(x)
f_border = self.conv_f_boder2(f_border)
f_border = self.conv_f_boder3(f_border)
f_border = self.conv2(f_border)
f_char = self.conv_f_char1(x)
f_char = self.conv_f_char2(f_char)
f_char = self.conv_f_char3(f_char)
f_char = self.conv_f_char4(f_char)
f_char = self.conv_f_char5(f_char)
f_char = self.conv3(f_char)
f_direction = self.conv_f_direc1(x)
f_direction = self.conv_f_direc2(f_direction)
f_direction = self.conv_f_direc3(f_direction)
f_direction = self.conv4(f_direction)
predicts = {}
predicts['f_score'] = f_score
predicts['f_border'] = f_border
predicts['f_char'] = f_char
predicts['f_direction'] = f_direction
return predicts
class MultiheadAttention(nn.Module):
"""Allows the model to jointly attend to information
from different representation subspaces.
See reference: Attention Is All You Need
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1, \dots, head_h)W^O
\text{where } head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)
Args:
embed_dim: total dimension of the model
num_heads: parallel attention layers, or heads
"""
def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
self._reset_parameters()
self.conv1 = torch.nn.Conv2d(in_channels=embed_dim, out_channels=embed_dim, kernel_size=(1, 1))
self.conv2 = torch.nn.Conv2d(in_channels=embed_dim, out_channels=embed_dim, kernel_size=(1, 1))
self.conv3 = torch.nn.Conv2d(in_channels=embed_dim, out_channels=embed_dim, kernel_size=(1, 1))
def _reset_parameters(self):
xavier_uniform_(self.out_proj.weight)
def forward(self, query, key, value, key_padding_mask=None, incremental_state=None, attn_mask=None):
"""
Inputs of forward function
query: [target length, batch size, embed dim]
key: [sequence length, batch size, embed dim]
value: [sequence length, batch size, embed dim]
key_padding_mask: if provided, padded positions in the key are masked out
incremental_state: if provided, previous time steps are cached
attn_mask: optional additive mask applied to the attention weights
Outputs of forward function
attn_output: [target length, batch size, embed dim]
attn_output_weights: [batch size, target length, sequence length]
"""
q_shape = query.shape
src_shape = key.shape
q = self._in_proj_q(query)
k = self._in_proj_k(key)
v = self._in_proj_v(value)
q *= self.scaling
q = torch.reshape(q, (q_shape[0], q_shape[1], self.num_heads, self.head_dim))
q = q.permute(1, 2, 0, 3)
k = torch.reshape(k, (src_shape[0], q_shape[1], self.num_heads, self.head_dim))
k = k.permute(1, 2, 0, 3)
v = torch.reshape(v, (src_shape[0], q_shape[1], self.num_heads, self.head_dim))
v = v.permute(1, 2, 0, 3)
if key_padding_mask is not None:
assert key_padding_mask.shape[0] == q_shape[1]
assert key_padding_mask.shape[1] == src_shape[0]
attn_output_weights = torch.matmul(q, k.permute(0, 1, 3, 2))
if attn_mask is not None:
attn_mask = torch.unsqueeze(torch.unsqueeze(attn_mask, 0), 0)
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = torch.reshape(attn_output_weights, [q_shape[1], self.num_heads, q_shape[0], src_shape[0]])
key = torch.unsqueeze(torch.unsqueeze(key_padding_mask, 1), 2)
key = key.type(torch.float32)
y = torch.full(size=key.shape, fill_value=float('-Inf'), dtype=torch.float32, device=key.device)
y = torch.where(key == 0.0, key, y)
attn_output_weights += y
attn_output_weights = F.softmax(attn_output_weights.float(), dim=-1)  # softmax in fp32 for numerical stability
attn_output_weights = F.dropout(attn_output_weights, p=self.dropout, training=self.training)
attn_output = torch.matmul(attn_output_weights, v)
attn_output = torch.reshape(attn_output.permute(2, 0, 1, 3), [q_shape[0], q_shape[1], self.embed_dim])
attn_output = self.out_proj(attn_output)
return attn_output
def _in_proj_q(self, query):
query = query.permute(1, 2, 0)
query = torch.unsqueeze(query, dim=2)
res = self.conv1(query)
res = torch.squeeze(res, dim=2)
res = res.permute(2, 0, 1)
return res
def _in_proj_k(self, key):
key = key.permute(1, 2, 0)
key = torch.unsqueeze(key, dim=2)
res = self.conv2(key)
res = torch.squeeze(res, dim=2)
res = res.permute(2, 0, 1)
return res
def _in_proj_v(self, value):
value = value.permute(1, 2, 0)
value = torch.unsqueeze(value, dim=2)
res = self.conv3(value)
res = torch.squeeze(res, dim=2)
res = res.permute(2, 0, 1)
return res
class AttentionHead(nn.Module):
def __init__(self, in_channels, out_channels, hidden_size, **kwargs):
super(AttentionHead, self).__init__()
self.input_size = in_channels
self.hidden_size = hidden_size
self.num_classes = out_channels
self.attention_cell = AttentionGRUCell(in_channels, hidden_size, out_channels, use_gru=False)
self.generator = nn.Linear(hidden_size, out_channels)
def _char_to_onehot(self, input_char, onehot_dim):
input_one_hot = F.one_hot(input_char.type(torch.int64), onehot_dim)
return input_one_hot
def forward(self, inputs, targets=None, batch_max_length=25):
batch_size = inputs.size()[0]
num_steps = batch_max_length
hidden = torch.zeros((batch_size, self.hidden_size), device=inputs.device)
output_hiddens = []
if targets is not None:
for i in range(num_steps):
char_onehots = self._char_to_onehot(targets[:, i], onehot_dim=self.num_classes)
(outputs, hidden), alpha = self.attention_cell(hidden, inputs, char_onehots)
output_hiddens.append(torch.unsqueeze(outputs, dim=1))
output = torch.cat(output_hiddens, dim=1)
probs = self.generator(output)
else:
targets = torch.zeros([batch_size], dtype=torch.int32, device=inputs.device)
probs = None
char_onehots = None
outputs = None
alpha = None
for i in range(num_steps):
char_onehots = self._char_to_onehot(targets, onehot_dim=self.num_classes)
(outputs, hidden), alpha = self.attention_cell(hidden, inputs, char_onehots)
probs_step = self.generator(outputs)
if probs is None:
probs = torch.unsqueeze(probs_step, dim=1)
else:
probs = torch.cat([probs, torch.unsqueeze(probs_step, dim=1)], dim=1)
next_input = probs_step.argmax(dim=1)
targets = next_input
return probs
class CTCHead(nn.Module):
def __init__(self, in_channels, out_channels=6625, fc_decay=0.0004, mid_channels=None, return_feats=False, **kwargs):
super(CTCHead, self).__init__()
if mid_channels is None:
self.fc = nn.Linear(in_channels, out_channels, bias=True)
else:
self.fc1 = nn.Linear(in_channels, mid_channels, bias=True)
self.fc2 = nn.Linear(mid_channels, out_channels, bias=True)
self.out_channels = out_channels
self.mid_channels = mid_channels
self.return_feats = return_feats
def forward(self, x, labels=None):
if self.mid_channels is None:
predicts = self.fc(x)
else:
x = self.fc1(x)
predicts = self.fc2(x)
if self.return_feats:
result = x, predicts
else:
result = predicts
if not self.training:
predicts = F.softmax(predicts, dim=2)
result = predicts
return result
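# Hedged usage sketch (illustration only): CTC expects one logit vector per
# time step. In training mode the head returns raw logits; in eval mode it
# returns softmax probabilities over the vocabulary (blank included).
def _ctc_head_demo():
    head = CTCHead(in_channels=64, out_channels=100)
    head.train()
    seq = torch.randn(2, 25, 64)  # [batch, time, features]
    return head(seq).shape  # [2, 25, 100]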
class Beam:
""" Beam search """
def __init__(self, size, device=False):
self.size = size
self._done = False
self.scores = torch.zeros((size,), dtype=torch.float32)
self.all_scores = []
self.prev_ks = []
self.next_ys = [torch.full((size,), 0, dtype=torch.int64)]
self.next_ys[0][0] = 2  # token id 2 is BOS in this vocabulary
def get_current_state(self):
"""Get the outputs for the current timestep."""
return self.get_tentative_hypothesis()
def get_current_origin(self):
"""Get the backpointers for the current timestep."""
return self.prev_ks[-1]
@property
def done(self):
return self._done
def advance(self, word_prob):
"""Update beam status and check if finished or not."""
num_words = word_prob.shape[1]
if len(self.prev_ks) > 0:
beam_lk = word_prob + self.scores.unsqueeze(1).expand_as(word_prob)
else:
beam_lk = word_prob[0]
flat_beam_lk = beam_lk.reshape([-1])
best_scores, best_scores_id = flat_beam_lk.topk(self.size, 0, True, True)
self.all_scores.append(self.scores)
self.scores = best_scores
prev_k = best_scores_id // num_words
self.prev_ks.append(prev_k)
self.next_ys.append(best_scores_id - prev_k * num_words)
if self.next_ys[-1][0] == 3:  # token id 3 is EOS
self._done = True
self.all_scores.append(self.scores)
return self._done
def sort_scores(self):
"""Sort the scores."""
return self.scores, torch.tensor([i for i in range(int(self.scores.shape[0]))], dtype=torch.int32)
def get_the_best_score_and_idx(self):
"""Get the score of the best in the beam."""
scores, ids = self.sort_scores()
return scores[1], ids[1]
def get_tentative_hypothesis(self):
"""Get the decoded sequence for the current timestep."""
if len(self.next_ys) == 1:
dec_seq = self.next_ys[0].unsqueeze(1)
else:
_, keys = self.sort_scores()
hyps = [self.get_hypothesis(k) for k in keys]
hyps = [([2] + h) for h in hyps]
dec_seq = torch.tensor(hyps, dtype=torch.int64)
return dec_seq
def get_hypothesis(self, k):
""" Walk back to construct the full hypothesis. """
hyp = []
for j in range(len(self.prev_ks) - 1, -1, -1):
hyp.append(self.next_ys[j + 1][k])
k = self.prev_ks[j][k]
return list(map(lambda x: x.item(), hyp[::-1]))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab, padding_idx, scale_embedding):
super(Embeddings, self).__init__()
self.embedding = nn.Embedding(vocab, d_model, padding_idx=padding_idx)
w0 = np.random.normal(0.0, d_model ** -0.5, (vocab, d_model)).astype(np.float32)
self.embedding.weight.data = torch.from_numpy(w0)
self.d_model = d_model
self.scale_embedding = scale_embedding
def forward(self, x):
if self.scale_embedding:
x = self.embedding(x)
return x * math.sqrt(self.d_model)
return self.embedding(x)
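# Hedged sketch (illustration only): with scale_embedding=True the lookup is
# multiplied by sqrt(d_model), the scaling from "Attention Is All You Need"
# that keeps embeddings and positional encodings at comparable magnitude.
def _embeddings_demo():
    emb = Embeddings(d_model=16, vocab=10, padding_idx=0, scale_embedding=True)
    tokens = torch.tensor([[1, 2, 3]])
    return emb(tokens).shape  # [1, 3, 16]; values scaled by sqrt(16) = 4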
class PositionalEncoding(nn.Module):
"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
.. math::
\text{PosEncoder}(pos, 2i) = \sin(pos / 10000^{2i / d_{model}})
\text{PosEncoder}(pos, 2i + 1) = \cos(pos / 10000^{2i / d_{model}})
\text{where pos is the word position and i is the embed idx}
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoding(d_model)
"""
def __init__(self, dropout, dim, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros([max_len, dim])
position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
div_term = torch.exp(torch.arange(0, dim, 2).type(torch.float32) * (-math.log(10000.0) / dim))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = torch.unsqueeze(pe, 0)
pe = pe.permute(1, 0, 2)
self.register_buffer('pe', pe)
def forward(self, x):
"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
"""
x = x + self.pe[:x.shape[0], :]
return self.dropout(x)
def _get_clones(module, N):
return LayerList([copy.deepcopy(module) for i in range(N)])
class TransformerDecoder(nn.Module):
"""TransformerDecoder is a stack of N decoder layers
Args:
decoder_layer: an instance of the TransformerDecoderLayer() class (required).
num_layers: the number of sub-decoder-layers in the decoder (required).
norm: the layer normalization component (optional).
"""
def __init__(self, decoder_layer, num_layers):
super(TransformerDecoder, self).__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None):
"""Pass the inputs (and mask) through the decoder layer in turn.
Args:
tgt: the sequence to the decoder (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
"""
output = tgt
for i in range(self.num_layers):
output = self.layers[i](output, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask)
return output
class TransformerDecoderLayer(nn.Module):
"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
This standard decoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, attention_dropout_rate=0.0, residual_dropout_rate=0.1):
super(TransformerDecoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=attention_dropout_rate)
self.multihead_attn = MultiheadAttention(d_model, nhead, dropout=attention_dropout_rate)
self.conv1 = nn.Conv2d(in_channels=d_model, out_channels=dim_feedforward, kernel_size=(1, 1))
self.conv2 = nn.Conv2d(in_channels=dim_feedforward, out_channels=d_model, kernel_size=(1, 1))
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.norm3 = LayerNorm(d_model)
self.dropout1 = Dropout(residual_dropout_rate)
self.dropout2 = Dropout(residual_dropout_rate)
self.dropout3 = Dropout(residual_dropout_rate)
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None):
"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt: the sequence to the decoder layer (required).
memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
"""
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt = tgt.permute(1, 2, 0)
tgt = torch.unsqueeze(tgt, 2)
tgt2 = self.conv2(F.relu(self.conv1(tgt)))
tgt2 = torch.squeeze(tgt2, 2)
tgt2 = tgt2.permute(2, 0, 1)
tgt = torch.squeeze(tgt, 2)
tgt = tgt.permute(2, 0, 1)
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
class TransformerEncoder(nn.Module):
"""TransformerEncoder is a stack of N encoder layers
Args:
encoder_layer: an instance of the TransformerEncoderLayer() class (required).
num_layers: the number of sub-encoder-layers in the encoder (required).
norm: the layer normalization component (optional).
"""
def __init__(self, encoder_layer, num_layers):
super(TransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
def forward(self, src):
"""Pass the input through the endocder layers in turn.
Args:
src: the sequence to the encoder (required).
"""
output = src
for i in range(self.num_layers):
output = self.layers[i](output, src_mask=None, src_key_padding_mask=None)
return output
class TransformerEncoderLayer(nn.Module):
"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, attention_dropout_rate=0.0, residual_dropout_rate=0.1):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=attention_dropout_rate)
self.conv1 = nn.Conv2d(in_channels=d_model, out_channels=dim_feedforward, kernel_size=(1, 1))
self.conv2 = nn.Conv2d(in_channels=dim_feedforward, out_channels=d_model, kernel_size=(1, 1))
self.norm1 = LayerNorm(d_model)
self.norm2 = LayerNorm(d_model)
self.dropout1 = Dropout(residual_dropout_rate)
self.dropout2 = Dropout(residual_dropout_rate)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
"""Pass the input through the endocder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
"""
src2 = self.self_attn(src, src, src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
src = src.permute(1, 2, 0)
src = torch.unsqueeze(src, 2)
src2 = self.conv2(F.relu(self.conv1(src)))
src2 = torch.squeeze(src2, 2)
src2 = src2.permute(2, 0, 1)
src = torch.squeeze(src, 2)
src = src.permute(2, 0, 1)
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
class Transformer(nn.Module):
"""A transformer model. User is able to modify the attributes as needed. The architechture
is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information
Processing Systems, pages 6000-6010.
Args:
d_model: the number of expected features in the encoder/decoder inputs (default=512).
nhead: the number of heads in the multiheadattention models (default=8).
num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6).
num_decoder_layers: the number of sub-decoder-layers in the decoder (default=6).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
custom_encoder: custom encoder (default=None).
custom_decoder: custom decoder (default=None).
"""
def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, beam_size=0, num_decoder_layers=6, dim_feedforward=1024, attention_dropout_rate=0.0, residual_dropout_rate=0.1, custom_encoder=None, custom_decoder=None, in_channels=0, out_channels=0, scale_embedding=True):
super(Transformer, self).__init__()
self.out_channels = out_channels
self.embedding = Embeddings(d_model=d_model, vocab=self.out_channels, padding_idx=0, scale_embedding=scale_embedding)
self.positional_encoding = PositionalEncoding(dropout=residual_dropout_rate, dim=d_model)
if custom_encoder is not None:
self.encoder = custom_encoder
elif num_encoder_layers > 0:
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, attention_dropout_rate, residual_dropout_rate)
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers)
else:
self.encoder = None
if custom_decoder is not None:
self.decoder = custom_decoder
else:
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, attention_dropout_rate, residual_dropout_rate)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers)
self._reset_parameters()
self.beam_size = beam_size
self.d_model = d_model
self.nhead = nhead
self.tgt_word_prj = nn.Linear(d_model, self.out_channels, bias=False)
w0 = np.random.normal(0.0, d_model ** -0.5, (self.out_channels, d_model)).astype(np.float32)
self.tgt_word_prj.weight.data = torch.from_numpy(w0)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
xavier_normal_(m.weight)
if m.bias is not None:
torch.nn.init.zeros_(m.bias)
def forward_train(self, src, tgt):
tgt = tgt[:, :-1]
tgt_key_padding_mask = self.generate_padding_mask(tgt)
tgt = self.embedding(tgt).permute(1, 0, 2)
tgt = self.positional_encoding(tgt)
tgt_mask = self.generate_square_subsequent_mask(tgt.shape[0])
if self.encoder is not None:
src = self.positional_encoding(src.permute(1, 0, 2))
memory = self.encoder(src)
else:
memory = src.squeeze(2).permute(2, 0, 1)
output = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_mask=None, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=None)
output = output.permute(1, 0, 2)
logit = self.tgt_word_prj(output)
return logit
def forward(self, src, targets=None):
"""Take in and process masked source/target sequences.
Args:
src: the sequence to the encoder (required).
tgt: the sequence to the decoder (required).
Shape:
- src: :math:`(S, N, E)`.
- tgt: :math:`(T, N, E)`.
Examples:
>>> output = transformer_model(src, tgt)
"""
if self.training:
max_len = targets[1].max()
tgt = targets[0][:, :2 + max_len]
return self.forward_train(src, tgt)
elif self.beam_size > 0:
return self.forward_beam(src)
else:
return self.forward_test(src)
def forward_test(self, src):
bs = src.shape[0]
if self.encoder is not None:
src = self.positional_encoding(src.permute(1, 0, 2))
memory = self.encoder(src)
else:
memory = torch.squeeze(src, 2).permute(2, 0, 1)
dec_seq = torch.full((bs, 1), 2, dtype=torch.int64, device=src.device)  # start every sequence from BOS (id 2)
dec_prob = torch.full((bs, 1), 1.0, dtype=torch.float32, device=src.device)
for len_dec_seq in range(1, 25):
dec_seq_embed = self.embedding(dec_seq).permute(1, 0, 2)
dec_seq_embed = self.positional_encoding(dec_seq_embed)
tgt_mask = self.generate_square_subsequent_mask(dec_seq_embed.shape[0])
output = self.decoder(dec_seq_embed, memory, tgt_mask=tgt_mask, memory_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None)
dec_output = output.permute(1, 0, 2)
dec_output = dec_output[:, -1, :]
tgt_word_prj = self.tgt_word_prj(dec_output)
word_prob = F.softmax(tgt_word_prj, dim=1)
preds_idx = word_prob.argmax(dim=1)
if torch.equal(preds_idx, torch.full(preds_idx.shape, 3, dtype=torch.int64, device=preds_idx.device)):  # stop once every sequence emits EOS (id 3)
break
preds_prob = torch.max(word_prob, dim=1).values
dec_seq = torch.cat([dec_seq, torch.reshape(preds_idx, (-1, 1))], dim=1)
dec_prob = torch.cat([dec_prob, torch.reshape(preds_prob, (-1, 1))], dim=1)
return [dec_seq, dec_prob]
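# Note (added comment): decoding above is greedy. Each sequence starts from
# BOS (id 2), appends the argmax token for at most 24 steps, and the loop
# stops early only when the whole batch emits EOS (id 3) simultaneously.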
def forward_beam(self, images):
""" Translation work in one batch """
def get_inst_idx_to_tensor_position_map(inst_idx_list):
""" Indicate the position of an instance in a tensor. """
return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}
def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
""" Collect tensor parts associated to active instances. """
beamed_tensor_shape = beamed_tensor.shape
n_curr_active_inst = len(curr_active_inst_idx)
new_shape = n_curr_active_inst * n_bm, beamed_tensor_shape[1], beamed_tensor_shape[2]
beamed_tensor = beamed_tensor.reshape([n_prev_active_inst, -1])
            beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)
beamed_tensor = beamed_tensor.reshape(new_shape)
return beamed_tensor
def collate_active_info(src_enc, inst_idx_to_position_map, active_inst_idx_list):
n_prev_active_inst = len(inst_idx_to_position_map)
active_inst_idx = [inst_idx_to_position_map[k] for k in active_inst_idx_list]
active_inst_idx = torch.tensor(active_inst_idx, dtype=torch.int64)
active_src_enc = collect_active_part(src_enc.permute(1, 0, 2), active_inst_idx, n_prev_active_inst, n_bm).permute(1, 0, 2)
active_inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
return active_src_enc, active_inst_idx_to_position_map
def beam_decode_step(inst_dec_beams, len_dec_seq, enc_output, inst_idx_to_position_map, n_bm, memory_key_padding_mask):
""" Decode and update beam status, and then return active beam idx """
def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):
dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]
dec_partial_seq = torch.stack(dec_partial_seq)
dec_partial_seq = dec_partial_seq.reshape([-1, len_dec_seq])
return dec_partial_seq
def predict_word(dec_seq, enc_output, n_active_inst, n_bm, memory_key_padding_mask):
dec_seq = self.embedding(dec_seq).permute(1, 0, 2)
dec_seq = self.positional_encoding(dec_seq)
tgt_mask = self.generate_square_subsequent_mask(dec_seq.shape[0])
dec_output = self.decoder(dec_seq, enc_output, tgt_mask=tgt_mask, tgt_key_padding_mask=None, memory_key_padding_mask=memory_key_padding_mask)
dec_output = dec_output.permute(1, 0, 2)
dec_output = dec_output[:, -1, :]
word_prob = F.softmax(self.tgt_word_prj(dec_output), dim=1)
word_prob = torch.reshape(word_prob, (n_active_inst, n_bm, -1))
return word_prob
def collect_active_inst_idx_list(inst_beams, word_prob, inst_idx_to_position_map):
active_inst_idx_list = []
for inst_idx, inst_position in inst_idx_to_position_map.items():
is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])
if not is_inst_complete:
active_inst_idx_list += [inst_idx]
return active_inst_idx_list
n_active_inst = len(inst_idx_to_position_map)
dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
word_prob = predict_word(dec_seq, enc_output, n_active_inst, n_bm, None)
active_inst_idx_list = collect_active_inst_idx_list(inst_dec_beams, word_prob, inst_idx_to_position_map)
return active_inst_idx_list
def collect_hypothesis_and_scores(inst_dec_beams, n_best):
all_hyp, all_scores = [], []
for inst_idx in range(len(inst_dec_beams)):
scores, tail_idxs = inst_dec_beams[inst_idx].sort_scores()
all_scores += [scores[:n_best]]
hyps = [inst_dec_beams[inst_idx].get_hypothesis(i) for i in tail_idxs[:n_best]]
all_hyp += [hyps]
return all_hyp, all_scores
with torch.no_grad():
if self.encoder is not None:
src = self.positional_encoding(images.permute(1, 0, 2))
src_enc = self.encoder(src)
else:
                src_enc = images.squeeze(2).permute(0, 2, 1)
n_bm = self.beam_size
src_shape = src_enc.shape
inst_dec_beams = [Beam(n_bm) for _ in range(1)]
active_inst_idx_list = list(range(1))
src_enc = src_enc.repeat(1, n_bm, 1)
inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
for len_dec_seq in range(1, 25):
src_enc_copy = src_enc.clone()
active_inst_idx_list = beam_decode_step(inst_dec_beams, len_dec_seq, src_enc_copy, inst_idx_to_position_map, n_bm, None)
if not active_inst_idx_list:
break
src_enc, inst_idx_to_position_map = collate_active_info(src_enc_copy, inst_idx_to_position_map, active_inst_idx_list)
batch_hyp, batch_scores = collect_hypothesis_and_scores(inst_dec_beams, 1)
result_hyp = []
hyp_scores = []
for bs_hyp, score in zip(batch_hyp, batch_scores):
l = len(bs_hyp[0])
bs_hyp_pad = bs_hyp[0] + [3] * (25 - l)
result_hyp.append(bs_hyp_pad)
score = float(score) / l
hyp_score = [score for _ in range(25)]
hyp_scores.append(hyp_score)
return [torch.tensor(np.array(result_hyp), dtype=torch.int64), torch.tensor(hyp_scores)]
def generate_square_subsequent_mask(self, sz):
"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
mask = torch.zeros([sz, sz], dtype=torch.float32)
mask_inf = torch.triu(torch.full(size=[sz, sz], fill_value=float('-Inf'), dtype=torch.float32), diagonal=1)
mask = mask + mask_inf
return mask
def generate_padding_mask(self, x):
padding_mask = x == torch.tensor(0, dtype=x.dtype)
return padding_mask
def _reset_parameters(self):
"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
class PositionalEncoding_2d(nn.Module):
"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
.. math::
        PosEncoder(pos, 2i) = sin(pos / 10000^(2i / d_model))
        PosEncoder(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
    where pos is the word position and i is the embed idx
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoding(d_model)
"""
def __init__(self, dropout, dim, max_len=5000):
super(PositionalEncoding_2d, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros([max_len, dim])
position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
div_term = torch.exp(torch.arange(0, dim, 2).type(torch.float32) * (-math.log(10000.0) / dim))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = torch.unsqueeze(pe, 0).permute(1, 0, 2)
self.register_buffer('pe', pe)
self.avg_pool_1 = nn.AdaptiveAvgPool2d((1, 1))
self.linear1 = nn.Linear(dim, dim)
self.linear1.weight.data.fill_(1.0)
self.avg_pool_2 = nn.AdaptiveAvgPool2d((1, 1))
self.linear2 = nn.Linear(dim, dim)
self.linear2.weight.data.fill_(1.0)
def forward(self, x):
"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
"""
w_pe = self.pe[:x.shape[-1], :]
        w1 = self.linear1(self.avg_pool_1(x).squeeze(-1).squeeze(-1)).unsqueeze(0)
w_pe = w_pe * w1
w_pe = w_pe.permute(1, 2, 0)
w_pe = torch.unsqueeze(w_pe, 2)
h_pe = self.pe[:x.shape[-2], :]
        w2 = self.linear2(self.avg_pool_2(x).squeeze(-1).squeeze(-1)).unsqueeze(0)
h_pe = h_pe * w2
h_pe = h_pe.permute(1, 2, 0)
h_pe = torch.unsqueeze(h_pe, 3)
x = x + w_pe + h_pe
x = torch.reshape(x, [x.shape[0], x.shape[1], x.shape[2] * x.shape[3]]).permute(2, 0, 1)
return self.dropout(x)
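# Minimal shape check for PositionalEncoding_2d (sizes are illustrative only,
# not from the original source): a [B, C, H, W] feature map comes back as a
# [H*W, B, C] sequence with content-scaled row/column sinusoidal encodings.
def _demo_positional_encoding_2d():
    pe2d = PositionalEncoding_2d(dropout=0.0, dim=8)
    x = torch.rand(2, 8, 4, 6)  # toy [batch, channels, height, width]
    out = pe2d(x)
    assert out.shape == (4 * 6, 2, 8)  # [H*W, B, C]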
class SAREncoder(nn.Module):
"""
Args:
enc_bi_rnn (bool): If True, use bidirectional RNN in encoder.
enc_drop_rnn (float): Dropout probability of RNN layer in encoder.
enc_gru (bool): If True, use GRU, else LSTM in encoder.
d_model (int): Dim of channels from backbone.
d_enc (int): Dim of encoder RNN layer.
mask (bool): If True, mask padding in RNN sequence.
"""
def __init__(self, enc_bi_rnn=False, enc_drop_rnn=0.0, enc_gru=False, d_model=512, d_enc=512, mask=True, **kwargs):
super().__init__()
assert isinstance(enc_bi_rnn, bool)
assert isinstance(enc_drop_rnn, (int, float))
assert 0 <= enc_drop_rnn < 1.0
assert isinstance(enc_gru, bool)
assert isinstance(d_model, int)
assert isinstance(d_enc, int)
assert isinstance(mask, bool)
self.enc_bi_rnn = enc_bi_rnn
self.enc_drop_rnn = enc_drop_rnn
self.mask = mask
kwargs = dict(input_size=d_model, hidden_size=d_enc, num_layers=2, batch_first=True, dropout=enc_drop_rnn, bidirectional=enc_bi_rnn)
if enc_gru:
self.rnn_encoder = nn.GRU(**kwargs)
else:
self.rnn_encoder = nn.LSTM(**kwargs)
encoder_rnn_out_size = d_enc * (int(enc_bi_rnn) + 1)
self.linear = nn.Linear(encoder_rnn_out_size, encoder_rnn_out_size)
def forward(self, feat, img_metas=None):
if img_metas is not None:
assert len(img_metas[0]) == feat.size(0)
valid_ratios = None
if img_metas is not None and self.mask:
valid_ratios = img_metas[-1]
h_feat = feat.shape[2]
feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0)
feat_v = feat_v.squeeze(2)
feat_v = feat_v.permute(0, 2, 1).contiguous()
holistic_feat = self.rnn_encoder(feat_v)[0]
if valid_ratios is not None:
valid_hf = []
T = holistic_feat.size(1)
for i in range(valid_ratios.size(0)):
                valid_step = min(T, int(math.ceil(T * float(valid_ratios[i])))) - 1
valid_hf.append(holistic_feat[i, valid_step, :])
valid_hf = torch.stack(valid_hf, dim=0)
else:
valid_hf = holistic_feat[:, -1, :]
holistic_feat = self.linear(valid_hf)
return holistic_feat
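# Minimal usage sketch for SAREncoder (sizes are assumptions for illustration):
# the encoder max-pools over height, runs an LSTM over width, and returns one
# holistic feature vector per image.
def _demo_sar_encoder():
    enc = SAREncoder(d_model=32, d_enc=32)
    feat = torch.rand(2, 32, 6, 40)  # [B, C, H, W] backbone output
    holistic = enc(feat)
    assert holistic.shape == (2, 32)  # [B, d_enc]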
class BaseDecoder(nn.Module):
def __init__(self, **kwargs):
super().__init__()
def forward_train(self, feat, out_enc, targets, img_metas):
raise NotImplementedError
def forward_test(self, feat, out_enc, img_metas):
raise NotImplementedError
def forward(self, feat, out_enc, label=None, img_metas=None, train_mode=True):
self.train_mode = train_mode
if train_mode:
return self.forward_train(feat, out_enc, label, img_metas)
return self.forward_test(feat, out_enc, img_metas)
class ParallelSARDecoder(BaseDecoder):
"""
Args:
out_channels (int): Output class number.
enc_bi_rnn (bool): If True, use bidirectional RNN in encoder.
dec_bi_rnn (bool): If True, use bidirectional RNN in decoder.
dec_drop_rnn (float): Dropout of RNN layer in decoder.
dec_gru (bool): If True, use GRU, else LSTM in decoder.
d_model (int): Dim of channels from backbone.
d_enc (int): Dim of encoder RNN layer.
d_k (int): Dim of channels of attention module.
pred_dropout (float): Dropout probability of prediction layer.
max_seq_len (int): Maximum sequence length for decoding.
mask (bool): If True, mask padding in feature map.
start_idx (int): Index of start token.
padding_idx (int): Index of padding token.
pred_concat (bool): If True, concat glimpse feature from
attention with holistic feature and hidden state.
"""
def __init__(self, out_channels, enc_bi_rnn=False, dec_bi_rnn=False, dec_drop_rnn=0.0, dec_gru=False, d_model=512, d_enc=512, d_k=64, pred_dropout=0.0, max_text_length=30, mask=True, pred_concat=True, **kwargs):
super().__init__()
self.num_classes = out_channels
self.enc_bi_rnn = enc_bi_rnn
self.d_k = d_k
self.start_idx = out_channels - 2
self.padding_idx = out_channels - 1
self.max_seq_len = max_text_length
self.mask = mask
self.pred_concat = pred_concat
encoder_rnn_out_size = d_enc * (int(enc_bi_rnn) + 1)
decoder_rnn_out_size = encoder_rnn_out_size * (int(dec_bi_rnn) + 1)
self.conv1x1_1 = nn.Linear(decoder_rnn_out_size, d_k)
self.conv3x3_1 = nn.Conv2d(d_model, d_k, kernel_size=3, stride=1, padding=1)
self.conv1x1_2 = nn.Linear(d_k, 1)
kwargs = dict(input_size=encoder_rnn_out_size, hidden_size=encoder_rnn_out_size, num_layers=2, batch_first=True, dropout=dec_drop_rnn, bidirectional=dec_bi_rnn)
if dec_gru:
self.rnn_decoder = nn.GRU(**kwargs)
else:
self.rnn_decoder = nn.LSTM(**kwargs)
self.embedding = nn.Embedding(self.num_classes, encoder_rnn_out_size, padding_idx=self.padding_idx)
self.pred_dropout = nn.Dropout(pred_dropout)
pred_num_classes = self.num_classes - 1
if pred_concat:
fc_in_channel = decoder_rnn_out_size + d_model + encoder_rnn_out_size
else:
fc_in_channel = d_model
self.prediction = nn.Linear(fc_in_channel, pred_num_classes)
def _2d_attention(self, decoder_input, feat, holistic_feat, valid_ratios=None):
y = self.rnn_decoder(decoder_input)[0]
attn_query = self.conv1x1_1(y)
bsz, seq_len, attn_size = attn_query.shape
attn_query = attn_query.view(bsz, seq_len, attn_size, 1, 1)
attn_key = self.conv3x3_1(feat)
attn_key = attn_key.unsqueeze(1)
attn_weight = torch.tanh(torch.add(attn_key, attn_query))
attn_weight = attn_weight.permute(0, 1, 3, 4, 2).contiguous()
attn_weight = self.conv1x1_2(attn_weight)
bsz, T, h, w, c = attn_weight.size()
assert c == 1
if valid_ratios is not None:
for i in range(valid_ratios.size(0)):
                valid_width = min(w, int(math.ceil(w * float(valid_ratios[i]))))
if valid_width < w:
attn_weight[i, :, :, valid_width:, :] = float('-inf')
attn_weight = attn_weight.view(bsz, T, -1)
attn_weight = F.softmax(attn_weight, dim=-1)
attn_weight = attn_weight.view(bsz, T, h, w, c).permute(0, 1, 4, 2, 3).contiguous()
attn_feat = torch.sum(torch.mul(feat.unsqueeze(1), attn_weight), (3, 4), keepdim=False)
if self.pred_concat:
hf_c = holistic_feat.shape[-1]
holistic_feat = holistic_feat.expand(bsz, seq_len, hf_c)
y = self.prediction(torch.cat((y, attn_feat, holistic_feat), 2))
else:
y = self.prediction(attn_feat)
if self.train_mode:
y = self.pred_dropout(y)
return y
def forward_train(self, feat, out_enc, label, img_metas):
"""
img_metas: [label, valid_ratio]
"""
if img_metas is not None:
assert img_metas[0].size(0) == feat.size(0)
valid_ratios = None
if img_metas is not None and self.mask:
valid_ratios = img_metas[-1]
lab_embedding = self.embedding(label)
out_enc = out_enc.unsqueeze(1)
in_dec = torch.cat((out_enc, lab_embedding), dim=1)
out_dec = self._2d_attention(in_dec, feat, out_enc, valid_ratios=valid_ratios)
return out_dec[:, 1:, :]
def forward_test(self, feat, out_enc, img_metas):
if img_metas is not None:
assert len(img_metas[0]) == feat.shape[0]
valid_ratios = None
if img_metas is not None and self.mask:
valid_ratios = img_metas[-1]
seq_len = self.max_seq_len
bsz = feat.size(0)
start_token = torch.full((bsz,), fill_value=self.start_idx, device=feat.device, dtype=torch.long)
start_token = self.embedding(start_token)
emb_dim = start_token.shape[1]
start_token = start_token.unsqueeze(1).expand(bsz, seq_len, emb_dim)
out_enc = out_enc.unsqueeze(1)
decoder_input = torch.cat((out_enc, start_token), dim=1)
outputs = []
for i in range(1, seq_len + 1):
decoder_output = self._2d_attention(decoder_input, feat, out_enc, valid_ratios=valid_ratios)
char_output = decoder_output[:, i, :]
char_output = F.softmax(char_output, -1)
outputs.append(char_output)
_, max_idx = torch.max(char_output, dim=1, keepdim=False)
char_embedding = self.embedding(max_idx)
if i < seq_len:
decoder_input[:, i + 1, :] = char_embedding
outputs = torch.stack(outputs, 1)
return outputs
class SARHead(nn.Module):
def __init__(self, in_channels, out_channels, enc_dim=512, max_text_length=30, enc_bi_rnn=False, enc_drop_rnn=0.1, enc_gru=False, dec_bi_rnn=False, dec_drop_rnn=0.0, dec_gru=False, d_k=512, pred_dropout=0.1, pred_concat=True, **kwargs):
super(SARHead, self).__init__()
self.encoder = SAREncoder(enc_bi_rnn=enc_bi_rnn, enc_drop_rnn=enc_drop_rnn, enc_gru=enc_gru, d_model=in_channels, d_enc=enc_dim)
self.decoder = ParallelSARDecoder(out_channels=out_channels, enc_bi_rnn=enc_bi_rnn, dec_bi_rnn=dec_bi_rnn, dec_drop_rnn=dec_drop_rnn, dec_gru=dec_gru, d_model=in_channels, d_enc=enc_dim, d_k=d_k, pred_dropout=pred_dropout, max_text_length=max_text_length, pred_concat=pred_concat)
def forward(self, feat, targets=None):
"""
img_metas: [label, valid_ratio]
"""
holistic_feat = self.encoder(feat, targets)
if self.training:
label = targets[0]
final_out = self.decoder(feat, holistic_feat, label, img_metas=targets)
else:
final_out = self.decoder(feat, holistic_feat, label=None, img_metas=targets, train_mode=False)
return final_out
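# Hypothetical end-to-end greedy decode with SARHead (all sizes illustrative):
# in eval mode the decoder feeds its own argmax predictions back in, producing
# per-step class probabilities of shape [B, max_text_length, num_classes - 1].
def _demo_sar_head():
    head = SARHead(in_channels=32, out_channels=10, enc_dim=32, max_text_length=5, d_k=16)
    head.eval()
    with torch.no_grad():
        probs = head(torch.rand(2, 32, 6, 40))
    assert probs.shape == (2, 5, 9)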
def hard_swish(x, inplace=True):
return x * F.relu6(x + 3.0, inplace=inplace) / 6.0
class DSConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding, stride=1, groups=None, if_act=True, act='relu', **kwargs):
super(DSConv, self).__init__()
        if groups is None:
groups = in_channels
self.if_act = if_act
self.act = act
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False)
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv2 = nn.Conv2d(in_channels=in_channels, out_channels=int(in_channels * 4), kernel_size=1, stride=1, bias=False)
self.bn2 = nn.BatchNorm2d(int(in_channels * 4))
self.conv3 = nn.Conv2d(in_channels=int(in_channels * 4), out_channels=out_channels, kernel_size=1, stride=1, bias=False)
self._c = [in_channels, out_channels]
if in_channels != out_channels:
self.conv_end = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, bias=False)
def forward(self, inputs):
x = self.conv1(inputs)
x = self.bn1(x)
x = self.conv2(x)
x = self.bn2(x)
if self.if_act:
if self.act == 'relu':
x = F.relu(x)
elif self.act == 'hardswish':
x = hard_swish(x)
            else:
                raise NotImplementedError('DSConv only supports relu and hardswish, got {}'.format(self.act))
x = self.conv3(x)
if self._c[0] != self._c[1]:
x = x + self.conv_end(inputs)
return x
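# Shape sketch for DSConv (illustrative sizes): depthwise 3x3, expand 4x with a
# pointwise conv, project back down, and add a 1x1 shortcut when channels change.
def _demo_dsconv():
    conv = DSConv(in_channels=8, out_channels=16, kernel_size=3, padding=1)
    y = conv(torch.rand(2, 8, 32, 32))
    assert y.shape == (2, 16, 32, 32)  # spatial size preserved, channels expanded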
class ASFBlock(nn.Module):
"""
    This code is adapted from:
https://github.com/MhLiao/DB/blob/master/decoders/feature_attention.py
"""
def __init__(self, in_channels, inter_channels, out_features_num=4):
"""
Adaptive Scale Fusion (ASF) block of DBNet++
Args:
in_channels: the number of channels in the input data
inter_channels: the number of middle channels
out_features_num: the number of fused stages
"""
super(ASFBlock, self).__init__()
self.in_channels = in_channels
self.inter_channels = inter_channels
self.out_features_num = out_features_num
self.conv = nn.Conv2d(in_channels, inter_channels, 3, padding=1)
self.spatial_scale = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, bias=False, padding=1), nn.ReLU(), nn.Conv2d(in_channels=1, out_channels=1, kernel_size=1, bias=False), nn.Sigmoid())
self.channel_scale = nn.Sequential(nn.Conv2d(in_channels=inter_channels, out_channels=out_features_num, kernel_size=1, bias=False), nn.Sigmoid())
def forward(self, fuse_features, features_list):
fuse_features = self.conv(fuse_features)
spatial_x = torch.mean(fuse_features, dim=1, keepdim=True)
attention_scores = self.spatial_scale(spatial_x) + fuse_features
attention_scores = self.channel_scale(attention_scores)
assert len(features_list) == self.out_features_num
out_list = []
for i in range(self.out_features_num):
out_list.append(attention_scores[:, i:i + 1] * features_list[i])
return torch.cat(out_list, dim=1)
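# Usage sketch for ASFBlock (sizes are assumptions): spatial attention produces
# one weight map per FPN stage, scaling the four stage maps before they are
# concatenated again.
def _demo_asf_block():
    asf = ASFBlock(in_channels=16, inter_channels=16)
    fuse = torch.rand(2, 16, 8, 8)
    feats = [torch.rand(2, 4, 8, 8) for _ in range(4)]  # out_features_num == 4
    out = asf(fuse, feats)
    assert out.shape == (2, 16, 8, 8)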
class DBFPN(nn.Module):
def __init__(self, in_channels, out_channels, use_asf=False, **kwargs):
super(DBFPN, self).__init__()
self.out_channels = out_channels
self.use_asf = use_asf
self.in2_conv = nn.Conv2d(in_channels=in_channels[0], out_channels=self.out_channels, kernel_size=1, bias=False)
self.in3_conv = nn.Conv2d(in_channels=in_channels[1], out_channels=self.out_channels, kernel_size=1, bias=False)
self.in4_conv = nn.Conv2d(in_channels=in_channels[2], out_channels=self.out_channels, kernel_size=1, bias=False)
self.in5_conv = nn.Conv2d(in_channels=in_channels[3], out_channels=self.out_channels, kernel_size=1, bias=False)
self.p5_conv = nn.Conv2d(in_channels=self.out_channels, out_channels=self.out_channels // 4, kernel_size=3, padding=1, bias=False)
self.p4_conv = nn.Conv2d(in_channels=self.out_channels, out_channels=self.out_channels // 4, kernel_size=3, padding=1, bias=False)
self.p3_conv = nn.Conv2d(in_channels=self.out_channels, out_channels=self.out_channels // 4, kernel_size=3, padding=1, bias=False)
self.p2_conv = nn.Conv2d(in_channels=self.out_channels, out_channels=self.out_channels // 4, kernel_size=3, padding=1, bias=False)
if self.use_asf is True:
self.asf = ASFBlock(self.out_channels, self.out_channels // 4)
def forward(self, x):
c2, c3, c4, c5 = x
in5 = self.in5_conv(c5)
in4 = self.in4_conv(c4)
in3 = self.in3_conv(c3)
in2 = self.in2_conv(c2)
out4 = in4 + F.interpolate(in5, scale_factor=2, mode='nearest')
out3 = in3 + F.interpolate(out4, scale_factor=2, mode='nearest')
out2 = in2 + F.interpolate(out3, scale_factor=2, mode='nearest')
p5 = self.p5_conv(in5)
p4 = self.p4_conv(out4)
p3 = self.p3_conv(out3)
p2 = self.p2_conv(out2)
p5 = F.interpolate(p5, scale_factor=8, mode='nearest')
p4 = F.interpolate(p4, scale_factor=4, mode='nearest')
p3 = F.interpolate(p3, scale_factor=2, mode='nearest')
fuse = torch.cat([p5, p4, p3, p2], dim=1)
if self.use_asf is True:
fuse = self.asf(fuse, [p5, p4, p3, p2])
return fuse
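# Shape walk-through for DBFPN (illustrative pyramid sizes): c2..c5 are fused
# top-down, each level is reduced to out_channels // 4 and upsampled to the c2
# resolution, so the concatenated output has out_channels channels.
def _demo_dbfpn():
    fpn = DBFPN(in_channels=[16, 32, 64, 128], out_channels=64)
    c2, c3 = torch.rand(2, 16, 32, 32), torch.rand(2, 32, 16, 16)
    c4, c5 = torch.rand(2, 64, 8, 8), torch.rand(2, 128, 4, 4)
    fuse = fpn([c2, c3, c4, c5])
    assert fuse.shape == (2, 64, 32, 32)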
class RSELayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, shortcut=True):
super(RSELayer, self).__init__()
self.out_channels = out_channels
self.in_conv = nn.Conv2d(in_channels=in_channels, out_channels=self.out_channels, kernel_size=kernel_size, padding=int(kernel_size // 2), bias=False)
self.se_block = SEModule(self.out_channels)
self.shortcut = shortcut
def forward(self, ins):
x = self.in_conv(ins)
if self.shortcut:
out = x + self.se_block(x)
else:
out = self.se_block(x)
return out
class RSEFPN(nn.Module):
def __init__(self, in_channels, out_channels, shortcut=True, **kwargs):
super(RSEFPN, self).__init__()
self.out_channels = out_channels
self.ins_conv = nn.ModuleList()
self.inp_conv = nn.ModuleList()
for i in range(len(in_channels)):
self.ins_conv.append(RSELayer(in_channels[i], out_channels, kernel_size=1, shortcut=shortcut))
self.inp_conv.append(RSELayer(out_channels, out_channels // 4, kernel_size=3, shortcut=shortcut))
def forward(self, x):
c2, c3, c4, c5 = x
in5 = self.ins_conv[3](c5)
in4 = self.ins_conv[2](c4)
in3 = self.ins_conv[1](c3)
in2 = self.ins_conv[0](c2)
        out4 = in4 + F.interpolate(in5, scale_factor=2, mode='nearest')
        out3 = in3 + F.interpolate(out4, scale_factor=2, mode='nearest')
        out2 = in2 + F.interpolate(out3, scale_factor=2, mode='nearest')
p5 = self.inp_conv[3](in5)
p4 = self.inp_conv[2](out4)
p3 = self.inp_conv[1](out3)
p2 = self.inp_conv[0](out2)
        p5 = F.interpolate(p5, scale_factor=8, mode='nearest')
        p4 = F.interpolate(p4, scale_factor=4, mode='nearest')
        p3 = F.interpolate(p3, scale_factor=2, mode='nearest')
fuse = torch.cat([p5, p4, p3, p2], dim=1)
return fuse
class LKPAN(nn.Module):
def __init__(self, in_channels, out_channels, mode='large', **kwargs):
super(LKPAN, self).__init__()
self.out_channels = out_channels
self.ins_conv = nn.ModuleList()
self.inp_conv = nn.ModuleList()
self.pan_head_conv = nn.ModuleList()
self.pan_lat_conv = nn.ModuleList()
if mode.lower() == 'lite':
p_layer = DSConv
elif mode.lower() == 'large':
p_layer = nn.Conv2d
else:
raise ValueError("mode can only be one of ['lite', 'large'], but received {}".format(mode))
for i in range(len(in_channels)):
self.ins_conv.append(nn.Conv2d(in_channels=in_channels[i], out_channels=self.out_channels, kernel_size=1, bias=False))
self.inp_conv.append(p_layer(in_channels=self.out_channels, out_channels=self.out_channels // 4, kernel_size=9, padding=4, bias=False))
if i > 0:
self.pan_head_conv.append(nn.Conv2d(in_channels=self.out_channels // 4, out_channels=self.out_channels // 4, kernel_size=3, padding=1, stride=2, bias=False))
self.pan_lat_conv.append(p_layer(in_channels=self.out_channels // 4, out_channels=self.out_channels // 4, kernel_size=9, padding=4, bias=False))
def forward(self, x):
c2, c3, c4, c5 = x
in5 = self.ins_conv[3](c5)
in4 = self.ins_conv[2](c4)
in3 = self.ins_conv[1](c3)
in2 = self.ins_conv[0](c2)
        out4 = in4 + F.interpolate(in5, scale_factor=2, mode='nearest')
        out3 = in3 + F.interpolate(out4, scale_factor=2, mode='nearest')
        out2 = in2 + F.interpolate(out3, scale_factor=2, mode='nearest')
f5 = self.inp_conv[3](in5)
f4 = self.inp_conv[2](out4)
f3 = self.inp_conv[1](out3)
f2 = self.inp_conv[0](out2)
pan3 = f3 + self.pan_head_conv[0](f2)
pan4 = f4 + self.pan_head_conv[1](pan3)
pan5 = f5 + self.pan_head_conv[2](pan4)
p2 = self.pan_lat_conv[0](f2)
p3 = self.pan_lat_conv[1](pan3)
p4 = self.pan_lat_conv[2](pan4)
p5 = self.pan_lat_conv[3](pan5)
        p5 = F.interpolate(p5, scale_factor=8, mode='nearest')
        p4 = F.interpolate(p4, scale_factor=4, mode='nearest')
        p3 = F.interpolate(p3, scale_factor=2, mode='nearest')
fuse = torch.cat([p5, p4, p3, p2], dim=1)
return fuse
class DeConvBNLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, if_act=True, act=None, name=None):
super(DeConvBNLayer, self).__init__()
self.if_act = if_act
self.act = act
self.deconv = nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=(kernel_size - 1) // 2, groups=groups, bias=False)
self.bn = nn.BatchNorm2d(out_channels)
self.act = act
if act is not None:
self._act = Activation(act)
def forward(self, x):
x = self.deconv(x)
x = self.bn(x)
if self.act is not None:
x = self._act(x)
return x
class EASTFPN(nn.Module):
def __init__(self, in_channels, model_name, **kwargs):
super(EASTFPN, self).__init__()
self.model_name = model_name
if self.model_name == 'large':
self.out_channels = 128
else:
self.out_channels = 64
self.in_channels = in_channels[::-1]
self.h1_conv = ConvBNLayer(in_channels=self.out_channels + self.in_channels[1], out_channels=self.out_channels, kernel_size=3, stride=1, padding=1, if_act=True, act='relu', name='unet_h_1')
self.h2_conv = ConvBNLayer(in_channels=self.out_channels + self.in_channels[2], out_channels=self.out_channels, kernel_size=3, stride=1, padding=1, if_act=True, act='relu', name='unet_h_2')
self.h3_conv = ConvBNLayer(in_channels=self.out_channels + self.in_channels[3], out_channels=self.out_channels, kernel_size=3, stride=1, padding=1, if_act=True, act='relu', name='unet_h_3')
self.g0_deconv = DeConvBNLayer(in_channels=self.in_channels[0], out_channels=self.out_channels, kernel_size=4, stride=2, padding=1, if_act=True, act='relu', name='unet_g_0')
self.g1_deconv = DeConvBNLayer(in_channels=self.out_channels, out_channels=self.out_channels, kernel_size=4, stride=2, padding=1, if_act=True, act='relu', name='unet_g_1')
self.g2_deconv = DeConvBNLayer(in_channels=self.out_channels, out_channels=self.out_channels, kernel_size=4, stride=2, padding=1, if_act=True, act='relu', name='unet_g_2')
self.g3_conv = ConvBNLayer(in_channels=self.out_channels, out_channels=self.out_channels, kernel_size=3, stride=1, padding=1, if_act=True, act='relu', name='unet_g_3')
def forward(self, x):
f = x[::-1]
h = f[0]
g = self.g0_deconv(h)
h = torch.cat([g, f[1]], dim=1)
h = self.h1_conv(h)
g = self.g1_deconv(h)
h = torch.cat([g, f[2]], dim=1)
h = self.h2_conv(h)
g = self.g2_deconv(h)
h = torch.cat([g, f[3]], dim=1)
h = self.h3_conv(h)
g = self.g3_conv(h)
return g
class FCEFPN(nn.Module):
"""
Feature Pyramid Network, see https://arxiv.org/abs/1612.03144
Args:
in_channels (list[int]): input channels of each level which can be
derived from the output shape of backbone by from_config
out_channels (list[int]): output channel of each level
spatial_scales (list[float]): the spatial scales between input feature
maps and original input image which can be derived from the output
shape of backbone by from_config
has_extra_convs (bool): whether to add extra conv to the last level.
default False
extra_stage (int): the number of extra stages added to the last level.
default 1
use_c5 (bool): Whether to use c5 as the input of extra stage,
otherwise p5 is used. default True
norm_type (string|None): The normalization type in FPN module. If
norm_type is None, norm will not be used after conv and if
norm_type is string, bn, gn, sync_bn are available. default None
norm_decay (float): weight decay for normalization layer weights.
default 0.
freeze_norm (bool): whether to freeze normalization layer.
default False
relu_before_extra_convs (bool): whether to add relu before extra convs.
default False
"""
def __init__(self, in_channels, out_channels, spatial_scales=[0.25, 0.125, 0.0625, 0.03125], has_extra_convs=False, extra_stage=1, use_c5=True, norm_type=None, norm_decay=0.0, freeze_norm=False, relu_before_extra_convs=True):
super(FCEFPN, self).__init__()
self.out_channels = out_channels
for s in range(extra_stage):
spatial_scales = spatial_scales + [spatial_scales[-1] / 2.0]
self.spatial_scales = spatial_scales
self.has_extra_convs = has_extra_convs
self.extra_stage = extra_stage
self.use_c5 = use_c5
self.relu_before_extra_convs = relu_before_extra_convs
self.norm_type = norm_type
self.norm_decay = norm_decay
self.freeze_norm = freeze_norm
self.lateral_convs = []
self.lateral_convs_module = nn.ModuleList()
self.fpn_convs = []
self.fpn_convs_module = nn.ModuleList()
fan = out_channels * 3 * 3
st_stage = 4 - len(in_channels)
ed_stage = st_stage + len(in_channels) - 1
for i in range(st_stage, ed_stage + 1):
if i == 3:
lateral_name = 'fpn_inner_res5_sum'
else:
lateral_name = 'fpn_inner_res{}_sum_lateral'.format(i + 2)
in_c = in_channels[i - st_stage]
if self.norm_type is not None:
lateral = ConvNormLayer(ch_in=in_c, ch_out=out_channels, filter_size=1, stride=1, norm_type=self.norm_type, norm_decay=self.norm_decay, freeze_norm=self.freeze_norm, initializer=None)
else:
lateral = nn.Conv2d(in_channels=in_c, out_channels=out_channels, kernel_size=1)
self.lateral_convs_module.add_module(lateral_name, lateral)
self.lateral_convs.append(lateral)
for i in range(st_stage, ed_stage + 1):
fpn_name = 'fpn_res{}_sum'.format(i + 2)
fpn_conv_module = nn.Sequential()
if self.norm_type is not None:
fpn_conv = ConvNormLayer(ch_in=out_channels, ch_out=out_channels, filter_size=3, stride=1, norm_type=self.norm_type, norm_decay=self.norm_decay, freeze_norm=self.freeze_norm, initializer=None)
else:
fpn_conv = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1)
self.fpn_convs_module.add_module(fpn_name, fpn_conv)
self.fpn_convs.append(fpn_conv)
if self.has_extra_convs:
for i in range(self.extra_stage):
lvl = ed_stage + 1 + i
if i == 0 and self.use_c5:
in_c = in_channels[-1]
else:
in_c = out_channels
extra_fpn_name = 'fpn_{}'.format(lvl + 2)
extra_fpn_conv_module = nn.Sequential()
if self.norm_type is not None:
extra_fpn_conv = ConvNormLayer(ch_in=in_c, ch_out=out_channels, filter_size=3, stride=2, norm_type=self.norm_type, norm_decay=self.norm_decay, freeze_norm=self.freeze_norm, initializer=None)
else:
extra_fpn_conv = nn.Conv2d(in_channels=in_c, out_channels=out_channels, kernel_size=3, stride=2, padding=1)
self.fpn_convs_module.add_module(extra_fpn_name, extra_fpn_conv)
self.fpn_convs.append(extra_fpn_conv)
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape], 'spatial_scales': [(1.0 / i.stride) for i in input_shape]}
def forward(self, body_feats):
laterals = []
num_levels = len(body_feats)
for i in range(num_levels):
laterals.append(self.lateral_convs[i](body_feats[i]))
for i in range(1, num_levels):
lvl = num_levels - i
upsample = F.interpolate(laterals[lvl], scale_factor=2.0, mode='nearest')
laterals[lvl - 1] += upsample
fpn_output = []
for lvl in range(num_levels):
fpn_output.append(self.fpn_convs[lvl](laterals[lvl]))
if self.extra_stage > 0:
if not self.has_extra_convs:
                assert self.extra_stage == 1, 'extra_stage should be 1 if FPN has no extra convs'
fpn_output.append(torch.max_pool2d(fpn_output[-1], 1, stride=2))
else:
if self.use_c5:
extra_source = body_feats[-1]
else:
extra_source = fpn_output[-1]
fpn_output.append(self.fpn_convs[num_levels](extra_source))
for i in range(1, self.extra_stage):
if self.relu_before_extra_convs:
fpn_output.append(self.fpn_convs[num_levels + i](F.relu(fpn_output[-1])))
else:
fpn_output.append(self.fpn_convs[num_levels + i](fpn_output[-1]))
return fpn_output
class PGFPN(nn.Module):
def __init__(self, in_channels, **kwargs):
super(PGFPN, self).__init__()
num_inputs = [2048, 2048, 1024, 512, 256]
num_outputs = [256, 256, 192, 192, 128]
self.out_channels = 128
self.conv_bn_layer_1 = ConvBNLayer(in_channels=3, out_channels=32, kernel_size=3, stride=1, act=None, name='FPN_d1')
self.conv_bn_layer_2 = ConvBNLayer(in_channels=64, out_channels=64, kernel_size=3, stride=1, act=None, name='FPN_d2')
self.conv_bn_layer_3 = ConvBNLayer(in_channels=256, out_channels=128, kernel_size=3, stride=1, act=None, name='FPN_d3')
self.conv_bn_layer_4 = ConvBNLayer(in_channels=32, out_channels=64, kernel_size=3, stride=2, act=None, name='FPN_d4')
self.conv_bn_layer_5 = ConvBNLayer(in_channels=64, out_channels=64, kernel_size=3, stride=1, act='relu', name='FPN_d5')
self.conv_bn_layer_6 = ConvBNLayer(in_channels=64, out_channels=128, kernel_size=3, stride=2, act=None, name='FPN_d6')
self.conv_bn_layer_7 = ConvBNLayer(in_channels=128, out_channels=128, kernel_size=3, stride=1, act='relu', name='FPN_d7')
self.conv_bn_layer_8 = ConvBNLayer(in_channels=128, out_channels=128, kernel_size=1, stride=1, act=None, name='FPN_d8')
self.conv_h0 = ConvBNLayer(in_channels=num_inputs[0], out_channels=num_outputs[0], kernel_size=1, stride=1, act=None, name='conv_h{}'.format(0))
self.conv_h1 = ConvBNLayer(in_channels=num_inputs[1], out_channels=num_outputs[1], kernel_size=1, stride=1, act=None, name='conv_h{}'.format(1))
self.conv_h2 = ConvBNLayer(in_channels=num_inputs[2], out_channels=num_outputs[2], kernel_size=1, stride=1, act=None, name='conv_h{}'.format(2))
self.conv_h3 = ConvBNLayer(in_channels=num_inputs[3], out_channels=num_outputs[3], kernel_size=1, stride=1, act=None, name='conv_h{}'.format(3))
self.conv_h4 = ConvBNLayer(in_channels=num_inputs[4], out_channels=num_outputs[4], kernel_size=1, stride=1, act=None, name='conv_h{}'.format(4))
self.dconv0 = DeConvBNLayer(in_channels=num_outputs[0], out_channels=num_outputs[0 + 1], name='dconv_{}'.format(0))
self.dconv1 = DeConvBNLayer(in_channels=num_outputs[1], out_channels=num_outputs[1 + 1], act=None, name='dconv_{}'.format(1))
self.dconv2 = DeConvBNLayer(in_channels=num_outputs[2], out_channels=num_outputs[2 + 1], act=None, name='dconv_{}'.format(2))
self.dconv3 = DeConvBNLayer(in_channels=num_outputs[3], out_channels=num_outputs[3 + 1], act=None, name='dconv_{}'.format(3))
self.conv_g1 = ConvBNLayer(in_channels=num_outputs[1], out_channels=num_outputs[1], kernel_size=3, stride=1, act='relu', name='conv_g{}'.format(1))
self.conv_g2 = ConvBNLayer(in_channels=num_outputs[2], out_channels=num_outputs[2], kernel_size=3, stride=1, act='relu', name='conv_g{}'.format(2))
self.conv_g3 = ConvBNLayer(in_channels=num_outputs[3], out_channels=num_outputs[3], kernel_size=3, stride=1, act='relu', name='conv_g{}'.format(3))
self.conv_g4 = ConvBNLayer(in_channels=num_outputs[4], out_channels=num_outputs[4], kernel_size=3, stride=1, act='relu', name='conv_g{}'.format(4))
self.convf = ConvBNLayer(in_channels=num_outputs[4], out_channels=num_outputs[4], kernel_size=1, stride=1, act=None, name='conv_f{}'.format(4))
def forward(self, x):
c0, c1, c2, c3, c4, c5, c6 = x
f = [c0, c1, c2]
g = [None, None, None]
h = [None, None, None]
h[0] = self.conv_bn_layer_1(f[0])
h[1] = self.conv_bn_layer_2(f[1])
h[2] = self.conv_bn_layer_3(f[2])
g[0] = self.conv_bn_layer_4(h[0])
g[1] = torch.add(g[0], h[1])
g[1] = F.relu(g[1])
g[1] = self.conv_bn_layer_5(g[1])
g[1] = self.conv_bn_layer_6(g[1])
g[2] = torch.add(g[1], h[2])
g[2] = F.relu(g[2])
g[2] = self.conv_bn_layer_7(g[2])
f_down = self.conv_bn_layer_8(g[2])
f1 = [c6, c5, c4, c3, c2]
g = [None, None, None, None, None]
h = [None, None, None, None, None]
h[0] = self.conv_h0(f1[0])
h[1] = self.conv_h1(f1[1])
h[2] = self.conv_h2(f1[2])
h[3] = self.conv_h3(f1[3])
h[4] = self.conv_h4(f1[4])
g[0] = self.dconv0(h[0])
g[1] = torch.add(g[0], h[1])
g[1] = F.relu(g[1])
g[1] = self.conv_g1(g[1])
g[1] = self.dconv1(g[1])
g[2] = torch.add(g[1], h[2])
g[2] = F.relu(g[2])
g[2] = self.conv_g2(g[2])
g[2] = self.dconv2(g[2])
g[3] = torch.add(g[2], h[3])
g[3] = F.relu(g[3])
g[3] = self.conv_g3(g[3])
g[3] = self.dconv3(g[3])
g[4] = torch.add(g[3], h[4])
g[4] = F.relu(g[4])
g[4] = self.conv_g4(g[4])
f_up = self.convf(g[4])
f_common = torch.add(f_down, f_up)
f_common = F.relu(f_common)
return f_common
class Im2Seq(nn.Module):
def __init__(self, in_channels, **kwargs):
super().__init__()
self.out_channels = in_channels
def forward(self, x):
B, C, H, W = x.shape
x = x.squeeze(dim=2)
x = x.permute(0, 2, 1)
return x
class EncoderWithRNN_(nn.Module):
def __init__(self, in_channels, hidden_size):
super(EncoderWithRNN_, self).__init__()
self.out_channels = hidden_size * 2
self.rnn1 = nn.LSTM(in_channels, hidden_size, bidirectional=False, batch_first=True, num_layers=2)
self.rnn2 = nn.LSTM(in_channels, hidden_size, bidirectional=False, batch_first=True, num_layers=2)
def forward(self, x):
self.rnn1.flatten_parameters()
self.rnn2.flatten_parameters()
out1, h1 = self.rnn1(x)
out2, h2 = self.rnn2(torch.flip(x, [1]))
return torch.cat([out1, torch.flip(out2, [1])], 2)
class EncoderWithRNN(nn.Module):
def __init__(self, in_channels, hidden_size):
super(EncoderWithRNN, self).__init__()
self.out_channels = hidden_size * 2
self.lstm = nn.LSTM(in_channels, hidden_size, num_layers=2, batch_first=True, bidirectional=True)
def forward(self, x):
x, _ = self.lstm(x)
return x
class EncoderWithFC(nn.Module):
def __init__(self, in_channels, hidden_size):
super(EncoderWithFC, self).__init__()
self.out_channels = hidden_size
self.fc = nn.Linear(in_channels, hidden_size, bias=True)
def forward(self, x):
x = self.fc(x)
return x
class EncoderWithSVTR(nn.Module):
def __init__(self, in_channels, dims=64, depth=2, hidden_dims=120, use_guide=False, num_heads=8, qkv_bias=True, mlp_ratio=2.0, drop_rate=0.1, attn_drop_rate=0.1, drop_path=0.0, qk_scale=None):
super(EncoderWithSVTR, self).__init__()
self.depth = depth
self.use_guide = use_guide
self.conv1 = ConvBNLayer(in_channels, in_channels // 8, padding=1, act='swish')
self.conv2 = ConvBNLayer(in_channels // 8, hidden_dims, kernel_size=1, act='swish')
self.svtr_block = nn.ModuleList([Block(dim=hidden_dims, num_heads=num_heads, mixer='Global', HW=None, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, act_layer='swish', attn_drop=attn_drop_rate, drop_path=drop_path, norm_layer='nn.LayerNorm', epsilon=1e-05, prenorm=False) for i in range(depth)])
self.norm = nn.LayerNorm(hidden_dims, eps=1e-06)
self.conv3 = ConvBNLayer(hidden_dims, in_channels, kernel_size=1, act='swish')
self.conv4 = ConvBNLayer(2 * in_channels, in_channels // 8, padding=1, act='swish')
self.conv1x1 = ConvBNLayer(in_channels // 8, dims, kernel_size=1, act='swish')
self.out_channels = dims
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.LayerNorm):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def forward(self, x):
if self.use_guide:
            z = x.detach()  # stop gradients flowing into the guide branch
else:
z = x
h = z
z = self.conv1(z)
z = self.conv2(z)
B, C, H, W = z.shape
z = z.flatten(2).permute(0, 2, 1)
for blk in self.svtr_block:
z = blk(z)
z = self.norm(z)
z = z.reshape([-1, H, W, C]).permute(0, 3, 1, 2)
z = self.conv3(z)
z = torch.cat((h, z), dim=1)
z = self.conv1x1(self.conv4(z))
return z
class SequenceEncoder(nn.Module):
def __init__(self, in_channels, encoder_type, hidden_size=48, **kwargs):
super(SequenceEncoder, self).__init__()
self.encoder_reshape = Im2Seq(in_channels)
self.out_channels = self.encoder_reshape.out_channels
self.encoder_type = encoder_type
if encoder_type == 'reshape':
self.only_reshape = True
else:
support_encoder_dict = {'reshape': Im2Seq, 'fc': EncoderWithFC, 'rnn': EncoderWithRNN, 'svtr': EncoderWithSVTR}
assert encoder_type in support_encoder_dict, '{} must in {}'.format(encoder_type, support_encoder_dict.keys())
if encoder_type == 'svtr':
self.encoder = support_encoder_dict[encoder_type](self.encoder_reshape.out_channels, **kwargs)
else:
self.encoder = support_encoder_dict[encoder_type](self.encoder_reshape.out_channels, hidden_size)
self.out_channels = self.encoder.out_channels
self.only_reshape = False
def forward(self, x):
if self.encoder_type != 'svtr':
x = self.encoder_reshape(x)
if not self.only_reshape:
x = self.encoder(x)
return x
else:
x = self.encoder(x)
x = self.encoder_reshape(x)
return x
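# Usage sketch for SequenceEncoder with the 'rnn' variant (sizes illustrative):
# a height-1 recognition feature map becomes a width-major sequence whose
# channel size is doubled by the bidirectional LSTM.
def _demo_sequence_encoder():
    enc = SequenceEncoder(in_channels=64, encoder_type='rnn', hidden_size=48)
    x = torch.rand(2, 64, 1, 80)
    seq = enc(x)
    assert seq.shape == (2, 80, 96)  # [B, W, 2 * hidden_size]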
class FPN_Up_Fusion(nn.Module):
def __init__(self, in_channels):
super(FPN_Up_Fusion, self).__init__()
in_channels = in_channels[::-1]
out_channels = [256, 256, 192, 192, 128]
self.h0_conv = ConvBNLayer(in_channels[0], out_channels[0], 1, 1, act=None, name='fpn_up_h0')
self.h1_conv = ConvBNLayer(in_channels[1], out_channels[1], 1, 1, act=None, name='fpn_up_h1')
self.h2_conv = ConvBNLayer(in_channels[2], out_channels[2], 1, 1, act=None, name='fpn_up_h2')
self.h3_conv = ConvBNLayer(in_channels[3], out_channels[3], 1, 1, act=None, name='fpn_up_h3')
self.h4_conv = ConvBNLayer(in_channels[4], out_channels[4], 1, 1, act=None, name='fpn_up_h4')
self.g0_conv = DeConvBNLayer(out_channels[0], out_channels[1], 4, 2, act=None, name='fpn_up_g0')
self.g1_conv = nn.Sequential(ConvBNLayer(out_channels[1], out_channels[1], 3, 1, act='relu', name='fpn_up_g1_1'), DeConvBNLayer(out_channels[1], out_channels[2], 4, 2, act=None, name='fpn_up_g1_2'))
self.g2_conv = nn.Sequential(ConvBNLayer(out_channels[2], out_channels[2], 3, 1, act='relu', name='fpn_up_g2_1'), DeConvBNLayer(out_channels[2], out_channels[3], 4, 2, act=None, name='fpn_up_g2_2'))
self.g3_conv = nn.Sequential(ConvBNLayer(out_channels[3], out_channels[3], 3, 1, act='relu', name='fpn_up_g3_1'), DeConvBNLayer(out_channels[3], out_channels[4], 4, 2, act=None, name='fpn_up_g3_2'))
self.g4_conv = nn.Sequential(ConvBNLayer(out_channels[4], out_channels[4], 3, 1, act='relu', name='fpn_up_fusion_1'), ConvBNLayer(out_channels[4], out_channels[4], 1, 1, act=None, name='fpn_up_fusion_2'))
def _add_relu(self, x1, x2):
x = torch.add(x1, x2)
x = F.relu(x)
return x
def forward(self, x):
f = x[2:][::-1]
h0 = self.h0_conv(f[0])
h1 = self.h1_conv(f[1])
h2 = self.h2_conv(f[2])
h3 = self.h3_conv(f[3])
h4 = self.h4_conv(f[4])
g0 = self.g0_conv(h0)
g1 = self._add_relu(g0, h1)
g1 = self.g1_conv(g1)
g2 = self.g2_conv(self._add_relu(g1, h2))
g3 = self.g3_conv(self._add_relu(g2, h3))
g4 = self.g4_conv(self._add_relu(g3, h4))
return g4
class FPN_Down_Fusion(nn.Module):
def __init__(self, in_channels):
super(FPN_Down_Fusion, self).__init__()
out_channels = [32, 64, 128]
self.h0_conv = ConvBNLayer(in_channels[0], out_channels[0], 3, 1, act=None, name='fpn_down_h0')
self.h1_conv = ConvBNLayer(in_channels[1], out_channels[1], 3, 1, act=None, name='fpn_down_h1')
self.h2_conv = ConvBNLayer(in_channels[2], out_channels[2], 3, 1, act=None, name='fpn_down_h2')
self.g0_conv = ConvBNLayer(out_channels[0], out_channels[1], 3, 2, act=None, name='fpn_down_g0')
self.g1_conv = nn.Sequential(ConvBNLayer(out_channels[1], out_channels[1], 3, 1, act='relu', name='fpn_down_g1_1'), ConvBNLayer(out_channels[1], out_channels[2], 3, 2, act=None, name='fpn_down_g1_2'))
self.g2_conv = nn.Sequential(ConvBNLayer(out_channels[2], out_channels[2], 3, 1, act='relu', name='fpn_down_fusion_1'), ConvBNLayer(out_channels[2], out_channels[2], 1, 1, act=None, name='fpn_down_fusion_2'))
def forward(self, x):
f = x[:3]
h0 = self.h0_conv(f[0])
h1 = self.h1_conv(f[1])
h2 = self.h2_conv(f[2])
g0 = self.g0_conv(h0)
g1 = torch.add(g0, h1)
g1 = F.relu(g1)
g1 = self.g1_conv(g1)
g2 = torch.add(g1, h2)
g2 = F.relu(g2)
g2 = self.g2_conv(g2)
return g2
class Cross_Attention(nn.Module):
def __init__(self, in_channels):
super(Cross_Attention, self).__init__()
self.theta_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_theta')
self.phi_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_phi')
self.g_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act='relu', name='f_g')
self.fh_weight_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fh_weight')
self.fh_sc_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fh_sc')
self.fv_weight_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fv_weight')
self.fv_sc_conv = ConvBNLayer(in_channels, in_channels, 1, 1, act=None, name='fv_sc')
self.f_attn_conv = ConvBNLayer(in_channels * 2, in_channels, 1, 1, act='relu', name='f_attn')
def _cal_fweight(self, f, shape):
f_theta, f_phi, f_g = f
f_theta = f_theta.permute(0, 2, 3, 1)
f_theta = torch.reshape(f_theta, [shape[0] * shape[1], shape[2], 128])
f_phi = f_phi.permute(0, 2, 3, 1)
f_phi = torch.reshape(f_phi, [shape[0] * shape[1], shape[2], 128])
f_g = f_g.permute(0, 2, 3, 1)
f_g = torch.reshape(f_g, [shape[0] * shape[1], shape[2], 128])
f_attn = torch.matmul(f_theta, f_phi.permute(0, 2, 1))
f_attn = f_attn / 128 ** 0.5
f_attn = F.softmax(f_attn, dim=-1)
f_weight = torch.matmul(f_attn, f_g)
f_weight = torch.reshape(f_weight, [shape[0], shape[1], shape[2], 128])
return f_weight
def forward(self, f_common):
f_shape = f_common.size()
f_theta = self.theta_conv(f_common)
f_phi = self.phi_conv(f_common)
f_g = self.g_conv(f_common)
fh_weight = self._cal_fweight([f_theta, f_phi, f_g], [f_shape[0], f_shape[2], f_shape[3]])
fh_weight = fh_weight.permute(0, 3, 1, 2)
fh_weight = self.fh_weight_conv(fh_weight)
fh_sc = self.fh_sc_conv(f_common)
f_h = F.relu(fh_weight + fh_sc)
fv_theta = f_theta.permute(0, 1, 3, 2)
fv_phi = f_phi.permute(0, 1, 3, 2)
fv_g = f_g.permute(0, 1, 3, 2)
fv_weight = self._cal_fweight([fv_theta, fv_phi, fv_g], [f_shape[0], f_shape[3], f_shape[2]])
fv_weight = fv_weight.permute(0, 3, 2, 1)
fv_weight = self.fv_weight_conv(fv_weight)
fv_sc = self.fv_sc_conv(f_common)
f_v = F.relu(fv_weight + fv_sc)
f_attn = torch.cat([f_h, f_v], dim=1)
f_attn = self.f_attn_conv(f_attn)
return f_attn
class SASTFPN(nn.Module):
def __init__(self, in_channels, with_cab=False, **kwargs):
super(SASTFPN, self).__init__()
self.in_channels = in_channels
self.with_cab = with_cab
self.FPN_Down_Fusion = FPN_Down_Fusion(self.in_channels)
self.FPN_Up_Fusion = FPN_Up_Fusion(self.in_channels)
self.out_channels = 128
self.cross_attention = Cross_Attention(self.out_channels)
def forward(self, x):
f_down = self.FPN_Down_Fusion(x)
f_up = self.FPN_Up_Fusion(x)
f_common = torch.add(f_down, f_up)
f_common = F.relu(f_common)
if self.with_cab:
f_common = self.cross_attention(f_common)
return f_common
def conv3x3_block(in_channels, out_channels, stride=1):
n = 3 * 3 * out_channels
w = math.sqrt(2.0 / n)
conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=True)
block = nn.Sequential(conv_layer, nn.BatchNorm2d(out_channels), nn.ReLU())
return block
class STN(nn.Module):
def __init__(self, in_channels, num_ctrlpoints, activation='none'):
super(STN, self).__init__()
self.in_channels = in_channels
self.num_ctrlpoints = num_ctrlpoints
self.activation = activation
self.stn_convnet = nn.Sequential(conv3x3_block(in_channels, 32), nn.MaxPool2d(kernel_size=2, stride=2), conv3x3_block(32, 64), nn.MaxPool2d(kernel_size=2, stride=2), conv3x3_block(64, 128), nn.MaxPool2d(kernel_size=2, stride=2), conv3x3_block(128, 256), nn.MaxPool2d(kernel_size=2, stride=2), conv3x3_block(256, 256), nn.MaxPool2d(kernel_size=2, stride=2), conv3x3_block(256, 256))
self.stn_fc1 = nn.Sequential(nn.Linear(2 * 256, 512, bias=True), nn.BatchNorm1d(512), nn.ReLU(inplace=True))
        fc2_bias = self.init_stn()
        self.stn_fc2 = nn.Linear(512, num_ctrlpoints * 2, bias=True)
        # assumed initialisation mirroring reference STN heads: zero weights and
        # the canonical control points as bias, so the untrained STN is near identity
        self.stn_fc2.weight.data.zero_()
        self.stn_fc2.bias.data = fc2_bias
def init_stn(self):
margin = 0.01
sampling_num_per_side = int(self.num_ctrlpoints / 2)
ctrl_pts_x = np.linspace(margin, 1.0 - margin, sampling_num_per_side)
ctrl_pts_y_top = np.ones(sampling_num_per_side) * margin
ctrl_pts_y_bottom = np.ones(sampling_num_per_side) * (1 - margin)
ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
ctrl_points = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0).astype(np.float32)
if self.activation == 'none':
pass
elif self.activation == 'sigmoid':
ctrl_points = -np.log(1.0 / ctrl_points - 1.0)
ctrl_points = torch.Tensor(ctrl_points)
fc2_bias = torch.reshape(ctrl_points, shape=[ctrl_points.shape[0] * ctrl_points.shape[1]])
return fc2_bias
def forward(self, x):
x = self.stn_convnet(x)
batch_size, _, h, w = x.shape
x = torch.reshape(x, shape=(batch_size, -1))
img_feat = self.stn_fc1(x)
x = self.stn_fc2(0.1 * img_feat)
if self.activation == 'sigmoid':
x = F.sigmoid(x)
x = torch.reshape(x, shape=[-1, self.num_ctrlpoints, 2])
return img_feat, x
def build_output_control_points(num_control_points, margins):
margin_x, margin_y = margins
num_ctrl_pts_per_side = num_control_points // 2
ctrl_pts_x = np.linspace(margin_x, 1.0 - margin_x, num_ctrl_pts_per_side)
ctrl_pts_y_top = np.ones(num_ctrl_pts_per_side) * margin_y
ctrl_pts_y_bottom = np.ones(num_ctrl_pts_per_side) * (1.0 - margin_y)
ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
output_ctrl_pts_arr = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)
output_ctrl_pts = torch.Tensor(output_ctrl_pts_arr)
return output_ctrl_pts
def compute_partial_repr(input_points, control_points):
N = input_points.shape[0]
M = control_points.shape[0]
pairwise_diff = torch.reshape(input_points, shape=[N, 1, 2]) - torch.reshape(control_points, shape=[1, M, 2])
pairwise_diff_square = pairwise_diff * pairwise_diff
pairwise_dist = pairwise_diff_square[:, :, 0] + pairwise_diff_square[:, :, 1]
repr_matrix = 0.5 * pairwise_dist * torch.log(pairwise_dist)
mask = repr_matrix != repr_matrix
repr_matrix.masked_fill_(mask, 0)
return repr_matrix
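# Numeric sanity check (illustrative, not from the source): compute_partial_repr
# evaluates the thin-plate-spline kernel U(r) = r^2 * log(r), written here as
# 0.5 * d^2 * log(d^2) with d^2 the squared distance, masking 0 * log(0) to 0.
def _demo_tps_kernel():
    pts = torch.tensor([[0.0, 0.0], [1.0, 0.0]])
    k = compute_partial_repr(pts, pts)
    assert float(k[0, 1]) == 0.0  # U(1) = 1 * log(1) = 0
    assert float(k[0, 0]) == 0.0  # coincident points are masked, not NaN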
def grid_sample(input, grid, canvas=None):
    # plain string comparison mis-orders versions such as '1.13' vs '1.3', so
    # compare numeric (major, minor) components instead
    if tuple(int(v) for v in torch.__version__.split('+')[0].split('.')[:2]) >= (1, 3):
        output = F.grid_sample(input, grid, align_corners=True)
    else:
        output = F.grid_sample(input, grid)
if canvas is None:
return output
else:
input_mask = input.data.new(input.size()).fill_(1)
output_mask = F.grid_sample(input_mask, grid)
padded_output = output * output_mask + canvas * (1 - output_mask)
return padded_output
class TPSSpatialTransformer(nn.Module):
def __init__(self, output_image_size=None, num_control_points=None, margins=None):
super(TPSSpatialTransformer, self).__init__()
self.output_image_size = output_image_size
self.num_control_points = num_control_points
self.margins = margins
self.target_height, self.target_width = output_image_size
target_control_points = build_output_control_points(num_control_points, margins)
N = num_control_points
forward_kernel = torch.zeros(N + 3, N + 3)
target_control_partial_repr = compute_partial_repr(target_control_points, target_control_points)
forward_kernel[:N, :N].copy_(target_control_partial_repr)
forward_kernel[:N, -3].fill_(1)
forward_kernel[-3, :N].fill_(1)
forward_kernel[:N, -2:].copy_(target_control_points)
forward_kernel[-2:, :N].copy_(target_control_points.transpose(0, 1))
inverse_kernel = torch.inverse(forward_kernel)
HW = self.target_height * self.target_width
target_coordinate = list(itertools.product(range(self.target_height), range(self.target_width)))
target_coordinate = torch.Tensor(target_coordinate)
Y, X = target_coordinate.split(1, dim=1)
Y = Y / (self.target_height - 1)
X = X / (self.target_width - 1)
target_coordinate = torch.cat([X, Y], dim=1)
target_coordinate_partial_repr = compute_partial_repr(target_coordinate, target_control_points)
target_coordinate_repr = torch.cat([target_coordinate_partial_repr, torch.ones(HW, 1), target_coordinate], dim=1)
        # register as buffers so the constant tensors follow the module across devices
        self.register_buffer('inverse_kernel', inverse_kernel)
        self.register_buffer('padding_matrix', torch.zeros(3, 2))
        self.register_buffer('target_coordinate_repr', target_coordinate_repr)
        self.register_buffer('target_control_points', target_control_points)
def forward(self, input, source_control_points):
assert source_control_points.ndimension() == 3
assert source_control_points.shape[1] == self.num_control_points
assert source_control_points.shape[2] == 2
batch_size = source_control_points.size(0)
Y = torch.cat([source_control_points, self.padding_matrix.expand(batch_size, 3, 2)], 1)
mapping_matrix = torch.matmul(self.inverse_kernel, Y)
source_coordinate = torch.matmul(self.target_coordinate_repr, mapping_matrix)
grid = torch.reshape(source_coordinate, shape=[-1, self.target_height, self.target_width, 2])
grid = torch.clamp(grid, 0, 1)
grid = 2.0 * grid - 1.0
output_maps = grid_sample(input, grid, canvas=None)
return output_maps, source_coordinate
class STN_ON(nn.Module):
def __init__(self, in_channels, tps_inputsize, tps_outputsize, num_control_points, tps_margins, stn_activation):
super(STN_ON, self).__init__()
self.tps = TPSSpatialTransformer(output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins))
self.stn_head = STN(in_channels=in_channels, num_ctrlpoints=num_control_points, activation=stn_activation)
self.tps_inputsize = tps_inputsize
self.out_channels = in_channels
def forward(self, image):
stn_input = torch.nn.functional.interpolate(image, self.tps_inputsize, mode='bilinear', align_corners=True)
stn_img_feat, ctrl_points = self.stn_head(stn_input)
x, _ = self.tps(image, ctrl_points)
return x
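# End-to-end sketch for STN_ON (all sizes are illustrative assumptions): the
# localization head predicts control points on a downsampled copy, then the TPS
# module resamples the full image to tps_outputsize.
def _demo_stn_on():
    stn_on = STN_ON(in_channels=3, tps_inputsize=[32, 64], tps_outputsize=[32, 100],
                    num_control_points=20, tps_margins=[0.05, 0.05], stn_activation='none')
    rectified = stn_on(torch.rand(2, 3, 64, 256))
    assert rectified.shape == (2, 3, 32, 100)  # resampled to tps_outputsize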
class LocalizationNetwork(nn.Module):
def __init__(self, in_channels, num_fiducial, loc_lr, model_name):
super(LocalizationNetwork, self).__init__()
self.F = num_fiducial
F = num_fiducial
if model_name == 'large':
num_filters_list = [64, 128, 256, 512]
fc_dim = 256
else:
num_filters_list = [16, 32, 64, 128]
fc_dim = 64
self.block_list = nn.Sequential()
for fno in range(0, len(num_filters_list)):
num_filters = num_filters_list[fno]
name = 'loc_conv%d' % fno
conv = ConvBNLayer(in_channels=in_channels, out_channels=num_filters, kernel_size=3, act='relu', name=name)
self.block_list.add_module(name, conv)
if fno == len(num_filters_list) - 1:
pool = nn.AdaptiveAvgPool2d(1)
else:
pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
in_channels = num_filters
self.block_list.add_module('{}_pool'.format(name), pool)
name = 'loc_fc1'
stdv = 1.0 / math.sqrt(num_filters_list[-1] * 1.0)
self.fc1 = nn.Linear(in_channels, fc_dim, bias=True)
        initial_bias = self.get_initial_fiducials()
        initial_bias = initial_bias.reshape(-1)
        name = 'loc_fc2'
        self.fc2 = nn.Linear(fc_dim, F * 2, bias=True)
        # assumed initialisation: zero weights plus the canonical fiducials as
        # bias, so the untrained TPS starts close to an identity transform
        self.fc2.weight.data.zero_()
        self.fc2.bias.data = torch.tensor(initial_bias, dtype=torch.float32)
self.out_channels = F * 2
def forward(self, x):
"""
Estimating parameters of geometric transformation
Args:
image: input
Return:
batch_C_prime: the matrix of the geometric transformation
"""
        for block in self.block_list:
            x = block(x)
x = x.squeeze(dim=2).squeeze(dim=2)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = x.reshape(shape=[-1, self.F, 2])
return x
def get_initial_fiducials(self):
""" see RARE paper Fig. 6 (a) """
F = self.F
ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))
ctrl_pts_y_top = np.linspace(0.0, -1.0, num=int(F / 2))
ctrl_pts_y_bottom = np.linspace(1.0, 0.0, num=int(F / 2))
ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
initial_bias = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)
return initial_bias
class GridGenerator(nn.Module):
def __init__(self, in_channels, num_fiducial):
super(GridGenerator, self).__init__()
self.eps = 1e-06
self.F = num_fiducial
name = 'ex_fc'
self.fc = nn.Linear(in_channels, 6, bias=True)
def forward(self, batch_C_prime, I_r_size):
"""
Generate the grid for the grid_sampler.
Args:
batch_C_prime: the matrix of the geometric transformation
I_r_size: the shape of the input image
Return:
batch_P_prime: the grid for the grid_sampler
"""
C = self.build_C_paddle()
P = self.build_P_paddle(I_r_size)
inv_delta_C_tensor = self.build_inv_delta_C_paddle(C).type(torch.float32)
P_hat_tensor = self.build_P_hat_paddle(C, torch.as_tensor(P)).type(torch.float32)
        # the kernels are constants, so no gradient flows through them; detach
        # the expand tensor to mirror Paddle's stop_gradient in the original code
        batch_C_ex_part_tensor = self.get_expand_tensor(batch_C_prime).detach()
        batch_C_prime_with_zeros = torch.cat([batch_C_prime, batch_C_ex_part_tensor], dim=1)
        batch_T = torch.matmul(inv_delta_C_tensor, batch_C_prime_with_zeros)
        batch_P_prime = torch.matmul(P_hat_tensor, batch_T)
return batch_P_prime
def build_C_paddle(self):
""" Return coordinates of fiducial points in I_r; C """
F = self.F
ctrl_pts_x = torch.linspace(-1.0, 1.0, int(F / 2), dtype=torch.float64)
ctrl_pts_y_top = -1 * torch.ones([int(F / 2)], dtype=torch.float64)
ctrl_pts_y_bottom = torch.ones([int(F / 2)], dtype=torch.float64)
ctrl_pts_top = torch.stack([ctrl_pts_x, ctrl_pts_y_top], dim=1)
ctrl_pts_bottom = torch.stack([ctrl_pts_x, ctrl_pts_y_bottom], dim=1)
C = torch.cat([ctrl_pts_top, ctrl_pts_bottom], dim=0)
return C
def build_P_paddle(self, I_r_size):
I_r_height, I_r_width = I_r_size
I_r_grid_x = (torch.arange(-I_r_width, I_r_width, 2, dtype=torch.float64) + 1.0) / torch.as_tensor(np.array([I_r_width]).astype(np.float64))
I_r_grid_y = (torch.arange(-I_r_height, I_r_height, 2, dtype=torch.float64) + 1.0) / torch.as_tensor(np.array([I_r_height]).astype(np.float64))
P = torch.stack(torch.meshgrid([I_r_grid_x, I_r_grid_y]), dim=2)
P = P.permute(1, 0, 2)
return P.reshape([-1, 2])
def build_inv_delta_C_paddle(self, C):
""" Return inv_delta_C which is needed to calculate T """
F = self.F
hat_C = torch.zeros((F, F), dtype=torch.float64)
for i in range(0, F):
for j in range(i, F):
if i == j:
hat_C[i, j] = 1
else:
r = torch.norm(C[i] - C[j])
hat_C[i, j] = r
hat_C[j, i] = r
hat_C = hat_C ** 2 * torch.log(hat_C)
delta_C = torch.cat([torch.cat([torch.ones((F, 1), dtype=torch.float64), C, hat_C], dim=1), torch.cat([torch.zeros((2, 3), dtype=torch.float64), C.permute(1, 0)], dim=1), torch.cat([torch.zeros((1, 3), dtype=torch.float64), torch.ones((1, F), dtype=torch.float64)], dim=1)], dim=0)
inv_delta_C = torch.inverse(delta_C)
return inv_delta_C
def build_P_hat_paddle(self, C, P):
F = self.F
eps = self.eps
n = P.shape[0]
P_tile = torch.unsqueeze(P, dim=1).repeat(1, F, 1)
C_tile = torch.unsqueeze(C, dim=0)
P_diff = P_tile - C_tile
rbf_norm = torch.norm(P_diff, p=2, dim=2, keepdim=False)
rbf = torch.mul(rbf_norm ** 2, torch.log(rbf_norm + eps))
P_hat = torch.cat([torch.ones((n, 1), dtype=torch.float64), P, rbf], dim=1)
return P_hat
def get_expand_tensor(self, batch_C_prime):
B, H, C = batch_C_prime.shape
batch_C_prime = batch_C_prime.reshape([B, H * C])
batch_C_ex_part_tensor = self.fc(batch_C_prime)
batch_C_ex_part_tensor = batch_C_ex_part_tensor.reshape([-1, 3, 2])
return batch_C_ex_part_tensor
class TPS(nn.Module):
def __init__(self, in_channels, num_fiducial, loc_lr, model_name):
super(TPS, self).__init__()
self.loc_net = LocalizationNetwork(in_channels, num_fiducial, loc_lr, model_name)
self.grid_generator = GridGenerator(self.loc_net.out_channels, num_fiducial)
self.out_channels = in_channels
def forward(self, image):
image.stop_gradient = False
batch_C_prime = self.loc_net(image)
batch_P_prime = self.grid_generator(batch_C_prime, image.shape[2:])
batch_P_prime = batch_P_prime.reshape([-1, image.shape[2], image.shape[3], 2])
if torch.__version__ < '1.3.0':
batch_I_r = F.grid_sample(image, grid=batch_P_prime)
else:
batch_I_r = F.grid_sample(image, grid=batch_P_prime, align_corners=True)
return batch_I_r
import torch
from torch.nn import MSELoss, ReLU
from _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile
TESTCASES = [
# (nn.Module, init_args, forward_args, jit_compiles)
(ASFBlock,
lambda: ([], {'in_channels': 4, 'inter_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
True),
(AttentionGRUCell,
lambda: ([], {'input_size': 4, 'hidden_size': 4, 'num_embeddings': 4}),
lambda: ([torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])], {}),
True),
(AttentionHead,
lambda: ([], {'in_channels': 4, 'out_channels': 4, 'hidden_size': 4}),
lambda: ([torch.rand([4, 4])], {}),
False),
(Backbone,
lambda: ([], {}),
lambda: ([], {}),
False),
(BasicStem,
lambda: ([], {}),
lambda: ([torch.rand([4, 3, 64, 64])], {}),
False),
(BiaffineAttention,
lambda: ([], {'in_features': 4, 'out_features': 4}),
lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
True),
(Block,
lambda: ([], {'dim': 4, 'num_heads': 4}),
lambda: ([torch.rand([4, 4, 4])], {}),
False),
(CTCHead,
lambda: ([], {'in_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(ClsHead,
lambda: ([], {'in_channels': 4, 'class_dim': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(Conv2d,
lambda: ([], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(ConvNormLayer,
lambda: ([], {'ch_in': 4, 'ch_out': 4, 'filter_size': 4, 'stride': 1}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(Conv_BN_ReLU,
lambda: ([], {'in_planes': 4, 'out_planes': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(DBHead,
lambda: ([], {'in_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(DSConv,
lambda: ([], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'padding': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(DeConvBNLayer,
lambda: ([], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4, 'stride': 1}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(DropBlock,
lambda: ([], {'block_size': 4, 'keep_prob': 4, 'name': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(DropPath,
lambda: ([], {}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(Encoder,
lambda: ([], {'n_layer': 1, 'n_head': 4, 'd_key': 4, 'd_value': 4, 'd_model': 4, 'd_inner_hid': 4, 'prepostprocess_dropout': 0.5, 'attention_dropout': 0.5, 'relu_dropout': 0.5}),
lambda: ([torch.rand([4, 4, 4]), torch.rand([4, 4, 4])], {}),
False),
(EncoderLayer,
lambda: ([], {'n_head': 4, 'd_key': 4, 'd_value': 4, 'd_model': 4, 'd_inner_hid': 4, 'prepostprocess_dropout': 0.5, 'attention_dropout': 0.5, 'relu_dropout': 0.5}),
lambda: ([torch.rand([4, 4, 4]), torch.rand([4, 4, 4])], {}),
False),
(EncoderWithFC,
lambda: ([], {'in_channels': 4, 'hidden_size': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(EncoderWithRNN,
lambda: ([], {'in_channels': 4, 'hidden_size': 4}),
lambda: ([torch.rand([4, 4])], {}),
True),
(EncoderWithRNN_,
lambda: ([], {'in_channels': 4, 'hidden_size': 4}),
lambda: ([torch.rand([4, 4, 4])], {}),
False),
(FFN,
lambda: ([], {'d_inner_hid': 4, 'd_model': 4, 'dropout_rate': 0.5}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(FrozenBatchNorm,
lambda: ([], {'num_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(GELU,
lambda: ([], {}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(Head,
lambda: ([], {'in_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(Hsigmoid,
lambda: ([], {}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(Hswish,
lambda: ([], {}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(Identity,
lambda: ([], {}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(Lambda,
lambda: ([], {'func': _mock_layer()}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(LastLevelMaxPool,
lambda: ([], {}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(LayoutLMPooler,
lambda: ([], {'hidden_size': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(LayoutXLMOutput,
lambda: ([], {'config': _mock_config(intermediate_size=4, hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}),
lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
False),
(LayoutXLMPooler,
lambda: ([], {'hidden_size': 4, 'with_pool': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(LayoutXLMSelfOutput,
lambda: ([], {'config': _mock_config(hidden_size=4, layer_norm_eps=1, hidden_dropout_prob=0.5)}),
lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
False),
(LocalizationNetwork,
lambda: ([], {'in_channels': 4, 'num_fiducial': 4, 'loc_lr': 4, 'model_name': 4}),
lambda: ([torch.rand([4, 4, 64, 64])], {}),
True),
(MTB,
lambda: ([], {'cnn_num': 4, 'in_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(Mlp,
lambda: ([], {'in_features': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(MultiHeadAttention,
lambda: ([], {'d_key': 4, 'd_value': 4, 'd_model': 4}),
lambda: ([torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 1, 4]), torch.rand([4, 4])], {}),
False),
(MultiheadAttention,
lambda: ([], {'embed_dim': 4, 'num_heads': 4}),
lambda: ([torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])], {}),
True),
(PSEHead,
lambda: ([], {'in_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(PTAttentionGRUCell,
lambda: ([], {'input_size': 4, 'hidden_size': 4, 'num_embeddings': 4}),
lambda: ([torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])], {}),
True),
(PTAttentionHead,
lambda: ([], {'in_channels': 4, 'out_channels': 4, 'hidden_size': 4}),
lambda: ([torch.rand([4, 4])], {}),
False),
(PositionalEncoding,
lambda: ([], {'dropout': 0.5, 'dim': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(PositionalEncoding_2d,
lambda: ([], {'dropout': 0.5, 'dim': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(PrePostProcessLayer,
lambda: ([], {'process_cmd': [4, 4], 'd_model': 4, 'dropout_rate': 0.5}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(PrepareEncoder,
lambda: ([], {'src_vocab_size': 4, 'src_emb_dim': 4, 'src_max_len': 4}),
lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
False),
(RSELayer,
lambda: ([], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(SARHead,
lambda: ([], {'in_channels': 4, 'out_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(SASTHead,
lambda: ([], {'in_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(SAST_Header1,
lambda: ([], {'in_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(SAST_Header2,
lambda: ([], {'in_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(SEModule,
lambda: ([], {'in_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(ShortCut,
lambda: ([], {'in_channels': 4, 'out_channels': 4, 'stride': 1, 'name': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(SubSample,
lambda: ([], {'in_channels': 4, 'out_channels': 4}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
False),
(Swish,
lambda: ([], {}),
lambda: ([torch.rand([4, 4, 4, 4])], {}),
True),
(TPS,
lambda: ([], {'in_channels': 4, 'num_fiducial': 4, 'loc_lr': 4, 'model_name': 4}),
lambda: ([torch.rand([4, 4, 64, 64])], {}),
False),
(TransformerDecoderLayer,
lambda: ([], {'d_model': 4, 'nhead': 4}),
lambda: ([torch.rand([4, 4, 4]), torch.rand([4, 4, 4])], {}),
False),
(TransformerEncoderLayer,
lambda: ([], {'d_model': 4, 'nhead': 4}),
lambda: ([torch.rand([4, 4, 4])], {}),
False),
]
class Test_frotms_PaddleOCR2Pytorch(_paritybench_base):
def test_000(self):
self._check(*TESTCASES[0])
def test_001(self):
self._check(*TESTCASES[1])
def test_002(self):
self._check(*TESTCASES[2])
def test_003(self):
self._check(*TESTCASES[3])
def test_004(self):
self._check(*TESTCASES[4])
def test_005(self):
self._check(*TESTCASES[5])
def test_006(self):
self._check(*TESTCASES[6])
def test_007(self):
self._check(*TESTCASES[7])
def test_008(self):
self._check(*TESTCASES[8])
def test_009(self):
self._check(*TESTCASES[9])
def test_010(self):
self._check(*TESTCASES[10])
def test_011(self):
self._check(*TESTCASES[11])
def test_012(self):
self._check(*TESTCASES[12])
def test_013(self):
self._check(*TESTCASES[13])
def test_014(self):
self._check(*TESTCASES[14])
def test_015(self):
self._check(*TESTCASES[15])
def test_016(self):
self._check(*TESTCASES[16])
def test_017(self):
self._check(*TESTCASES[17])
def test_018(self):
self._check(*TESTCASES[18])
def test_019(self):
self._check(*TESTCASES[19])
def test_020(self):
self._check(*TESTCASES[20])
def test_021(self):
self._check(*TESTCASES[21])
def test_022(self):
self._check(*TESTCASES[22])
def test_023(self):
self._check(*TESTCASES[23])
def test_024(self):
self._check(*TESTCASES[24])
def test_025(self):
self._check(*TESTCASES[25])
def test_026(self):
self._check(*TESTCASES[26])
def test_027(self):
self._check(*TESTCASES[27])
def test_028(self):
self._check(*TESTCASES[28])
def test_029(self):
self._check(*TESTCASES[29])
def test_030(self):
self._check(*TESTCASES[30])
def test_031(self):
self._check(*TESTCASES[31])
def test_032(self):
self._check(*TESTCASES[32])
def test_033(self):
self._check(*TESTCASES[33])
def test_034(self):
self._check(*TESTCASES[34])
def test_035(self):
self._check(*TESTCASES[35])
def test_036(self):
self._check(*TESTCASES[36])
def test_037(self):
self._check(*TESTCASES[37])
def test_038(self):
self._check(*TESTCASES[38])
def test_039(self):
self._check(*TESTCASES[39])
def test_040(self):
self._check(*TESTCASES[40])
def test_041(self):
self._check(*TESTCASES[41])
def test_042(self):
self._check(*TESTCASES[42])
def test_043(self):
self._check(*TESTCASES[43])
def test_044(self):
self._check(*TESTCASES[44])
def test_045(self):
self._check(*TESTCASES[45])
def test_046(self):
self._check(*TESTCASES[46])
def test_047(self):
self._check(*TESTCASES[47])
def test_048(self):
self._check(*TESTCASES[48])
def test_049(self):
self._check(*TESTCASES[49])
def test_050(self):
self._check(*TESTCASES[50])
def test_051(self):
self._check(*TESTCASES[51])
def test_052(self):
self._check(*TESTCASES[52])
def test_053(self):
self._check(*TESTCASES[53])
def test_054(self):
self._check(*TESTCASES[54])
def test_055(self):
self._check(*TESTCASES[55])
def test_056(self):
self._check(*TESTCASES[56])
def test_057(self):
self._check(*TESTCASES[57])
def test_058(self):
self._check(*TESTCASES[58])
|
[
"[email protected]"
] | |
62fdac76001172e852e91825b8e6245b3d0c843a
|
69d2627942a554d6914ba05de097a290fed66bad
|
/vb2py/targets/pythoncard/vbcontrols/vbtextfield.py
|
c37d1f300778de7e1e6b1ae23f4e6f2c677da7be
|
[
"BSD-3-Clause"
] |
permissive
|
rayzamgh/sumurProjection
|
0fcef39cc75e620057b012f1bd35cae1c49a5554
|
847ce71e85093ea5ee668ec61dbfba760ffa6bbd
|
refs/heads/master
| 2020-07-23T23:33:26.621550 | 2019-12-22T05:31:24 | 2019-12-22T05:31:24 | 207,738,494 | 1 | 0 | null | 2019-10-28T16:00:07 | 2019-09-11T06:23:43 |
Python
|
UTF-8
|
Python
| false | false | 831 |
py
|
from vb2py.targets.pythoncard.controlclasses import VBWrapped, VBWidget
from vb2py.targets.pythoncard import Register
import vb2py.logger
log = vb2py.logger.getLogger("VBTextField")
from PythonCard.components import textfield
import wx
import sys
from PythonCard import event, registry, widget
class VBTextField(VBWidget):
__metaclass__ = VBWrapped
_translations = {
"Text" : "text",
"Enabled" : "enabled",
"Visible" : "visible",
}
_indexed_translations = {
"Left" : ("position", 0),
"Top" : ("position", 1),
"Width" : ("size", 0),
"Height" : ("size", 1),
}
_proxy_for = textfield.TextField
log.debug("Registering VBTextField as '%s'" % sys.modules[__name__].VBTextField)
Register(VBTextField)
|
[
"[email protected]"
] | |
1951975481074fd5d822438ba56fe73946a2f7b6
|
c0baa78917da5bf81cd04758b127a8d3c5d27da6
|
/vize/160401025/client/client.py
|
c993aad9b32dc6e608e41e9816a5ed83282d47a3
|
[
"Unlicense"
] |
permissive
|
nyucel/blm304
|
9e3049e2743e2e9055e8e067724a966e82579d07
|
e23f28674229470b5f110ea37428f9c1ca13ac51
|
refs/heads/master
| 2022-11-07T12:36:12.620417 | 2020-06-27T11:09:59 | 2020-06-27T11:09:59 | 259,326,143 | 14 | 207 |
Unlicense
| 2020-06-27T11:10:00 | 2020-04-27T13:14:48 |
Python
|
UTF-8
|
Python
| false | false | 4,818 |
py
|
# -*- coding: utf-8 -*-
"""
@author: Halil İbrahim Koç
"""
import socket
import time
import os
import sys
port=42
buffer=4096
if len(sys.argv) != 2:
print("Host bilgisi girilmelidir.")
sys.exit()
try:
socket.gethostbyname(sys.argv[1])
except socket.error:
print("Host bilgisi gereklidir. Kontrol edip tekrar deneyiniz.")
sys.exit()
host = sys.argv[1]
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print("Bağlantı başlatılıyor")
s.setblocking(0)
s.settimeout(15)
except socket.error:
print("Başarısız bağlantı")
sys.exit()
while True:
    command = input(
        "Enter one of the following commands: \n1. get [file name]\n2. put [file name]\n3. list\n4. exit\n ")
    """o get [file name]
    o put [file name]
    o list
    o exit"""
clientCommand = command.encode('utf-8')
try:
s.sendto(clientCommand, (host, port))
except ConnectionResetError:
        print(
            "The port information is wrong.")
sys.exit()
clientArguments = command.split()
if clientArguments[0] == "get":
try:
ClientData, clientAddr = s.recvfrom(51200)
except:
print("Program zaman aşımına uğradı.")
sys.exit()
text = ClientData.decode('utf8')
print(text)
try:
ClientData2, clientAddr2 = s.recvfrom(buffer)
except:
print("Program zaman aşımına uğradı.")
sys.exit()
text2 = ClientData2.decode('utf8')
print(text2)
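        # Heuristic carried over from the original protocol: a short second
        # reply (< 50 chars) appears to signal success, while a longer reply is
        # assumed to be a server-side error message.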
if len(text2) < 50:
if clientArguments[0] == "get":
ComingFile = open(clientArguments[1], "wb")
d = 0
try:
                    # number of packets
CountC, countaddress = s.recvfrom(buffer)
except:
print("Bağlantı zaman aşımına uğradı.")
sys.exit()
packet1 = CountC.decode('utf8')
packet12 = int(packet1)
while packet12 != 0:
ClientBData, clientbAddr = s.recvfrom(4096)
dataS = ComingFile.write(ClientBData)
d += 1
print("Paket Adedi:" + str(d))
packet12 = packet12 - 1
ComingFile.close()
                print(
                    "File downloaded.")
elif clientArguments[0] == "put":
try:
ClientData, clientAddr = s.recvfrom(buffer)
except:
print("Bağlantı zaman aşımına uğradı.")
sys.exit()
text = ClientData.decode('utf8')
print(text)
if text == "Put fonksiyonu başlatıldı.":
if os.path.isfile(clientArguments[1]):
c = 0
#Length = len(CL1[1])
size = os.stat(clientArguments[1])
sizeS = size.st_size # number of packets
#sizeS = sizeS[:-1]
print("Dosya boyutu(bayt): " + str(sizeS))
Num = int(sizeS / buffer)
Num = Num + 1
print("Gönderilen Paket Sayısı: " + str(Num))
s.sendto(str(Num).encode('utf8'), clientAddr)
packet_num = int(Num)
SendingFile = open(clientArguments[1], "rb")
while packet_num != 0:
Run = SendingFile.read(buffer)
s.sendto(Run, clientAddr)
c += 1
packet_num -= 1
print("Paket Sayısı:" + str(c))
SendingFile.close()
print("İstemciden sunucuya put işlemi sona erdi.")
# s.sendto(str(sizeS).encode('utf8'),clientAddr)
else:
print("İstemcinin bulunduğu dizinde dosya bulunamadı.")
else:
print("Geçersiz.")
elif clientArguments[0] == "list":
try:
ClientData, clientAddr = s.recvfrom(buffer)
ClientData2, clientAddr2 = s.recvfrom(buffer)
except:
print("Bağlantı zaman aşımına uğradı.")
sys.exit()
text = ClientData.decode('utf8')
print(text)
lists=ClientData2.decode('utf8')
print("Server'daki dosyalar:")
lists=lists.split(',')
for i in lists:
print(i)
elif clientArguments[0] == "exit":
        print(
            "Shutting down the client and server.")
        break
print("Client closed.")
|
[
"[email protected]"
] | |
eac3e5d6c2b6049b58d419a252bf24a4d5f309b5
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/opcodes/cases/test_set_size_149.py
|
2290405a5f8b27eb8d84dfab529207a21fb3edd7
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 |
MIT
| 2020-12-30T16:44:56 | 2019-12-12T17:47:53 |
Python
|
UTF-8
|
Python
| false | false | 829 |
py
|
from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestset_size_149(TestCase):
def setUp(self):
self.maxDiff = None
self.i = Interpreter(debug=True)
def test_opcode_set_size_149(self):
res = self.i.execute(f'INCLUDE "{abspath("opcodes/contracts/set_size.tz")}"')
self.assertTrue(res['success'])
res = self.i.execute('RUN {} 111')
self.assertTrue(res['success'])
exp_val_expr = michelson_to_micheline('0')
exp_val = parse_expression(exp_val_expr, res['result']['storage'].type_expr)
self.assertEqual(exp_val, res['result']['storage']._val)
|
[
"[email protected]"
] | |
98d68f537c4a3515d10e239146c0345ecd931861
|
7ae0efc9798b7c9fa720022ed5d763d6ab27cd13
|
/python/paddle/fluid/tests/unittests/asp/test_asp_utils.py
|
4aac878763b6f6f7cab09ca5cdc3cfeab0f49d6d
|
[
"Apache-2.0"
] |
permissive
|
ceci3/Paddle
|
e1d0b56a1bb1de9a0d26977868795f86e2c0580b
|
e4d475eabd83e7a6fa1e88c64c28747450f87d66
|
refs/heads/develop
| 2023-08-03T03:43:35.139011 | 2022-02-08T11:36:07 | 2022-02-08T11:36:07 | 171,274,803 | 0 | 3 |
Apache-2.0
| 2021-08-24T07:14:24 | 2019-02-18T11:49:16 |
C++
|
UTF-8
|
Python
| false | false | 9,706 |
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import threading, time
import paddle
from paddle.static import sparsity
import numpy as np
class TestASPUtils(unittest.TestCase):
def test_get_check_method(self):
self.assertEqual(
paddle.fluid.contrib.sparsity.CheckMethod.get_checking_method(
paddle.fluid.contrib.sparsity.MaskAlgo.MASK_1D),
paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D)
self.assertEqual(
paddle.fluid.contrib.sparsity.CheckMethod.get_checking_method(
paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_GREEDY),
paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D)
self.assertEqual(
paddle.fluid.contrib.sparsity.CheckMethod.get_checking_method(
paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_BEST),
paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D)
def test_density(self):
x = np.array([[1.0, 1.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 1.0]])
self.assertEqual(sparsity.calculate_density(x), 0.56)
x[:, 0] = 0.0
self.assertEqual(sparsity.calculate_density(x), 0.4)
def test_check_mask_1d(self):
x = np.array([[1.0, 0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0],
[1.0, 1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 0.0, 0.0, 1.0]])
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 4))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_1d(x, 3, 4))
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 5))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_1d(x, 3, 5))
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_1d(x, 3, 6))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_1d(x, 4, 6))
def test_get_mask_1d(self):
for _ in range(10):
x = np.random.randint(10, size=(5, 5))
x = paddle.fluid.contrib.sparsity.get_mask_1d(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 4))
x = np.random.randn(5, 4)
x = paddle.fluid.contrib.sparsity.get_mask_1d(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_1d(x, 2, 4))
def test_check_mask_2d(self):
x = np.array([[1.0, 0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 1.0]])
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_2d(x, 3, 4))
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 5))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_2d(x, 3, 5))
self.assertTrue(paddle.fluid.contrib.sparsity.check_mask_2d(x, 3, 6))
self.assertFalse(paddle.fluid.contrib.sparsity.check_mask_2d(x, 4, 6))
def test_get_mask_2d_greedy(self):
for _ in range(10):
x = np.random.randint(10, size=(5, 5))
x = paddle.fluid.contrib.sparsity.get_mask_2d_greedy(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4))
x = np.random.randn(5, 4)
x = paddle.fluid.contrib.sparsity.get_mask_2d_greedy(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4))
def test_get_mask_2d_best(self):
for _ in range(10):
x = np.random.randint(10, size=(5, 5))
x = paddle.fluid.contrib.sparsity.get_mask_2d_best(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4))
x = np.random.randn(5, 4)
x = paddle.fluid.contrib.sparsity.get_mask_2d_best(x, 2, 4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_mask_2d(x, 2, 4))
def test_threadsafe_valid_2d_patterns(self):
def get_reference(m=4, n=2):
from itertools import permutations
patterns = np.zeros(m)
patterns[:n] = 1
patterns = list(set(permutations(patterns.tolist())))
patterns = patterns + patterns
patterns = np.asarray(list(set(permutations(patterns, m))))
valid = ((patterns.sum(axis=1) <= n).sum(axis=1) == m
).nonzero()[0].reshape(-1)
valid_patterns = np.empty((valid.shape[0], m, m))
valid_patterns[:] = patterns[valid[:]]
return valid_patterns
for _ in range(4):
computing_thread = threading.Thread(
target=paddle.fluid.contrib.sparsity.utils.
_compute_valid_2d_patterns,
args=(2, 4))
computing_thread.start()
time.sleep(3)
patterns_map = paddle.fluid.contrib.sparsity.utils._valid_2d_patterns
reference_patterns = get_reference()
reference_key = '4_2'
self.assertTrue(reference_key in patterns_map)
self.assertTrue(len(patterns_map) == 1)
self.assertTrue((reference_patterns == patterns_map[reference_key]).all(
))
def test_check_sparsity(self):
for _ in range(10):
x = np.random.randint(10, size=(5))
x_2d = x.reshape(1, x.shape[0])
self.__test_1D_2D_sparsity_checking_methods(x_2d)
x = np.random.randint(10, size=(5, 5))
x_2d = x
self.__test_1D_2D_sparsity_checking_methods(x_2d)
x = np.random.randint(10, size=(5, 5, 5))
x_2d = x.reshape(x.shape[0] * x.shape[1], x.shape[2])
self.__test_1D_2D_sparsity_checking_methods(x_2d)
x = np.random.randint(10, size=(5, 5, 5, 5))
x_2d = x.reshape(x.shape[0], x.shape[1] * x.shape[2] * x.shape[3])
self.__test_1D_2D_sparsity_checking_methods(x_2d)
def test_create_mask(self):
for _ in range(10):
x = np.random.randint(10, size=(5))
self.__test_1D_2D_sparse_mask_generation_methods(x)
x = np.random.randint(10, size=(5, 5))
self.__test_1D_2D_sparse_mask_generation_methods(x)
x = np.random.randint(10, size=(5, 5, 5))
self.__test_1D_2D_sparse_mask_generation_methods(x)
x = np.random.randint(10, size=(5, 5, 5, 5))
self.__test_1D_2D_sparse_mask_generation_methods(x)
def __test_1D_2D_sparsity_checking_methods(self, x_2d):
mask = paddle.fluid.contrib.sparsity.get_mask_1d(x_2d, 2, 4)
self.assertEqual(
paddle.fluid.contrib.sparsity.check_sparsity(
mask,
func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D,
n=2,
m=4),
paddle.fluid.contrib.sparsity.check_mask_1d(mask, 2, 4))
mask = paddle.fluid.contrib.sparsity.get_mask_2d_best(x_2d, 2, 4)
self.assertEqual(
paddle.fluid.contrib.sparsity.check_sparsity(
mask,
func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D,
n=2,
m=4),
paddle.fluid.contrib.sparsity.check_mask_2d(mask, 2, 4))
def __test_1D_2D_sparse_mask_generation_methods(self, x):
mask = paddle.fluid.contrib.sparsity.create_mask(
x,
func_name=paddle.fluid.contrib.sparsity.MaskAlgo.MASK_1D,
n=2,
m=4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_sparsity(
mask,
func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_1D,
n=2,
m=4))
mask = paddle.fluid.contrib.sparsity.create_mask(
x,
func_name=paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_GREEDY,
n=2,
m=4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_sparsity(
mask,
func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D,
n=2,
m=4))
mask = paddle.fluid.contrib.sparsity.create_mask(
x,
func_name=paddle.fluid.contrib.sparsity.MaskAlgo.MASK_2D_BEST,
n=2,
m=4)
self.assertTrue(
paddle.fluid.contrib.sparsity.check_sparsity(
mask,
func_name=paddle.fluid.contrib.sparsity.CheckMethod.CHECK_2D,
n=2,
m=4))
|
[
"[email protected]"
] | |
2cb15c7e19657fdd48dace6e7f42e043d7826183
|
264d31fb1af03024c4e0a6530aa059ac59a71365
|
/model_pruning/python/pruning.py
|
8f2d111b2de54ec2a597c0cf471315b37f34b5b5
|
[
"Apache-2.0"
] |
permissive
|
adarob/google-research
|
b5cf7d329635080f0f911213e916454f7598a762
|
b6ec492d289969dd1440553eb56f77b2474b90c7
|
refs/heads/master
| 2021-01-03T01:58:33.983703 | 2020-02-11T19:05:14 | 2020-02-11T19:15:42 | 239,868,678 | 0 | 1 |
Apache-2.0
| 2020-02-11T21:31:39 | 2020-02-11T21:31:38 | null |
UTF-8
|
Python
| false | false | 37,469 |
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to add support for magnitude-based model pruning.
# Adds variables and ops to the graph to enable
# elementwise masking of weights
apply_mask(weights)
# Returns a list containing the sparsity of each of the weight tensors
get_weight_sparsity()
# Returns a list of all the masked weight tensorflow variables
get_masked_weights()
# Returns a list of all the mask tensorflow variables
get_masks()
# Returns a list of all the thresholds
get_thresholds()
# Returns a list of all the weight tensors that have been masked
get_weights()
The Pruning class uses a tf.HParams object to set up the
parameters for model pruning. Here's a typical usage:
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning_hparams
p = pruning.Pruning(pruning_hparams)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Add the summaries
p.add_pruning_summaries()
# Run the op
session.run(mask_update_op)
# A Pruning object also accepts an externally defined sparsity variable:
sparsity = tf.Variable(0.5, name = "ConstantSparsity")
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow.compat.v1 as tf
from model_pruning.python import pruning_utils
from tensorflow.contrib import training as contrib_training
from tensorflow.python.ops import variables # pylint: disable=g-direct-tensorflow-import
MASK_COLLECTION = 'masks'
THRESHOLD_COLLECTION = 'thresholds'
MASKED_WEIGHT_COLLECTION = 'masked_weights'
WEIGHT_COLLECTION = 'kernel'
# The 'weights' part of the name is needed for the quantization library
# to recognize that the kernel should be quantized.
MASKED_WEIGHT_NAME = 'weights/masked_weight'
WEIGHT_GRADIENT_COLLECTION = 'gradient_weights'
OLD_WEIGHT_COLLECTION = 'old_weights'
OLD_OLD_WEIGHT_COLLECTION = 'old_old_weights'
def apply_mask(x, scope='', prune_option='weight'):
"""Apply mask to a given weight tensor.
Args:
x: Input weight tensor
scope: The current variable scope. Defaults to "".
prune_option: pruning option. Defaults to 'weight'. option =
'first_order_gradient' means using |weight| * |first order gradient| for
pruning. option = 'second_order_gradient' means using |weight| * |second
order gradient| for pruning.
Returns:
Tensor representing masked_weights
"""
mask = pruning_utils.weight_mask_variable(x, scope)
threshold = pruning_utils.weight_threshold_variable(x, scope)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
masked_weights = tf.multiply(mask, x, MASKED_WEIGHT_NAME)
if prune_option in ('first_order_gradient', 'second_order_gradient'):
# absolute value of gradients for gradient based pruning
gradient = pruning_utils.weight_gradient_variable(x, scope)
old_weight = pruning_utils.old_weight_variable(x, scope)
old_old_weight = pruning_utils.old_old_weight_variable(x, scope)
# Make sure the mask for a given variable are not added multiple times to the
# collection. This is particularly important when applying mask to RNN's
# weight variables
if mask not in tf.get_collection_ref(MASK_COLLECTION):
tf.add_to_collection(THRESHOLD_COLLECTION, threshold)
tf.add_to_collection(MASK_COLLECTION, mask)
tf.add_to_collection(MASKED_WEIGHT_COLLECTION, masked_weights)
tf.add_to_collection(WEIGHT_COLLECTION, x)
if prune_option in ('first_order_gradient', 'second_order_gradient'):
tf.add_to_collection(WEIGHT_GRADIENT_COLLECTION, gradient)
tf.add_to_collection(OLD_WEIGHT_COLLECTION, old_weight)
tf.add_to_collection(OLD_OLD_WEIGHT_COLLECTION, old_old_weight)
return masked_weights
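# Minimal usage sketch (hypothetical names): wrap a weight variable so that
# downstream ops consume the masked version.
#   w = tf.get_variable('w', shape=[256, 10])
#   masked_w = apply_mask(w, scope='layer1')
#   logits = tf.matmul(inputs, masked_w)  # `inputs` assumed defined elsewhere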
def get_masked_weights():
return tf.get_collection(MASKED_WEIGHT_COLLECTION)
def get_masks():
return tf.get_collection(MASK_COLLECTION)
def get_thresholds():
return tf.get_collection(THRESHOLD_COLLECTION)
def get_weights():
return tf.get_collection(WEIGHT_COLLECTION)
def get_gradients():
return tf.get_collection(WEIGHT_GRADIENT_COLLECTION)
def get_old_weights():
return tf.get_collection(OLD_WEIGHT_COLLECTION)
def get_old_old_weights():
return tf.get_collection(OLD_OLD_WEIGHT_COLLECTION)
def get_weight_sparsity():
"""Get sparsity of the weights.
Args: None
Returns:
A list containing the sparsity of each of the weight tensors
"""
masks = get_masks()
return [tf.nn.zero_fraction(mask) for mask in masks]
def get_pruning_hparams():
"""Get a tf.HParams object with the default values for the hyperparameters.
name: string
name of the pruning specification. Used for adding summaries and ops under
a common tensorflow name_scope
begin_pruning_step: integer
the global step at which to begin pruning
end_pruning_step: integer
the global step at which to terminate pruning. Defaults to -1 implying
that pruning continues till the training stops
weight_sparsity_map: list of strings
    comma separated list of {weight_variable_name:target sparsity} or
{regex:target sparsity} pairs.
For layers/weights not in this list, sparsity as specified by the
target_sparsity hyperparameter is used.
Eg. [conv1:0.9,conv2/kernel:0.8]
block_dims_map: list of strings
comma separated list of {weight variable name:block_height x block_width}
or {regex:block_height x block_width} pairs. For layers/weights not in
    this list, the block dims specified by the block_height and block_width
    hyperparameters are used. Eg. [dense1:4x4,dense2:1x16,dense3:1x1]
threshold_decay: float
the decay factor to use for exponential decay of the thresholds
pruning_frequency: integer
How often should the masks be updated? (in # of global_steps)
nbins: integer
number of bins to use for histogram computation
block_height: integer
number of rows in a block (defaults to 1), can be -1 in which
case it is set to the size of the corresponding weight tensor.
block_width: integer
number of cols in a block (defaults to 1), can be -1 in which
case it is set to the size of the corresponding weight tensor.
block_pooling_function: string
Whether to perform average (AVG) or max (MAX) pooling in the block
(default: AVG)
initial_sparsity: float
initial sparsity value
target_sparsity: float
target sparsity value
sparsity_function_begin_step: integer
    the global step at which the gradual sparsity function begins to
take effect
sparsity_function_end_step: integer
the global step used as the end point for the gradual sparsity function
sparsity_function_exponent: float
exponent = 1 is linearly varying sparsity between initial and final.
exponent > 1 varies more slowly towards the end than the beginning
use_tpu: False
Indicates whether to use TPU
gradient_decay_rate: float
when prune_option is gradient based pruning, decay factor for gradient
decay
prune_option: string
option = 'weight' means using |weight| for pruning.
option = 'first_order_gradient' means using |weight| * |first order
gradient| for pruning.
option = 'second_order_gradient' means using |weight| * |second order
gradient| for pruning.
second order gradient is approximated by |weight + old_old_weight -
2*old_weight|.
option = 'compression' means using compression.
alpha_decrement_value: only effective when prune_option is 'compression',
see graph_compression/compression_lib/compression_op.py. The following
arguments are all only effective when prune_option == 'compression', see
graph_compression/compression_lib/compression_op.py for details.
begin_compression_step: only effective when prune_option is 'compression',
see graph_compression/compression_op.py.
end_compresson_step: only effective when prune_option is 'compression',
see graph_compression/compression_op.py.
compression_frequency: only effective when prune_option is 'compression',
see graph_compression/compression_op.py.
compression_option: only effective when prune_option is 'compression',
see graph_compression/compression_op.py.
rank: only effective when prune_option is 'compression',
see graph_compression/compression_op.py.
update_option: only effective when prune_option is 'compression',
see graph_compression/compression_op.py.
  run_update_interval_check: only effective when prune_option is 'compression',
see graph_compression/compression_op.py.
pruning_fraction: only effective when prune_option is 'compression',
see graph_compression/compression_op.py.
We use the following sparsity function:
  num_steps = (sparsity_function_end_step -
               sparsity_function_begin_step) / pruning_frequency
  sparsity(step) = (initial_sparsity - target_sparsity) *
                   [1 - step/(num_steps - 1)]**exponent + target_sparsity
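  For example, with initial_sparsity=0.0, target_sparsity=0.5 and exponent=3,
  halfway through the schedule (where 1 - step/(num_steps - 1) = 0.5) the
  sparsity is (0.0 - 0.5) * 0.5**3 + 0.5 = 0.4375.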
Args: None
Returns:
tf.HParams object initialized to default values
"""
return contrib_training.HParams(
name='model_pruning',
begin_pruning_step=0,
end_pruning_step=-1,
weight_sparsity_map=[''],
block_dims_map=[''],
threshold_decay=0.0,
pruning_frequency=10,
nbins=256,
block_height=1,
block_width=1,
block_pooling_function='AVG',
initial_sparsity=0.0,
target_sparsity=0.5,
sparsity_function_begin_step=0,
sparsity_function_end_step=100,
sparsity_function_exponent=3.0,
use_tpu=False,
gradient_decay_rate=0.99,
prune_option='weight',
alpha_decrement_value=0.01,
begin_compression_step=0,
end_compresson_step=-1,
compression_frequency=10,
compression_option=0,
rank=7,
update_option=0,
run_update_interval_check=1,
pruning_fraction=0.4)
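# Sketch: defaults can be overridden with a comma-separated spec string, e.g.
#   hparams = get_pruning_hparams().parse('begin_pruning_step=1000,target_sparsity=0.9')
#   p = Pruning(hparams)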
class Pruning(object):
def __init__(self, spec=None, global_step=None, sparsity=None):
"""Set up the specification for model pruning.
If a spec is provided, the sparsity is set up based on the sparsity_function
in the spec. The effect of sparsity_function is overridden if the sparsity
variable is passed to the constructor. This enables setting up arbitrary
    sparsity profiles externally and passing them to this pruning function.
Args:
spec: Pruning spec, a tf.HParams object
global_step: A tensorflow variable that is used while setting up the
sparsity function
sparsity: A tensorflow scalar variable storing the sparsity
"""
# Pruning specification
self._spec = spec if spec else get_pruning_hparams()
tf.logging.vlog(0, 'Pruning spec...')
self.print_hparams()
self.matrix_compression_spec = self._spec
# Sanity check for pruning hparams
self._validate_spec()
# A tensorflow variable that tracks the sparsity function.
# If not provided as input, the graph must already contain the global_step
# variable before calling this constructor.
self._global_step = self._setup_global_step(global_step)
# Stores the tensorflow sparsity variable.
# Built using self._setup_sparsity() or provided externally
self._sparsity = (
sparsity if sparsity is not None else self._setup_sparsity())
# List of tensorflow assignments ops for new masks and thresholds
self._assign_ops = []
self._assign_gradient_ops = []
self._assign_old_weight_ops = []
self._assign_old_old_weight_ops = []
# Tensorflow variable keeping track of the last global step when the masks
# and gradients were updated
self._last_update_step = self._setup_last_update_step()
self._last_gradient_update_step = self._setup_last_gradient_update_step()
# Block dimensions
self._block_dims = [self._spec.block_height, self._spec.block_width]
# Block pooling function
self._block_pooling_function = self._spec.block_pooling_function
# Mapping of layer/weight names and block dims
self._block_dims_map = self._get_block_dims_map()
# Mapping of weight names and target sparsity
self._weight_sparsity_map = self._get_weight_sparsity_map()
def _validate_spec(self):
spec = self._spec
if spec.begin_pruning_step < 0:
raise ValueError('Illegal value for begin_pruning_step')
if spec.begin_pruning_step >= spec.end_pruning_step:
if spec.end_pruning_step != -1:
raise ValueError(
            'Pruning must begin before it can end. begin_step=%d, end_step=%d. '
            'Set end_pruning_step to -1 if pruning is required till training '
'stops' % (spec.begin_pruning_step, spec.end_pruning_step))
if spec.sparsity_function_begin_step < 0:
raise ValueError('Illegal value for sparsity_function_begin_step')
if spec.sparsity_function_begin_step >= spec.sparsity_function_end_step:
raise ValueError('Sparsity function requires begin_step < end_step')
if not 0.0 <= spec.threshold_decay < 1.0:
raise ValueError('threshold_decay must be in range [0,1)')
if not 0.0 <= spec.initial_sparsity < 1.0:
raise ValueError('initial_sparsity must be in range [0,1)')
if not 0.0 <= spec.target_sparsity < 1.0:
raise ValueError('target_sparsity must be in range [0,1)')
if spec.prune_option not in ('weight', 'first_order_gradient',
'second_order_gradient'):
raise ValueError('prune option specified is not supported')
def _setup_global_step(self, global_step):
graph_global_step = global_step
if graph_global_step is None:
graph_global_step = tf.train.get_global_step()
if not graph_global_step:
raise ValueError(
'Could not get the global step. Either pass it explicitly, or '
'ensure that the library is called within a TF graph.')
return tf.cast(graph_global_step, tf.int32)
def _setup_sparsity(self):
begin_step = self._spec.sparsity_function_begin_step
end_step = self._spec.sparsity_function_end_step
initial_sparsity = self._spec.initial_sparsity
target_sparsity = self._spec.target_sparsity
exponent = self._spec.sparsity_function_exponent
with tf.name_scope(self._spec.name):
p = tf.minimum(
1.0,
tf.maximum(
0.0,
tf.div(
tf.cast(self._global_step - begin_step, tf.float32),
end_step - begin_step)))
sparsity = tf.add(
tf.multiply(initial_sparsity - target_sparsity,
tf.pow(1 - p, exponent)),
target_sparsity,
name='sparsity')
return sparsity
def _setup_last_update_step(self):
with tf.variable_scope(
self._spec.name, use_resource=self._spec.use_tpu) as scope:
try:
last_update_step = tf.get_variable(
'last_mask_update_step', [],
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.int32)
except ValueError:
scope.reuse_variables()
last_update_step = tf.get_variable(
'last_mask_update_step', dtype=tf.int32)
return last_update_step
def _get_block_dims_map(self):
"""Returns the map of layer name: block dims."""
block_dims_map = {}
val_list = self._spec.block_dims_map
filtered_val_list = [l for l in val_list if l]
for val in filtered_val_list:
weight_name, block_dims_str = val.split(':')
block_dims_str = block_dims_str.split('x')
if len(block_dims_str) != 2:
raise ValueError('Expected 2 values for block dim for %s, got %s' %
(weight_name, block_dims_str))
block_dims = [int(block_dims_str[0]), int(block_dims_str[1])]
block_dims_map[re.compile(weight_name)] = block_dims
return block_dims_map
def _get_block_dims(self, weight_name):
"""Returns the block dims for the given layer/weight name."""
block_dims_list = [
block_dims for regexp, block_dims in self._block_dims_map.items()
if regexp.search(weight_name)
]
if not block_dims_list:
return self._block_dims
if len(block_dims_list) > 1:
raise ValueError('Multiple matches in block_dims_map for weight %s' %
weight_name)
return block_dims_list[0]
def _setup_last_gradient_update_step(self):
with tf.variable_scope(
self._spec.name, use_resource=self._spec.use_tpu) as scope:
try:
last_gradient_update_step = tf.get_variable(
'last_gradient_update_step', [],
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.int32)
except ValueError:
scope.reuse_variables()
last_gradient_update_step = tf.get_variable(
'last_gradient_update_step', dtype=tf.int32)
return last_gradient_update_step
def _get_weight_sparsity_map(self):
"""Returns the map of weight_name:sparsity parsed from the hparams."""
weight_sparsity_map = {}
val_list = self._spec.weight_sparsity_map
filtered_val_list = [l for l in val_list if l]
for val in filtered_val_list:
weight_name, sparsity = val.split(':')
if float(sparsity) >= 1.0:
raise ValueError('Weight sparsity can not exceed 1.0')
weight_sparsity_map[re.compile(weight_name)] = float(sparsity)
return weight_sparsity_map
def _get_sparsity(self, weight_name):
"""Returns target sparsity for the given layer/weight name."""
target_sparsity = [
sparsity for regexp, sparsity in self._weight_sparsity_map.items()
if regexp.search(weight_name)
]
if not target_sparsity:
return self._sparsity
if len(target_sparsity) > 1:
raise ValueError('Multiple matches in weight_sparsity_map for weight %s' %
weight_name)
# TODO(suyoggupta): This will work when initial_sparsity = 0. Generalize
# to handle other cases as well.
return tf.multiply(self._sparsity,
tf.div(target_sparsity[0], self._spec.target_sparsity))
def _update_mask(self, weights, threshold, gradients): # pylint: disable=unused-argument
"""Updates the mask for a given weight tensor.
    This function keeps the k largest-magnitude weights (with random uniform
    tie-breaking) so that a 'desired_sparsity' fraction of the weights is
    masked out; thresholding is effectively unused and a constant 0 threshold
    is returned.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
threshold and return the exponential moving average using the current
value of threshold
gradients: The gradient tensor that is used for salience calculation.
Returns:
      new_threshold: A constant 0 threshold (this top-k implementation does
        not compute a data-dependent threshold)
new_mask: A numpy array of the same size and shape as weights containing
0 or 1 to indicate which of the values in weights falls below
the threshold
Raises:
ValueError: if sparsity is not defined
"""
if self._sparsity is None:
raise ValueError('Sparsity variable undefined')
sparsity = self._get_sparsity(weights.op.name)
with tf.name_scope(weights.op.name + '_pruning_ops'):
tf.logging.info('Applying option %s pruning', self._spec.prune_option)
if self._spec.prune_option == 'weight':
abs_weights = tf.abs(weights)
elif self._spec.prune_option in ('first_order_gradient',
'second_order_gradient'):
if gradients is None:
raise ValueError('gradient tensor cannot be None.')
# gradient variable stores absolute value already
abs_weights = tf.multiply(tf.abs(weights), gradients)
else:
raise ValueError('undefined option')
k = tf.cast(
tf.round(tf.cast(tf.size(abs_weights), tf.float32) * (1 - sparsity)),
tf.int32)
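      # e.g. with 100 weights and sparsity == 0.6, k == round(100 * 0.4) == 40
      # weights survive and the remaining 60 are masked out below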
# Generate a random shuffling of the weights s.t. the tie-breaker on
# weight magnitude is random uniform.
shuffling = tf.random_shuffle(
tf.range(tf.size(abs_weights)))
shuffling = tf.reshape(shuffling, [-1, 1])
# Flatten the weights and scatter the values randomly.
abs_weights = tf.reshape(abs_weights, [-1])
abs_weights = tf.scatter_nd(
shuffling,
abs_weights,
tf.shape(abs_weights))
# Sort the entire array
_, indices = tf.nn.top_k(abs_weights, k=tf.size(abs_weights))
# `k` is how many non-zero weights we're going to have. Create a new
# mask where the first `k` elements are set to one and all others are
# set to zero.
mask_staging = tf.range(tf.size(abs_weights))
mask_staging = tf.cast(
tf.less(mask_staging, k),
tf.float32)
# Scatter the mask back into the proper positions for the weight matrix.
indices = tf.reshape(indices, [-1, 1])
new_mask = tf.scatter_nd(
indices,
mask_staging,
tf.shape(mask_staging))
# Un-shuffle the newly created mask.
new_mask = tf.reshape(
tf.gather_nd(
new_mask,
shuffling),
tf.shape(weights))
return tf.constant(0, tf.float32), new_mask
def _maybe_update_block_mask(self, weights, threshold, gradients=None):
"""Performs block-granular masking of the weights.
Block pruning occurs only if the block_height or block_width is > 1 and
if the weight tensor, when squeezed, has ndims = 2. Otherwise, elementwise
pruning occurs.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
threshold and return the exponential moving average using the current
value of threshold
      gradients: The gradient tensor that is used for salience calculation.
Returns:
new_threshold: The new value of the threshold based on weights, and
sparsity at the current global_step
new_mask: A numpy array of the same size and shape as weights containing
0 or 1 to indicate which of the values in weights falls below
the threshold
Raises:
ValueError: if block pooling function is not AVG or MAX
"""
block_dims = self._get_block_dims(weights.op.name)
squeezed_weights = tf.squeeze(weights)
if squeezed_weights.get_shape().ndims != 2 or block_dims == [1, 1]:
return self._update_mask(weights, threshold, gradients)
if (self._spec.prune_option in ('first_order_gradient',
'second_order_gradient') and
gradients is None):
raise ValueError(
'Gradient based pruning implementation for block sparsity is not supported.'
)
for i in range(2):
if block_dims[i] == -1:
block_dims[i] = squeezed_weights.get_shape()[i]
if self._block_pooling_function not in ['AVG', 'MAX']:
raise ValueError('Unknown pooling function for block sparsity: %s' %
self._block_pooling_function)
with tf.name_scope(weights.op.name + '_pruning_ops'):
abs_weights = tf.abs(squeezed_weights)
if gradients is not None:
abs_gradients = tf.abs(tf.squeeze(gradients))
pool_window = block_dims
pool_fn = pruning_utils.factorized_pool
squeeze_axis = None
if not self._spec.use_tpu:
pool_fn = tf.nn.pool
abs_weights = tf.reshape(
abs_weights,
[1, abs_weights.get_shape()[0],
abs_weights.get_shape()[1], 1])
if gradients is not None:
# Reshape gradients to be a rank 4 tensor of shape [1, .., .., 1].
abs_gradients = tf.reshape(
abs_gradients,
[1, gradients.get_shape()[0], gradients.get_shape()[1], 1])
squeeze_axis = [0, 3]
pooled_weights = pool_fn(
abs_weights,
window_shape=pool_window,
pooling_type=self._block_pooling_function,
strides=pool_window,
padding='SAME',
name=weights.op.name + '_pooled')
if gradients is not None:
pooled_gradients = pool_fn(
abs_gradients,
window_shape=pool_window,
pooling_type=self._block_pooling_function,
strides=pool_window,
padding='SAME',
name=gradients.op.name + '_pooled')
else:
pooled_gradients = None
if pooled_weights.get_shape().ndims != 2:
pooled_weights = tf.squeeze(pooled_weights, axis=squeeze_axis)
if gradients is not None and pooled_gradients.get_shape().ndims != 2:
pooled_gradients = tf.squeeze(pooled_gradients, axis=squeeze_axis)
smoothed_threshold, new_mask = self._update_mask(pooled_weights,
threshold,
pooled_gradients)
updated_mask = pruning_utils.expand_tensor(new_mask, block_dims)
sliced_mask = tf.slice(
updated_mask, [0, 0],
[squeezed_weights.get_shape()[0],
squeezed_weights.get_shape()[1]])
return smoothed_threshold, tf.reshape(sliced_mask, tf.shape(weights))
def _get_assign_old_weight_ops(self):
if self._assign_old_weight_ops:
raise ValueError(
          'Assign op list not empty. _get_assign_old_weight_ops() called twice?'
)
weights = get_weights()
old_weights = get_old_weights()
if len(weights) != len(old_weights):
raise ValueError(
'Number of weights %s and number of old_weights %s mismatch' %
(len(weights), len(old_weights)))
for index, weight in enumerate(weights):
old_weight = old_weights[index]
self._assign_old_weight_ops.append(
pruning_utils.variable_assign(old_weight, weight))
def _get_assign_old_old_weight_ops(self):
if self._assign_old_old_weight_ops:
raise ValueError(
          'Assign op list not empty. _get_assign_old_old_weight_ops() called twice?'
)
old_old_weights = get_old_old_weights()
old_weights = get_old_weights()
if len(old_old_weights) != len(old_weights):
raise ValueError(
'Number of old_old_weights %s and number of old_weights %s mismatch' %
(len(old_old_weights), len(old_weights)))
for index, old_old_weight in enumerate(old_old_weights):
old_weight = old_weights[index]
self._assign_old_old_weight_ops.append(
pruning_utils.variable_assign(old_old_weight, old_weight))
def _get_assign_gradient_ops(self):
# Make sure the assignment ops have not already been added to the list
if self._assign_gradient_ops:
raise ValueError(
          'Assign op list not empty. _get_assign_gradient_ops() called twice?')
weights = get_weights()
old_weights = get_old_weights()
old_old_weights = get_old_old_weights()
gradients = get_gradients()
if len(weights) != len(old_weights):
raise ValueError(
'Number of weights %s and number of old_weights %s mismatch' %
(len(weights), len(old_weights)))
if len(weights) != len(gradients):
raise ValueError(
'Number of weights %s and number of gradients %s mismatch' %
(len(weights), len(gradients)))
for index, _ in enumerate(weights):
weight = weights[index]
old_weight = old_weights[index]
old_old_weight = old_old_weights[index]
gradient = gradients[index]
if weight.shape.as_list() != old_weight.shape.as_list():
raise ValueError('weight tensor has different shape from old_weight')
if weight.shape.as_list() != gradient.shape.as_list():
raise ValueError('weight tensor has different shape from gradient')
if weight.shape.as_list() != old_old_weight.shape.as_list():
        raise ValueError('weight tensor has different shape from old_old_weight')
is_partitioned = isinstance(weight, variables.PartitionedVariable)
if is_partitioned:
weight = weight.as_tensor()
old_weight = old_weight.as_tensor()
old_old_weight = old_old_weight.as_tensor()
decay = self._spec.gradient_decay_rate
if self._spec.prune_option == 'first_order_gradient':
tf.logging.info('Applying first order gradient pruning')
normalized_weight_delta = tf.nn.l2_normalize(
tf.abs(weight - old_weight))
elif self._spec.prune_option == 'second_order_gradient':
tf.logging.info('Applying second order gradient pruning')
normalized_weight_delta = tf.nn.l2_normalize(
tf.abs(weight + old_old_weight - 2 * old_weight))
else:
raise ValueError('Unknown prune option. Should not execute this code.')
new_gradient = decay * gradient + (1 - decay) * normalized_weight_delta
self._assign_gradient_ops.append(
pruning_utils.variable_assign(gradient, new_gradient))
def _get_mask_assign_ops(self):
# Make sure the assignment ops have not already been added to the list
if self._assign_ops:
raise ValueError(
'Assign op list not empty. _get_mask_assign_ops() called twice?')
masks = get_masks()
weights = get_weights()
thresholds = get_thresholds()
gradients = get_gradients()
if len(masks) != len(thresholds):
raise ValueError(
'Number of masks %s and number of thresholds %s mismatch' %
(len(masks), len(thresholds)))
for index, mask in enumerate(masks):
threshold = thresholds[index]
weight = weights[index]
if self._spec.prune_option in ('first_order_gradient',
'second_order_gradient'):
gradient = gradients[index]
else:
gradient = None
is_partitioned = isinstance(weight, variables.PartitionedVariable)
if is_partitioned:
weight = weight.as_tensor()
new_threshold, new_mask = self._maybe_update_block_mask(
weight, threshold, gradient)
self._assign_ops.append(
pruning_utils.variable_assign(threshold, new_threshold))
self._assign_ops.append(
pruning_utils.partitioned_variable_assign(mask, new_mask)
if is_partitioned else pruning_utils.variable_assign(mask, new_mask))
def old_weight_update_op(self):
with tf.name_scope(self._spec.name):
if self._spec.prune_option not in ('first_order_gradient',
'second_order_gradient'):
return tf.no_op('gradient_update_no_op')
if not self._assign_old_weight_ops:
self._get_assign_old_weight_ops()
with tf.control_dependencies(self._assign_old_weight_ops):
tf.logging.info('Updating old weights.')
return tf.no_op('old_weight_update')
def old_old_weight_update_op(self):
with tf.name_scope(self._spec.name):
if self._spec.prune_option != 'second_order_gradient':
return tf.no_op('gradient_update_no_op')
if not self._assign_old_old_weight_ops:
self._get_assign_old_old_weight_ops()
with tf.control_dependencies(self._assign_old_old_weight_ops):
tf.logging.info('Updating old old weights.')
return tf.no_op('old_old_weight_update')
def gradient_update_op(self):
with tf.name_scope(self._spec.name):
if self._spec.prune_option not in ('first_order_gradient',
'second_order_gradient'):
return tf.no_op('gradient_update_no_op')
if not self._assign_gradient_ops:
self._get_assign_gradient_ops()
with tf.control_dependencies([
tf.assign(
self._last_gradient_update_step,
self._global_step,
name='last_gradient_update_step_assign')
]):
with tf.control_dependencies(self._assign_gradient_ops):
tf.logging.info('Updating gradients.')
return tf.no_op('gradient_update')
def conditional_gradient_update_op(self):
def maybe_update_gradients():
with tf.name_scope(self._spec.name):
is_step_within_pruning_range = tf.logical_and(
tf.greater_equal(self._global_step, self._spec.begin_pruning_step),
# If end_pruning_step is negative, keep pruning forever!
tf.logical_or(
tf.less_equal(self._global_step, self._spec.end_pruning_step),
tf.less(self._spec.end_pruning_step, 0)))
return is_step_within_pruning_range
def gradient_update_op():
return self.gradient_update_op()
def no_update_op():
return tf.no_op()
return tf.cond(maybe_update_gradients(), gradient_update_op, no_update_op)
def mask_update_op(self):
with tf.name_scope(self._spec.name):
if not self._assign_ops:
self._get_mask_assign_ops()
grad_update_ops = self.gradient_update_op()
old_weight_update_ops = self.old_weight_update_op()
old_old_weight_update_ops = self.old_old_weight_update_op()
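    # The nested control_dependencies below fix the order of each mask
    # update: refresh gradients first, then the two weight snapshots, and
    # only then the threshold/mask assignments, so every new mask is computed
    # from a consistent view of the weights.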
with tf.control_dependencies([
tf.assign(
self._last_update_step,
self._global_step,
name='last_mask_update_step_assign')
]):
with tf.control_dependencies([grad_update_ops]):
with tf.control_dependencies([old_old_weight_update_ops]):
with tf.control_dependencies([old_weight_update_ops]):
with tf.control_dependencies(self._assign_ops):
tf.logging.info('Updating masks.')
return tf.no_op('mask_update')
def conditional_mask_update_op(self):
def maybe_update_masks():
with tf.name_scope(self._spec.name):
is_step_within_pruning_range = tf.logical_and(
tf.greater_equal(self._global_step, self._spec.begin_pruning_step),
# If end_pruning_step is negative, keep pruning forever!
tf.logical_or(
tf.less_equal(self._global_step, self._spec.end_pruning_step),
tf.less(self._spec.end_pruning_step, 0)))
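        # A step counts as a pruning step only when at least
        # `pruning_frequency` global steps have elapsed since the last mask
        # update.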
is_pruning_step = tf.less_equal(
tf.add(self._last_update_step, self._spec.pruning_frequency),
self._global_step)
return tf.logical_and(is_step_within_pruning_range, is_pruning_step)
def mask_update_op():
return self.mask_update_op()
def no_update_op():
return tf.no_op()
return tf.cond(maybe_update_masks(), mask_update_op, no_update_op)
def add_pruning_summaries(self):
"""Adds summaries of weight sparsities and thresholds."""
with tf.name_scope(self._spec.name + '_summaries'):
tf.summary.scalar('sparsity', self._sparsity)
tf.summary.scalar('last_mask_update_step', self._last_update_step)
tf.summary.scalar('last_gradient_update_step',
self._last_gradient_update_step)
masks = get_masks()
thresholds = get_thresholds()
gradients = get_gradients()
for mask, threshold, gradient in zip(masks, thresholds, gradients):
tf.summary.scalar(mask.op.name + '/sparsity', tf.nn.zero_fraction(mask))
tf.summary.scalar(threshold.op.name + '/threshold', threshold)
tf.summary.scalar(gradient.op.name + '/gradient', tf.norm(gradient))
tf.summary.scalar(gradient.op.name + '/gradient-sparsity',
tf.nn.zero_fraction(gradient))
tf.summary.histogram(gradient.op.name + '/abs.gradient', gradient)
def apply_mask(self, x, scope=''):
return apply_mask(x, scope, self._spec.prune_option)
def print_hparams(self):
tf.logging.vlog(0, self._spec.to_json())
|
[
"[email protected]"
] | |
46ae0f16489d49d88a5abf519a6add085e10a25d
|
36959b56e506dbbe2d3c381cdccfe16965c14d24
|
/Django/alms/leave/models.py
|
020e39c03be6ec8eadabbc804613a01e862b778e
|
[] |
no_license
|
Sathishkumar-M/Django
|
e2935fe0c69acb4cb39be2bc0504fd3d5619d002
|
e54038ef70295274639b6207efe8e7e3939cbe36
|
refs/heads/master
| 2020-03-21T20:22:48.684770 | 2018-06-28T10:42:51 | 2018-06-28T10:42:51 | 139,003,364 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,293 |
py
|
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
import datetime
# Create your models here.
class LeaveRules(models.Model):
leave_type = models.CharField(max_length=256,blank=False)
leave_rules = models.TextField(blank=True,default='')
created_date = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.leave_type
# @property
# def owner(self):
# return self.user
class LeaveApply(models.Model):
user = models.ForeignKey(User,related_name='leaves')
leave_type = models.ForeignKey(LeaveRules,related_name='leaves',null=True,blank=True)
start_date = models.DateField(default=datetime.date.today, blank=True)
end_date = models.DateField(default='', blank=True)
no_of_days = models.DecimalField(max_digits=2,decimal_places=0,null=True)
notes = models.TextField(blank=True,default='')
tag_to = models.CharField(max_length=256,blank=False)
status = models.CharField(max_length=256,default='Awaiting')
status_by = models.CharField(max_length=256,default='')
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True,null=True)
def __str__(self):
return self.user.username
|
[
"[email protected]"
] | |
c95ef593fefb765b7876720245730eeb8f614d53
|
89155ebee895cbd04e4eb7a9d079a820d90ffd7e
|
/viewset_modelviewset_application/app/migrations/0004_auto_20210122_0531.py
|
7d4d27a8ea437a57f34a859481d6f08daaf624ca
|
[] |
no_license
|
mahmudgithub/Rest-api-playground
|
822c0671b534fc057461703711ef980d9d31ce56
|
a452a329d60c9104afdeadde13f7493741e4914a
|
refs/heads/master
| 2023-03-31T17:23:13.605754 | 2021-04-11T14:10:31 | 2021-04-11T14:10:31 | 331,842,045 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 368 |
py
|
# Generated by Django 3.1.4 on 2021-01-22 13:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0003_one_city'),
]
operations = [
migrations.AlterField(
model_name='one',
name='city',
field=models.CharField(max_length=100),
),
]
|
[
"[email protected]"
] | |
28cce25feb029f8c2e58daa7e26b3c7c31e89446
|
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
|
/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544841/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_165/run_cfg.py
|
4b626943fbef786d0d255b19f5fe1f78243bbfcc
|
[] |
no_license
|
rmanzoni/HTT
|
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
|
a03b227073b2d4d8a2abe95367c014694588bf98
|
refs/heads/master
| 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,499 |
py
|
import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544841/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1739.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_174.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1740.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1741.root',
'/store/cmst3/user/cmgtools/CMG/DY1JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_1742.root')
)
|
[
"[email protected]"
] | |
849e4f1ba87609eb8511f0ba7b7db5d349078f29
|
8b44c7f5f9c2264fd5bfe91ea324fdbd53813413
|
/algorithms/DQN/train_DQN.py
|
0d2c4954ac20e0ad3bed569e5108a376d106f5d8
|
[
"MIT"
] |
permissive
|
syd951186545/reinforce_py
|
ee33a63d6c8c94c3318877460a49470ef7788036
|
46769da50aea65346cd3a300b55306d25f1f2683
|
refs/heads/master
| 2020-05-14T13:54:32.067888 | 2018-06-08T14:43:45 | 2018-06-08T14:43:45 | 181,823,264 | 1 | 0 |
MIT
| 2019-04-17T05:31:04 | 2019-04-17T05:31:00 |
Python
|
UTF-8
|
Python
| false | false | 5,083 |
py
|
from __future__ import print_function
from __future__ import division
import os
import argparse
import gym
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from agent import DQN
def main(args):
set_random_seed(args.seed)
env = gym.make('CartPole-v0')
agent = DQN(env, args)
agent.construct_model(args.gpu)
    # Load pretrained models or initialize a new one.
saver = tf.train.Saver(max_to_keep=1)
if args.model_path is not None:
saver.restore(agent.sess, args.model_path)
ep_base = int(args.model_path.split('_')[-1])
best_mean_rewards = float(args.model_path.split('/')[-1].split('_')[0])
else:
agent.sess.run(tf.global_variables_initializer())
ep_base = 0
best_mean_rewards = None
rewards_history, steps_history = [], []
train_steps = 0
# Training
for ep in range(args.max_ep):
state = env.reset()
ep_rewards = 0
for step in range(env.spec.timestep_limit):
# pick action
action = agent.sample_action(state, policy='egreedy')
            # Execute the action.
next_state, reward, done, debug = env.step(action)
train_steps += 1
ep_rewards += reward
# modified reward to speed up learning
reward = 0.1 if not done else -1
# Learn and Update net parameters
agent.learn(state, action, reward, next_state, done)
state = next_state
if done:
break
steps_history.append(train_steps)
if not rewards_history:
rewards_history.append(ep_rewards)
else:
rewards_history.append(
rewards_history[-1] * 0.9 + ep_rewards * 0.1)
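        # The line above keeps an exponential moving average of episode
        # rewards (alpha = 0.1), which smooths the curve plotted at the end
        # of main().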
# Decay epsilon
if agent.epsilon > args.final_epsilon:
agent.epsilon -= (args.init_epsilon - args.final_epsilon) / args.max_ep
# Evaluate during training
if ep % args.log_every == args.log_every-1:
total_reward = 0
for i in range(args.test_ep):
state = env.reset()
for j in range(env.spec.timestep_limit):
action = agent.sample_action(state, policy='greedy')
state, reward, done, _ = env.step(action)
total_reward += reward
if done:
break
current_mean_rewards = total_reward / args.test_ep
print('Episode: %d Average Reward: %.2f' %
(ep + 1, current_mean_rewards))
            # Save the model if the current one outperforms the old one.
if best_mean_rewards is None or (current_mean_rewards >= best_mean_rewards):
best_mean_rewards = current_mean_rewards
if not os.path.isdir(args.save_path):
os.makedirs(args.save_path)
save_name = args.save_path + str(round(best_mean_rewards, 2)) \
+ '_' + str(ep_base + ep + 1)
saver.save(agent.sess, save_name)
print('Model saved %s' % save_name)
# plot training rewards
plt.plot(steps_history, rewards_history)
plt.xlabel('steps')
plt.ylabel('running avg rewards')
plt.show()
def args_parse():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path', default=None,
help='Whether to use a saved model. (*None|model path)')
parser.add_argument(
'--save_path', default='./models/',
help='Path to save a model during training.')
    parser.add_argument(
        '--double_q', type=lambda s: str(s).lower() not in ('0', 'false'),
        default=True, help='enable or disable double dqn')
    parser.add_argument(
        '--log_every', type=int, default=500,
        help='Log and save model every x episodes')
    parser.add_argument(
        '--gpu', type=int, default=-1,
        help='run on the specified gpu, -1 indicates using cpu')
    parser.add_argument(
        '--seed', type=int, default=31, help='random seed')
parser.add_argument(
'--max_ep', type=int, default=2000, help='Number of training episodes')
parser.add_argument(
'--test_ep', type=int, default=50, help='Number of test episodes')
parser.add_argument(
'--init_epsilon', type=float, default=0.75, help='initial epsilon')
parser.add_argument(
'--final_epsilon', type=float, default=0.2, help='final epsilon')
parser.add_argument(
'--buffer_size', type=int, default=50000, help='Size of memory buffer')
parser.add_argument(
'--lr', type=float, default=1e-4, help='Learning rate')
parser.add_argument(
'--batch_size', type=int, default=128, help='Size of training batch')
parser.add_argument(
'--gamma', type=float, default=0.99, help='Discounted factor')
parser.add_argument(
'--target_network_update', type=int, default=1000,
help='update frequency of target network.')
return parser.parse_args()
def set_random_seed(seed):
np.random.seed(seed)
tf.set_random_seed(seed)
if __name__ == '__main__':
main(args_parse())
|
[
"[email protected]"
] | |
26dd0cdf3db785f86670a75ed04fad31e9b09252
|
8202512dc4fef386dc927fa60c22596e149fb6f0
|
/venv/bin/gunicorn
|
0092995224d9586509475559b4ecc8a8608582ec
|
[] |
no_license
|
dimoka777/tv
|
6e8d6e754383a897588ddd203e0670650c22245e
|
8159cb76c695802efb4a30ecc2cc5a71960a23c0
|
refs/heads/master
| 2020-06-19T05:18:59.333318 | 2019-07-13T15:19:50 | 2019-07-13T15:19:50 | 196,577,844 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 242 |
#!/home/dimoka/Django/tv/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
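    # Strip the setuptools wrapper suffix (-script.py / .exe) from argv[0]
    # so the reported program name is clean before handing off to gunicorn.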
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"[email protected]"
] | ||
e278ef7ff6ed2f42a6e7361f4d93862a94eec361
|
6c96601d64a02c4050410c5e45efa6e1bd8253f6
|
/wikked/web.py
|
c300577283de746aa268b31b14b5208e90308176
|
[
"Apache-2.0"
] |
permissive
|
ludovicchabant/Wikked
|
7c0627f513f5ccbe052484dcb1ae89336e552cf4
|
02ec3c0361ac90b0366e7a90f8928a54d40616b5
|
refs/heads/master
| 2022-12-03T08:12:08.621371 | 2019-03-09T07:41:40 | 2019-03-09T07:41:40 | 15,740,703 | 17 | 0 |
Apache-2.0
| 2022-12-03T01:25:58 | 2014-01-08T16:17:33 |
Python
|
UTF-8
|
Python
| false | false | 7,577 |
py
|
import os
import os.path
import logging
import urllib.parse
from werkzeug import SharedDataMiddleware
from flask import Flask, abort, g
from wikked.wiki import Wiki, WikiParameters
# Create the main app.
static_folder = os.path.join(os.path.dirname(__file__), 'static')
app = Flask(
'wikked',
static_folder=static_folder,
static_url_path='/static')
app.config.from_object('wikked.settings')
app.config.from_envvar('WIKKED_SETTINGS', silent=True)
# Setup some config defaults.
app.config.setdefault('SQL_DEBUG', False)
app.config.setdefault('SQL_COMMIT_ON_TEARDOWN', False)
app.config.setdefault('WIKI_ROOT', None)
app.config.setdefault('WIKI_UPDATE_ON_START', True)
app.config.setdefault('WIKI_AUTO_RELOAD', False)
app.config.setdefault('WIKI_ASYNC_UPDATE', False)
app.config.setdefault('WIKI_SERVE_FILES', False)
app.config.setdefault('WIKI_BROKER_URL',
'sqla+sqlite:///%(root)s/.wiki/broker.db')
app.config.setdefault('WIKI_NO_FLASK_LOGGER', False)
app.config.setdefault('WIKI_STYLESHEET', None)
app.config.setdefault('PROFILE', False)
app.config.setdefault('PROFILE_DIR', None)
app.config.setdefault('INFLUXDB_HOST', None)
app.config.setdefault('INFLUXDB_PORT', 8086)
app.config.setdefault('INFLUXDB_USERNAME', 'root')
app.config.setdefault('INFLUXDB_PASSWORD', 'root')
app.config.setdefault('INFLUXDB_DATABASE', 'database')
if app.config['WIKI_NO_FLASK_LOGGER']:
app.logger.handlers = []
# Find the wiki root, and further configure the app if there's a
# config file in there.
wiki_root = app.config['WIKI_ROOT']
if wiki_root is None:
from wikked.utils import find_wiki_root
wiki_root = find_wiki_root()
if wiki_root is None:
raise Exception("Can't find the wiki root to use.")
config_path = os.path.join(wiki_root, '.wiki', 'app.cfg')
if os.path.isfile(config_path):
app.config.from_pyfile(config_path)
# Make the app serve static content and wiki assets when configured to.
app.config['WIKI_ROOT'] = wiki_root
app.config['WIKI_FILES_DIR'] = os.path.join(wiki_root, '_files')
if app.config['WIKI_SERVE_FILES']:
app.wsgi_app = SharedDataMiddleware(
app.wsgi_app,
{'/files': app.config['WIKI_FILES_DIR']})
# Add a special route for the `.well-known` directory.
app.wsgi_app = SharedDataMiddleware(
app.wsgi_app,
{'/.well-known': os.path.join(wiki_root, '.well-known')})
# Profiling
if app.config['PROFILE']:
profile_dir = app.config['PROFILE_DIR']
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, profile_dir=profile_dir)
# Customize logging.
if app.config['DEBUG']:
lg = logging.getLogger('wikked')
lg.setLevel(logging.DEBUG)
if app.config['SQL_DEBUG']:
lg = logging.getLogger('sqlalchemy')
lg.setLevel(logging.DEBUG)
app.logger.debug("Creating Flask application...")
# This lets components further modify the wiki that's created for
# each request.
app.wikked_post_init = []
# When requested, set the wiki as a request global.
def get_wiki():
wiki = getattr(g, '_wiki', None)
if wiki is None:
wiki = Wiki(app.wiki_params)
for i in app.wikked_post_init:
i(wiki)
wiki.start()
        g._wiki = wiki  # store on the attribute that the getattr above checks
return wiki
# Set the default wiki parameters.
app.wiki_params = app.config.get('WIKI_FACTORY_PARAMETERS', None)
if app.wiki_params is None:
app.wiki_params = WikiParameters(wiki_root)
# Just uncache pages when the user has edited one.
def uncaching_wiki_updater(wiki, url):
app.logger.debug("Uncaching all pages because %s was edited." % url)
wiki.db.uncachePages(except_url=url, only_required=True)
app.wiki_params.wiki_updater = uncaching_wiki_updater
# Login extension.
def user_loader(username):
wiki = get_wiki()
return wiki.auth.getUser(username)
# Setup the Jinja environment.
def get_read_url(url):
return '/read/' + url.lstrip('/')
def get_edit_url(url):
return '/edit/' + url.lstrip('/')
def get_rev_url(url, rev):
return '/rev/%s?%s' % (url.lstrip('/'),
urllib.parse.urlencode({'rev': rev}))
def get_diff_url(url, rev1=None, rev2=None):
args = {}
if rev1 is not None:
args['rev1'] = rev1
if rev2 is not None:
args['rev2'] = rev2
if len(args) > 0:
return '/diff/%s?%s' % (url.lstrip('/'),
urllib.parse.urlencode(args))
return '/diff/%s' % url.lstrip('/')
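# For illustration: get_diff_url('/main', rev1=3, rev2=5) would yield
# '/diff/main?rev1=3&rev2=5'.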
app.jinja_env.globals.update({
'get_read_url': get_read_url,
'get_edit_url': get_edit_url,
'get_rev_url': get_rev_url,
'get_diff_url': get_diff_url
})
from flask_login import LoginManager # NOQA
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.user_loader(user_loader)
login_manager.unauthorized_handler(lambda: abort(401))
# Bcrypt extension.
from wikked.bcryptfallback import Bcrypt # NOQA
app.bcrypt = Bcrypt(app)
# Import the views.
import wikked.commonroutes # NOQA
import wikked.api.admin # NOQA
import wikked.api.edit # NOQA
import wikked.api.history # NOQA
import wikked.api.read # NOQA
import wikked.api.special # NOQA
import wikked.api.user # NOQA
import wikked.views.admin # NOQA
import wikked.views.edit # NOQA
import wikked.views.error # NOQA
import wikked.views.history # NOQA
import wikked.views.read # NOQA
import wikked.views.special # NOQA
import wikked.views.user # NOQA
# Async wiki update.
if app.config['WIKI_ASYNC_UPDATE']:
app.logger.debug("Will use Celery tasks to update the wiki...")
from wikked.tasks import celery_app, update_wiki
# Configure Celery.
app.config['WIKI_BROKER_URL'] = app.config['WIKI_BROKER_URL'] % (
{'root': wiki_root})
celery_app.conf.update(app.config)
app.logger.debug("Using Celery broker: %s" % app.config['WIKI_BROKER_URL'])
# Make the wiki use the background update task.
def async_updater(wiki):
app.logger.debug("Running update task on Celery.")
update_wiki.delay(wiki.root)
app.wiki_params.wiki_updater = async_updater
# InfluxDB metrics.
if app.config['INFLUXDB_HOST']:
try:
import influxdb
except ImportError:
raise Exception("Please install the `influxdb` package if you need "
"analytics for your Wikked app.")
host = app.config['INFLUXDB_HOST']
port = app.config['INFLUXDB_PORT']
username = app.config['INFLUXDB_USERNAME']
password = app.config['INFLUXDB_PASSWORD']
database = app.config['INFLUXDB_DATABASE']
metrics_db = influxdb.InfluxDBClient(host, port, username, password,
database)
app.logger.info("Opening InfluxDB %s on %s:%s as %s." % (
database, host, port, username))
import time
from flask import request, request_started, request_tearing_down
def on_request_started(sender, **extra):
g.metrics_start_time = time.clock()
def on_request_tearing_down(sender, **extra):
duration = time.clock() - g.metrics_start_time
data = [
{
"name": "requests",
"columns": ["request_path", "compute_time"],
"points": [
[str(request.path), duration]
]
}
]
metrics_db.write_points(data)
request_started.connect(on_request_started, app)
request_tearing_down.connect(on_request_tearing_down, app)
|
[
"[email protected]"
] | |
656e3ca9877621760222278ef164bbd08c56b93f
|
a870e1db82fbf8f57b9d5fb4ebdc5f205df1a063
|
/web/settings_local.py
|
5c9f3553952426e2191ef7bf823ec68d679890c2
|
[] |
no_license
|
mireq/django-frontend-template
|
36692d705dd84513e1389379219bdb619c9d1f8a
|
9f3bd4e3e9374deb77586374a6ed62f6e6ccb316
|
refs/heads/master
| 2023-08-16T19:23:09.761808 | 2023-08-13T04:12:38 | 2023-08-13T04:12:38 | 50,784,205 | 0 | 1 | null | 2023-08-13T04:12:40 | 2016-01-31T16:44:42 |
Python
|
UTF-8
|
Python
| false | false | 167 |
py
|
# pylint: disable=wildcard-import,unused-wildcard-import
from .settings import *
COMPRESS_POSTCSS_BINARY = 'postcss' # path to postcss binary
ALLOWED_HOSTS = ['*']
|
[
"[email protected]"
] | |
246f7edf8f9ea743330f9bfaca9cbe94bdb63b24
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-securitycenter/samples/generated_samples/securitycenter_v1p1beta1_generated_security_center_create_notification_config_sync.py
|
55d2d4595a454c01a04b080a669795c6d806194c
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 |
Apache-2.0
| 2023-09-14T21:45:18 | 2014-01-28T15:51:47 |
Python
|
UTF-8
|
Python
| false | false | 2,015 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateNotificationConfig
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-securitycenter
# [START securitycenter_v1p1beta1_generated_SecurityCenter_CreateNotificationConfig_sync]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import securitycenter_v1p1beta1
def sample_create_notification_config():
# Create a client
client = securitycenter_v1p1beta1.SecurityCenterClient()
# Initialize request argument(s)
request = securitycenter_v1p1beta1.CreateNotificationConfigRequest(
parent="parent_value",
config_id="config_id_value",
)
# Make the request
response = client.create_notification_config(request=request)
# Handle the response
print(response)
# [END securitycenter_v1p1beta1_generated_SecurityCenter_CreateNotificationConfig_sync]
|
[
"[email protected]"
] | |
1375c8329253029ffd84e0e0fcc00fa5367fdf5d
|
480e33f95eec2e471c563d4c0661784c92396368
|
/GeneratorInterface/PomwigInterface/python/POMWIG_SingleDiffractiveMinusWmunu_10TeV_cff.py
|
b86f148ed4e8c4c5f1a7b01056222a8797b3ed6e
|
[
"Apache-2.0"
] |
permissive
|
cms-nanoAOD/cmssw
|
4d836e5b76ae5075c232de5e062d286e2026e8bd
|
4eccb8a758b605875003124dd55ea58552b86af1
|
refs/heads/master-cmsswmaster
| 2021-01-23T21:19:52.295420 | 2020-08-27T08:01:20 | 2020-08-27T08:01:20 | 102,867,729 | 7 | 14 |
Apache-2.0
| 2022-05-23T07:58:09 | 2017-09-08T14:03:57 |
C++
|
UTF-8
|
Python
| false | false | 1,323 |
py
|
import FWCore.ParameterSet.Config as cms
herwig6Parameters = cms.PSet(
comEnergy = cms.double(10000.0),
useJimmy = cms.bool(False),
doMPInteraction = cms.bool(False),
herwigHepMCVerbosity = cms.untracked.bool(False),
herwigVerbosity = cms.untracked.int32(1),
printCards = cms.untracked.bool(True),
maxEventsToPrint = cms.untracked.int32(2),
crossSection = cms.untracked.double(-1.0),
filterEfficiency = cms.untracked.double(1.0),
)
source = cms.Source("EmptySource")
generator = cms.EDFilter("PomwigGeneratorFilter",
herwig6Parameters,
HerwigParameters = cms.PSet(
parameterSets = cms.vstring('SDInclusiveWmunu'),
SDInclusiveWmunu = cms.vstring('NSTRU = 14 ! H1 Pomeron Fit B',
'Q2WWMN = 1E-6 ! Minimum |t|',
'Q2WWMX = 4.0 ! Maximum |t|',
'YWWMIN = 1E-6 ! Minimum xi',
'YWWMAX = 0.2 ! Maximum xi',
'IPROC = 11452 ! Process PomP -> W -> munu',
'MODPDF(1) = 10150 ! Set MODPDF CTEQ61',
'MODPDF(2) = -1 ! Set MODPDF')
),
diffTopology = cms.int32(2),
survivalProbability = cms.double(0.05),
h1fit = cms.int32(2),
doPDGConvert = cms.bool(False)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"[email protected]"
] | |
f27b89cf6bda2b7d42a99756ab87727eb21adda5
|
76c40ae54b4985cdb0b4692106795ded8115da42
|
/train_cross_domain.py
|
78924d5ad3718d7470998843a2f82cac444aec26
|
[] |
no_license
|
leosampaio/keras-generative
|
1b23ea6b18af8c09bdf08c30dc6a0428c06eb385
|
ffafbae19d24c0ce7e812f610c4754a343400a9e
|
refs/heads/master
| 2021-05-09T09:04:43.636608 | 2018-11-21T17:26:28 | 2018-11-21T17:26:28 | 119,418,618 | 1 | 1 | null | 2018-01-29T17:48:49 | 2018-01-29T17:48:49 | null |
UTF-8
|
Python
| false | false | 4,264 |
py
|
import os
import sys
import math
import argparse
from keras import backend as K
import numpy as np
from sklearn.preprocessing import LabelBinarizer
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import matplotlib
matplotlib.use('Agg')
from models import models
from datasets import load_dataset
def main():
# Parsing arguments
parser = argparse.ArgumentParser(description='Training GANs or VAEs')
parser.add_argument('--model', type=str, required=True)
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--epoch', type=int, default=200)
parser.add_argument('--batchsize', type=int, default=50)
parser.add_argument('--output', default='output')
parser.add_argument('--zdims', type=int, default=256)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--resume', type=str, default=None)
parser.add_argument('--testmode', action='store_true')
parser.add_argument('--conditional', action='store_true')
parser.add_argument('--aux-classifier', action='store_true')
parser.add_argument('--label-smoothing', default=0.0, type=float)
parser.add_argument('--input-noise', default=0.0, type=float)
parser.add_argument('--run-id', '-r', default=1, type=int)
parser.add_argument('--checkpoint-every', default=1, type=int)
parser.add_argument('--notify-every', default=1, type=int)
parser.add_argument('--triplet-margin', default=1., type=float)
parser.add_argument('--triplet-weight', default=1., type=float)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--n-layers-to-share', default=0, type=int)
parser.add_argument('--submodels', nargs=2,
help="Submodels used to build the bigger one",
required=True)
parser.add_argument('--resume-submodels', nargs=2,
help="Submodels pretrained weights")
parser.add_argument('--dis-loss-control', default=1., type=float)
args = parser.parse_args()
# select gpu and limit resources if applicable
if 'tensorflow' == K.backend():
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(args.gpu)
set_session(tf.Session(config=config))
    # make the output directory if it does not exist
if not os.path.isdir(args.output):
os.mkdir(args.output)
# load datasets
dataset = load_dataset(args.dataset)
# Construct model
if args.model not in models:
raise Exception('Unknown model:', args.model)
model = models[args.model](
input_shape=dataset.shape[1:],
z_dims=args.zdims,
output=args.output,
label_smoothing=args.label_smoothing,
input_noise=args.input_noise,
run_id=args.run_id,
test_mode=args.testmode,
checkpoint_every=args.checkpoint_every,
notify_every=args.notify_every,
aux_classifier=args.aux_classifier,
is_conditional=args.conditional,
conditional_dims=len(dataset.attr_names),
triplet_margin=args.triplet_margin,
triplet_weight=args.triplet_weight,
lr=args.lr,
submodels=args.submodels,
dis_loss_control=args.dis_loss_control,
submodels_weights=args.resume_submodels,
permutation_matrix_shape=(len(dataset), dataset.mirror_len)
)
if args.resume or args.resume_submodels:
model.load_model(args.resume)
# generate random samples to evaluate generated results over time
# use the same samples for all trainings - useful when resuming training
np.random.seed(14)
samples = np.random.normal(size=(100, args.zdims)).astype(np.float32)
conditionals_for_samples = np.array(
[LabelBinarizer().fit_transform(
range(0, len(dataset.attr_names)))
[i % len(dataset.attr_names)] for i in range(100)])
np.random.seed()
model.main_loop(dataset, samples,
samples_conditionals=conditionals_for_samples,
epochs=args.epoch,
batchsize=args.batchsize)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
04b1d77dab59666059f65cc7262e758d96a4570f
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/PYTHON_PRAC/python-mega-algo/conversions/hexadecimal_to_decimal.py
|
beb1c2c3ded67ee5fca164a33d6462fa7c495ebb
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 |
MIT
| 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null |
UTF-8
|
Python
| false | false | 1,509 |
py
|
hex_table = {hex(i)[2:]: i for i in range(16)}  # Use [2:] to strip off the leading '0x'
def hex_to_decimal(hex_string: str) -> int:
"""
Convert a hexadecimal value to its decimal equivalent
#https://www.programiz.com/python-programming/methods/built-in/hex
>>> hex_to_decimal("a")
10
>>> hex_to_decimal("12f")
303
>>> hex_to_decimal(" 12f ")
303
>>> hex_to_decimal("FfFf")
65535
>>> hex_to_decimal("-Ff")
-255
>>> hex_to_decimal("F-f")
Traceback (most recent call last):
...
ValueError: Non-hexadecimal value was passed to the function
>>> hex_to_decimal("")
Traceback (most recent call last):
...
ValueError: Empty string was passed to the function
>>> hex_to_decimal("12m")
Traceback (most recent call last):
...
ValueError: Non-hexadecimal value was passed to the function
"""
hex_string = hex_string.strip().lower()
if not hex_string:
raise ValueError("Empty string was passed to the function")
is_negative = hex_string[0] == "-"
if is_negative:
hex_string = hex_string[1:]
if not all(char in hex_table for char in hex_string):
raise ValueError("Non-hexadecimal value was passed to the function")
decimal_number = 0
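    # Horner-style accumulation: shift the running value one hex place
    # (multiply by 16) and add the next digit, e.g. "12f" ->
    # ((1 * 16 + 2) * 16) + 15 = 303.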
for char in hex_string:
decimal_number = 16 * decimal_number + hex_table[char]
return -decimal_number if is_negative else decimal_number
if __name__ == "__main__":
from doctest import testmod
testmod()
|
[
"[email protected]"
] | |
f69c7a91af50182b1bf902e53170333407c5b9b3
|
d145eb9a03200855caddbf53da53ee092386f93f
|
/python/etcd/test4.py
|
40b212bbfbc7814103a5054b2c71eebf234395df
|
[] |
no_license
|
oberstet/scratchbox
|
0ecb44df40664526c4eab6dae69837735e8cf7fe
|
87ac59b98782c70888df24b633f890e3305e7c8c
|
refs/heads/master
| 2022-11-08T03:36:36.108804 | 2022-10-22T09:52:20 | 2022-10-22T09:52:20 | 3,698,059 | 26 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,843 |
py
|
from twisted.internet.task import react
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
import etcd
import txaio
@inlineCallbacks
def main(reactor):
# a Twisted etcd client
client = etcd.Client(reactor, u'http://localhost:2379')
# get etcd status
status = yield client.status()
print(status)
# get value for a key
try:
value = yield client.get(b'/cf/foo')
print('value={}'.format(value))
except IndexError:
print('no such key =(')
# set a value for some keys
for i in range(3):
rev = yield client.set('/cf/foo0{}'.format(i).encode(), b'woa;)')
print('value set, revision={}'.format(rev))
# delete key
key = u'/cf/foo02'.encode()
rev = yield client.delete(key)
print(rev)
# iterate over key range (maybe an async iter in the future?)
pairs = yield client.get(b'/cf/foo01', b'/cf/foo05')
for key, value in pairs.items():
print('key={}: {}'.format(key, value))
# iterate over keys with given prefix
pairs = yield client.get(b'/cf/foo0', prefix=True)
for key, value in pairs.items():
print('key={}: {}'.format(key, value))
# watch keys for change events
prefixes = [b'/cf/', b'/foo/']
# our callback that will be invoked for every change event
def on_watch(key, value):
print('watch callback fired for key {}: {}'.format(key, value))
# start watching on given key prefixes
d = client.watch(prefixes, on_watch)
# sleep for n seconds ..
delay = 10
print('watching {} for {} seconds ..'.format(prefixes, delay))
yield sleep(delay)
# .. and stop watching
yield d.cancel()
# submit transaction
# create lease
if __name__ == '__main__':
txaio.start_logging(level='info')
react(main)
|
[
"[email protected]"
] | |
769f7cb542a82ef942a57683d24a36f3ba41cbc1
|
0be644ba0208b1f9f93018f74580fd5047618a3b
|
/src/industries/power_plant.py
|
aa022bf8d0e6cd3ab277a822db436cfdd9905568
|
[] |
no_license
|
ebla71/XIS
|
1ba48f019c063132b561d8c2e469634349b96aae
|
9f96b8f210b91da29ce0d3b34e896f629fc7dede
|
refs/heads/main
| 2023-08-22T11:45:22.723296 | 2021-10-05T09:11:11 | 2021-10-05T09:11:11 | 355,645,289 | 0 | 0 | null | 2021-09-25T21:51:01 | 2021-04-07T18:29:02 |
Python
|
UTF-8
|
Python
| false | false | 4,892 |
py
|
from industry import IndustryTertiary, TileLocationChecks
industry = IndustryTertiary(id='power_plant',
accept_cargo_types=['COAL', 'PETR', 'PEAT'],
prod_cargo_types=[],
prob_in_game='3',
prob_random='5',
prod_multiplier='[0, 0]',
map_colour='168',
life_type='IND_LIFE_TYPE_BLACK_HOLE',
prospect_chance='0.75',
name='string(STR_IND_POWER_PLANT)',
nearby_station_name='string(STR_STATION_POWERHUNGRY)',
fund_cost_multiplier='15',
intro_year=1900)
industry.economy_variations['FIRS'].enabled = True
industry.economy_variations['BASIC_ARCTIC'].enabled = True
industry.economy_variations['BASIC_ARCTIC'].accept_cargo_types = ['PEAT']
industry.add_tile(id='power_plant_tile_1',
animation_length=7,
animation_looping=True,
animation_speed=3,
custom_animation_control={'macro': 'random_first_frame',
'animation_triggers': 'bitmask(ANIM_TRIGGER_INDTILE_CONSTRUCTION_STATE)'},
location_checks=TileLocationChecks(require_effectively_flat=True,
disallow_industry_adjacent=True))
sprite_ground = industry.add_sprite(
sprite_number='GROUNDTILE_MUD_TRACKS'
)
sprite_ground_overlay = industry.add_sprite(
sprite_number='GROUNDTILE_MUD_TRACKS'
)
sprite_1 = industry.add_sprite(
sprite_number='2047'
)
sprite_2 = industry.add_sprite(
sprite_number='2050'
)
sprite_3 = industry.add_sprite(
sprite_number='2053'
)
sprite_4 = industry.add_sprite(
sprite_number='2054'
)
sprite_smoke_1 = industry.add_smoke_sprite(
smoke_type='white_smoke_big',
xoffset=3,
yoffset=0,
zoffset=36
)
industry.add_spritelayout(
id='power_plant_spritelayout_cooling_tower',
ground_sprite=sprite_ground,
ground_overlay=sprite_ground_overlay,
building_sprites=[sprite_1],
)
industry.add_spritelayout(
id='power_plant_spritelayout_large_building',
ground_sprite=sprite_ground,
ground_overlay=sprite_ground_overlay,
building_sprites=[sprite_2],
smoke_sprites=[sprite_smoke_1],
)
industry.add_spritelayout(
id='power_plant_spritelayout_small_building',
ground_sprite=sprite_ground,
ground_overlay=sprite_ground_overlay,
building_sprites=[sprite_3],
)
industry.add_spritelayout(
id='power_plant_spritelayout_substation',
ground_sprite=sprite_ground,
ground_overlay=sprite_ground_overlay,
building_sprites=[sprite_4],
)
industry.add_industry_layout(
id='power_plant_industry_layout_1',
layout=[(0, 0, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
(0, 1, 'power_plant_tile_1', 'power_plant_spritelayout_small_building'),
(1, 0, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
(1, 1, 'power_plant_tile_1', 'power_plant_spritelayout_large_building'),
(2, 0, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
(2, 1, 'power_plant_tile_1', 'power_plant_spritelayout_large_building'),
(3, 0, 'power_plant_tile_1', 'power_plant_spritelayout_substation'),
(3, 1, 'power_plant_tile_1', 'power_plant_spritelayout_substation')]
)
industry.add_industry_layout(
id='power_plant_industry_layout_2',
layout=[(0, 1, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
(0, 2, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
(1, 0, 'power_plant_tile_1', 'power_plant_spritelayout_large_building'),
(1, 1, 'power_plant_tile_1', 'power_plant_spritelayout_large_building'),
(1, 2, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
(2, 0, 'power_plant_tile_1', 'power_plant_spritelayout_small_building'),
(2, 1, 'power_plant_tile_1', 'power_plant_spritelayout_substation'),
(2, 2, 'power_plant_tile_1', 'power_plant_spritelayout_small_building')]
)
industry.add_industry_layout(
id='power_plant_industry_layout_3',
layout=[(0, 0, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
(0, 1, 'power_plant_tile_1', 'power_plant_spritelayout_cooling_tower'),
(1, 0, 'power_plant_tile_1', 'power_plant_spritelayout_small_building'),
(1, 1, 'power_plant_tile_1', 'power_plant_spritelayout_large_building'),
(2, 0, 'power_plant_tile_1', 'power_plant_spritelayout_substation'),
(2, 1, 'power_plant_tile_1', 'power_plant_spritelayout_small_building')]
)
|
[
"[email protected]"
] | |
13d0d57dd8b47b5a6e7bcc2d381ee1431205e156
|
84e13b07d2c1c2ee9bc670bbc78a677358f4713d
|
/0x07-python-test_driven_development/4-print_square.py
|
6541eb37b96a7ee8c04013627e37407eb14cc943
|
[] |
no_license
|
thegermanblob/holbertonschool-higher_level_programming
|
b3ad5da5e120df1bced24313af50e2399f43a75c
|
f1b91a6cc1b9c3dd51dbcf83e61f0a084253c0be
|
refs/heads/main
| 2023-09-05T09:00:56.464406 | 2021-11-22T14:58:37 | 2021-11-22T14:58:37 | 361,759,191 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 461 |
py
|
#!/usr/bin/python3
""" Module that contains square """
def print_square(size):
""" Function that prints a square and validates size """
if type(size) is not int:
raise TypeError("size must be an integer")
if size < 0:
raise ValueError("size must be >= 0")
if size == 0:
print("")
else:
for x in range(0, size):
for y in range(0, size):
print("#", end="")
print("")
|
[
"[email protected]"
] | |
0264140fed967a1644148682d47d81f3fd7b0d4b
|
89a015d681c109801be843492dbbc76349fc8d24
|
/setup.py
|
4c5c6668172d3ad63eafbd7fcb7660ae259bb009
|
[
"MIT"
] |
permissive
|
chmouel/tekton-neat
|
2d4eba1116f98aed1334bb9768514200ce6c71fd
|
8714a0e673a7eb4c5670f4acbb61701719423cdd
|
refs/heads/main
| 2023-01-06T03:46:37.275733 | 2020-11-06T10:43:09 | 2020-11-06T10:43:09 | 310,533,512 | 4 | 2 |
MIT
| 2020-11-06T10:37:46 | 2020-11-06T08:15:15 |
Python
|
UTF-8
|
Python
| false | false | 1,515 |
py
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = []
setup_requirements = ['pyyaml']
test_requirements = [
'pytest>=3',
]
setup(
author="Chmouel Boudjnah",
author_email='[email protected]',
python_requires='>=3.5',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Export your tekton templates neatly",
entry_points={
'console_scripts': [
'tekton-neat=tekton_neat.cli:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='tekton-neat',
name='tekton-neat',
packages=find_packages(include=['tekton_neat', 'tekton_neat.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/chmouel/tekton-neat',
version='0.3.0',
zip_safe=False,
)
|
[
"[email protected]"
] | |
695d6603fe971ba90553ceab15ef9984279b0050
|
56ce2972848c1cb293bd7b5347ad377092ac2c78
|
/pythonx/cm_core.py
|
efa3686399c5deab80d53c35cc8b569df5fa6241
|
[
"MIT"
] |
permissive
|
Ron89/nvim-completion-manager
|
a01c0be520d195fb9406af7f50cc7ef20f261665
|
7b40275c2a26ed966a74613e93dbe72eab500115
|
refs/heads/master
| 2021-01-22T05:50:24.808823 | 2017-02-12T07:30:56 | 2017-02-12T07:30:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 24,393 |
py
|
# -*- coding: utf-8 -*-
# For debugging
# NVIM_PYTHON_LOG_FILE=nvim.log NVIM_PYTHON_LOG_LEVEL=INFO nvim
import os
import sys
import re
import logging
import copy
import importlib
import threading
from threading import Thread, RLock
import urllib
import json
from neovim import attach, setup_logging
from http.server import BaseHTTPRequestHandler, HTTPServer
from cm import cm
logger = logging.getLogger(__name__)
# use a trick to only register the source without loading the entire
# module
class CmSkipLoading(Exception):
pass
class CoreHandler:
def __init__(self,nvim):
self._nvim = nvim
        # {'{source_name}': {'startcol': ..., 'matches': [...]}}
self._matches = {}
self._sources = {}
self._last_matches = []
        # should be True to support displaying the menu directly without cm_refresh
self._has_popped_up = True
self._subscope_detectors = {}
scoper_paths = self._nvim.eval("globpath(&rtp,'pythonx/cm/scopers/*.py')").split("\n")
# auto find scopers
for path in scoper_paths:
if not path:
continue
try:
modulename = os.path.splitext(os.path.basename(path))[0]
modulename = "cm.scopers.%s" % modulename
m = importlib.import_module(modulename)
scoper = m.Scoper()
for scope in scoper.scopes:
if scope not in self._subscope_detectors:
self._subscope_detectors[scope] = []
self._subscope_detectors[scope].append(scoper)
logger.info('scoper <%s> imported for %s', modulename, scope)
except Exception as ex:
logger.exception('importing scoper <%s> failed: %s', modulename, ex)
# auto find sources
sources_paths = self._nvim.eval("globpath(&rtp,'pythonx/cm/sources/*.py')").split("\n")
for path in sources_paths:
modulename = os.path.splitext(os.path.basename(path))[0]
modulename = "cm.sources.%s" % modulename
            # use a trick to only register the source without loading the entire
# module
def register_source(name,abbreviation,priority,scopes=None,cm_refresh_patterns=None,events=[],detach=0):
# " jedi
# " refresh 1 for call signatures
# " detach 0, jedi enters infinite loops sometime, don't know why.
# call cm#register_source({
# \ 'name' : 'cm-jedi',
# \ 'priority': 9,
# \ 'abbreviation': 'Py',
# \ 'scopes': ['python'],
# \ 'refresh': 1,
# \ 'channels': [
# \ {
# \ 'type': 'python3',
# \ 'path': 'autoload/cm/sources/cm_jedi.py',
# \ 'events': ['InsertLeave'],
# \ 'detach': 0,
# \ }
# \ ],
# \ })
channel = dict(type='python3',
path= modulename,
detach=detach,
events=events)
source = {}
source['channels'] = [channel]
source['name'] = name
source['priority'] = priority
source['abbreviation'] = abbreviation
if cm_refresh_patterns:
source['cm_refresh_patterns'] = cm_refresh_patterns
if scopes:
source['scopes'] = scopes
logger.info('registering source: %s',source)
nvim.call('cm#register_source',source)
                # use a trick to only register the source without loading the entire
# module
raise CmSkipLoading()
cm.register_source = register_source
try:
# register_source
m = importlib.import_module(modulename)
except CmSkipLoading:
# This is not an error
logger.info('source <%s> registered', modulename)
except Exception as ex:
logger.exception("register_source for %s failed", modulename)
logger.info('_subscope_detectors: %s', self._subscope_detectors)
self._file_server = FileServer()
self._file_server.start(self._nvim.eval('v:servername'))
self._matcher = cm.smart_case_prefix_matcher
self._sorter = cm.alnum_sorter
self._ctx = None
def cm_complete(self,srcs,name,ctx,startcol,matches,refresh=0,*args):
# adjust for subscope
if ctx['lnum']==1:
startcol += ctx.get('scope_col',1)-1
# if cm.context_outdated(self._ctx,ctx):
# logger.info('ignore outdated context from [%s]', name)
# return
self._sources = srcs
try:
            # process the matches early to eliminate unnecessary complete function calls
result = self.process_matches(name,ctx,startcol,matches)
if (not result) and (not self._matches.get(name,{}).get('last_matches',[])):
# not popping up, ignore this request
logger.info('Not popping up, not refreshing for cm_complete by %s, startcol %s', name, startcol)
return
finally:
# storing matches
if name not in self._matches:
self._matches[name] = {}
if len(matches)==0:
del self._matches[name]
else:
self._matches[name]['startcol'] = startcol
self._matches[name]['refresh'] = refresh
self._matches[name]['matches'] = matches
# wait for cm_complete_timeout, reduce flashes
if self._has_popped_up:
logger.info("update popup for [%s]",name)
# the ctx in parameter maybe a subctx for completion source, use
# nvim.call to get the root context
self._refresh_completions(self._nvim.call('cm#context'))
else:
logger.info("delay popup for [%s]",name)
def cm_insert_enter(self):
self._matches = {}
def cm_complete_timeout(self,srcs,ctx,*args):
if not self._has_popped_up:
self._refresh_completions(ctx)
self._has_popped_up = True
# The completion core itself
def cm_refresh(self,srcs,root_ctx,*args):
# update file server
self._ctx = root_ctx
self._file_server.set_current_ctx(root_ctx)
# initial scope
root_ctx['scope'] = root_ctx['filetype']
self._sources = srcs
self._has_popped_up = False
# simple complete done
if root_ctx['typed'] == '':
self._matches = {}
elif re.match(r'[^0-9a-zA-Z_]',root_ctx['typed'][-1]):
self._matches = {}
root_ctx['src_uri'] = self._file_server.get_src_uri(root_ctx)
ctx_lists = [root_ctx,]
# scoping
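        # Breadth-first expansion: each sub-scope a detector finds (e.g. a
        # fenced code block inside markdown) is appended to ctx_lists and
        # scanned in turn, so nested scopes are handled to any depth.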
i = 0
while i<len(ctx_lists):
ctx = ctx_lists[i]
scope = ctx['scope']
if scope in self._subscope_detectors:
for detector in self._subscope_detectors[scope]:
try:
sub_ctx = detector.sub_context(ctx, self._file_server.get_src(ctx))
if sub_ctx:
# adjust offset to global based
# and add the new context
sub_ctx['scope_offset'] += ctx.get('scope_offset',0)
sub_ctx['scope_lnum'] += ctx.get('scope_lnum',1)-1
if int(sub_ctx['lnum']) == 1:
sub_ctx['typed'] = sub_ctx['typed'][sub_ctx['scope_col']-1:]
sub_ctx['scope_col'] += ctx.get('scope_col',1)-1
logger.info('adjusting scope_col')
sub_ctx['src_uri'] = self._file_server.get_src_uri(sub_ctx)
ctx_lists.append(sub_ctx)
logger.info('new sub context: %s', sub_ctx)
except Exception as ex:
logger.exception("exception on scope processing: %s", ex)
i += 1
# do notify_sources_to_refresh
refreshes_calls = []
refreshes_channels = []
# get the sources that need to be notified
for ctx in ctx_lists:
for name in srcs:
info = srcs[name]
if not info.get('enable',True):
# ignore disabled source
continue
try:
if not self._check_scope(ctx,info):
logger.info('_check_scope ignore <%s> for context scope <%s>', name, ctx['scope'])
continue
if (name in self._matches) and not self._matches[name]['refresh']:
# no need to refresh
logger.info('cached for <%s>, no need to refresh', name)
continue
if not self._check_refresh_patterns(ctx['typed'],info):
continue
if 'cm_refresh' in info:
# check patterns when necessary
refreshes_calls.append(dict(name=name,context=ctx))
# start channels on demand here
if info.get('channels',None):
channel = info['channels'][0]
if 'id' not in channel:
if channel.get('has_terminated',0)==0:
logger.info('starting channels for %s',name)
# has not been started yet, start it now
info = self._nvim.call('cm#_start_channels',name)
for channel in info.get('channels',[]):
if 'id' in channel:
refreshes_channels.append(dict(name=name,id=channel['id'],context=ctx))
except Exception as inst:
logger.exception('cm_refresh process exception: %s', inst)
continue
if not refreshes_calls and not refreshes_channels:
logger.info('not notifying any channels, _refresh_completions now')
self._refresh_completions(root_ctx)
self._has_popped_up = True
else:
logger.info('notify_sources_to_refresh calls cnt [%s], channels cnt [%s]',len(refreshes_calls),len(refreshes_channels))
logger.debug('cm#_notify_sources_to_refresh [%s] [%s] [%s]', refreshes_calls, refreshes_channels, root_ctx)
self._nvim.call('cm#_notify_sources_to_refresh', refreshes_calls, refreshes_channels, root_ctx)
# check patterns for dict, if non dict, return True
def _check_refresh_patterns(self,typed,opt):
if type(opt)!=type({}):
return True
patterns = opt.get('cm_refresh_patterns',None)
if not patterns:
return True
for pattern in patterns:
if re.search(pattern,typed):
return True
return False
# almost the same as `s:check_scope` in `autoload/cm.vim`
def _check_scope(self,ctx,info):
scopes = info.get('scopes',['*'])
cur_scope = ctx.get('scope',ctx['filetype'])
for scope in scopes:
# only match filetype for `*` scope, to prevent multiple notification
if scope=='*' and cur_scope==ctx['filetype']:
return True
if scope==cur_scope:
return True
return False
def _refresh_completions(self,ctx):
matches = []
# sort by priority
names = sorted(self._matches.keys(),key=lambda x: self._sources[x]['priority'], reverse=True)
if len(names)==0:
# empty
logger.info('_refresh_completions names: %s, startcol: %s, matches: %s', names, ctx['col'], [])
self._complete(ctx, ctx['col'], [])
return
col = ctx['col']
startcol = col
base = ctx['typed'][startcol-1:]
        # basic processing per source
for name in names:
try:
self._matches[name]['last_matches'] = []
source_startcol = self._matches[name]['startcol']
if source_startcol>col or source_startcol==0:
self._matches[name]['last_matches'] = []
logger.error('ignoring invalid startcol for %s %s', name, self._matches[name]['startcol'])
continue
source_matches = self._matches[name]['matches']
source_matches = self.process_matches(name,ctx,source_startcol,source_matches)
self._matches[name]['last_matches'] = source_matches
if not source_matches:
continue
# min non empty source_matches's source_startcol as startcol
if source_startcol < startcol:
startcol = source_startcol
except Exception as inst:
logger.exception('_refresh_completions process exception: %s', inst)
continue
# merge processing results of sources
for name in names:
try:
source_startcol = self._matches[name]['startcol']
source_matches = self._matches[name]['last_matches']
if not source_matches:
continue
prefix = ctx['typed'][startcol-1 : source_startcol-1]
for e in source_matches:
e['word'] = prefix + e['word']
# if 'abbr' in e:
# e['abbr'] = prefix + e['abbr']
matches += source_matches
except Exception as inst:
logger.exception('_refresh_completions process exception: %s', inst)
continue
if not matches:
startcol=len(ctx['typed']) or 1
logger.info('_refresh_completions names: %s, startcol: %s, matches cnt: %s', names, startcol, len(matches))
logger.debug('_refresh_completions names: %s, startcol: %s, matches: %s, source matches: %s', names, startcol, matches, self._matches)
self._complete(ctx, startcol, matches)
def process_matches(self,name,ctx,startcol,matches):
base = ctx['typed'][startcol-1:]
abbr = self._sources[name].get('abbreviation','')
# formalize datastructure
formalized = []
for item in matches:
e = {}
if type(item)==type(''):
e['word'] = item
else:
e = copy.deepcopy(item)
formalized.append(e)
# filtering and sorting
result = [ e for e in formalized if self._matcher(base=base,item=e)]
result = self._sorter(base,startcol,result)
# fix some text
for e in result:
if 'menu' not in e:
if 'info' in e and e['info'] and len(e['info'])<50:
if abbr:
e['menu'] = "<%s> %s" % (abbr,e['info'])
else:
e['menu'] = e['info']
else:
# info too long
if abbr:
e['menu'] = "<%s>" % abbr
else:
# e['menu'] = "<%s> %s" % (self._sources[name]['abbreviation'], e['info'])
pass
return result
def _complete(self, ctx, startcol, matches):
if not matches and not self._last_matches:
# no need to fire complete message
logger.info('matches==0, _last_matches==0, ignore')
return
self._nvim.call('cm#_core_complete', ctx, startcol, matches, async=True)
self._last_matches = matches
def cm_shutdown(self):
self._file_server.shutdown(wait=False)
# Caches file content in memory and serves it over HTTP instead of asking vim
# for the file on every request. FileServer is important for implementing the
# scoping feature, for example language-specific completion inside markdown
# code fences.
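# A hypothetical exchange (illustrative sketch; the port is assigned at
# startup):
#   GET http://127.0.0.1:<port>/?context=<url-encoded JSON with changedtick/curpos>
# run_GET() decodes the `context` query parameter and answers with the cached
# buffer slice for that scope, re-reading the buffer only when the cache is
# outdated.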
class FileServer(Thread):
def __init__(self):
self._rlock = RLock()
self._current_context = None
self._cache_context = None
self._cache_src = ""
Thread.__init__(self)
def start(self,nvim_server_name):
"""
Start the file server
@type request: str
"""
server = self
class HttpHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
server.run_GET(self)
except Exception as ex:
logger.exception('exception on FileServer: %s', ex)
self.send_response(500)
self.send_header('Content-type','text/html')
self.end_headers()
message = str(ex)
self.wfile.write(bytes(message, "utf8"))
# create another connection to avoid synchronization issue?
self._nvim = attach('socket',path=nvim_server_name)
# Server settings
# 0 for random port
server_address = ('127.0.0.1', 0)
self._httpd = HTTPServer(server_address, HttpHandler)
Thread.start(self)
def run_GET(self,request):
"""
        Process a GET request. This method, with the `run_` prefix, runs on
        the same thread as the `self.run` method.
@type request: BaseHTTPRequestHandler
"""
params = {}
for e in urllib.parse.parse_qsl(urllib.parse.urlparse(request.path).query):
params[e[0]] = e[1]
logger.info('thread %s processing %s', threading.get_ident(), params)
context = json.loads(params['context'])
src = self.get_src(context)
if src is None:
src = ""
request.send_response(200)
request.send_header('Content-type','text/html')
request.end_headers()
request.wfile.write(bytes(src, "utf8"))
def run(self):
logger.info('running server on port %s, thread %s', self._httpd.server_port, threading.get_ident())
self._httpd.serve_forever()
def get_src(self,context):
with self._rlock:
# If context does not match current context, check the neovim current
# context, if does not match neither, return None
if cm.context_outdated(self._current_context,context):
self._current_context = self._nvim.eval('cm#context()')
if cm.context_outdated(self._current_context,context):
                    logger.info('get_src returning None for outdated context: %s', context)
return None
# update cache when necessary
if cm.context_outdated(self._current_context, self._cache_context):
logger.info('get_src updating cache for context %s', context)
self._cache_context = self._current_context
self._cache_src = "\n".join(self._nvim.current.buffer[:])
scope_offset = context.get('scope_offset',0)
scope_len = context.get('scope_len',len(self._cache_src))
return self._cache_src[scope_offset:scope_offset+scope_len]
def set_current_ctx(self,context):
"""
This method is running on main thread as cm core
"""
with self._rlock:
self._current_context = context
def get_src_uri(self,context):
        # changedtick and curpos are enough for the outdated-context check
stripped = dict(changedtick=context['changedtick'],curpos=context['curpos'])
if 'scope_offset' in context:
stripped['scope_offset'] = context['scope_offset']
if 'scope_len' in context:
stripped['scope_len'] = context['scope_len']
query = urllib.parse.urlencode(dict(context=json.dumps(stripped)))
return urllib.parse.urljoin('http://127.0.0.1:%s' % self._httpd.server_port, '?%s' % query)
def shutdown(self,wait=True):
"""
Shutdown the file server
"""
self._httpd.shutdown()
if wait:
self.join()
def main():
start_type = sys.argv[1]
    # the default nice is inherited from the parent neovim process. Increment
    # it so that heavy calculation will not block the ui.
try:
os.nice(1)
except:
pass
# psutil ionice
try:
import psutil
p = psutil.Process(os.getpid())
p.ionice(psutil.IOPRIO_CLASS_IDLE)
except:
pass
if start_type == 'core':
# use the module name here
setup_logging('cm_core')
logger = logging.getLogger(__name__)
logger.setLevel(get_loglevel())
        # change process title
try:
import setproctitle
setproctitle.setproctitle('nvim-completion-manager core')
except:
pass
try:
# connect neovim
nvim = nvim_env()
handler = CoreHandler(nvim)
logger.info('starting core, enter event loop')
cm_event_loop('core',logger,nvim,handler)
except Exception as ex:
logger.exception('Exception when running channel: %s',ex)
exit(1)
finally:
# terminate here
exit(0)
elif start_type == 'channel':
name = sys.argv[2]
# use the module name here
setup_logging(name)
logger = logging.getLogger(name)
logger.setLevel(get_loglevel())
        # change process title
try:
import setproctitle
setproctitle.setproctitle('nvim-completion-manager %s' % name)
except:
pass
try:
# connect neovim
nvim = nvim_env()
m = importlib.import_module(name)
handler = m.Source(nvim)
logger.info('handler created, entering event loop')
cm_event_loop('channel',logger,nvim,handler)
except Exception as ex:
logger.exception('Exception: %s',ex)
exit(1)
finally:
# terminate here
exit(0)
def nvim_env():
nvim = attach('stdio')
# setup pythonx
pythonxs = nvim.eval('globpath(&rtp,"pythonx")')
for path in pythonxs.split("\n"):
if not path:
continue
if path not in sys.path:
sys.path.append(path)
return nvim
def get_loglevel():
# logging setup
level = logging.INFO
if 'NVIM_PYTHON_LOG_LEVEL' in os.environ:
l = getattr(logging,
os.environ['NVIM_PYTHON_LOG_LEVEL'].strip(),
level)
if isinstance(l, int):
level = l
return level
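# Illustrative example (the environment value is an assumption): launching
# neovim with NVIM_PYTHON_LOG_LEVEL=DEBUG makes get_loglevel() return
# logging.DEBUG via the getattr lookup above.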
def cm_event_loop(type,logger,nvim,handler):
def on_setup():
logger.info('on_setup')
def on_request(method, args):
func = getattr(handler,method,None)
if func is None:
logger.info('method: %s not implemented, ignore this request', method)
return None
func(*args)
def on_notification(method, args):
logger.debug('%s method: %s, args: %s', type, method, args)
if type=='channel' and method=='cm_refresh':
ctx = args[1]
            # The refresh calculation may be heavy, and the notification queue
            # may contain outdated refresh events; it would be meaningless to
            # process those events
if nvim.call('cm#context_changed',ctx):
logger.info('context_changed, ignoring context: %s', ctx)
return
func = getattr(handler,method,None)
if func is None:
logger.info('method: %s not implemented, ignore this message', method)
return
func(*args)
logger.debug('%s method %s completed', type, method)
nvim.run_loop(on_request, on_notification, on_setup)
# shutdown
func = getattr(handler,'cm_shutdown',None)
if func:
func()
main()
|
[
"l"
] |
l
|
fe4d5bc55a87020fa99d3ab6ac248746bcac93f7
|
ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0
|
/02_algorithm/baekjoon/all_problem/1967.py
|
7869d00865f64f47645925bb25b3fd2d27997977
|
[] |
no_license
|
wally-wally/TIL
|
93fc1d0e3bc7d030341ed54155294c68c48b4c7d
|
936783bc86f563646c0398c24e2fcaa707f0ed23
|
refs/heads/master
| 2023-04-28T08:59:48.235747 | 2023-04-12T12:06:52 | 2023-04-12T12:06:52 | 195,918,111 | 40 | 7 | null | 2020-09-29T16:20:46 | 2019-07-09T02:31:02 |
Python
|
UTF-8
|
Python
| false | false | 1,243 |
py
|
import sys
sys.stdin = open('input_1967.txt', 'r')
from collections import deque
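# Tree diameter via double BFS: a traversal from any node ends at one endpoint
# of the diameter; a second traversal from that endpoint reaches the other
# endpoint, and the distance found then is the diameter itself.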
def find_farthest_vertex(start_node):
distance, farthest_vertex = 0, start_node
dq = deque()
dq.append((start_node, 0))
visited = [False for _ in range(n + 1)]
visited[start_node] = True
while dq:
pop_node, pop_dist = dq.popleft()
for node in tree_info[pop_node]:
if not visited[node]:
visited[node] = True
now_dist = tree_info[pop_node][node]
dq.append((node, pop_dist + now_dist))
if distance < pop_dist + now_dist:
distance = pop_dist + now_dist
farthest_vertex = node
return distance, farthest_vertex
n = int(input())
if n == 1:
print(0)
else:
tree_info = dict()
for _ in range(n - 1):
node1, node2, dist = map(int, input().split())
if node1 not in tree_info:
tree_info[node1] = dict()
tree_info[node1][node2] = dist
if node2 not in tree_info:
tree_info[node2] = dict()
tree_info[node2][node1] = dist
temp_vertex = find_farthest_vertex(1)[1]
distance = find_farthest_vertex(temp_vertex)[0]
print(distance)
|
[
"[email protected]"
] | |
e28df1d439e040246ab819554cc779df88495db5
|
b13b603bf8f07da1100b7fcb2e505f9c389e5764
|
/level2/구명보트.py
|
b2937d51318cf687b8b938b9224531f55a5bf9f3
|
[] |
no_license
|
123qpq/programers
|
3a499646f65bed9f15b0db3e66d7445536579942
|
970b62210f6c29ea0d13bd381fb1e0a9b997143f
|
refs/heads/main
| 2023-06-20T12:00:10.681082 | 2021-07-20T04:36:46 | 2021-07-20T04:36:46 | 322,791,861 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 355 |
py
|
from collections import deque
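# Greedy strategy: after sorting by weight, pair the lightest remaining person
# with the heaviest whenever they fit under the limit; otherwise the heaviest
# person rides alone. Each loop iteration launches exactly one boat.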
def solution(people, limit):
boats = 0
people.sort()
people = deque(people)
while people:
boats += 1
if len(people) == 1:
break
if people[0] + people[-1] <= limit:
people.popleft()
people.pop()
else:
people.pop()
return boats
|
[
"[email protected]"
] | |
594ba8fc903636d753d54513ba19fe91419f506c
|
6502929152acc82097c6bb9fa9b211a30b23b6c0
|
/2_anomaly_detection_nyc.py
|
446aea1c4757dddf1487d6ef315105b8176e12b4
|
[
"Apache-2.0"
] |
permissive
|
lulzzz/RNN-Time-series-Anomaly-Detection
|
ceec98a902b3504224834c03a22e7e7673f11470
|
194a20100749bae0f18b7a9681055b92f892c7e9
|
refs/heads/master
| 2020-03-10T03:37:45.860990 | 2018-04-11T01:38:42 | 2018-04-11T01:38:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,349 |
py
|
import argparse
import torch
import torch.nn as nn
from torch.autograd import Variable
import preprocess_data
from model import model
from torch import optim
from matplotlib import pyplot as plt
import numpy as np
from anomalyDetector import fit_norm_distribution_param
from anomalyDetector import anomalyScore
from sklearn.svm import SVR
parser = argparse.ArgumentParser(description='PyTorch RNN Anomaly Detection Model on nyc_taxi Dataset')
parser.add_argument('--prediction_window_size', type=int, default=10,
help='prediction_window_size')
args_ = parser.parse_args()
print("=> loading checkpoint ")
checkpoint = torch.load('./save/nyc_taxi/checkpoint.pth.tar')
print("=> loaded checkpoint")
args = checkpoint['args']
args.prediction_window_size= args_.prediction_window_size
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
TimeseriesData = preprocess_data.DataLoad(args.data)
train_dataset = preprocess_data.batchify(args,TimeseriesData.trainData, 1)[:10000]
test_dataset = preprocess_data.batchify(args,TimeseriesData.testData, 1)
###############################################################################
# Build the model
###############################################################################
model = model.RNNPredictor(rnn_type = args.model, enc_inp_size=3, rnn_inp_size = args.emsize, rnn_hid_size = args.nhid,
dec_out_size=3,
nlayers = args.nlayers,)
model.load_state_dict(checkpoint['state_dict'])
del checkpoint
if args.cuda:
model.cuda()
# At any point you can hit Ctrl + C to break out of the computation early.
endPoint=3500
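# Scoring pipeline: fit a multivariate normal to the RNN's prediction errors
# on the training data, score the test predictions against that distribution,
# and fit an SVR on the hidden states so anomaly scores can also be predicted
# directly from the model state.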
try:
mean, cov = fit_norm_distribution_param(args, model, train_dataset, endPoint,channel_idx=0)
train_scores, _, _, hiddens,_ = anomalyScore(args, model, train_dataset, mean, cov, 3000)
score_predictor = SVR(C=1.0,epsilon=0.2)
score_predictor.fit(torch.cat(hiddens,dim=0).numpy(),train_scores)
scores, sorted_predictions,sorted_errors, _, predicted_scores = anomalyScore(args, model, test_dataset, mean, cov, endPoint,score_predictor=score_predictor)
sorted_predictions = torch.cat(sorted_predictions, dim=0)
sorted_errors = torch.cat(sorted_errors,dim=0)
scores = np.array(scores)
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
target= preprocess_data.reconstruct(test_dataset.cpu()[:, 0, 0].numpy(),
TimeseriesData.trainData['seqData_mean'],
TimeseriesData.trainData['seqData_std'])
sorted_predictions_mean = preprocess_data.reconstruct(sorted_predictions.mean(dim=1).numpy(),
TimeseriesData.trainData['seqData_mean'],
TimeseriesData.trainData['seqData_std'])
sorted_predictions_1step = preprocess_data.reconstruct(sorted_predictions[:,-1].numpy(),
TimeseriesData.trainData['seqData_mean'],
TimeseriesData.trainData['seqData_std'])
sorted_predictions_Nstep = preprocess_data.reconstruct(sorted_predictions[:,0].numpy(),
TimeseriesData.trainData['seqData_mean'],
TimeseriesData.trainData['seqData_std'])
#sorted_errors_mean = sorted_errors.mean(dim=1).abs().cpu().numpy()
sorted_errors_mean = sorted_errors.abs().mean(dim=1).cpu().numpy()
sorted_errors_mean *=TimeseriesData.trainData['seqData_std']
fig, ax1 = plt.subplots(figsize=(15,5))
ax1.plot(target,label='Target', color='black', marker='.', linestyle='--', markersize=1, linewidth=0.5)
ax1.plot(sorted_predictions_mean,label='Mean predictions', color='purple', marker='.', linestyle='--', markersize=1, linewidth=0.5)
ax1.plot(sorted_predictions_1step,label='1-step predictions', color='green', marker='.', linestyle='--', markersize=1, linewidth=0.5)
ax1.plot(sorted_predictions_Nstep,label=str(args.prediction_window_size)+'-step predictions', color='blue', marker='.', linestyle='--', markersize=1, linewidth=0.5)
ax1.plot(sorted_errors_mean,label='Absolute mean prediction errors', color='orange', marker='.', linestyle='--', markersize=1, linewidth=1)
ax1.legend(loc='upper left')
ax1.set_ylabel('Value',fontsize=15)
ax1.set_xlabel('Index',fontsize=15)
ax2 = ax1.twinx()
ax2.plot(scores,label='Anomaly scores from \nmultivariate normal distribution', color='red', marker='.', linestyle='--', markersize=1, linewidth=1)
ax2.plot(predicted_scores,label='Predicted anomaly scores from SVR', color='cyan', marker='.', linestyle='--', markersize=1, linewidth=1)
ax2.legend(loc='upper right')
ax2.set_ylabel('anomaly score',fontsize=15)
plt.axvspan(3024,3040 , color='yellow', alpha=0.3)
plt.xlim([0, endPoint])
plt.title('Anomaly Detection on ' + args.data + ' Dataset', fontsize=18, fontweight='bold')
plt.tight_layout()
plt.xlim([1500,endPoint])
plt.savefig('result/'+args.data+'/fig_scores.png')
plt.show()
|
[
"[email protected]"
] | |
252b8ee31d1bd50da52453b3ffc3d15607759ff3
|
e8bf00dba3e81081adb37f53a0192bb0ea2ca309
|
/domains/nav/problems/auto/problem1083_SD.py
|
44761016946ddef7cbb4c415a70a66f745d3cab7
|
[
"BSD-3-Clause"
] |
permissive
|
patras91/rae_release
|
1e6585ee34fe7dbb117b084df982ca8a8aed6795
|
0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30
|
refs/heads/master
| 2023-07-13T20:09:41.762982 | 2021-08-11T17:02:58 | 2021-08-11T17:02:58 | 394,797,515 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,135 |
py
|
__author__ = 'patras'
from domain_springDoor import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'unlatch1': 5,
'unlatch2': 5,
'holdDoor': 2,
'passDoor': 3,
'releaseDoor': 2,
'closeDoors': 3,
'move': 7,
'take': 2,
'put': 2,
}
DURATION.COUNTER = {
'unlatch1': 5,
'unlatch2': 5,
'holdDoor': 2,
'passDoor': 3,
'releaseDoor': 2,
'closeDoors': 3,
'move': 7,
'take': 2,
'put': 2,
}
rv.LOCATIONS = [1, 2, 3, 4, 5]
rv.EDGES = {1: [2], 2: [1, 3], 3: [2, 4, 5], 4: [3], 5: [3]}
rv.DOORS = ['d1', 'd2']
rv.DOORLOCATIONS = {(2, 3): 'd1', (3, 5): 'd2'}
rv.DOORTYPES = {'d1': 'spring', 'd2': 'spring'}
rv.ROBOTS = ['r1', 'r2', 'r3', 'r4']
def ResetState():
state.load = {'r1': NIL, 'r2': NIL, 'r3': NIL, 'r4': NIL}
state.status = {'r1': 'free', 'r2': 'free', 'r3': 'free', 'r4': 'free'}
state.loc = {'r1': 4, 'r2': 3, 'r3': 1, 'r4': 3}
state.pos = {'o1': 4}
state.doorStatus = {'d1': 'closed', 'd2': 'closed', }
state.doorType = {'d1': UNK, 'd2': UNK, }
tasks = {
10: [['fetch', 'r1', 'o1', 3]],
}
eventsEnv = {
}
|
[
"[email protected]"
] | |
0b0c4aff2067846b9dd29a167bd9b9af5c1b88d7
|
aa6985deb43e26732899d06ee39fd6cb4befc2ae
|
/strongmotionfetch/retriever.py
|
78bb5088fde03bef4274155037a08d13e372b0ae
|
[
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
jnf0910/strongmotionfetch
|
2592396183a651799b3ae3a33c77a2b25128d85d
|
c9e6f30797e6893bf60506b24096c52537a0110b
|
refs/heads/master
| 2020-12-25T09:28:53.617747 | 2016-06-23T20:48:39 | 2016-06-23T20:48:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,020 |
py
|
class Retriever(object):
def __init__(self,rawfolder,inputfolder):
self._inputfolder = inputfolder
self._rawfolder = rawfolder
def getData(self,time,lat,lon,timewindow,radius):
self.fetch(time,lat,lon,timewindow,radius) #find files online, download to raw folder
traces = self.readFiles() #read any files downloaded into raw folder, turn into list of ObsPy Trace objects
amps = self.traceToAmps(traces) #pull out peak amplitudes, return as data structure
xmlstr = self.ampsToXML(amps) #convert these amps to an xml string
self.saveToXML(xmlstr) #write that xml string to a file in the input folder
def fetch(self,time,lat,lon,timewindow,radius):
#this is implemented in child classes
pass
def readFiles(self):
#this is implemented in child classes
pass
    def traceToAmps(self, traces=None):
        #this is implemented here
        pass
    def ampsToXML(self, amps=None):
        #this is implemented here
        pass
|
[
"[email protected]"
] | |
b76c818f8bfb550baeb076a2434b1a8e483f19e2
|
36b388d25580e60068da2d6cab88d94f90959749
|
/lib/datasets/SearchDatasetWrap.py
|
f7925b3c9235992b325687ea7c761c732570e971
|
[
"MIT"
] |
permissive
|
z-x-yang/NAS-Projects
|
4b733b381e325819f958d9af684267b9d4f7fac8
|
54ecec7f750e11077b2ecc60ddcd74ce417434ac
|
refs/heads/master
| 2020-09-08T14:45:13.712958 | 2019-11-09T16:04:05 | 2019-11-09T16:04:05 | 221,163,104 | 3 | 0 |
MIT
| 2019-11-12T08:05:37 | 2019-11-12T08:05:34 | null |
UTF-8
|
Python
| false | false | 1,186 |
py
|
import torch, copy, random
import torch.utils.data as data
class SearchDataset(data.Dataset):
def __init__(self, name, data, train_split, valid_split, check=True):
self.datasetname = name
self.data = data
self.train_split = train_split.copy()
self.valid_split = valid_split.copy()
if check:
intersection = set(train_split).intersection(set(valid_split))
      assert len(intersection) == 0, 'the split train and validation sets should have no intersection'
self.length = len(self.train_split)
def __repr__(self):
return ('{name}(name={datasetname}, train={tr_L}, valid={val_L})'.format(name=self.__class__.__name__, datasetname=self.datasetname, tr_L=len(self.train_split), val_L=len(self.valid_split)))
def __len__(self):
return self.length
def __getitem__(self, index):
assert index >= 0 and index < self.length, 'invalid index = {:}'.format(index)
train_index = self.train_split[index]
valid_index = random.choice( self.valid_split )
train_image, train_label = self.data[train_index]
valid_image, valid_label = self.data[valid_index]
return train_image, train_label, valid_image, valid_label
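# Minimal usage sketch (the dataset object and index lists are illustrative
# assumptions, not part of the original file):
#   search_data = SearchDataset('cifar10', cifar10_data, train_idx, valid_idx)
#   train_img, train_lab, valid_img, valid_lab = search_data[0]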
|
[
"[email protected]"
] | |
c6269ba6d485726a565012d1258590f2aa850778
|
1e4eefff1c19ffb81016ce99f2284fb657293f65
|
/sorting/test/insertion_sort.py
|
0bc84a94a6f9223c9f3bc60740800fc4d84f4a58
|
[] |
no_license
|
Solero93/bcn-algorithm-club-py
|
5e1edf15f087e0edf2cf7ba0859fb5e4523525ad
|
1edf407498756e7ba36534387bb4241b8b455c4f
|
refs/heads/master
| 2020-03-28T09:06:30.328130 | 2019-03-25T10:38:48 | 2019-03-25T10:38:48 | 148,014,386 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 340 |
py
|
from unittest import TestCase
from src.insertion_sort import insertion_sort
from test.fixtures import test_cases
class BuiltInSortTest(TestCase):
def test_sort(self):
for case in test_cases:
self.assertEqual(insertion_sort(case), sorted(case),
msg=f'{case} should be {sorted(case)}')
|
[
"[email protected]"
] | |
00610e1274a588c4ba24de9621ed3e9c8cb3f68e
|
781e2692049e87a4256320c76e82a19be257a05d
|
/assignments/python/wc/src/99.py
|
126258ad6d501e7e292a6de1f96af9a0b43e54a6
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 169 |
py
|
def word_count(input):
counts = {}
for word in input.split():
    if word not in counts:
counts[word] = 1
else:
counts[word] = counts[word] + 1
return counts
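# Illustrative usage (not part of the original file):
#   word_count("olly olly in come free")
#   -> {'olly': 2, 'in': 1, 'come': 1, 'free': 1}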
|
[
"[email protected]"
] | |
1a4b29df1d245f93801fe603eb728d890f2ba45e
|
adc6d8ee596e4710c3241332758bb6990bdd8914
|
/Trabajo de grado_final/Anexos/Codigos/test_todoRE.py
|
b3e6bc0cdb7029307a37568546836197b0bdb655
|
[] |
no_license
|
NatalyTinoco/Trabajo-de-grado_Artefactos
|
cf9491c47a8a23ce5bab7c52498093a61319f834
|
5cc4e009f94c871c7ed0d820eb113398ac66ec2f
|
refs/heads/master
| 2022-03-20T00:51:48.420253 | 2019-11-24T19:10:40 | 2019-11-24T19:10:40 | 197,964,659 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,752 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 5 12:53:35 2019
@author: Daniela and Nataly
Code description: ties the whole methodology together for identifying specular reflections (mRE).
*Input: imagePath (path of the image)
*Output: predic (classifier prediction: 1 = RE, 0 = OTHER), original_2 (original image), imDU_2 (image without labels), umbrImage (mask with the real REs),
original_3 (image with the bounding boxes drawn, i.e. the artifact locations) and bboxre (bounding-box positions).
"""
import cv2
import pickle
import pandas as pd
from Normalizacion import normalizacionMaxMin
from equalization import adaptativeequalization
from rOI import ROI
from ventanIDEA import ventanIDEA
from caracRE import caracRe
fileOpen = 'model_pickle'
with open(fileOpen,'rb') as f:
mpRE = pickle.load(f)
def test_all_RE(imagePath):
original = cv2.imread(imagePath)
original_2 =original.copy()
original_3=original.copy()
imNorm = normalizacionMaxMin(original)
imEqu = adaptativeequalization(imNorm)
imDR = imEqu.copy()
roiImage = ROI(imEqu)
for z in range(3):
imDR[:,:,z]=imEqu[:,:,z]*roiImage
imDR_2=original_2.copy()
for z in range(3):
imDR_2[:,:,z]=original_2[:,:,z]*roiImage
imDU_2=imDR_2.copy()
imDU = imDR.copy()
umbrImage = ventanIDEA(imDR,roiImage)
for z in range(3):
imDU[:,:,z]=imDR[:,:,z]*umbrImage
try:
contours,hierachy = cv2.findContours(umbrImage,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
except ValueError:
_,contours,_ = cv2.findContours(umbrImage,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
predic=[]
bboxre=[],[],[],[],[]
if len(contours)==0:
pred=0
else:
for c in range(len(contours)):
cnt = contours[c]
x,y,w,h = cv2.boundingRect(cnt)
cropped2 = imDR[int(y):int(y+h),int(x):int(x+w)]
brillo,contraste,desvi=caracRe(cropped2)
carac=pd.DataFrame({'contrastB':contraste,'desviacionB':desvi,'Brillo':brillo},index =['1'])
pred=int(mpRE.predict(carac))
if pred == 1:
umbrImage[int(y):int(y+h),int(x):int(x+w)] = umbrImage[int(y):int(y+h),int(x):int(x+w)]
cv2.rectangle(original_3,(int(x),int(y)),(int(x+w),int(y+h)),(0,0,255),2)
bboxre[0].append(1)
bboxre[1].append(int(x))
bboxre[2].append(int(y))
bboxre[3].append(int(w+x))
bboxre[4].append(int(h+y))
else:
umbrImage[int(y):int(y+h),int(x):int(x+w)] = 0
predic.append(pred)
return predic, original_2 , imDU_2,umbrImage,original_3,bboxre
|
[
"[email protected]"
] | |
ddd21339f6ece5a9cfaea4ecafc2c753eb9eee93
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/Adaptiv XVA/FPythonCode/HedgingCostHooksTemplate.py
|
9674f7e8d046ad05bf4c5c7fab47c7b123fe95f4
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,164 |
py
|
""" Compiled: 2020-09-18 10:38:50 """
#__src_file__ = "extensions/cva/adaptiv_xva/./etc/HedgingCostHooksTemplate.py"
'''---------------------------------------------------------------------
All functions in HedgingCostHooksTemplate can be
overridden in a HedgingCostHooks module. To do so, create a module called HedgingCostHooks
(or rename the HedgingCostHooksTemplate to HedgingCostHooks) and copy the function
declaration of the function you want to override into it.
---------------------------------------------------------------------'''
import acm
context = acm.GetDefaultContext()
def CreditDeskCounterParty():
return 'HedgingCost Credit sales desk'
def IsHedgingCostCandidate(trade):
"""Filter to enable Request HedgingCost"""
hasCreditBalance = trade.CreditBalance() != None
isStatusSimulated = trade.Status() == 'Simulated'
return hasCreditBalance and isStatusSimulated
def ConfirmedTradeStatus():
"""Trade status to use after HedgingCost is confirmed"""
return 'FO Confirmed'
def CreditBalanceIncludedTrades():
''' This method creates a filter (FASQLQuery) that specifies if a trade should be included in
the Credit Balance portfolio. The filter is merged with the filter defined by the mapping in the
extension value 'creditBalanceToTrade'.
It is also possible to use a stored insert items query
Example:
1. Create a shared Insert Items query that excludes trades where Status = Simulated named "GlobalHedgingCostFilter"
2. Use this stored query in the hook
def CreditBalanceIncludedTrades():
filter = acm.FStoredASQLQuery["GlobalHedgingCostFilter"]
return filter.Query()
'''
enum = acm.FEnumeration["enum(TradeStatus)"].Enumeration("Simulated")
return acm.Filter.SimpleAndQuery("FTrade", "Status", "GREATER", enum)
def GetSuggestedHedgingCost(trade):
calculationSpaceTradeSheet = acm.Calculations().CreateCalculationSpace(context, 'FTradeSheet')
denomValue = calculationSpaceTradeSheet.CalculateValue(trade, 'Incremental CVA')
return denomValue
|
[
"[email protected]"
] | |
581247815b035fa94285115f1360f3627bd39e61
|
c012d16bbb77853bc39973d920da94b70000dc01
|
/Admins/migrations/0001_initial.py
|
bcd34f16091d2c29f5fe1ceda6ea5f1c88c388ed
|
[] |
no_license
|
OverLoadedBurden/Dynamiet
|
68c7e88d900581bdd53f1e51756c14db65359fd2
|
1988bbeeb73c313cf07c750fba5adb7739445012
|
refs/heads/master
| 2020-11-25T01:20:34.645831 | 2020-03-14T01:17:57 | 2020-03-14T01:17:57 | 228,428,056 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 653 |
py
|
# Generated by Django 2.2.5 on 2020-02-14 21:21
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('name', models.CharField(max_length=255, primary_key=True, serialize=False)),
('password', models.CharField(max_length=255)),
('isAdmin', models.BooleanField(default=False)),
],
options={
'abstract': False,
'base_manager_name': 'objects',
},
),
]
|
[
"[email protected]"
] | |
5c21b8c69f06a8ed02e60a08f8978ace23ae7480
|
69abeb93aba9f9d978b522a027c502a8ea9f4f65
|
/manage.py
|
cd9a179107b2400db6cf790a5fc655bfe5ab9fa3
|
[] |
no_license
|
pengra/library
|
36d32160a85830db864effe9f1233761e6284048
|
35ba6de62ac8598ca26d7977fb0613c2b6781a09
|
refs/heads/master
| 2020-03-29T13:14:04.272936 | 2019-02-13T23:19:33 | 2019-02-13T23:19:33 | 149,947,390 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 806 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "labwatch.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
5003d8457a1f911eeb54d2ad10a3f8b5cdc99dc4
|
fdab4771b4956bac64727a8d67a8b769c296b7af
|
/manage.py
|
e0e7b902cb8d8c23cc67744c5abc554a8bc16c67
|
[
"MIT"
] |
permissive
|
darkdrei/Inventario
|
17e5dae215fa7b2e8d8aff8cc2cf37011c67fc03
|
dc2dcc830be5a49ba602c242d8c7d5d9c24c7b5c
|
refs/heads/master
| 2020-09-20T05:20:36.557735 | 2017-11-27T20:45:25 | 2017-11-27T20:45:25 | 94,501,187 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 801 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "inv.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
c9dc89b118798fc71c4cb566d283176f2deeaf07
|
23382d130e2a020ac90d7b88ab045b34f7719565
|
/setup.py
|
e14c95a9495bc2105d2b1bc69d26a302ae7755d6
|
[
"MIT"
] |
permissive
|
dunossauro/sfp
|
f2967f519017e9c9a8f95ff6dccd09293775c72a
|
944e78ff453aba692ed5b6a3cf7855093d6e987a
|
refs/heads/master
| 2021-04-06T13:03:18.403904 | 2018-06-22T17:16:44 | 2018-06-22T17:16:44 | 124,821,555 | 4 | 2 |
MIT
| 2018-07-10T21:32:27 | 2018-03-12T02:16:11 |
Python
|
UTF-8
|
Python
| false | false | 314 |
py
|
from setuptools import setup
setup(name='sfp',
version='0.0.1',
description='Simple Functional programming',
url='https://github.com/z4r4tu5tr4/sfp',
author='Eduardo Mendes',
author_email='[email protected]',
license='MIT',
packages=['sfp'],
zip_safe=False)
|
[
"[email protected]"
] | |
5f975472428eedee10faaba92c1b0d561dcc4e86
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/PhysicsTools/PatAlgos/python/selectionLayer1/electronCountFilter_cfi.py
|
80af141c59547eefb55cd53baaca26468ed67b02
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 |
Apache-2.0
| 2023-09-14T19:14:28 | 2013-06-26T14:09:07 |
C++
|
UTF-8
|
Python
| false | false | 262 |
py
|
import FWCore.ParameterSet.Config as cms
# module to filter on the number of Electrons
countPatElectrons = cms.EDFilter("PATCandViewCountFilter",
minNumber = cms.uint32(0),
maxNumber = cms.uint32(999999),
src = cms.InputTag("cleanPatElectrons")
)
|
[
"[email protected]"
] | |
901a4aed1e3a9151da4ec4c0e44368812347fc74
|
bdde5ee603138399475ef42eeca67701f0d910ae
|
/mycoplasma_home/views/pagelets/public/HomePagelet.py
|
cfb042b9323db49d62695f1ed3c8d37cf7719204
|
[] |
no_license
|
idoerg/MyDIG
|
63c19f980df8246a4a0b1c4e93fdd28bf69b97e9
|
88cc8f24a5d4b248dff1aafb54713e44537c611f
|
refs/heads/master
| 2021-01-10T20:38:46.336870 | 2012-11-30T02:45:08 | 2012-11-30T02:45:08 | 5,939,219 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,095 |
py
|
'''
Pagelet for the Home Page
Author: Andrew Oberlin
Date: July 23, 2012
'''
from renderEngine.PageletBase import PageletBase
from mycoplasma_home.models import Organism, OrganismWithImages, OrganismWithGenome, OrganismWithTags
class HomePagelet(PageletBase):
'''
Renders the center of the home page
Params: request -- the Django request object with the POST & GET args
Returns: Dictionary of arguments for rendering this pagelet
'''
def doProcessRender(self, request):
self.setLayout('public/home.html')
allMycoplasma = Organism.objects.filter(genus__exact="Mycoplasma").order_by('species')
allGenomes = OrganismWithGenome.objects.values_list('organism_id', flat=True).order_by('organism_id')
allImages = OrganismWithImages.objects.values_list('organism_id', flat=True).order_by('organism_id')
allTags = OrganismWithTags.objects.values_list('organism_id', flat=True).order_by('organism_id')
return {
'all_mycoplasma' : allMycoplasma,
'all_genomes' : allGenomes,
'all_images' : allImages,
'all_tags' : allTags
}
|
[
"[email protected]"
] | |
898f544645356394e471bd139055540ae348b4ee
|
3fbd28e72606e5358328bfe4b99eb0349ca6a54f
|
/.history/a_games_20210607183048.py
|
6937ef94493d7d66c872ea111a2a66ef9fd75b41
|
[] |
no_license
|
Tarun1001/codeforces
|
f0a2ef618fbd45e3cdda3fa961e249248ca56fdb
|
576b505d4b8b8652a3f116f32d8d7cda4a6644a1
|
refs/heads/master
| 2023-05-13T04:50:01.780931 | 2021-06-07T21:35:26 | 2021-06-07T21:35:26 | 374,399,423 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 447 |
py
|
n = int(input())
jer = []
for _ in range(0, n):
    h, a = map(int, input().split())
    jer.append((h, a))
def sol(jer):
    # Count ordered (host, guest) pairs whose host home-jersey colour equals
    # the guest away-jersey colour.
    count = 0
    for i in range(0, len(jer)):
        for j in range(0, len(jer)):
            if i == j:
                continue
            hometeamHomejersey = jer[i][0]
            awayteamAwayjersey = jer[j][1]
            if hometeamHomejersey == awayteamAwayjersey:
                count += 1
    return count
print(sol(jer))
|
[
"[email protected]"
] | |
d6b7ff2786b8b58ae1df6bcc85b8c85ca3d12c8c
|
d0f21d669a1099fe7138d763985d0c392968f93f
|
/tests/test_visitors/test_ast/test_imports/test_protected_import.py
|
d27ec451dcc7abe58ea249e088f9f20d2d4a5fc7
|
[
"MIT"
] |
permissive
|
jigi-33/wemake-python-styleguide
|
3aab4f13023d3a882b19e65a9967f8abe2a72db1
|
1239a4726b91de588b20b268c47485373bf125a1
|
refs/heads/master
| 2021-01-08T19:24:11.454562 | 2020-02-28T20:09:58 | 2020-02-29T05:39:16 | 242,120,262 | 1 | 0 |
MIT
| 2020-02-29T05:39:18 | 2020-02-21T11:08:25 |
Python
|
UTF-8
|
Python
| false | false | 2,032 |
py
|
# -*- coding: utf-8 -*-
import pytest
from wemake_python_styleguide.violations.best_practices import (
ProtectedModuleMemberViolation,
ProtectedModuleViolation,
)
from wemake_python_styleguide.visitors.ast.imports import WrongImportVisitor
import_public = 'import public'
import_protected = 'import _protected'
import_from_protected = 'from _protected import something'
import_from_protected_path = 'from path._protected import something'
import_protected_from = 'from some.path import _protected'
import_from_public = 'from public import something'
import_from_public_path = 'from public.path import something'
import_protected_as_alias = 'from some.path import _protected as not_protected'
@pytest.mark.parametrize('code', [
import_public,
import_from_public,
import_from_public_path,
])
def test_correct_import(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that correct imports are allowed."""
tree = parse_ast_tree(code)
visitor = WrongImportVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
import_protected,
import_from_protected,
import_from_protected_path,
])
def test_incorrect_modules_import(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that imports from protected modules are restricted."""
tree = parse_ast_tree(code)
visitor = WrongImportVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ProtectedModuleViolation])
@pytest.mark.parametrize('code', [
import_protected_from,
import_protected_as_alias,
])
def test_incorrect_module_members_import(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that importing of protected objects is restricted."""
tree = parse_ast_tree(code)
visitor = WrongImportVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ProtectedModuleMemberViolation])
|
[
"[email protected]"
] | |
be8508f0365a7e8e553b47eb14de29e6433f5c5c
|
14324a581c4f22c7ea8a96cc79725cdb84960e43
|
/trees/migrations/0001_initial.py
|
3c8b0a061d9d005b156b6ecbbccf9c5d97f9ef14
|
[] |
no_license
|
dogger123/django-treeapi
|
a3f141f87bb515e4af4f820a80daf6bacc40199d
|
942da122d6c9909c21321a1aea2849428ba47120
|
refs/heads/master
| 2020-05-22T13:20:22.798164 | 2019-05-13T06:45:13 | 2019-05-13T06:45:13 | 186,357,054 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,374 |
py
|
# Generated by Django 2.2 on 2019-05-10 11:03
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DataSet',
fields=[
('dataSet_id', models.AutoField(primary_key=True, serialize=False)),
('dataSet_name', models.CharField(max_length=200, null=True)),
('dataSet_type', models.CharField(max_length=200, null=True)),
('table_name', models.CharField(max_length=200, null=True)),
('size', models.IntegerField(default=0)),
('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='cretetime')),
],
),
migrations.CreateModel(
name='Trees',
fields=[
('tree_id', models.AutoField(primary_key=True, serialize=False)),
('tree_name', models.CharField(default=0, max_length=200)),
('tree_type', models.CharField(max_length=200, null=True)),
('tree_dict', models.TextField(default=0)),
('detpth', models.IntegerField(default=0)),
('nodes_num', models.IntegerField(default=0)),
('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='cretetime')),
('dataSet', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='trees.DataSet')),
],
),
migrations.CreateModel(
name='Analysis',
fields=[
('analysis_id', models.AutoField(primary_key=True, serialize=False)),
('analysis_name', models.CharField(max_length=200)),
('accuracy', models.FloatField()),
('ifthen', models.TextField()),
('content', models.TextField(null=True)),
('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='cretetime')),
('dataSet', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='trees.DataSet')),
('tree', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='trees.Trees')),
],
),
]
|
[
"="
] |
=
|
a53aa1a02c0c228ee59d066579a76805bac6c7cf
|
0ac2dbd8427971ae05a2ba80b625c5c027b6c978
|
/hue.py
|
a3e50a905eb8ff81ede8b96487f90edd6620a9ca
|
[
"MIT"
] |
permissive
|
kalliope-project/kalliope_neuron_hue
|
02b783e92170cc1d23516478df8a5b1770bac759
|
12729702c7c4827f0945c6bf71ef030dee8f4058
|
refs/heads/master
| 2021-01-11T20:11:33.053097 | 2020-07-29T15:33:46 | 2020-07-29T15:33:46 | 79,062,591 | 2 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,534 |
py
|
import ipaddress
import logging
from kalliope.core.NeuronModule import NeuronModule, MissingParameterException, InvalidParameterException
from phue import Bridge
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Hue(NeuronModule):
def __init__(self, **kwargs):
super(Hue, self).__init__(**kwargs)
self.bridge_ip = kwargs.get('bridge_ip', None)
self.groups_name = kwargs.get('groups_name', None)
self.lights_name = kwargs.get('lights_name', None)
self.group_name = kwargs.get('group_name', None)
self.light_name = kwargs.get('light_name', None)
self.state = kwargs.get('state', None)
self.brightness = kwargs.get('brightness', None)
# check if parameters have been provided
if self._is_parameters_ok():
# connect to the bridge
self.b = Bridge(self.bridge_ip)
# get all groups
groups = self.b.get_group()
if self.groups_name is not None:
for group_name in self.groups_name:
# get all lights id from in the target group name
lights_ids = self._get_lights_id_by_from_group_name(groups, group_name)
# switch status of each light in the group depending on the state
logger.debug("Lights id: %s" % lights_ids)
if lights_ids is not None:
for light_id in lights_ids:
self.switch_light(int(light_id))
if self.lights_name is not None:
for light_name in self.lights_name:
# get the id of the target light by its name
light = self.b.get_light(light_name)
if light is not None:
self.switch_light(light["name"])
if self.light_name is not None:
# get the id of the target light by its name
light = self.b.get_light(self.light_name)
if light is not None:
self.switch_light(light["name"])
if self.group_name is not None:
lights_ids = self._get_lights_id_by_from_group_name(groups, self.group_name)
# switch status of each light in the group depending on the state
logger.debug("Lights id: %s" % lights_ids)
if lights_ids is not None:
for light_id in lights_ids:
self.switch_light(int(light_id))
def _is_parameters_ok(self):
# test bridge ip is set
if self.bridge_ip is None:
raise MissingParameterException("Hue neuron needs a bridge_ip")
# test if the ip is a valid ip. The following line will raise an exception
ipaddress.ip_address(self.bridge_ip)
        # the user must set at least one parameter that concerns a group or light name
if self.groups_name is None and self.lights_name is None \
and self.group_name is None and self.light_name is None:
raise MissingParameterException("Hue neuron needs at least one of following parameters: "
"group_name, light_name, groups_name, lights_name")
        # test that groups_name and lights_name are lists
if self.groups_name is not None:
if not isinstance(self.groups_name, list):
raise InvalidParameterException(
"Hue neuron: groups_name must be a list")
if self.lights_name is not None:
if not isinstance(self.lights_name, list):
raise InvalidParameterException(
"Hue neuron: lights_name must be a list")
        # test that group_name and light_name are strings
if self.group_name is not None:
if not isinstance(self.group_name, str):
raise InvalidParameterException(
"Hue neuron: group_name must be a string")
if self.light_name is not None:
if not isinstance(self.light_name, str):
raise InvalidParameterException(
"Hue neuron: light_name must be a string")
# test state ok
if self.state is None:
raise MissingParameterException("Hue neuron needs a state \"on\" or \"off\"")
if self.state not in ["on", "off"]:
raise InvalidParameterException("Hue: state must be \"on\" or \"off\"")
if self.brightness is not None:
r = range(0, 101)
if int(self.brightness) not in r:
raise InvalidParameterException("Hue: brightness must be in range 0:100")
return True
@staticmethod
def _get_lights_id_by_from_group_name(groups, group_name_to_find):
"""
Return a list of light ID of the group by its name
:param groups: list of group from the bridge api
:param group_name_to_find: string group to find in the list
:return: list of lights IDs
"""
lights_id = None
for group in groups:
group_id = str(group)
group_dict = groups[group_id]
if group_dict["name"] == group_name_to_find:
lights_id = group_dict["lights"]
break
return lights_id
@staticmethod
def _get_boolean_from_state(state):
if state == "on":
return True
return False
def switch_light(self, light_identifier):
"""
Call the HUE api to switch the light depending on the desired state
:param light_identifier: ID or name of the light
"""
logger.debug("HUE: Switching light %s to state %s" % (light_identifier, self.state))
boolean_state = self._get_boolean_from_state(self.state)
self.b.set_light(light_identifier, 'on', boolean_state)
if boolean_state and self.brightness is not None:
brightness_number = self.get_brightness_number_from_percent(self.brightness)
logger.debug("HUE: Set brightness to %s" % self.brightness)
self.b.set_light(light_identifier, 'bri', brightness_number)
@staticmethod
def get_brightness_number_from_percent(brightness_percent):
"""
        The phue lib wants a number between 0 and 254, while the neuron asks
        for a percent between 0 and 100, so we need to convert.
        :param brightness_percent: integer between 0 and 100
        :return: integer between 0 and 254
"""
return int(round((254 * int(brightness_percent))/100))
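# Illustrative example: get_brightness_number_from_percent(50) returns 127,
# since int(round(254 * 50 / 100)) == 127.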
|
[
"[email protected]"
] | |
0176a59c63441c166cfeb79c09e228a5f8d8e60a
|
f62fd455e593a7ad203a5c268e23129473d968b6
|
/murano-3.2.0/murano/tests/unit/engine/system/test_workflowclient.py
|
e109fe367da54e038fac1b25d37a1fb6d2067d9d
|
[
"Apache-2.0"
] |
permissive
|
MinbinGong/OpenStack-Ocata
|
5d17bcd47a46d48ff9e71e2055f667836174242f
|
8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3
|
refs/heads/master
| 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 |
Python
|
UTF-8
|
Python
| false | false | 6,229 |
py
|
# Copyright (c) 2016 AT&T
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from mistralclient.api import client as mistralcli
except ImportError as mistral_import_error:
mistralcli = None
import mock
from oslo_config import cfg
from murano.dsl import murano_method
from murano.dsl import murano_type
from murano.engine.system import workflowclient
from murano.tests.functional.common import utils as test_utils
from murano.tests.unit import base
CONF = cfg.CONF
rand_name = test_utils.DeployTestMixin.rand_name
class TestMistralClient(base.MuranoTestCase):
def setUp(self):
super(TestMistralClient, self).setUp()
self.mistral_client_mock = mock.Mock()
self.mistral_client_mock.client = mock.MagicMock(
spec=mistralcli.client)
self._patch_client()
self.mock_class = mock.MagicMock(spec=murano_type.MuranoClass)
self.mock_method = mock.MagicMock(spec=murano_method.MuranoMethod)
self._this = mock.MagicMock()
self._this.owner = None
self.addCleanup(mock.patch.stopall)
def _patch_client(self):
self.mock_client = mock.Mock(return_value=self.mistral_client_mock)
self.client_patcher = mock.patch.object(workflowclient.MistralClient,
'_client', self.mock_client)
self.client_patcher.start()
self.mock_create_client = mock.Mock(
return_value=self.mistral_client_mock)
self.create_client_patcher = mock.patch.object(
workflowclient.MistralClient, '_create_client',
self.mock_create_client)
self.create_client_patcher.start()
def _unpatch_client(self):
self.client_patcher.stop()
self.create_client_patcher.stop()
def test_run_with_execution_success_state(self):
test_output = '{"openstack": "foo", "__execution": "bar", "task":'\
' "baz"}'
mock_execution = mock.MagicMock(
id='123', state='SUCCESS', output=test_output)
self.mock_client.executions.create.return_value = mock_execution
self.mock_client.executions.get.return_value = mock_execution
run_name = rand_name('test')
timeout = 1
mc = workflowclient.MistralClient(self._this, 'regionOne')
output = mc.run(run_name, timeout)
for prop in ['openstack', '__execution', 'task']:
self.assertFalse(hasattr(output, prop))
self.assertEqual({}, output)
def test_run_with_execution_error_state(self):
mock_execution = mock.MagicMock(
id='123', state='ERROR', output="{'test_attr': 'test_val'}")
self.mock_client.executions.create.return_value = mock_execution
self.mock_client.executions.get.return_value = mock_execution
run_name = rand_name('test')
timeout = 1
mc = workflowclient.MistralClient(self._this, 'regionOne')
expected_error_msg = 'Mistral execution completed with ERROR.'\
' Execution id: {0}. Output: {1}'\
.format(mock_execution.id, mock_execution.output)
with self.assertRaisesRegexp(workflowclient.MistralError,
expected_error_msg):
mc.run(run_name, timeout)
def test_run_except_timeout_error(self):
mock_execution = mock.MagicMock(
id='123', state='TEST_STATE', output="{'test_attr': 'test_val'}")
self.mock_client.executions.create.return_value = mock_execution
self.mock_client.executions.get.return_value = mock_execution
run_name = rand_name('test')
timeout = 1
mc = workflowclient.MistralClient(self._this, 'regionOne')
expected_error_msg = 'Mistral run timed out. Execution id: {0}.'\
.format(mock_execution.id)
with self.assertRaisesRegexp(workflowclient.MistralError,
expected_error_msg):
mc.run(run_name, timeout)
def test_run_with_immediate_timeout(self):
mock_execution = mock.MagicMock(
id='123', state='ERROR', output="{'test_attr': 'test_val'}")
self.mock_client.executions.create.return_value = mock_execution
run_name = rand_name('test')
timeout = 0
mc = workflowclient.MistralClient(self._this, 'regionOne')
self.assertEqual(mock_execution.id, mc.run(run_name, timeout))
def test_upload(self):
mc = workflowclient.MistralClient(self._this, 'regionOne')
definition = rand_name('test')
self.assertIsNone(mc.upload(definition))
self.assertTrue(workflowclient.MistralClient.
_client.workflows.create.called)
@mock.patch('murano.engine.system.workflowclient.auth_utils')
def test_client_property(self, _):
self._unpatch_client()
test_mistral_settings = {
'url': rand_name('test_mistral_url'),
'project_id': rand_name('test_project_id'),
'endpoint_type': rand_name('test_endpoint_type'),
'auth_token': rand_name('test_auth_token'),
'user_id': rand_name('test_user_id'),
'insecure': rand_name('test_insecure'),
'cacert': rand_name('test_ca_cert')
}
with mock.patch('murano.engine.system.workflowclient.CONF')\
as mock_conf:
mock_conf.mistral = mock.MagicMock(**test_mistral_settings)
region_name = rand_name('test_region_name')
mc = workflowclient.MistralClient(self._this, region_name)
mistral_client = mc._client
self.assertIsNotNone(mistral_client)
|
[
"[email protected]"
] | |
9afe0a1a5bce3f2082734a16953c0bbd764400f5
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/7_graph/二分图/无权二部图最大匹配问题/lcp4覆盖.py
|
61be67685f06f8a62381a377bfa0e43cbe726510
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null |
UTF-8
|
Python
| false | false | 764 |
py
|
from typing import List
from 匈牙利算法 import Hungarian
DIR4 = [(0, 1), (1, 0), (0, -1), (-1, 0)]
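# Model: colour the grid cells by (row + col) parity; every domino covers one
# even and one odd cell, so the maximum number of dominoes equals the maximum
# bipartite matching between intact even cells and their intact odd neighbours.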
class Solution:
def domino(self, row: int, col: int, broken: List[List[int]]) -> int:
H = Hungarian()
grid = [[0] * col for _ in range(row)]
for r, c in broken:
grid[r][c] = 1
for r in range(row):
for c in range(col):
if grid[r][c] == 1 or (r + c) & 1:
continue
cur = r * col + c
for dr, dc in DIR4:
nr, nc = r + dr, c + dc
if 0 <= nr < row and 0 <= nc < col and grid[nr][nc] == 0:
H.addEdge(cur, nr * col + nc)
return len(H.work())
|
[
"[email protected]"
] | |
8fe1be9af65e823f2f90637c4f4eac341e0fec56
|
70d39e4ee19154a62e8c82467ef75b601e584738
|
/devops/terminate_exact_ec2.py
|
631b42526a94ae6b2852b48145767358a038e527
|
[
"Apache-2.0"
] |
permissive
|
babywyrm/sysadmin
|
6f2724be13ae7e5b9372278856a8c072073beffb
|
2a5f3d29c7529bc917d4ff9be03af30ec23948a5
|
refs/heads/master
| 2023-08-16T03:50:38.717442 | 2023-08-16T03:05:55 | 2023-08-16T03:05:55 | 210,228,940 | 10 | 5 | null | 2023-05-01T23:15:31 | 2019-09-22T23:42:50 |
PowerShell
|
UTF-8
|
Python
| false | false | 320 |
py
|
#!/usr/bin/python3
##
##
##
########################################3
import sys
import boto3
##
##
#### _input_instance_id_to_kill_it__
##
##
ec2 = boto3.resource('ec2')
for instance_id in sys.argv[1:]:
instance = ec2.Instance(instance_id)
response = instance.terminate()
print(response)
###########
##
##
##
|
[
"[email protected]"
] | |
3a37b7b47d59f49963becb153844bc7c178688c7
|
1bd073f585706c31c406bceb81eb400f8ac27c1d
|
/tools/Polygraphy/polygraphy/tools/convert/convert.py
|
aa2311a379ccc4b9e4dc8acfdfe187ee41b5a4f9
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
neevaco/TensorRT
|
7b5e54c6a7cc6d0fc545e47ab7cf6656f23d5e19
|
650a4a6ed29403bec1a55663b48ef41a075d0b3c
|
refs/heads/neeva
| 2023-05-29T19:20:26.431716 | 2022-08-19T23:09:26 | 2022-08-26T19:09:39 | 526,771,012 | 0 | 0 |
Apache-2.0
| 2022-08-19T23:09:27 | 2022-08-19T22:49:25 | null |
UTF-8
|
Python
| false | false | 4,377 |
py
|
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from polygraphy import mod
from polygraphy.logger import G_LOGGER
from polygraphy.tools.args import (
DataLoaderArgs,
ModelArgs,
OnnxFromTfArgs,
OnnxInferShapesArgs,
OnnxLoadArgs,
OnnxSaveArgs,
TfLoadArgs,
TrtConfigArgs,
TrtLoadEngineArgs,
TrtLoadNetworkArgs,
TrtLoadPluginsArgs,
TrtSaveEngineArgs,
)
from polygraphy.tools.base import Tool
onnx_backend = mod.lazy_import("polygraphy.backend.onnx")
trt_backend = mod.lazy_import("polygraphy.backend.trt")
class Convert(Tool):
"""
Convert models to other formats.
"""
def __init__(self):
super().__init__("convert")
def get_subscriptions(self):
return [
ModelArgs(model_opt_required=True),
TfLoadArgs(allow_artifacts=False),
OnnxFromTfArgs(),
OnnxInferShapesArgs(),
OnnxLoadArgs(allow_from_tf=True),
OnnxSaveArgs(output_opt=False),
DataLoaderArgs(), # For int8 calibration
TrtConfigArgs(),
TrtLoadPluginsArgs(),
TrtLoadNetworkArgs(),
TrtLoadEngineArgs(),
TrtSaveEngineArgs(output_opt=False),
]
def add_parser_args(self, parser):
parser.add_argument("-o", "--output", help="Path to save the converted model", required=True)
        parser.add_argument(
            "--convert-to",
            help="The format to attempt to convert the model to. "
            "'onnx-like-trt-network' is EXPERIMENTAL and converts a TensorRT network to a format usable for visualization. "
            "See 'OnnxLikeFromNetwork' for details. ",
            choices=["onnx", "trt", "onnx-like-trt-network"],
        )
onnx_args = self.arg_groups[OnnxLoadArgs].group
onnx_args.add_argument(
"--fp-to-fp16",
help="Convert all floating point tensors in an ONNX model to 16-bit precision. "
"This is *not* needed in order to use TensorRT's fp16 precision, but may be useful for other backends. "
"Requires onnxmltools. ",
action="store_true",
default=None,
)
def run(self, args):
if not args.convert_to:
_, ext = os.path.splitext(args.output)
if ext not in ModelArgs.EXT_MODEL_TYPE_MAPPING:
G_LOGGER.critical(
f"Could not automatically determine model type based on output path: {args.output}\nPlease specify the desired output format with --convert-to"
)
convert_type = ModelArgs.ModelType(ModelArgs.EXT_MODEL_TYPE_MAPPING[ext])
elif args.convert_to == "onnx-like-trt-network":
convert_type = "onnx-like-trt-network"
else:
CONVERT_TO_MODEL_TYPE_MAPPING = {"onnx": "onnx", "trt": "engine"}
convert_type = ModelArgs.ModelType(CONVERT_TO_MODEL_TYPE_MAPPING[args.convert_to])
if convert_type == "onnx-like-trt-network":
onnx_like = trt_backend.onnx_like_from_network(self.arg_groups[TrtLoadNetworkArgs].load_network())
onnx_backend.save_onnx(onnx_like, args.output)
elif convert_type.is_onnx():
model = self.arg_groups[OnnxLoadArgs].load_onnx()
if args.fp_to_fp16:
model = onnx_backend.convert_to_fp16(model)
self.arg_groups[OnnxSaveArgs].save_onnx(model, args.output)
elif convert_type.is_trt():
with self.arg_groups[TrtLoadEngineArgs].load_engine() as engine:
self.arg_groups[TrtSaveEngineArgs].save_engine(engine, args.output)
else:
G_LOGGER.critical(f"Cannot convert to model type: {convert_type}")
|
[
"[email protected]"
] | |
73ff13809669834f4c7b58d502e2a2ec8d0f8a55
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_271/ch153_2020_04_13_20_28_57_339505.py
|
bc782a307772aadce3e74cda6678a4513b48cff7
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 546 |
py
|
def agrupa_por_idade(dicionario1):
    # Map each name to an age bracket and build comma-separated name lists.
    dicionario2 = {}
    for nome, idade in dicionario1.items():
        if idade <= 11:
            chave = 'criança'
        elif 12 <= idade <= 17:
            chave = 'adolescente'
        elif 18 <= idade <= 59:
            chave = 'adulto'
        else:
            chave = 'idoso'
        if chave in dicionario2:
            dicionario2[chave] = '{0}, {1}'.format(dicionario2[chave], nome)
        else:
            dicionario2[chave] = nome
    return dicionario2
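# Illustrative usage (example data is an assumption, not from the exercise):
#   agrupa_por_idade({'Ana': 10, 'Bia': 15, 'Caio': 30, 'Duda': 70})
#   -> {'criança': 'Ana', 'adolescente': 'Bia', 'adulto': 'Caio', 'idoso': 'Duda'}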
|
[
"[email protected]"
] | |
caf5b92bfa055fb5a5ae7e31b1e15c75f38f4e95
|
946d72e6b44e5fdad5b10a9cbca40260d3202413
|
/old/titles.py
|
a16f19c9755a0039d21f53755b06b58c9cd7eb10
|
[] |
no_license
|
pudo/wahlprogramme
|
323527b7271a5a2af53530a8b2e2357b3bf1144e
|
2cf794f10001d183678c3cc1a39b73f4c87035c3
|
refs/heads/master
| 2020-06-06T04:56:09.890984 | 2013-08-09T14:58:39 | 2013-08-09T14:58:39 | 11,253,695 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 749 |
py
|
from common import PARTIES, load_doc, nomenklatura
def all_titles():
for party in PARTIES:
doc = load_doc(party)
for i, h in enumerate(doc.findall('.//*')):
if not h.tag in ['h1', 'h2']:
continue
#titles.upsert({
# 'party': party,
# 'index': i,
# 'element': h.tag,
# 'text': h.text
#}, ['party', 'text'])
print [party, h.tag, h.text]
fp = '[%s:%s] %s' % (party, h.tag, h.text)
try:
entity = nomenklatura.lookup(fp)
print [h.text, entity.name]
except Exception, e:
print e
if __name__ == '__main__':
all_titles()
|
[
"[email protected]"
] | |
9975c0b7a9b56900ff37076e86ba832eeb79c265
|
9477ff0926416001b7c801ff36fbc8e74009e3ae
|
/excel-reformat/excelhandler/drmamma/migrations/0015_auto_20201029_1314.py
|
28260e6b99d3124c802f72495b3e9c0dcc7121cd
|
[] |
no_license
|
Tedhoon/business-automation
|
ab0cbcc405f646bcce7cdb4904073f1b0cd7ca9d
|
17247fe8979060db3c00d3ff6a2ff2918a4c7ea5
|
refs/heads/master
| 2023-03-25T13:48:22.755946 | 2021-03-23T08:43:45 | 2021-03-23T08:43:45 | 285,768,349 | 0 | 0 | null | 2021-03-23T08:43:46 | 2020-08-07T07:37:09 |
Python
|
UTF-8
|
Python
| false | false | 3,137 |
py
|
# Generated by Django 3.0.8 on 2020-10-29 04:14
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('drmamma', '0014_auto_20201029_1115'),
]
operations = [
migrations.CreateModel(
name='NaverFarmTemp',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('store_code', models.TextField(blank=True, null=True, verbose_name='vendor code')),
                ('order_pk', models.TextField(blank=True, null=True, verbose_name='order number')),
                ('product_num', models.TextField(blank=True, null=True, verbose_name='item number')),
                ('product_order_num', models.TextField(blank=True, null=True, verbose_name='per-item order number')),
                ('receiver', models.TextField(blank=True, null=True, verbose_name='recipient')),
                ('address', models.TextField(blank=True, null=True, verbose_name='recipient address (full)')),
                ('post_num', models.TextField(blank=True, null=True, verbose_name='recipient postal code')),
                ('phone_num', models.TextField(blank=True, null=True, verbose_name='recipient mobile phone')),
                ('phone_num2', models.TextField(blank=True, null=True, verbose_name='recipient phone number')),
                ('message', models.TextField(blank=True, null=True, verbose_name='delivery message')),
                ('product_name', models.TextField(blank=True, null=True, verbose_name='ordered product name (incl. options)')),
                ('product_code', models.TextField(blank=True, null=True, verbose_name='internal item code')),
                ('amount', models.TextField(blank=True, null=True, verbose_name='quantity')),
                ('price', models.TextField(blank=True, null=True, verbose_name='product purchase amount (KRW)')),
                ('discount', models.TextField(blank=True, null=True, verbose_name='per-product extra discount amount')),
                ('total_price', models.TextField(blank=True, null=True, verbose_name='total order amount')),
],
options={
                'verbose_name': 'Naver StoreFarm',
                'verbose_name_plural': 'Naver StoreFarm',
},
),
migrations.DeleteModel(
name='Cafe24',
),
migrations.AlterModelOptions(
name='cafe24temp',
options={'verbose_name': 'Cafe24', 'verbose_name_plural': 'Cafe24'},
),
migrations.AlterField(
model_name='deliveryexcel',
name='uploaded_at',
            field=models.DateField(default=datetime.datetime(2020, 10, 29, 13, 14, 48, 524036), verbose_name='upload date'),
),
migrations.AddField(
model_name='naverfarmtemp',
name='made_by_source',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='drmamma.DeliveryExcel', verbose_name='related Excel'),
),
]
|
[
"[email protected]"
] | |
c68dc8b29ed0c767a60726b18d7726f906aed1b2
|
878a3094430bb914717d641a4f4b06574e872518
|
/hm_03_面向对象/hm_11_上架管理4.py
|
4e2cbca0ab8b874e2983fc1dc9d370b7edfc341a
|
[] |
no_license
|
2020668/python2019
|
3f33eea85fdd3f2866d867859d5694abb71effe9
|
f8a98389fa09f95e72914afa4935afc5c68eaccd
|
refs/heads/master
| 2020-06-07T23:36:17.871376 | 2019-08-29T09:45:10 | 2019-08-29T09:45:10 | 193,116,002 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,119 |
py
|
# Empty storage locations
bcdefgh_list = ["1B022","1B023","1B013","1B033","1B043","1B053","1B063","1B073","1B083"]
mnpq_list = ["1M053","1Q023"]
# SKU master data
class SkuData():
def __init__(self,sku,layer_num,layer,box_num,length,width,heigth,weigth):
self.sku = sku
self.layer_num = layer_num
self.layer = layer
self.box_num = box_num
self.length = length
self.width = width
self.heigth = heigth
self.weigth = weigth
# maintain master data entries
sku11059905 = SkuData("11059905",10,6,16,36,28,21,15.5)
sku11039131 = SkuData("11039131",9,3,1,23.5,18,25,5.5)
# main program
class Sku(object):
def __init__(self,sku,total):
self.sku = sku
if self.sku == "11059905":
total_heigth = total/sku11059905.box_num/sku11059905.layer_num*sku11059905.heigth
t_num = sku11059905.layer_num*sku11059905.layer*sku11059905.box_num
t_height = sku11059905.layer*sku11059905.heigth
if self.sku == "11039131":
total_heigth = total/sku11039131.box_num/sku11039131.layer_num*sku11039131.heigth
t_num = sku11039131.layer_num*sku11039131.layer*sku11039131.box_num
t_height = sku11039131.layer*sku11039131.heigth
if total_heigth > 80:
for i in range(len(bcdefgh_list)-1,-1,-1):
                print("Quantity %d, please put into location %s" % (t_num, bcdefgh_list[i]))
bcdefgh_list.pop(i)
# print(bcdefgh_list)
total_heigth -= t_height
total -= t_num
if total_heigth <= 80:
for r in range(len(mnpq_list)-1,-1,-1):
                    print("Quantity %d, please put into location %s" % (total, mnpq_list[r]))
mnpq_list.pop(r)
return
else:
for r in range(len(mnpq_list)-1,-1,-1):
print("数量%d,请入库位%s"% (total,mnpq_list[r]))
mnpq_list.pop(r)
return
input_sku = input("请输入SKU:")
input_total = int(input("请输入数量:"))
Sku = Sku(input_sku,input_total)
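# --- Added usage sketch (hedged): the figures below are derived only from the
# SkuData records above. One tall B-H slot holds layer_num * layer * box_num
# units, e.g. 10 * 6 * 16 = 960 for SKU 11059905, so an order of 2000 yields
# two full lots of 960 plus an 80-unit remainder routed to an M/Q slot.
def full_slot_capacity(data):
    # units that fit in one tall (B-H) slot for this SKU
    return data.layer_num * data.layer * data.box_num
assert full_slot_capacity(sku11059905) == 960
assert full_slot_capacity(sku11039131) == 27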
|
[
"[email protected]"
] | |
f0c5e3397709ca4ce121c0f482289221424aac74
|
9bb83bf5f6c2b5d2da4dda711591ef9987490c66
|
/3DdetectionPrototype/Yolo-Pytorch-nms-updated/dataset/__init__.py
|
d919b198f149734f06916b220e93721264ed2db8
|
[] |
no_license
|
nudlesoup/DeepLearning
|
cb5b7039a9de6098194b56143d1a72a564fed1c9
|
336e415b0353d6e18d106f894a97d8873a55e544
|
refs/heads/master
| 2021-06-25T00:50:40.339768 | 2020-12-23T02:38:58 | 2020-12-23T02:38:58 | 172,002,661 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,122 |
py
|
from .coco import get_dataset as get_COCO
from .pascal import get_dataset as get_VOC
from dataset.augment.bbox import bbox_flip
from dataset.augment.image import makeImgPyramids
import os
import torch
from torch.utils.data import DataLoader
import numpy as np
def get_imgdir(dataset_root, batch_size, net_size):
from torchvision.transforms import transforms
from PIL import Image
class dataset:
def __init__(self, root, transform):
self.imglist = os.listdir(root)
self.root = root
self.transform = transform
def __len__(self):
return len(self.imglist)
def __getitem__(self, item):
path=os.path.join(self.root,self.imglist[item])
img=Image.open(path)
ori_shape=np.array(img.size)
img=self.transform(img)
return path,img,torch.from_numpy(ori_shape.astype(np.float32))
transform=transforms.Compose([
transforms.Resize((net_size,net_size)),
transforms.ToTensor(),
transforms.Normalize(mean=(0,0,0),std=(1,1,1))
])
dataloader=DataLoader(dataset=dataset(dataset_root,transform),shuffle=False,batch_size=batch_size)
return dataloader
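# --- Added usage sketch (hedged): "./demo_images", the batch size, and net size
# are placeholders, not part of the original repo. Each batch from get_imgdir
# yields image paths, resized tensors, and the images' original (w, h) sizes.
def _imgdir_demo():
    loader = get_imgdir("./demo_images", batch_size=4, net_size=416)
    for paths, imgs, ori_shapes in loader:
        print(paths[0], imgs.shape, ori_shapes)
        break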
|
[
"[email protected]"
] | |
fcdc76019f54d93a78ba8f653e0c6cb535f38400
|
5e879f2ae5b5d1c1202bd0905b2c1a4d2de63072
|
/cadnano/document.py
|
8506c64a304830bf1d860a3b5b28aa55f3d56cf1
|
[
"MIT"
] |
permissive
|
n-corpuz/cadnano2.5
|
bac385e103bfd0ffebdffc6ac023c182d5a2d3e1
|
66041865bbe164e14135584b10c5f6b6510d878e
|
refs/heads/master
| 2020-12-28T19:08:32.602263 | 2016-03-24T01:57:26 | 2016-03-24T01:57:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19,668 |
py
|
#!/usr/bin/env python
# encoding: utf-8
from operator import itemgetter
import cadnano.util as util
import cadnano.preferences as prefs
from cadnano.cnproxy import ProxyObject, ProxySignal
from cadnano.cnproxy import UndoStack, UndoCommand
from cadnano.strand import Strand
from cadnano.oligo import Oligo
from cadnano.strandset import StrandSet
from cadnano.virtualhelix import VirtualHelix
from cadnano.part import Part
from cadnano.part import HoneycombPart
from cadnano.part import SquarePart
from cadnano import app
class Document(ProxyObject):
"""
The Document class is the root of the model. It has two main purposes:
1. Serve as the parent all Part objects within the model.
2. Track all sub-model actions on its undoStack.
"""
def __init__(self, parent=None):
super(Document, self).__init__(parent)
self._undostack = UndoStack()
self._parts = []
self._assemblies = []
self._controller = None
self._selected_part = None
# the dictionary maintains what is selected
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._selected_changed_dict = {}
app().documentWasCreatedSignal.emit(self)
# end def
### SIGNALS ###
documentPartAddedSignal = ProxySignal(object,
ProxyObject,
name='documentPartAddedSignal') # doc, part
# dict of tuples of objects using the reference as the key,
# and the value is a tuple with meta data
# in the case of strands the metadata would be which endpoints of selected
# e.g. { objectRef: (value0, value1), ...}
documentSelectedChangedSignal = ProxySignal(dict,
name='documentSelectedChangedSignal') # tuples of items + data
documentSelectionFilterChangedSignal = ProxySignal(list,
name='documentSelectionFilterChangedSignal')
documentViewResetSignal = ProxySignal(ProxyObject,
name='documentViewResetSignal')
documentClearSelectionsSignal = ProxySignal(ProxyObject,
name='documentClearSelectionsSignal')
### SLOTS ###
### ACCESSORS ###
def undoStack(self):
"""
This is the actual undoStack to use for all commands. Any children
needing to perform commands should just ask their parent for the
undoStack, and eventually the request will get here.
"""
return self._undostack
def parts(self):
"""Returns a list of parts associated with the document."""
return self._parts
def assemblies(self):
"""Returns a list of assemblies associated with the document."""
return self._assemblies
### PUBLIC METHODS FOR QUERYING THE MODEL ###
def selectedPart(self):
return self._selected_part
def addToSelection(self, obj, value):
self._selection_dict[obj] = value
self._selected_changed_dict[obj] = value
# end def
def removeFromSelection(self, obj):
if obj in self._selection_dict:
del self._selection_dict[obj]
self._selected_changed_dict[obj] = (False, False)
return True
else:
return False
# end def
def clearSelections(self):
"""
Only clear the dictionary
"""
self._selection_dict = {}
# end def
def addStrandToSelection(self, strand, value):
ss = strand.strandSet()
if ss in self._selection_dict:
self._selection_dict[ss][strand] = value
else:
self._selection_dict[ss] = {strand: value}
self._selected_changed_dict[strand] = value
# end def
def removeStrandFromSelection(self, strand):
ss = strand.strandSet()
if ss in self._selection_dict:
temp = self._selection_dict[ss]
if strand in temp:
del temp[strand]
if len(temp) == 0:
del self._selection_dict[ss]
self._selected_changed_dict[strand] = (False, False)
return True
else:
return False
else:
return False
# end def
def selectionDict(self):
return self._selection_dict
# end def
def selectedOligos(self):
"""
        As long as one endpoint of a strand is in the selection, the oligo
        is considered selected.
"""
s_dict = self._selection_dict
selected_oligos = set()
for ss in s_dict.keys():
for strand in ss:
selected_oligos.add(strand.oligo())
# end for
# end for
return selected_oligos if len(selected_oligos) > 0 else None
#end def
def clearAllSelected(self):
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._selected_changed_dict = {}
self.documentClearSelectionsSignal.emit(self)
# end def
def isModelSelected(self, obj):
return obj in self._selection_dict
# end def
def isModelStrandSelected(self, strand):
ss = strand.strandSet()
if ss in self._selection_dict:
if strand in self._selection_dict[ss]:
return True
else:
return False
else:
return False
# end def
def getSelectedValue(self, obj):
"""
        obj is an object to look up;
        it is pre-vetted to be in the dictionary.
"""
return self._selection_dict[obj]
def getSelectedStrandValue(self, strand):
"""
        strand is an object to look up;
        it is pre-vetted to be in the dictionary.
"""
return self._selection_dict[strand.strandSet()][strand]
# end def
def sortedSelectedStrands(self, strandset):
# outList = self._selection_dict[strandset].keys()
# outList.sort(key=Strand.lowIdx)
out_list = [x for x in self._selection_dict[strandset].items()]
getLowIdx = lambda x: Strand.lowIdx(itemgetter(0)(x))
out_list.sort(key=getLowIdx)
return out_list
# end def
def determineStrandSetBounds(self, selected_strand_list, strandset):
min_low_delta = strandset.partMaxBaseIdx()
min_high_delta = strandset.partMaxBaseIdx() # init the return values
ss_dict = self._selection_dict[strandset]
# get the StrandSet index of the first item in the list
ss_idx = strandset._findIndexOfRangeFor(selected_strand_list[0][0])[2]
ss_list = strandset._strand_list
len_ss_list = len(ss_list)
max_ss_idx = len_ss_list - 1
i = 0
for strand, value in selected_strand_list:
while strand != ss_list[ss_idx]:
                # in case there are gaps due to double xovers
ss_idx += 1
# end while
idxL, idxH = strand.idxs()
if value[0]: # the end is selected
if ss_idx > 0:
low_neighbor = ss_list[ss_idx - 1]
if low_neighbor in ss_dict:
valueN = ss_dict[low_neighbor]
# we only care if the low neighbor is not selected
temp = min_low_delta if valueN[1] \
else idxL - low_neighbor.highIdx() - 1
# end if
else: # not selected
temp = idxL - low_neighbor.highIdx() - 1
# end else
else:
temp = idxL - 0
# end else
if temp < min_low_delta:
min_low_delta = temp
# end if
# check the other end of the strand
if not value[1]:
temp = idxH - idxL - 1
if temp < min_high_delta:
min_high_delta = temp
# end if
if value[1]:
if ss_idx < max_ss_idx:
high_neighbor = ss_list[ss_idx + 1]
if high_neighbor in ss_dict:
valueN = ss_dict[high_neighbor]
# we only care if the low neighbor is not selected
temp = min_high_delta if valueN[0] \
else high_neighbor.lowIdx() - idxH - 1
# end if
else: # not selected
temp = high_neighbor.lowIdx() - idxH - 1
# end else
else:
temp = strandset.partMaxBaseIdx() - idxH
# end else
if temp < min_high_delta:
min_high_delta = temp
# end if
# check the other end of the strand
if not value[0]:
temp = idxH - idxL - 1
if temp < min_low_delta:
min_low_delta = temp
# end if
# increment counter
ss_idx += 1
# end for
return (min_low_delta, min_high_delta)
# end def
def getSelectionBounds(self):
min_low_delta = -1
min_high_delta = -1
for strandset in self._selection_dict.keys():
selected_list = self.sortedSelectedStrands(strandset)
temp_low, temp_high = self.determineStrandSetBounds(
selected_list, strandset)
if temp_low < min_low_delta or min_low_delta < 0:
min_low_delta = temp_low
if temp_high < min_high_delta or min_high_delta < 0:
min_high_delta = temp_high
        # end for
return (min_low_delta, min_high_delta)
# end def
# def operateOnStrandSelection(self, method, arg, both=False):
# pass
# # end def
def deleteSelection(self, use_undostack=True):
"""
Delete selected strands. First iterates through all selected strands
and extracts refs to xovers and strands. Next, calls removeXover
        on xoverlist as part of its own macroed command for isolation
purposes. Finally, calls removeStrand on all strands that were
fully selected (low and high), or had at least one non-xover
endpoint selected.
"""
xoList = []
strand_dict = {}
for strandset_dict in self._selection_dict.values():
for strand, selected in strandset_dict.items():
part = strand.virtualHelix().part()
idxL, idxH = strand.idxs()
strand5p = strand.connection5p()
strand3p = strand.connection3p()
# both ends are selected
strand_dict[strand] = selected[0] and selected[1]
# only look at 3' ends to handle xover deletion
sel3p = selected[0] if idxL == strand.idx3Prime() else selected[1]
if sel3p: # is idx3p selected?
if strand3p: # is there an xover
xoList.append((part, strand, strand3p, use_undostack))
else: # idx3p is a selected endpoint
strand_dict[strand] = True
else:
if not strand5p: # idx5p is a selected endpoint
strand_dict[strand] = True
if use_undostack and xoList:
self.undoStack().beginMacro("Delete xovers")
for part, strand, strand3p, useUndo in xoList:
Part.removeXover(part, strand, strand3p, useUndo)
self.removeStrandFromSelection(strand)
self.removeStrandFromSelection(strand3p)
self._selection_dict = {}
self.documentClearSelectionsSignal.emit(self)
if use_undostack:
if xoList: # end xover macro if it was started
self.undoStack().endMacro()
if True in strand_dict.values():
self.undoStack().beginMacro("Delete selection")
else:
return # nothing left to do
for strand, delete in strand_dict.items():
if delete:
strand.strandSet().removeStrand(strand)
if use_undostack:
self.undoStack().endMacro()
def paintSelection(self, scafColor, stapColor, use_undostack=True):
"""Delete xovers if present. Otherwise delete everything."""
scaf_oligos = {}
stap_oligos = {}
for strandset_dict in self._selection_dict.values():
for strand, value in strandset_dict.items():
if strand.isScaffold():
scaf_oligos[strand.oligo()] = True
else:
stap_oligos[strand.oligo()] = True
if use_undostack:
self.undoStack().beginMacro("Paint strands")
for olg in scaf_oligos.keys():
olg.applyColor(scafColor)
for olg in stap_oligos.keys():
olg.applyColor(stapColor)
if use_undostack:
self.undoStack().endMacro()
def resizeSelection(self, delta, use_undostack=True):
"""
Moves the selected idxs by delta by first iterating over all strands
to calculate new idxs (method will return if snap-to behavior would
create illegal state), then applying a resize command to each strand.
"""
resize_list = []
# calculate new idxs
for strandset_dict in self._selection_dict.values():
for strand, selected in strandset_dict.items():
part = strand.virtualHelix().part()
idxL, idxH = strand.idxs()
newL, newH = strand.idxs()
deltaL = deltaH = delta
# process xovers to get revised delta
if selected[0] and strand.connectionLow():
newL = part.xoverSnapTo(strand, idxL, delta)
                    if newL is None:
return
deltaH = newL-idxL
if selected[1] and strand.connectionHigh():
newH = part.xoverSnapTo(strand, idxH, delta)
                    if newH is None:
return
deltaL = newH-idxH
# process endpoints
if selected[0] and not strand.connectionLow():
newL = idxL + deltaL
if selected[1] and not strand.connectionHigh():
newH = idxH + deltaH
if newL > newH: # check for illegal state
return
resize_list.append((strand, newL, newH))
# end for
# end for
# execute the resize commands
if use_undostack:
self.undoStack().beginMacro("Resize Selection")
for strand, idxL, idxH in resize_list:
Strand.resize(strand, (idxL, idxH), use_undostack)
if use_undostack:
self.undoStack().endMacro()
# end def
def updateSelection(self):
"""
do it this way in the future when we have
a better signaling architecture between views
"""
# self.documentSelectedChangedSignal.emit(self._selected_changed_dict)
"""
For now, individual objects need to emit signals
"""
for obj, value in self._selected_changed_dict.items():
obj.selectedChangedSignal.emit(obj, value)
# end for
self._selected_changed_dict = {}
# for ss in self._selection_dict:
# print self.sortedSelectedStrands(ss)
# end def
def resetViews(self):
# This is a fast way to clear selections and the views.
# We could manually deselect each item from the Dict, but we'll just
        # let them be garbage collected.
# the dictionary maintains what is selected
self._selection_dict = {}
# the added list is what was recently selected or deselected
self._selected_changed_dict = {}
self.documentViewResetSignal.emit(self)
# end def
### PUBLIC METHODS FOR EDITING THE MODEL ###
def addHoneycombPart(self, max_row=prefs.HONEYCOMB_PART_MAXROWS,
max_col=prefs.HONEYCOMB_PART_MAXCOLS,
max_steps=prefs.HONEYCOMB_PART_MAXSTEPS):
"""
Create and store a new DNAPart and instance, and return the instance.
"""
dnapart = None
if len(self._parts) == 0:
dnapart = HoneycombPart(document=self, max_row=max_row,
max_col=max_col, max_steps=max_steps)
self._addPart(dnapart)
return dnapart
def addSquarePart(self, max_row=prefs.SQUARE_PART_MAXROWS,
max_col=prefs.SQUARE_PART_MAXCOLS,
max_steps=prefs.SQUARE_PART_MAXSTEPS):
"""
Create and store a new DNAPart and instance, and return the instance.
"""
dnapart = None
if len(self._parts) == 0:
dnapart = SquarePart(document=self, max_row=max_row,
max_col=max_col, max_steps=max_steps)
self._addPart(dnapart)
return dnapart
def removeAllParts(self):
"""Used to reset the document. Not undoable."""
self.documentClearSelectionsSignal.emit(self)
for part in self._parts:
part.remove(use_undostack=False)
# end def
def removePart(self, part):
self.documentClearSelectionsSignal.emit(self)
self._parts.remove(part)
### PUBLIC SUPPORT METHODS ###
def controller(self):
return self._controller
def setController(self, controller):
"""Called by DocumentController setDocument method."""
self._controller = controller
# end def
def setSelectedPart(self, newPart):
if self._selected_part == newPart:
return
self._selected_part = newPart
# end def
### PRIVATE SUPPORT METHODS ###
def _addPart(self, part, use_undostack=True):
"""Add part to the document via AddPartCommand."""
c = self.AddPartCommand(self, part)
util.execCommandList(
self, [c], desc="Add part", use_undostack=use_undostack)
return c.part()
# end def
### COMMANDS ###
class AddPartCommand(UndoCommand):
"""
Undo ready command for deleting a part.
"""
def __init__(self, document, part):
super(Document.AddPartCommand, self).__init__("add part")
self._doc = document
self._part = part
# end def
def part(self):
return self._part
# end def
def redo(self):
if len(self._doc._parts) == 0:
self._doc._parts.append(self._part)
self._part.setDocument(self._doc)
self._doc.setSelectedPart(self._part)
self._doc.documentPartAddedSignal.emit(self._doc, self._part)
# end def
def undo(self):
self._doc.removePart(self._part)
self._part.setDocument(None)
self._doc.setSelectedPart(None)
self._part.partRemovedSignal.emit(self._part)
# self._doc.documentPartAddedSignal.emit(self._doc, self._part)
# end def
# end class
# end class
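# --- Added usage sketch (hedged): exercises only the API defined above and
# assumes the cadnano app/proxy machinery (app(), UndoStack) has been
# initialized by the host application, as it normally is at startup.
def _document_demo():
    doc = Document()
    part = doc.addSquarePart()     # pushed onto the undo stack via AddPartCommand
    assert doc.parts() == [part]
    doc.undoStack().undo()         # AddPartCommand.undo removes the part again
    assert doc.parts() == []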
|
[
"[email protected]"
] | |
d9d22ee6b4dc770f86c458638636032fe0fcc083
|
ffc563a34204ee65c5a518de07c78310c668c316
|
/opennem/monitors/aemo_intervals.py
|
221c76b1d35e35ce8cbd489b7544c2ef495630db
|
[
"MIT"
] |
permissive
|
MarnieShaw/opennem
|
45924ac132d199751958eade224684d867118145
|
062178a9e64764e2bd89352b223280c8eeff60e4
|
refs/heads/master
| 2023-03-29T07:04:02.435282 | 2021-03-25T12:57:08 | 2021-03-25T12:57:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,277 |
py
|
import logging
from datetime import datetime, timedelta
from opennem.monitors.aemo_wem_live_intervals import (
get_aemo_wem_live_facility_intervals_recent_date,
)
from opennem.notifications.slack import slack_message
from opennem.schema.network import NetworkWEM
from opennem.settings import settings
from opennem.utils.dates import chop_microseconds
logger = logging.getLogger("opennem.monitors.aemo")
def aemo_wem_live_interval() -> bool:
"""
Monitors the delay from the AEMO live scada data on the portal
"""
network = NetworkWEM
now_date = datetime.now().astimezone(network.get_timezone())
live_most_recent = get_aemo_wem_live_facility_intervals_recent_date()
live_delta = chop_microseconds(now_date - live_most_recent)
logger.debug(
"Live time: {}, delay: {}".format(live_most_recent, live_delta)
)
# @TODO move the minutes into settings
if live_delta > timedelta(minutes=90):
slack_message(
"*WARNING*: AEMO Live intervals for WEM on {} curently delayed by {}\n\nAEMO feed most recent: {}".format(
settings.env, live_delta, live_most_recent
)
)
return True
return False
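# --- Added sketch (hedged): a simple polling wrapper around the check above;
# the 5-minute default is an arbitrary placeholder, and in production this
# would more likely be driven by cron or a task scheduler.
def poll_wem_live_interval(interval_seconds: int = 300) -> None:
    import time
    while True:
        aemo_wem_live_interval()
        time.sleep(interval_seconds)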
if __name__ == "__main__":
    is_delayed = aemo_wem_live_interval()
|
[
"[email protected]"
] | |
75f54c77a9ee1903b8b9431bc9f974b241471089
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2022_04_02_preview/aio/operations/_maintenance_configurations_operations.py
|
98bc829d3d02f593cedf079f1a3759a9a9385799
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 |
MIT
| 2019-07-25T22:28:52 | 2019-04-19T20:59:15 |
Python
|
UTF-8
|
Python
| false | false | 19,865 |
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._maintenance_configurations_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_managed_cluster_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MaintenanceConfigurationsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.containerservice.v2022_04_02_preview.aio.ContainerServiceClient`'s
:attr:`maintenance_configurations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_managed_cluster(
self, resource_group_name: str, resource_name: str, **kwargs: Any
) -> AsyncIterable["_models.MaintenanceConfiguration"]:
"""Gets a list of maintenance configurations in the specified managed cluster.
Gets a list of maintenance configurations in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MaintenanceConfiguration or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2022_04_02_preview.models.MaintenanceConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-04-02-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.MaintenanceConfigurationListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_managed_cluster_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_managed_cluster.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("MaintenanceConfigurationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_managed_cluster.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations"} # type: ignore
@distributed_trace_async
async def get(
self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Gets the specified maintenance configuration of a managed cluster.
Gets the specified maintenance configuration of a managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_04_02_preview.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-04-02-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.MaintenanceConfiguration]
request = build_get_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"} # type: ignore
@overload
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
parameters: _models.MaintenanceConfiguration,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Creates or updates a maintenance configuration in the specified managed cluster.
Creates or updates a maintenance configuration in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:param parameters: The maintenance configuration to create or update. Required.
:type parameters:
~azure.mgmt.containerservice.v2022_04_02_preview.models.MaintenanceConfiguration
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_04_02_preview.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Creates or updates a maintenance configuration in the specified managed cluster.
Creates or updates a maintenance configuration in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:param parameters: The maintenance configuration to create or update. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_04_02_preview.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
config_name: str,
parameters: Union[_models.MaintenanceConfiguration, IO],
**kwargs: Any
) -> _models.MaintenanceConfiguration:
"""Creates or updates a maintenance configuration in the specified managed cluster.
Creates or updates a maintenance configuration in the specified managed cluster.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
        :param parameters: The maintenance configuration to create or update. Is either a model type
         or an IO type. Required.
:type parameters:
~azure.mgmt.containerservice.v2022_04_02_preview.models.MaintenanceConfiguration or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MaintenanceConfiguration or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2022_04_02_preview.models.MaintenanceConfiguration
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-04-02-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.MaintenanceConfiguration]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "MaintenanceConfiguration")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("MaintenanceConfiguration", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, resource_name: str, config_name: str, **kwargs: Any
) -> None:
"""Deletes a maintenance configuration.
Deletes a maintenance configuration.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource. Required.
:type resource_name: str
:param config_name: The name of the maintenance configuration. Required.
:type config_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-04-02-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
resource_name=resource_name,
config_name=config_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/maintenanceConfigurations/{configName}"} # type: ignore
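# --- Added usage sketch (hedged): not generated code. Assumes azure-identity is
# installed; subscription, resource group, and cluster names are placeholders.
async def _demo_list_maintenance_configurations(
    subscription_id: str, resource_group: str, cluster_name: str
) -> None:
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.containerservice.v2022_04_02_preview.aio import ContainerServiceClient
    async with DefaultAzureCredential() as credential:
        async with ContainerServiceClient(credential, subscription_id) as client:
            async for config in client.maintenance_configurations.list_by_managed_cluster(
                resource_group, cluster_name
            ):
                print(config.name)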
|
[
"[email protected]"
] | |
6402742e91d560e57aa48ce294e2c9c7563695af
|
39f8535e6b4aaf313278d65c2561c57db1425a44
|
/web/register/migrations/0010_registeredperson_borough_gss.py
|
97bbba8cf8f0192ef65af1880694fbc437324756
|
[] |
no_license
|
DemocracyClub/TakePart.london
|
39c61f58740400597a24bd525eff78939a3f30ed
|
816d427c37ad4f485aa392ff1d376f0f2681746a
|
refs/heads/master
| 2022-02-11T15:01:04.782754 | 2017-11-09T15:10:25 | 2017-11-09T15:10:25 | 60,079,282 | 0 | 2 | null | 2022-01-21T19:27:25 | 2016-05-31T10:12:47 |
Python
|
UTF-8
|
Python
| false | false | 492 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-26 12:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('register', '0009_auto_20160526_1010'),
]
operations = [
migrations.AddField(
model_name='registeredperson',
name='borough_gss',
field=models.CharField(blank=True, db_index=True, max_length=100),
),
]
|
[
"[email protected]"
] | |
b5152f8763fc21f32e7530a4d1b55be7ae98c02a
|
faabe34af6297530617395bcc6811350765da847
|
/platforms/leetcode/PalindromeLinkedList.py
|
bb7cf2be3376bd29d8d2a59cce309f2d4124781c
|
[] |
no_license
|
pqnguyen/CompetitiveProgramming
|
44a542aea299bd553dd022a9e737e087285b8b6d
|
27330e7ff79c4ac883d7e1fcdf2f0d30939c3f78
|
refs/heads/master
| 2021-07-21T12:15:47.366599 | 2021-06-27T14:58:48 | 2021-06-27T14:58:48 | 132,837,831 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,091 |
py
|
# https://leetcode.com/explore/learn/card/linked-list/219/classic-problems/1209/
# Definition for singly-linked list (uncommented here so the file runs
# standalone; on LeetCode the judge supplies this class, and the annotation
# on isPalindrome needs ListNode to exist at class-definition time).
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    def isPalindrome(self, head: ListNode) -> bool:
        if not head: return True
        mid = self.findMid(head)       # head of the second half (middle node skipped for odd lengths)
        lastNode = self.reverse(mid)   # reverse the second half in place
        while lastNode:                # compare the two halves node by node
            if head.val != lastNode.val: return False
            lastNode = lastNode.next
            head = head.next
        return True
    def reverse(self, head):
        # Iteratively reverse the list headed at `head` and return the new head.
        if not head: return None
        prev, next = head, head.next
        while next:
            tmp = next.next
            next.next = prev
            prev = next
            next = tmp
        head.next = None
        return prev
    def findMid(self, head):
        # Slow/fast pointers: slow lands on the middle node (odd length) or the
        # start of the second half (even length). For odd lengths, sever and
        # skip the middle node so both halves compare over equal lengths.
        slow = fast = head
        while fast and fast.next:
            fast = fast.next.next
            slow = slow.next
        if fast:
            tmp = slow.next
            slow.next = None
            slow = tmp
        return slow
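# --- Added test harness (hedged): builds small lists locally to sanity-check
# both even- and odd-length inputs.
def _build(values):
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head
if __name__ == "__main__":
    s = Solution()
    assert s.isPalindrome(_build([1, 2, 2, 1]))
    assert not s.isPalindrome(_build([1, 2, 3]))
    assert s.isPalindrome(_build([7]))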
|
[
"[email protected]"
] | |
ecadf6700b1b850bb101d6a8ca65912bbfd57019
|
49e87fd199287ea7234c9bbfb3ec40447b9ed3d4
|
/app/main/forms.py
|
cdf55969ee17ccd31d37bf6e400ca91820058ac5
|
[
"MIT"
] |
permissive
|
mornicamwende/Pitching-site
|
ae732d3d9ba38f1878b49113a6adc81769d2c1b6
|
04336aa97f2a1c806ad6668f104ff64d27c8d995
|
refs/heads/master
| 2022-12-20T10:48:11.003418 | 2020-09-25T04:59:35 | 2020-09-25T04:59:35 | 296,598,453 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,072 |
py
|
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField, RadioField,SelectField
from wtforms.validators import Required  # deprecated alias of DataRequired in newer WTForms
class CommentsForm(FlaskForm):
comment = TextAreaField('Comment', validators=[Required()])
# vote=RadioField('default field arguments', choices=[('1', 'UpVote'), ('1', 'DownVote')])
submit = SubmitField('SUBMIT')
class PitchForm(FlaskForm):
category_id = SelectField('Select Category', choices=[('1', 'Interview'), ('2', 'Pick Up Lines'), ('3', 'Promotion'),('4','Product')])
content = TextAreaField('make a pitch', validators=[Required()])
submit = SubmitField('Create Pitch')
class UpvoteForm(FlaskForm):
'''
Class to create a wtf form for upvoting a pitch
'''
submit = SubmitField('Upvote')
class DownvoteForm(FlaskForm):
'''
Class to create a wtf form for downvoting a pitch
'''
submit = SubmitField('Downvote')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit')
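# --- Added usage sketch (hedged): flask_wtf forms need an app/request context
# and a CSRF secret key; the throwaway app below exists only to show that the
# forms instantiate, and is not part of this module's real wiring.
def _forms_demo():
    from flask import Flask
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'demo-only'
    with app.test_request_context():
        form = PitchForm()
        print(form.category_id.choices)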
|
[
"[email protected]"
] | |
f2bb49281f0fba43c8434c29776d7e6336e7bdab
|
0010a92176b766f4bdf37c1144fa0f724cfaf564
|
/env/lib/python3.5/site-packages/aliyunsdkecs/request/v20140526/DeleteDeploymentSetRequest.py
|
3bd5bc595a1de6573918990b939e99dd35fd5e19
|
[] |
no_license
|
pengjinfu/My-Admin
|
bc2d8b53da8be0fad60e1d8979bdca3f2c4560d9
|
26206d1def673adb7dfe5c8044c654a0e65320d1
|
refs/heads/master
| 2021-08-30T02:17:57.432743 | 2017-12-15T17:05:05 | 2017-12-15T17:05:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,950 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteDeploymentSetRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DeleteDeploymentSet')
def get_DeploymentSetId(self):
return self.get_query_params().get('DeploymentSetId')
def set_DeploymentSetId(self,DeploymentSetId):
self.add_query_param('DeploymentSetId',DeploymentSetId)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
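# --- Added usage sketch (hedged): credentials, region, and the deployment set
# id are placeholders. AcsClient.do_action_with_exception sends the request
# and returns the raw response body.
def _delete_deployment_set_demo(access_key_id, access_key_secret, region_id, deployment_set_id):
    from aliyunsdkcore.client import AcsClient
    client = AcsClient(access_key_id, access_key_secret, region_id)
    request = DeleteDeploymentSetRequest()
    request.set_DeploymentSetId(deployment_set_id)
    return client.do_action_with_exception(request)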
|
[
"[email protected]"
] | |
671e4b30e3dff24f2ef3c935f670c8ca01e9944a
|
aeb2f0bb7b01f87a1b6c65b88b216bed47025fe5
|
/experiment/model244.py
|
db116321214dca543600494dc929d47788834d3c
|
[] |
no_license
|
kurupical/riiid
|
7e68239cd50243fbb734bf433d60ebd7469cb180
|
7bab580ce03d03873748a6afc91092c11871465f
|
refs/heads/master
| 2023-03-30T04:15:54.109815 | 2021-04-04T01:20:33 | 2021-04-04T01:20:33 | 302,828,112 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 30,472 |
py
|
import numpy as np
import pandas as pd
from typing import Optional, Any
import gc
import random
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from datetime import datetime as dt
import os
import glob
import pickle
import json
from feature_engineering.feature_factory_for_transformer import FeatureFactoryForTransformer
from feature_engineering.feature_factory import \
FeatureFactoryManager, \
DurationPreviousContent, \
ElapsedTimeBinningEncoder, \
UserContentRateEncoder, \
QuestionQuestionTableEncoder2, \
PreviousAnswer2, \
StudyTermEncoder2, \
MeanAggregator, \
ElapsedTimeMeanByContentIdEncoder, \
DurationFeaturePostProcess
from experiment.common import get_logger
import time
from transformers import AdamW, get_linear_schedule_with_warmup
torch.manual_seed(0)
np.random.seed(0)
is_debug = False
is_make_feature_factory = False
load_pickle = True
epochs = 12
device = torch.device("cuda")
wait_time = 0
class SAKTDataset(Dataset):
def __init__(self, group, n_skill, n_part=8, max_seq=100, is_test=False, predict_mode=False):
super(SAKTDataset, self).__init__()
self.max_seq = max_seq
self.n_skill = n_skill
self.samples = group
self.is_test = is_test
self.n_part = n_part
self.predict_mode = predict_mode
self.user_ids = []
for user_id in group.keys():
q = group[user_id][("content_id", "content_type_id")]
if not is_test:
self.user_ids.append([user_id, -1])
else:
is_val = group[user_id]["is_val"]
for i in range(len(q)):
if is_val[i]:
self.user_ids.append([user_id, i+1])
def __len__(self):
return len(self.user_ids)
def __getitem__(self, index):
user_id = self.user_ids[index][0]
end = self.user_ids[index][1]
# =============================
# setting index
# =============================
idx_dict = {
("content_id", "content_type_id"): 0,
"user_answer": 1,
"part": 2,
"prior_question_elapsed_time_bin300": 3,
"duration_previous_content_bin300": 4,
"answered_correctly": 5,
"prior_question_had_explanation": 6,
"rating_diff_content_user_id": 7,
"task_container_id_bin300": 8,
"previous_answer_index_content_id": 9,
"previous_answer_content_id": 10,
"timediff-elapsedtime_bin500": 11
}
num_sequence = len(idx_dict)
item_ary = np.zeros((num_sequence, self.max_seq))
data_length = len(self.samples[user_id][("content_id", "content_type_id")])
if self.is_test:
start = np.max([0, end - self.max_seq])
else:
start = 0
end = data_length
seq_length = end - start
for item_name, idx in idx_dict.items():
item_ary[idx, -seq_length:] = self.samples[user_id][item_name][start:end]
def get_data(key, remove_now=False):
if remove_now:
return item_ary[idx_dict[key], :][:-1]
else:
return item_ary[idx_dict[key], :][1:]
return {
"x": get_data(key="answered_correctly", remove_now=True) + 2, # 1: lecture, 2: not correct, 3: correct
"user_answer": get_data(key="user_answer", remove_now=True) + 1,
"target_id": get_data(key=("content_id", "content_type_id")),
"part": get_data(key="part"),
"elapsed_time": get_data(key="prior_question_elapsed_time_bin300"),
"duration_previous_content": get_data(key="duration_previous_content_bin300"),
"label": get_data(key="answered_correctly"),
"prior_q": get_data(key="prior_question_had_explanation"),
"rate_diff": get_data(key="rating_diff_content_user_id"),
"container_id": get_data(key="task_container_id_bin300"),
"previous_answer_index_content_id": get_data(key="previous_answer_index_content_id"),
"previous_answer_content_id": get_data(key="previous_answer_content_id"),
"timediff-elapsedtime_bin500": get_data(key="timediff-elapsedtime_bin500"),
"prior_content_id": get_data(key=("content_id", "content_type_id"), remove_now=True)
}
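# --- Added smoke-test sketch (hedged): the values below are synthetic and exist
# only to satisfy the key layout SAKTDataset.__getitem__ expects; they are not
# real Riiid data.
def _sakt_dataset_demo():
    seq = 5
    feats = {
        ("content_id", "content_type_id"): np.arange(1, seq + 1),
        "user_answer": np.zeros(seq, dtype=int),
        "part": np.ones(seq, dtype=int),
        "prior_question_elapsed_time_bin300": np.zeros(seq, dtype=int),
        "duration_previous_content_bin300": np.zeros(seq, dtype=int),
        "answered_correctly": np.array([1, 0, 1, 1, 0]),
        "prior_question_had_explanation": np.zeros(seq, dtype=int),
        "rating_diff_content_user_id": np.zeros(seq),
        "task_container_id_bin300": np.zeros(seq, dtype=int),
        "previous_answer_index_content_id": np.zeros(seq, dtype=int),
        "previous_answer_content_id": np.zeros(seq, dtype=int),
        "timediff-elapsedtime_bin500": np.zeros(seq, dtype=int),
    }
    ds = SAKTDataset({0: feats}, n_skill=100, max_seq=10)
    item = ds[0]
    assert item["x"].shape == (9,)      # sequences come back as max_seq - 1
    assert item["label"].shape == (9,)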
class FFN(nn.Module):
def __init__(self, state_size=200):
super(FFN, self).__init__()
self.state_size = state_size
self.lr1 = nn.Linear(state_size, state_size)
self.ln1 = nn.LayerNorm(state_size)
self.relu = nn.ReLU()
self.lr2 = nn.Linear(state_size, state_size)
self.ln2 = nn.LayerNorm(state_size)
def forward(self, x):
x = self.lr1(x)
x = self.ln1(x)
x = self.relu(x)
x = self.lr2(x)
x = self.ln2(x)
return x
def _get_activation_fn(activation):
if activation == "relu":
return nn.ReLU()
elif activation == "gelu":
return nn.GELU()
raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
class LITLayer(nn.Module):
"""
https://arxiv.org/pdf/2012.14164.pdf
"""
def __init__(self, input_dim, embed_dim, dropout, activation):
super(LITLayer, self).__init__()
self.input_dim = input_dim
self.embed_dim = embed_dim
self.activation = activation
self.linear1 = nn.Linear(input_dim, embed_dim)
self.lstm = nn.LSTM(embed_dim, embed_dim)
self.linear2 = nn.Linear(embed_dim, input_dim)
self.norm_lstm = nn.LayerNorm(embed_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.linear1(x)
x = self.activation(x)
x = self.dropout(x)
lstm, _ = self.lstm(x)
x = self.norm_lstm(lstm) + x
x = self.dropout(x)
x = self.linear2(x)
return x
class LITTransformerEncoderLayer(nn.Module):
r"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
Examples::
>>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward=256, dropout=0.1, activation="relu"):
super(LITTransformerEncoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.lit_layer = LITLayer(input_dim=d_model, embed_dim=dim_feedforward, dropout=dropout, activation=self.activation)
def __setstate__(self, state):
if 'activation' not in state:
            state['activation'] = nn.ReLU()  # an instance, not the class, so it is callable on tensors
super(LITTransformerEncoderLayer, self).__setstate__(state)
def forward(self, src: torch.Tensor, src_mask: Optional[torch.Tensor] = None, src_key_padding_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
r"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
src2 = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.lit_layer(src)
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def future_mask(seq_length):
future_mask = np.triu(np.ones((seq_length, seq_length)), k=1).astype('bool')
return torch.from_numpy(future_mask)
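# --- Added illustration (hedged): for seq_length=3 the mask is strictly upper
# triangular, so position i may not attend to positions j > i.
assert future_mask(3).tolist() == [
    [False, True, True],
    [False, False, True],
    [False, False, False],
]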
class CatEmbedding(nn.Module):
def __init__(self, embed_dim):
super(CatEmbedding, self).__init__()
self.embed_dim = embed_dim
self.ln1 = nn.LayerNorm(embed_dim)
self.gru = nn.GRU(input_size=embed_dim, hidden_size=embed_dim // 2)
self.ln2 = nn.LayerNorm(embed_dim // 2)
def forward(self, x):
x = self.ln1(x)
x, _ = self.gru(x)
x = self.ln2(x)
return x
class ContEmbedding(nn.Module):
def __init__(self, input_dim, embed_dim, seq_len):
super(ContEmbedding, self).__init__()
self.embed_dim = embed_dim
self.bn = nn.BatchNorm1d(seq_len-1)
self.gru = nn.GRU(input_size=input_dim, hidden_size=embed_dim)
self.ln2 = nn.LayerNorm(embed_dim)
def forward(self, x):
x = self.bn(x)
x, _ = self.gru(x)
x = self.ln2(x)
return x
class SAKTModel(nn.Module):
def __init__(self, n_skill, max_seq=100, embed_dim=128, num_heads=8, dropout=0.2,
cont_emb=None):
super(SAKTModel, self).__init__()
self.n_skill = n_skill
self.embed_dim_cat = embed_dim
embed_dim_small_cat = 32
embed_dim_middle_cat = 32
embed_dim_cat_all = embed_dim_small_cat*5 + embed_dim_middle_cat*5 + embed_dim
embed_dim_all = embed_dim_cat_all + cont_emb
self.embedding = nn.Embedding(4, embed_dim_small_cat)
self.user_answer_embedding = nn.Embedding(6, self.embed_dim_cat)
self.prior_question_had_explanation_embedding = nn.Embedding(4, embed_dim_small_cat)
self.e_embedding = nn.Embedding(n_skill + 1, self.embed_dim_cat)
self.part_embedding = nn.Embedding(8, embed_dim_small_cat)
self.elapsed_time_embedding = nn.Embedding(302, embed_dim_middle_cat)
self.duration_previous_content_embedding = nn.Embedding(302, embed_dim_middle_cat)
self.container_embedding = nn.Embedding(302, embed_dim_middle_cat)
self.prev_ans_idx_embedding = nn.Embedding(302, embed_dim_middle_cat)
self.prev_ans_content_id_embedding = nn.Embedding(4, embed_dim_small_cat)
self.timediff_elapsedtime_embedding = nn.Embedding(502, embed_dim_middle_cat)
encoder_layer = LITTransformerEncoderLayer(d_model=embed_dim_all, nhead=num_heads, dropout=dropout)
self.transformer_enc = nn.TransformerEncoder(encoder_layer=encoder_layer, num_layers=4)
self.gru = nn.GRU(input_size=embed_dim_all, hidden_size=embed_dim_all)
self.continuous_embedding = ContEmbedding(input_dim=1, embed_dim=cont_emb, seq_len=max_seq)
self.prior_content_embedding = nn.Sequential(
nn.Linear(self.embed_dim_cat, embed_dim_small_cat),
nn.LayerNorm(embed_dim_small_cat)
)
self.cat_embedding = nn.Sequential(
nn.Linear(embed_dim_cat_all, embed_dim_cat_all),
nn.LayerNorm(embed_dim_cat_all)
)
self.layer_normal = nn.LayerNorm(embed_dim_all)
self.ffn = FFN(embed_dim_all)
self.dropout = nn.Dropout(dropout/2)
self.pred = nn.Linear(embed_dim_all, 1)
def forward(self, item, device):
x = item["x"].to(device).long()
question_ids = item["target_id"].to(device).long()
parts = item["part"].to(device).long()
label = item["label"].to(device).float()
elapsed_time = item["elapsed_time"].to(device).long()
duration_previous_content = item["duration_previous_content"].to(device).long()
prior_q = item["prior_q"].to(device).long()
user_answer = item["user_answer"].to(device).long()
rate_diff = item["rate_diff"].to(device).float()
container_id = item["container_id"].to(device).long()
prev_ans_idx = item["previous_answer_index_content_id"].to(device).long()
prior_content_id_ans_correctly = item["previous_answer_content_id"].to(device).long()
prior_content_id = item["prior_content_id"].to(device).long()
timediff_elapsedtime = item["timediff-elapsedtime_bin500"].to(device).long()
att_mask = future_mask(x.size(1)).to(device)
e = self.e_embedding(question_ids)
p = self.part_embedding(parts)
prior_q_emb = self.prior_question_had_explanation_embedding(prior_q)
user_answer_emb = self.user_answer_embedding(user_answer)
prior_content_id_emb = self.e_embedding(prior_content_id)
prior_content_user_answer_emb = self.prior_content_embedding(user_answer_emb + prior_content_id_emb)
timediff_elapsedtime_emb = self.timediff_elapsedtime_embedding(timediff_elapsedtime)
# decoder
x = self.embedding(x)
el_time_emb = self.elapsed_time_embedding(elapsed_time)
dur_emb = self.duration_previous_content_embedding(duration_previous_content)
container_emb = self.container_embedding(container_id)
prev_ans_idx_emb = self.prev_ans_idx_embedding(prev_ans_idx)
prev_ans_content_id_emb = self.prev_ans_content_id_embedding(prior_content_id_ans_correctly)
x = torch.cat([x, el_time_emb, dur_emb, e, p, prior_q_emb, container_emb,
prev_ans_idx_emb, prev_ans_content_id_emb,
prior_content_user_answer_emb,
timediff_elapsedtime_emb], dim=2)
cont = rate_diff
cont_emb = self.continuous_embedding(cont.view(x.size(0), x.size(1), -1))
x = self.cat_embedding(x)
x = torch.cat([x, cont_emb], dim=2)
x = x.permute(1, 0, 2) # x: [bs, s_len, embed] => [s_len, bs, embed]
att_dec = self.transformer_enc(x,
mask=att_mask)
att_dec, _ = self.gru(att_dec)
att_dec = att_dec.permute(1, 0, 2) # att_output: [s_len, bs, embed] => [bs, s_len, embed]
x = self.layer_normal(att_dec)
x = self.ffn(x) + att_dec
x = self.dropout(x)
x = self.pred(x)
return x.squeeze(-1)
def train_epoch(model, train_iterator, val_iterator, optim, criterion, scheduler, epoch, device="cuda"):
model.train()
train_loss = []
num_corrects = 0
num_total = 0
labels = []
outs = []
tbar = tqdm(train_iterator)
for item in tbar:
optim.zero_grad()
label = item["label"].to(device).float()
output = model(item, device)
target_idx = (label.view(-1) >= 0).nonzero()
loss = criterion(output.view(-1)[target_idx], label.view(-1)[target_idx])
loss.backward()
optim.step()
scheduler.step()
train_loss.append(loss.item())
output = output[:, -1]
label = label[:, -1]
target_idx = (label.view(-1) >= 0).nonzero()
pred = (torch.sigmoid(output) >= 0.5).long()
num_corrects += (pred.view(-1)[target_idx] == label.view(-1)[target_idx]).sum().item()
num_total += len(label)
labels.extend(label.view(-1)[target_idx].data.cpu().numpy())
outs.extend(output.view(-1)[target_idx].data.cpu().numpy())
tbar.set_description('loss - {:.4f}'.format(loss))
acc = num_corrects / num_total
auc = roc_auc_score(labels, outs)
loss = np.mean(train_loss)
preds = []
labels = []
model.eval()
i = 0
with torch.no_grad():
for item in tqdm(val_iterator):
label = item["label"].to(device).float()
output = model(item, device)
preds.extend(torch.nn.Sigmoid()(output[:, -1]).view(-1).data.cpu().numpy().tolist())
labels.extend(label[:, -1].view(-1).data.cpu().numpy())
i += 1
if i > 100 and epoch < 8:
break
auc_val = roc_auc_score(labels, preds)
return loss, acc, auc, auc_val
def main(params: dict,
output_dir: str):
import mlflow
print("start params={}".format(params))
model_id = "train_0"
logger = get_logger()
# df = pd.read_pickle("../input/riiid-test-answer-prediction/train_merged.pickle")
df = pd.read_pickle("../input/riiid-test-answer-prediction/split10/train_0.pickle").sort_values(["user_id", "timestamp"]).reset_index(drop=True)
if is_debug:
df = df.head(30000)
df["prior_question_had_explanation"] = df["prior_question_had_explanation"].fillna(-1)
df["answered_correctly"] = df["answered_correctly"].replace(-1, np.nan)
column_config = {
("content_id", "content_type_id"): {"type": "category"},
"user_answer": {"type": "leakage_feature"},
"answered_correctly": {"type": "leakage_feature"},
"part": {"type": "category"},
"prior_question_elapsed_time_bin300": {"type": "category"},
"duration_previous_content_bin300": {"type": "category"},
"prior_question_had_explanation": {"type": "category"},
"rating_diff_content_user_id": {"type": "numeric"},
"task_container_id_bin300": {"type": "category"},
"previous_answer_index_content_id": {"type": "category"},
"previous_answer_content_id": {"type": "category"},
"timediff-elapsedtime_bin500": {"type": "category"}
}
if not load_pickle or is_debug:
feature_factory_dict = {"user_id": {}}
feature_factory_dict["user_id"]["DurationPreviousContent"] = DurationPreviousContent(is_partial_fit=True)
feature_factory_dict["user_id"]["ElapsedTimeBinningEncoder"] = ElapsedTimeBinningEncoder()
feature_factory_dict["user_id"]["UserContentRateEncoder"] = UserContentRateEncoder(rate_func="elo",
column="user_id")
feature_factory_dict["user_id"]["PreviousAnswer2"] = PreviousAnswer2(groupby="user_id",
column="content_id",
is_debug=is_debug,
model_id=model_id,
n=300)
feature_factory_dict["user_id"]["StudyTermEncoder2"] = StudyTermEncoder2(is_partial_fit=True)
feature_factory_dict["user_id"][f"MeanAggregatorStudyTimebyUserId"] = MeanAggregator(column="user_id",
agg_column="study_time",
remove_now=False)
feature_factory_dict["user_id"]["ElapsedTimeMeanByContentIdEncoder"] = ElapsedTimeMeanByContentIdEncoder()
feature_factory_dict["post"] = {
"DurationFeaturePostProcess": DurationFeaturePostProcess()
}
feature_factory_manager = FeatureFactoryManager(feature_factory_dict=feature_factory_dict,
logger=logger,
split_num=1,
model_id=model_id,
load_feature=not is_debug,
save_feature=not is_debug)
print("all_predict")
df = feature_factory_manager.all_predict(df)
        def f(x):
            # Bin timediff-elapsedtime from milliseconds into 1-second buckets,
            # clipped to [-100, 400] (~500 bins, matching the bin500 feature name).
            x = x // 1000
            if x < -100:
                return -100
            if x > 400:
                return 400
            return x
df["task_container_id_bin300"] = [x if x < 300 else 300 for x in df["task_container_id"]]
df["timediff-elapsedtime_bin500"] = [f(x) for x in df["timediff-elapsedtime"].values]
df = df[["user_id", "content_id", "content_type_id", "part", "user_answer", "answered_correctly",
"prior_question_elapsed_time_bin300", "duration_previous_content_bin300",
"prior_question_had_explanation", "rating_diff_content_user_id", "task_container_id_bin300",
"previous_answer_index_content_id", "previous_answer_content_id", "row_id",
"timediff-elapsedtime_bin500"]]
print(df.head(10))
print("data preprocess")
ff_for_transformer = FeatureFactoryForTransformer(column_config=column_config,
dict_path="../feature_engineering/",
sequence_length=params["max_seq"],
logger=logger)
ff_for_transformer.make_dict(df=df)
n_skill = len(ff_for_transformer.embbed_dict[("content_id", "content_type_id")])
if not load_pickle or is_debug:
df_val_row = pd.read_feather("../../riiid_takoi/notebook/fe/validation_row_id.feather").head(len(df))
if is_debug:
df_val_row = df_val_row.head(3000)
df_val_row["is_val"] = 1
df = pd.merge(df, df_val_row, how="left", on="row_id")
df["is_val"] = df["is_val"].fillna(0)
print(df["is_val"].value_counts())
        w_df = df[df["is_val"] == 0]
        # Chunk each user's history into windows of max_seq (aligned so the most
        # recent rows fill a complete window), then give every chunk its own id.
        w_df["group"] = (w_df.groupby("user_id")["user_id"].transform("count") - w_df.groupby("user_id").cumcount()) // params["max_seq"]
        w_df["user_id"] = w_df["user_id"].astype(str) + "_" + w_df["group"].astype(str)
group = ff_for_transformer.all_predict(w_df)
dataset_train = SAKTDataset(group,
n_skill=n_skill,
max_seq=params["max_seq"])
del w_df
gc.collect()
ff_for_transformer = FeatureFactoryForTransformer(column_config=column_config,
dict_path="../feature_engineering/",
sequence_length=params["max_seq"],
logger=logger)
if not load_pickle or is_debug:
group = ff_for_transformer.all_predict(df[df["content_type_id"] == 0])
dataset_val = SAKTDataset(group,
is_test=True,
n_skill=n_skill,
max_seq=params["max_seq"])
os.makedirs("../input/feature_engineering/model218", exist_ok=True)
if not is_debug and not load_pickle:
with open(f"../input/feature_engineering/model218/train.pickle", "wb") as f:
pickle.dump(dataset_train, f)
with open(f"../input/feature_engineering/model218/val.pickle", "wb") as f:
pickle.dump(dataset_val, f)
if not is_debug and load_pickle:
with open(f"../input/feature_engineering/model218/train.pickle", "rb") as f:
dataset_train = pickle.load(f)
with open(f"../input/feature_engineering/model218/val.pickle", "rb") as f:
dataset_val = pickle.load(f)
print("loaded!")
dataloader_train = DataLoader(dataset_train, batch_size=params["batch_size"], shuffle=True)
dataloader_val = DataLoader(dataset_val, batch_size=params["batch_size"], shuffle=False)
model = SAKTModel(n_skill, embed_dim=params["embed_dim"], max_seq=params["max_seq"], dropout=dropout,
cont_emb=params["cont_emb"])
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=params["lr"],
weight_decay=0.01,
)
num_train_optimization_steps = int(len(dataloader_train) * 20)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=params["num_warmup_steps"],
num_training_steps=num_train_optimization_steps)
criterion = nn.BCEWithLogitsLoss()
model.to(device)
criterion.to(device)
for epoch in range(epochs):
loss, acc, auc, auc_val = train_epoch(model, dataloader_train, dataloader_val, optimizer, criterion, scheduler,
epoch, device)
print("epoch - {} train_loss - {:.3f} auc - {:.4f} auc-val: {:.4f}".format(epoch, loss, auc, auc_val))
preds = []
labels = []
with torch.no_grad():
for item in tqdm(dataloader_val):
label = item["label"].to(device).float()
output = model(item, device)
preds.extend(torch.nn.Sigmoid()(output[:, -1]).view(-1).data.cpu().numpy().tolist())
labels.extend(label[:, -1].view(-1).data.cpu().numpy().tolist())
auc_transformer = roc_auc_score(labels, preds)
print("single transformer: {:.4f}".format(auc_transformer))
df_oof = pd.DataFrame()
# df_oof["row_id"] = df.loc[val_idx].index
print(len(dataloader_val))
print(len(preds))
df_oof["predict"] = preds
df_oof["target"] = labels
df_oof.to_csv(f"{output_dir}/transformers1.csv", index=False)
"""
df_oof2 = pd.read_csv("../output/ex_237/20201213110353/oof_train_0_lgbm.csv")
df_oof2.columns = ["row_id", "predict_lgbm", "target"]
df_oof2 = pd.merge(df_oof, df_oof2, how="inner")
auc_lgbm = roc_auc_score(df_oof2["target"].values, df_oof2["predict_lgbm"].values)
print("lgbm: {:.4f}".format(auc_lgbm))
print("ensemble")
max_auc = 0
max_nn_ratio = 0
for r in np.arange(0, 1.05, 0.05):
auc = roc_auc_score(df_oof2["target"].values, df_oof2["predict_lgbm"].values*(1-r) + df_oof2["predict"].values*r)
print("[nn_ratio: {:.2f}] AUC: {:.4f}".format(r, auc))
if max_auc < auc:
max_auc = auc
max_nn_ratio = r
print(len(df_oof2))
"""
if not is_debug:
mlflow.start_run(experiment_id=10,
run_name=os.path.basename(__file__))
for key, value in params.items():
mlflow.log_param(key, value)
mlflow.log_metric("auc_val", auc_transformer)
mlflow.end_run()
torch.save(model.state_dict(), f"{output_dir}/transformers.pth")
del model
torch.cuda.empty_cache()
with open(f"{output_dir}/transformer_param.json", "w") as f:
json.dump(params, f)
if is_make_feature_factory:
# feature factory
feature_factory_dict = {"user_id": {}}
feature_factory_dict["user_id"]["DurationPreviousContent"] = DurationPreviousContent(is_partial_fit=True)
feature_factory_dict["user_id"]["ElapsedTimeBinningEncoder"] = ElapsedTimeBinningEncoder()
feature_factory_manager = FeatureFactoryManager(feature_factory_dict=feature_factory_dict,
logger=logger,
split_num=1,
model_id="all",
load_feature=not is_debug,
save_feature=not is_debug)
ff_for_transformer = FeatureFactoryForTransformer(column_config=column_config,
dict_path="../feature_engineering/",
sequence_length=params["max_seq"],
logger=logger)
df = pd.read_pickle("../input/riiid-test-answer-prediction/train_merged.pickle")
if is_debug:
df = df.head(10000)
df = df.sort_values(["user_id", "timestamp"]).reset_index(drop=True)
feature_factory_manager.fit(df)
df = feature_factory_manager.all_predict(df)
for dicts in feature_factory_manager.feature_factory_dict.values():
for factory in dicts.values():
factory.logger = None
feature_factory_manager.logger = None
with open(f"{output_dir}/feature_factory_manager.pickle", "wb") as f:
pickle.dump(feature_factory_manager, f)
ff_for_transformer.fit(df)
ff_for_transformer.logger = None
with open(f"{output_dir}/feature_factory_manager_for_transformer.pickle", "wb") as f:
pickle.dump(ff_for_transformer, f)
if __name__ == "__main__":
if not is_debug:
for _ in tqdm(range(wait_time)):
time.sleep(1)
output_dir = f"../output/{os.path.basename(__file__).replace('.py', '')}/{dt.now().strftime('%Y%m%d%H%M%S')}/"
os.makedirs(output_dir, exist_ok=True)
for cont_emb in [8]:
for cat_emb in [256]:
dropout = 0.5
lr = 0.9e-3
if is_debug:
batch_size = 8
else:
batch_size = 128
params = {"embed_dim": cat_emb,
"cont_emb": cont_emb,
"max_seq": 100,
"batch_size": batch_size,
"num_warmup_steps": 3000,
"lr": lr,
"dropout": dropout}
main(params, output_dir=output_dir)
|
[
"[email protected]"
] | |
539ae6e0ecf887e6d1d29a89a317d920bcd8899a
|
c6f946097032432c787a4e3f2b1c7839a5966bfa
|
/problem 0120.py
|
0e0fc987f8d8b2772162ed041c676b86767c43ef
|
[] |
no_license
|
a100kpm/daily_training
|
70a3bfdc1a773025bc03dad64310f7ad9f58eb22
|
dc80f1708cba6f46a51d2e385bc16613acb5e710
|
refs/heads/master
| 2020-05-17T21:20:21.067730 | 2019-05-21T16:41:52 | 2019-05-21T16:41:52 | 183,969,271 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 672 |
py
|
'''
Good morning! Here's your coding interview problem for today.
This problem was asked by Microsoft.
Implement the singleton pattern with a twist.
First, instead of storing one instance, store two instances.
And in every even call of getInstance(), return the first instance and
in every odd call of getInstance(), return the second instance.
'''
class Singleton:
    def __init__(self, val1, val2):
        self.val1 = val1
        self.val2 = val2
        self.compteur = 1  # call counter, alternates between 1 and 2

    def getInstance(self):
        # Alternate between the two stored instances on successive calls.
        if self.compteur == 1:
            self.compteur = 2
            return self.val1
        else:
            self.compteur = 1
            return self.val2
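# A minimal usage sketch (not part of the original solution): successive
# getInstance() calls alternate between the two stored values.
if __name__ == "__main__":
    s = Singleton("first", "second")
    print(s.getInstance())  # first
    print(s.getInstance())  # second
    print(s.getInstance())  # first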
|
[
"[email protected]"
] | |
c146b0e1db17224998ba1eda4eafc3f9d27dd06b
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/EightTeV/RSGraviton/RSGravitonToWW_kMpl04_M_4000_TuneZ2star_8TeV_pythia6_cff.py
|
227007a6d47128f5bc79ef6b8f9004b7add104ff
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 |
Python
|
UTF-8
|
Python
| false | false | 894 |
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(1),
comEnergy = cms.double(8000.0),
crossSection = cms.untracked.double(1.137e-4),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
        processParameters = cms.vstring(
            'MSEL = 0',            # turn off default processes; use selected MSUBs only
            'MSUB(391) = 1',       # f fbar -> G* (RS graviton production)
            'MSUB(392) = 1',       # g g -> G*
            'PMAS(347,1) = 4000',  # graviton mass [GeV]
            'PARP(50) = 2.16',     # dimensionless graviton coupling, corresponds to k/MPl = 0.4
            '5000039:ALLOFF',      # disable all G* decay channels ...
            '5000039:ONIFANY 24',  # ... then re-enable decays involving W bosons (PDG id 24)
        ),
parameterSets = cms.vstring(
'pythiaUESettings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"[email protected]"
] | |
ab01bcb3ab68c2fc29a7e2bd4b6c8a4e7377d5c9
|
0d2503015d98cdb3fc1e4cda3a7c2053658e4e79
|
/virtual/bin/flask
|
10fa3f3435b188049bea61fba98adf01742219de
|
[
"MIT"
] |
permissive
|
juru-juliette/BlogApp
|
fe2e2f96fd7c676a3f025176912b103c9c3ab085
|
146a5d1bea24f872d598be5181ad79f309887b93
|
refs/heads/master
| 2022-09-29T11:12:11.148345 | 2019-10-03T10:22:57 | 2019-10-03T10:22:57 | 211,067,370 | 0 | 0 | null | 2021-03-20T01:44:34 | 2019-09-26T10:54:44 |
Python
|
UTF-8
|
Python
| false | false | 250 |
#!/home/wecode/Documents/flask/BlogApp/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
7418efa19b2b99b91268d1c1c3eaecb2564be978
|
069c90561a55dbb050102914ba92a786137f3dd4
|
/setup.py
|
0d4d001769f4749905acc1374f364ba1baf66cff
|
[] |
no_license
|
reorx/yaml2pac
|
a9075d8d706e26b4629e8fb98398bf0a5321c758
|
78a42e9bb5f2f27bdef5a3236cf242d3828ba5a1
|
refs/heads/master
| 2021-01-10T05:06:10.319371 | 2017-04-30T13:07:41 | 2017-04-30T13:07:41 | 48,587,654 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,168 |
py
|
#!/usr/bin/env python
# coding=utf-8
from setuptools import setup
# Use semantic versioning: MAJOR.MINOR.PATCH
version = '0.1.0'
def get_requires():
try:
with open('requirements.txt', 'r') as f:
requires = [i for i in map(lambda x: x.strip(), f.readlines()) if i]
return requires
except IOError:
return []
def get_long_description():
try:
with open('README.md', 'r') as f:
return f.read()
except IOError:
return ''
setup(
# license='License :: OSI Approved :: MIT License',
name='yaml2pac',
version=version,
author='reorx',
author_email='[email protected]',
description='Generate decent pac file from a set of yaml rules',
url='https://github.com/reorx/yaml2pac',
long_description=get_long_description(),
packages=['yaml2pac'],
# Or use (make sure find_packages is imported from setuptools):
# packages=find_packages()
install_requires=get_requires(),
package_data={
'yaml2pac': ['template.pac']
},
entry_points={
'console_scripts': [
'yaml2pac = yaml2pac.__main__:main'
]
}
)
|
[
"[email protected]"
] | |
a0e3e218a5b7ec2bf7ffd20d00fc6f0888cf6ea0
|
3c6fc92c8de309cd287e6f1b1b6e0af1e1aaf5c9
|
/fix_date.py
|
7e72e5bba097726d748338a2f8b498060b29976c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"CC-PDDC"
] |
permissive
|
caltechlibrary/caltechdata_migrate
|
37e2b0fff3814eb69e413796a2ef8513959ebfda
|
839feab326eb0b412d740db64e96d7ad0048c295
|
refs/heads/master
| 2021-10-27T08:38:30.755678 | 2021-10-20T19:06:28 | 2021-10-20T19:06:28 | 106,311,559 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 961 |
py
|
from caltechdata_api import get_metadata,caltechdata_edit,decustomize_schema
import requests
import os
idvs = [1163,1164,1165,1166,1167,1168,1169]
# Get the access token for TIND, set as an environment variable with `source token.bash`
token = os.environ['TINDTOK']
metadata = {}
for idv in idvs:
api_url = "https://data.caltech.edu/api/record/"
r = requests.get(api_url+str(idv))
r_data = r.json()
    if 'message' in r_data:
        raise AssertionError('id ' + str(idv) + ' expected http status 200, got '
                             + str(r_data.get('status')) + ' ' + str(r_data['message']))
    if 'metadata' not in r_data:
        raise AssertionError('expected a metadata property in response, got ' + str(r_data))
metadata = r_data['metadata']
for d in metadata['relevantDates']:
if d['relevantDateType'] == 'created':
d['relevantDateType'] = 'Created'
metadata = decustomize_schema(metadata)
response = caltechdata_edit(token,idv,metadata,production=True)
print(response)
|
[
"[email protected]"
] | |
34e6fc1924b4bf98002f93525d9a60d3efac008f
|
fb8cbebdf034b2f478943752d5443afc82c6eef5
|
/tuirer/venv/lib/python3.6/site-packages/IPython/core/tests/test_paths.py
|
a7dc1fddc233e5b268dcea9533b91a8ace9c0cd8
|
[] |
no_license
|
fariasjr/CitiTuirer
|
f64e0ec93ef088f8140bb0961d2ad4ed3b59448a
|
deb3f7a9c2d45b8a7f54639037f097b99abdac11
|
refs/heads/master
| 2020-03-24T05:10:36.261050 | 2018-08-01T20:24:30 | 2018-08-01T20:24:30 | 142,477,521 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,394 |
py
|
import errno
import os
import shutil
import sys
import tempfile
import warnings
from unittest.mock import patch
from IPython import paths
from IPython.testing.decorators import skip_win32
from IPython.utils.tempdir import TemporaryDirectory
import nose.tools as nt
from testpath import assert_isdir, assert_isfile, modified_env
TMP_TEST_DIR = os.path.realpath(tempfile.mkdtemp())
HOME_TEST_DIR = os.path.join(TMP_TEST_DIR, "home_test_dir")
XDG_TEST_DIR = os.path.join(HOME_TEST_DIR, "xdg_test_dir")
XDG_CACHE_DIR = os.path.join(HOME_TEST_DIR, "xdg_cache_dir")
IP_TEST_DIR = os.path.join(HOME_TEST_DIR,'.ipython')
def setup():
"""Setup testenvironment for the module:
- Adds dummy home dir tree
"""
# Do not mask exceptions here. In particular, catching WindowsError is a
# problem because that exception is only defined on Windows...
os.makedirs(IP_TEST_DIR)
os.makedirs(os.path.join(XDG_TEST_DIR, 'ipython'))
os.makedirs(os.path.join(XDG_CACHE_DIR, 'ipython'))
def teardown():
"""Teardown testenvironment for the module:
- Remove dummy home dir tree
"""
# Note: we remove the parent test dir, which is the root of all test
# subdirs we may have created. Use shutil instead of os.removedirs, so
# that non-empty directories are all recursively removed.
shutil.rmtree(TMP_TEST_DIR)
def patch_get_home_dir(dirpath):
return patch.object(paths, 'get_home_dir', return_value=dirpath)
def test_get_ipython_dir_1():
"""test_get_ipython_dir_1, Testcase to see if we can call get_ipython_dir without Exceptions."""
env_ipdir = os.path.join("someplace", ".ipython")
with patch.object(paths, '_writable_dir', return_value=True), \
modified_env({'IPYTHONDIR': env_ipdir}):
ipdir = paths.get_ipython_dir()
nt.assert_equal(ipdir, env_ipdir)
def test_get_ipython_dir_2():
"""test_get_ipython_dir_2, Testcase to see if we can call get_ipython_dir without Exceptions."""
with patch_get_home_dir('someplace'), \
patch.object(paths, 'get_xdg_dir', return_value=None), \
patch.object(paths, '_writable_dir', return_value=True), \
patch('os.name', "posix"), \
modified_env({'IPYTHON_DIR': None,
'IPYTHONDIR': None,
'XDG_CONFIG_HOME': None
}):
ipdir = paths.get_ipython_dir()
nt.assert_equal(ipdir, os.path.join("someplace", ".ipython"))
def test_get_ipython_dir_3():
"""test_get_ipython_dir_3, move XDG if defined, and .ipython doesn't exist."""
tmphome = TemporaryDirectory()
try:
with patch_get_home_dir(tmphome.name), \
patch('os.name', 'posix'), \
modified_env({
'IPYTHON_DIR': None,
'IPYTHONDIR': None,
'XDG_CONFIG_HOME': XDG_TEST_DIR,
}), warnings.catch_warnings(record=True) as w:
ipdir = paths.get_ipython_dir()
nt.assert_equal(ipdir, os.path.join(tmphome.name, ".ipython"))
if sys.platform != 'darwin':
nt.assert_equal(len(w), 1)
nt.assert_in('Moving', str(w[0]))
finally:
tmphome.cleanup()
def test_get_ipython_dir_4():
"""test_get_ipython_dir_4, warn if XDG and home both exist."""
with patch_get_home_dir(HOME_TEST_DIR), \
patch('os.name', 'posix'):
try:
os.mkdir(os.path.join(XDG_TEST_DIR, 'ipython'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
with modified_env({
'IPYTHON_DIR': None,
'IPYTHONDIR': None,
'XDG_CONFIG_HOME': XDG_TEST_DIR,
}), warnings.catch_warnings(record=True) as w:
ipdir = paths.get_ipython_dir()
nt.assert_equal(ipdir, os.path.join(HOME_TEST_DIR, ".ipython"))
if sys.platform != 'darwin':
nt.assert_equal(len(w), 1)
nt.assert_in('Ignoring', str(w[0]))
def test_get_ipython_dir_5():
"""test_get_ipython_dir_5, use .ipython if exists and XDG defined, but doesn't exist."""
with patch_get_home_dir(HOME_TEST_DIR), \
patch('os.name', 'posix'):
try:
os.rmdir(os.path.join(XDG_TEST_DIR, 'ipython'))
except OSError as e:
if e.errno != errno.ENOENT:
raise
with modified_env({
'IPYTHON_DIR': None,
'IPYTHONDIR': None,
'XDG_CONFIG_HOME': XDG_TEST_DIR,
}):
ipdir = paths.get_ipython_dir()
nt.assert_equal(ipdir, IP_TEST_DIR)
def test_get_ipython_dir_6():
"""test_get_ipython_dir_6, use home over XDG if defined and neither exist."""
xdg = os.path.join(HOME_TEST_DIR, 'somexdg')
os.mkdir(xdg)
shutil.rmtree(os.path.join(HOME_TEST_DIR, '.ipython'))
print(paths._writable_dir)
with patch_get_home_dir(HOME_TEST_DIR), \
patch.object(paths, 'get_xdg_dir', return_value=xdg), \
patch('os.name', 'posix'), \
modified_env({
'IPYTHON_DIR': None,
'IPYTHONDIR': None,
'XDG_CONFIG_HOME': None,
}), warnings.catch_warnings(record=True) as w:
ipdir = paths.get_ipython_dir()
nt.assert_equal(ipdir, os.path.join(HOME_TEST_DIR, '.ipython'))
nt.assert_equal(len(w), 0)
def test_get_ipython_dir_7():
"""test_get_ipython_dir_7, test home directory expansion on IPYTHONDIR"""
home_dir = os.path.normpath(os.path.expanduser('~'))
with modified_env({'IPYTHONDIR': os.path.join('~', 'somewhere')}), \
patch.object(paths, '_writable_dir', return_value=True):
ipdir = paths.get_ipython_dir()
nt.assert_equal(ipdir, os.path.join(home_dir, 'somewhere'))
@skip_win32
def test_get_ipython_dir_8():
"""test_get_ipython_dir_8, test / home directory"""
with patch.object(paths, '_writable_dir', lambda path: bool(path)), \
patch.object(paths, 'get_xdg_dir', return_value=None), \
modified_env({
'IPYTHON_DIR': None,
'IPYTHONDIR': None,
'HOME': '/',
}):
nt.assert_equal(paths.get_ipython_dir(), '/.ipython')
def test_get_ipython_cache_dir():
with modified_env({'HOME': HOME_TEST_DIR}):
if os.name == 'posix' and sys.platform != 'darwin':
# test default
os.makedirs(os.path.join(HOME_TEST_DIR, ".cache"))
with modified_env({'XDG_CACHE_HOME': None}):
ipdir = paths.get_ipython_cache_dir()
nt.assert_equal(os.path.join(HOME_TEST_DIR, ".cache", "ipython"),
ipdir)
assert_isdir(ipdir)
# test env override
with modified_env({"XDG_CACHE_HOME": XDG_CACHE_DIR}):
ipdir = paths.get_ipython_cache_dir()
assert_isdir(ipdir)
nt.assert_equal(ipdir, os.path.join(XDG_CACHE_DIR, "ipython"))
else:
nt.assert_equal(paths.get_ipython_cache_dir(),
paths.get_ipython_dir())
def test_get_ipython_package_dir():
ipdir = paths.get_ipython_package_dir()
assert_isdir(ipdir)
def test_get_ipython_module_path():
ipapp_path = paths.get_ipython_module_path('IPython.terminal.ipapp')
assert_isfile(ipapp_path)
|
[
"[email protected]"
] | |
cf5eab74fe573d51a22c2f3485b293846afe840e
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/types/ResolveToAnotherFileClassWithBuiltinNameField/foo.py
|
bfb5ad1dcf26fa6d998d1e54b6d605d64cc93e4c
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 |
Apache-2.0
| 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null |
UTF-8
|
Python
| false | false | 64 |
py
|
class Foo:
def __init__(self, id: int):
self.id = id
|
[
"[email protected]"
] | |
3191f5b594eccb4f79d85a07b133cb6f7a2535c0
|
9d0195aa83cc594a8c61f334b90375961e62d4fe
|
/JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano857.py
|
04b4cd41f6e1d1d64132d0089fc6468a9aa5cf29
|
[] |
no_license
|
rsk146/CMS
|
4e49592fc64f6438051544c5de18598db36ed985
|
5f8dab8c59ae556598b9747b52b88205fffc4dbe
|
refs/heads/master
| 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,292 |
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/270000/8AED26BC-E17D-4643-A7BA-9ED388728487.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest857.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
|
[
"[email protected]"
] | |
b98d7f3a306f0489f526d005559bda871a9dae63
|
e18da3301b53da6792f581159a953da71c00422b
|
/aula_1/ex_3.py
|
989a228d4c99271fe93379b33bf46298099135f3
|
[] |
no_license
|
fernandolago/python-521
|
6c4998b60784d966e8ed4aff0e500f1ab4f1fab0
|
e103b055c9cf04da889a380d55192307f66d34f4
|
refs/heads/master
| 2022-12-09T20:13:47.982648 | 2019-08-08T20:02:27 | 2019-08-08T20:02:27 | 200,073,861 | 0 | 0 | null | 2022-12-08T05:57:51 | 2019-08-01T15:16:36 |
Python
|
UTF-8
|
Python
| false | false | 270 |
py
|
import requests

# ViaCEP address-lookup API; the placeholder is filled with a Brazilian postal code (CEP).
URL = 'https://viacep.com.br/ws/{}/json'

cep = input('Digite seu cep: ')  # prompt: "Enter your CEP"
print(cep)
URL_FORMATADA = URL.format(cep)
print(URL_FORMATADA)
response = requests.get(URL_FORMATADA)
print(response)       # HTTP status wrapper, e.g. <Response [200]>
print(dir(response))  # inspect the attributes of the Response object
x = response.json()   # decode the JSON body into a dict
print(type(x))
|
[
"[email protected]"
] | |
43129de68025bc596b16c6a2f1e3e13749c0063e
|
4892b326dfd98d9513ba583868b1298c8a4e276c
|
/experiments/04_benchmark_memory/shared.py
|
ec654e7e21ec078f1a29ec876fa6a3fd6eef3cce
|
[
"MIT"
] |
permissive
|
MeNicefellow/cockpit
|
019640a610b05c46426d35eb9e6c8d49a3d970c7
|
5bd5ab3cda03eda0b0bf276f29d5c28b83d70b06
|
refs/heads/master
| 2023-01-23T06:59:43.329922 | 2020-11-24T07:56:03 | 2020-11-24T07:56:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,413 |
py
|
"""Compare memory footprint w/o individual gradient transformations."""
import os
import sys
import warnings
import pandas
from memory_profiler import memory_usage
from torch.optim import SGD
from cockpit.runners.scheduled_runner import _ScheduleCockpitRunner
HERE = os.path.abspath(__file__)
DIR = os.path.join(os.path.dirname(HERE), "data")
FIG_DIR = os.path.join(os.path.dirname(HERE), "fig")
os.makedirs(DIR, exist_ok=True)
os.makedirs(FIG_DIR, exist_ok=True)
def set_up():
from cockpit.utils import fix_deepobs_data_dir
from deepobs.pytorch.config import set_default_device
fix_deepobs_data_dir()
FORCE_CPU = True
if FORCE_CPU:
set_default_device("cpu")
INTERVAL = 0.01
def report_memory(f):
mem_usage = memory_usage(f, interval=INTERVAL)
mem_time = [INTERVAL * idx for idx in range(len(mem_usage))]
return pandas.DataFrame(data={"time": mem_time, "usage": mem_usage})
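# A minimal usage sketch (the `_demo_alloc` helper below is illustrative, not
# part of the original benchmark): memory_usage samples the process RSS every
# INTERVAL seconds while the callable runs, and report_memory collects those
# samples into a time/usage DataFrame.
def _demo_alloc():
    data = [0.0] * 10_000_000  # allocate roughly 80 MB so the peak is visible
    return sum(data)

# Example: peak_mib = report_memory(_demo_alloc)["usage"].max()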
def lr_schedule(num_epochs):
"""Some Learning rate schedule.
Example:
>>> # Halving the learning rate every epoch:
>>> lambda epoch: 0.5 ** epoch
>>> # A less aggressive decay:
>>> lambda epoch: 0.9 ** epoch
>>> # Constant learning rate (using init lr):
>>> lambda epoch: 1.0
"""
return lambda epoch: 1.0
def run(quants, testproblem):
optimizer_class = SGD
hyperparams = {
"lr": {"type": float, "default": 0.001},
"momentum": {"type": float, "default": 0.0},
"nesterov": {"type": bool, "default": False},
}
def plot_schedule(global_step):
return False
runner = MemoryBenchmarkRunner(
optimizer_class,
hyperparams,
quantities=quants,
plot=False,
plot_schedule=plot_schedule,
)
runner.run(
testproblem=testproblem,
num_epochs=1,
l2_reg=0.0, # necessary for backobs!
track_interval=1,
plot_interval=1,
show_plots=False,
save_plots=False,
save_final_plot=False,
save_animation=False,
lr_schedule=lr_schedule,
)
class MemoryBenchmarkRunner(_ScheduleCockpitRunner):
"""Run first forward-backward pass and update step of training, then quit.
Note:
Disables DeepOBS' additional metrics. Performs one step per epoch.
"""
STOP_BATCH_COUNT_PER_EPOCH = 1
def _maybe_stop_iteration(self, global_step, batch_count):
"""Stop after first step of each epoch."""
if batch_count == self.STOP_BATCH_COUNT_PER_EPOCH:
warnings.warn(
"The memory benchmark runner performs only "
+ f"{self.STOP_BATCH_COUNT_PER_EPOCH} steps per epoch."
)
raise StopIteration
def _should_eval(self):
"""Disable DeepOBS' evaluation of test/train/valid losses and accuracies."""
return False
def hotfix_deepobs_argparse():
"""Truncate command line arguments from pytest call to make DeepOBS arparse work.
TODO Think about good alternatives.
"""
sys.argv = sys.argv[:1]
def parse():
testproblem = sys.argv[1]
try:
num_run = int(sys.argv[2])
except IndexError:
num_run = None
hotfix_deepobs_argparse()
return testproblem, num_run
def skip_if_exists(filename):
if os.path.exists(filename):
print(f"Skipping as file already exists: {filename}")
sys.exit(0)
|
[
"Anonymous"
] |
Anonymous
|
bc4d811f9de41de53688ff53da3865f5f47c9c4f
|
926b3c52070f6e309567c8598248fd5c57095be9
|
/src/mmgeneration/tests/test_cores/test_fp16_utils.py
|
a4cf5a42c2cf57c0146ba1cc930cd875facaca6c
|
[
"Apache-2.0"
] |
permissive
|
fengbingchun/PyTorch_Test
|
410f7cd2303707b0141d433fb9d144a961e1f4c8
|
df5c2169f0b699bcd6e74adb4cb0e57f7dcd9348
|
refs/heads/master
| 2023-05-23T16:42:29.711338 | 2023-03-25T11:31:43 | 2023-03-25T11:31:43 | 167,339,907 | 15 | 4 | null | 2023-03-25T11:31:45 | 2019-01-24T09:24:59 |
C++
|
UTF-8
|
Python
| false | false | 6,975 |
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.utils import TORCH_VERSION
from mmgen.core.runners.fp16_utils import (auto_fp16, cast_tensor_type,
nan_to_num)
def test_nan_to_num():
a = torch.tensor([float('inf'), float('nan'), 2.])
res = nan_to_num(a, posinf=255., neginf=-255.)
assert (res == torch.tensor([255., 0., 2.])).all()
res = nan_to_num(a)
assert res.shape == (3, )
with pytest.raises(TypeError):
nan_to_num(1)
def test_cast_tensor_type():
inputs = torch.FloatTensor([5.])
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, torch.Tensor)
assert outputs.dtype == dst_type
inputs = 'tensor'
src_type = str
dst_type = str
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, str)
inputs = np.array([5.])
src_type = np.ndarray
dst_type = np.ndarray
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, np.ndarray)
inputs = dict(
tensor_a=torch.FloatTensor([1.]), tensor_b=torch.FloatTensor([2.]))
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, dict)
assert outputs['tensor_a'].dtype == dst_type
assert outputs['tensor_b'].dtype == dst_type
inputs = [torch.FloatTensor([1.]), torch.FloatTensor([2.])]
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, list)
assert outputs[0].dtype == dst_type
assert outputs[1].dtype == dst_type
inputs = 5
outputs = cast_tensor_type(inputs, None, None)
assert isinstance(outputs, int)
inputs = nn.Sequential(nn.Conv2d(2, 2, 3), nn.ReLU())
outputs = cast_tensor_type(inputs, None, None)
assert isinstance(outputs, nn.Module)
@pytest.mark.skipif(
not TORCH_VERSION >= '1.6.0', reason='Lower PyTorch version')
def test_auto_fp16_func():
with pytest.raises(TypeError):
# ExampleObject is not a subclass of nn.Module
class ExampleObject(object):
@auto_fp16()
def __call__(self, x):
return x
model = ExampleObject()
input_x = torch.ones(1, dtype=torch.float32)
model(input_x)
# apply to all input args
class ExampleModule(nn.Module):
@auto_fp16()
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
# apply to specified input args
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', ))
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
# apply to optional input args
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', 'y'))
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
input_z = torch.ones(1, dtype=torch.float32)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.float32
# out_fp32=True
class ExampleModule(nn.Module):
def __init__(self):
super().__init__()
self.out_fp32 = True
@auto_fp16(apply_to=('x', 'y'))
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
model.fp16_enabled = True
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.float32)
input_z = torch.ones(1, dtype=torch.float32)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
# out_fp32=True
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', 'y'), out_fp32=True)
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.float32)
input_z = torch.ones(1, dtype=torch.float32)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
|
[
"[email protected]"
] | |
395244d5698ec1b8f201d619d17776ad10b0251a
|
42c578d7d9056e3caab7263cde114626d7ad3450
|
/p/lib/python3.8/smtpd.py
|
1ee11938504efa638fbff8b12e6910b420d1442e
|
[] |
no_license
|
javier290595/mythicalclass
|
66a38ae916468aa80e62d9cc5881406e9beae84b
|
34f3255d4720834dbe0aabf8c5daa4a270a415bd
|
refs/heads/master
| 2023-01-08T12:33:13.232972 | 2020-11-12T01:52:09 | 2020-11-12T01:52:09 | 312,138,106 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 73 |
py
|
/home/linuxbrew/.linuxbrew/Cellar/[email protected]/3.8.6/lib/python3.8/smtpd.py
|
[
"[email protected]"
] | |
1ccc6390564eddf80e6cf1914171d5f378c81954
|
2fe726cefb4448adf900dfe1242238a4622026ea
|
/simple_library/library_app/admin.py
|
bf6c366952106be918901ebb17550a835e83ca79
|
[] |
no_license
|
romankarki/Library-Book-App
|
2623e1f3f0d2b3bc04ab7386a81e521bd2907305
|
571d3d843bb612e8fdbdb78a02cde1be781ab5fa
|
refs/heads/master
| 2022-07-25T04:19:50.510807 | 2020-05-11T06:22:55 | 2020-05-11T06:22:55 | 262,956,328 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 204 |
py
|
from django.contrib import admin
from library_app.models import Books, Student, BookUser
# Register your models here.
admin.site.register(Books)
admin.site.register(Student)
admin.site.register(BookUser)
|
[
"[email protected]"
] | |
7ce6a01f3c5650d970ef32d681f884d359a3815e
|
e3c17b642b827e798f3f2e3eb3ba108aeb1e5769
|
/Unit 26/judge_set_intersection.py
|
06ad5d53382c41d1d1937daa0bff2cab437d0d5f
|
[] |
no_license
|
ckiekim/Python-Lecture-1903
|
d6c2eecbf56168e5e5da26dc31f1740979ea7c8c
|
4fa84301f9f1b567ba240823309e9c8d0f3f5c64
|
refs/heads/master
| 2020-04-28T15:13:58.892548 | 2019-03-27T14:03:40 | 2019-03-27T14:03:40 | 175,365,462 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 240 |
py
|
x, y = map(int, input().split())
a = {i for i in range(1, x + 1) if x % i == 0}  # divisors of x
b = {i for i in range(1, y + 1) if y % i == 0}  # divisors of y
divisor = a & b  # common divisors of x and y
print(a, b, divisor)
result = sum(divisor)
print(result)
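# Worked example (illustrative): for input "10 20",
# a = {1, 2, 5, 10}, b = {1, 2, 4, 5, 10, 20},
# so divisor = {1, 2, 5, 10} and result = 1 + 2 + 5 + 10 = 18.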
|
[
"[email protected]"
] | |
aa07192cc7cff07ad907477514b91befd340672b
|
40e7e12de3a4c2e3c55d064898f331eb89093ff0
|
/seleniumbase/console_scripts/sb_behave_gui.py
|
19b73a535867507fcb1503f6aa1b601c52b85ae3
|
[
"MIT"
] |
permissive
|
bryoh/SeleniumBase
|
0f2ed8701557d3c512a65e050271ff1f2a2e02e2
|
fda7a286c4a0b2cb9015baa19d825b89834c8c1b
|
refs/heads/master
| 2023-05-26T16:29:26.919583 | 2023-05-12T16:51:41 | 2023-05-12T16:51:41 | 132,666,520 | 0 | 0 |
MIT
| 2023-02-01T10:38:22 | 2018-05-08T21:19:55 |
Python
|
UTF-8
|
Python
| false | false | 15,167 |
py
|
"""
Launches SeleniumBase Behave Commander | GUI for Behave.
Usage:
seleniumbase behave-gui [OPTIONAL PATH or TEST FILE]
sbase behave-gui [OPTIONAL PATH or TEST FILE]
Examples:
sbase behave-gui
sbase behave-gui features/
sbase behave-gui features/calculator.feature
Output:
Launches SeleniumBase Behave Commander | GUI for Behave.
"""
import colorama
import subprocess
import sys
if sys.version_info <= (3, 7):
current_version = ".".join(str(ver) for ver in sys.version_info[:3])
raise Exception(
"\n* SBase Commander requires Python 3.7 or newer!"
"\n** You are currently using Python %s" % current_version
)
import tkinter as tk # noqa: E402
from tkinter.scrolledtext import ScrolledText # noqa: E402
is_windows = False
if sys.platform in ["win32", "win64", "x64"]:
is_windows = True
def set_colors(use_colors):
c0 = ""
c1 = ""
c2 = ""
c3 = ""
c4 = ""
c5 = ""
c6 = ""
cr = ""
if use_colors:
colorama.init(autoreset=True)
c0 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
c1 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c2 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
c3 = colorama.Fore.BLACK + colorama.Back.LIGHTGREEN_EX
c4 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
c5 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
c6 = colorama.Fore.MAGENTA + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
return c0, c1, c2, c3, c4, c5, c6, cr
def send_window_to_front(root):
root.lift()
root.attributes("-topmost", True)
root.after_idle(root.attributes, "-topmost", False)
def do_behave_run(
root,
tests,
selected_tests,
command_string,
browser_string,
rs_string,
quiet_mode,
demo_mode,
mobile_mode,
dashboard,
headless,
save_screenshots,
additional_options,
):
total_tests = len(tests)
total_selected_tests = 0
test_to_run = None
for selected_test in selected_tests:
if selected_tests[selected_test].get():
total_selected_tests += 1
full_run_command = "%s -m behave" % sys.executable
if total_selected_tests == 0 or total_tests == total_selected_tests:
if command_string:
full_run_command += " "
full_run_command += command_string
else:
for test_number, test in enumerate(tests):
if selected_tests[test_number].get():
full_run_command += " "
test_to_run = test
if test.startswith("(GROUP) "):
test_to_run = test.split("(GROUP) ")[1]
full_run_command += test_to_run.split(" => ")[0]
else:
full_run_command += test.split(" => ")[0]
if "(-D edge)" in browser_string:
full_run_command += " -D edge"
elif "(-D firefox)" in browser_string:
full_run_command += " -D firefox"
elif "(-D safari)" in browser_string:
full_run_command += " -D safari"
if "(-D rs)" in rs_string:
full_run_command += " -D rs"
elif "(-D rs -D crumbs)" in rs_string:
full_run_command += " -D rs -D crumbs"
elif "(-D rcs)" in rs_string:
full_run_command += " -D rcs"
elif "(-D rcs -D crumbs)" in rs_string:
full_run_command += " -D rcs -D crumbs"
if quiet_mode:
full_run_command += " --quiet"
if demo_mode:
full_run_command += " -D demo"
if mobile_mode:
full_run_command += " -D mobile"
if dashboard:
full_run_command += " -D dashboard"
if headless:
full_run_command += " -D headless"
elif "linux" in sys.platform:
full_run_command += " -D gui"
if save_screenshots:
full_run_command += " -D screenshot"
additional_options_list = additional_options.split(" ")
dash_T_needed = False
if (
"-T" not in additional_options_list
and "--no-timings" not in additional_options_list
and "--show-timings" not in additional_options_list
):
dash_T_needed = True
dash_k_needed = False
if (
"-k" not in additional_options_list
and "--no-skipped" not in additional_options_list
and "--show-skipped" not in additional_options_list
):
dash_k_needed = True
additional_options = additional_options.strip()
if additional_options:
full_run_command += " "
full_run_command += additional_options
if dash_T_needed:
full_run_command += " -T"
if dash_k_needed:
full_run_command += " -k"
print(full_run_command)
if not additional_options or " " not in additional_options:
subprocess.Popen(full_run_command, shell=True)
else:
proc = subprocess.Popen(
full_run_command, stderr=subprocess.PIPE, shell=True
)
(output, error) = proc.communicate()
if error and proc.returncode == 2:
if str(error).startswith("b'") and str(error).endswith("\\n'"):
error = str(error)[2:-3]
elif str(error).startswith("b'") and str(error).endswith("'"):
error = str(error)[2:-1]
else:
error = str(error)
error = error.replace("\\n", "\n")
print(error)
send_window_to_front(root)
def create_tkinter_gui(tests, command_string, t_count, f_count, s_tests):
root = tk.Tk()
root.title("SeleniumBase Behave Commander | GUI for Behave")
root.minsize(820, 656)
tk.Label(root, text="").pack()
options_list = [
"Use Chrome Browser (Default)",
"Use Edge Browser (-D edge)",
"Use Firefox Browser (-D firefox)",
]
if "darwin" in sys.platform:
options_list.append("Use Safari Browser (-D safari)")
brx = tk.StringVar(root)
brx.set(options_list[0])
question_menu = tk.OptionMenu(root, brx, *options_list)
question_menu.pack()
options_list = [
"New Session Per Test (Default)",
"Reuse Session for ALL the tests (-D rs)",
"Reuse Session and clear cookies (-D rs -D crumbs)",
"Reuse Session in the SAME class/feature (-D rcs)",
"Reuse Session in class and clear cookies (-D rcs -D crumbs)",
]
rsx = tk.StringVar(root)
rsx.set(options_list[0])
question_menu = tk.OptionMenu(root, rsx, *options_list)
question_menu.pack()
qmx = tk.IntVar()
chk = tk.Checkbutton(
root, text="Quiet Mode (--quiet)", variable=qmx, pady=0
)
chk.pack()
dmx = tk.IntVar()
chk = tk.Checkbutton(
root, text="Demo Mode (-D demo)", variable=dmx, pady=0
)
chk.pack()
mmx = tk.IntVar()
chk = tk.Checkbutton(
root, text="Mobile Mode (-D mobile)", variable=mmx, pady=0
)
chk.pack()
dbx = tk.IntVar()
chk = tk.Checkbutton(
root, text="Dashboard (-D dashboard)", variable=dbx, pady=0
)
chk.pack()
chk.select()
hbx = tk.IntVar()
chk = tk.Checkbutton(
root, text="Headless Browser (-D headless)", variable=hbx, pady=0
)
chk.pack()
ssx = tk.IntVar()
chk = tk.Checkbutton(
root, text="Save Screenshots (-D screenshot)", variable=ssx, pady=0
)
chk.pack()
tk.Label(root, text="").pack()
plural = "s"
if f_count == 1:
plural = ""
run_display = (
"Select from %s rows (%s feature%s with %s scenarios): "
"(All tests will run if none are selected)"
% (len(tests), f_count, plural, t_count)
)
if t_count == 1:
run_display = "Only ONE TEST was found and will be run:"
tests = s_tests
tk.Label(root, text=run_display, bg="yellow", fg="magenta").pack()
text_area = ScrolledText(
root, width=100, height=12, wrap="word", state=tk.DISABLED
)
text_area.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
count = 0
ara = {}
for row in tests:
row += " " * 200
ara[count] = tk.IntVar()
cb = None
if not is_windows:
cb = tk.Checkbutton(
text_area,
text=(row),
bg="teal",
fg="yellow",
anchor="w",
pady=0,
variable=ara[count],
)
else:
cb = tk.Checkbutton(
text_area,
text=(row),
bg="teal",
fg="yellow",
anchor="w",
pady=0,
borderwidth=0,
highlightthickness=0,
variable=ara[count],
)
text_area.window_create("end", window=cb)
text_area.insert("end", "\n")
count += 1
tk.Label(root, text="").pack()
additional_options = ""
aopts = tk.StringVar(value=additional_options)
tk.Label(
root,
text='Additional "behave" Options: (Eg. "-D incognito --junit")',
bg="yellow", fg="blue",
).pack()
entry = tk.Entry(root, textvariable=aopts)
entry.pack()
entry.focus()
entry.bind(
"<Return>",
(
lambda _: do_behave_run(
root,
tests,
ara,
command_string,
brx.get(),
rsx.get(),
qmx.get(),
dmx.get(),
mmx.get(),
dbx.get(),
hbx.get(),
ssx.get(),
aopts.get(),
)
),
)
tk.Button(
root,
text="Run Selected Tests",
fg="green",
bg="gray",
command=lambda: do_behave_run(
root,
tests,
ara,
command_string,
brx.get(),
rsx.get(),
qmx.get(),
dmx.get(),
mmx.get(),
dbx.get(),
hbx.get(),
ssx.get(),
aopts.get(),
),
).pack()
tk.Label(root, text="\n").pack()
# Bring form window to front
send_window_to_front(root)
# Use decoy to set correct focus on main window
decoy = tk.Tk()
decoy.geometry("1x1")
decoy.iconify()
decoy.update()
decoy.deiconify()
decoy.destroy()
# Start tkinter
root.mainloop()
def main():
use_colors = True
if "linux" in sys.platform:
use_colors = False
c0, c1, c2, c3, c4, c5, c6, cr = set_colors(use_colors)
command_args = sys.argv[2:]
command_string = " ".join(command_args)
message = ""
message += c2
message += "*"
message += c4
message += " Starting the "
message += c0
message += "Selenium"
message += c1
message += "Base"
message += c2
message += " "
message += c6
message += "Behave"
message += c4
message += " "
message += c3
message += "Commander"
message += c4
message += " GUI App"
message += c2
message += "..."
message += cr
print(message)
command_string = command_string.replace("--quiet", "")
command_string = command_string.replace("-q", "")
proc = subprocess.Popen(
"%s -m behave -d %s --show-source"
% (sys.executable, command_string),
stdout=subprocess.PIPE,
shell=True,
)
(output, error) = proc.communicate()
if error:
error_msg = "Error collecting tests: %s" % str(error)
error_msg = c5 + error_msg + cr
print(error_msg)
return
filename = None
feature_name = None
scenario_name = None
f_tests = [] # Features
s_tests = [] # Scenarios
tests = [] # All tests
file_scenario_count = {}
f_count = 0
s_count = 0
t_count = 0
if is_windows:
output = output.decode("latin1")
else:
output = output.decode("utf-8")
for row in output.replace("\r", "").split("\n"):
if row.startswith("Feature: "):
if f_count > 0:
file_scenario_count[str(f_count)] = s_count
f_count += 1
s_count = 0
elif row.startswith(" Scenario: "):
s_count += 1
file_scenario_count[str(f_count)] = s_count
elif row.startswith(" Scenario Outline: "):
s_count += 1
file_scenario_count[str(f_count)] = s_count
file_scenario_count[str(f_count)] = s_count
f_count = 0
s_count = 0
for row in output.replace("\r", "").split("\n"):
if row.startswith("Feature: "):
f_count += 1
feature_name = row.split("Feature: ")[1]
if " # features/" in feature_name:
filename = feature_name.split(" # features/")[-1]
filename = "features/" + filename.split(":")[0]
feature_name = feature_name.split(" # features/")[0]
elif " # features\\" in feature_name:
filename = feature_name.split(" # features\\")[-1]
filename = "features\\" + filename.split(":")[0]
feature_name = feature_name.split(" # features\\")[0]
else:
filename = feature_name.split(" # ")[-1]
filename = filename.split(":")[0]
feature_name = feature_name.split(" # ")[-1]
s_count = file_scenario_count[str(f_count)]
filename = filename.strip()
t_name = "(GROUP) %s => %s" % (filename, feature_name)
t_name += " <> (%s Total)" % s_count
f_tests.append(t_name)
elif (
row.startswith(" Scenario: ")
or row.startswith(" Scenario Outline: ")
):
t_count += 1
line_num = row.split(":")[-1]
scenario_name = None
if row.startswith(" Scenario: "):
scenario_name = row.split(" Scenario: ")[-1]
else:
scenario_name = row.split(" Scenario Outline: ")[-1]
if " -- @" in scenario_name:
scenario_name = scenario_name.split(" # ")[0].rstrip()
elif " # features/" in scenario_name:
scenario_name = scenario_name.split(" # features/")[0]
else:
scenario_name = scenario_name.split(" # ")[0]
scenario_name = scenario_name.strip()
s_tests.append("%s:%s => %s" % (filename, line_num, scenario_name))
tests = f_tests + s_tests
if not tests:
err_msg_0 = c5 + "ERROR:" + cr + "\n"
err_msg_1 = ' No "behave" tests found! Expecting "*.feature" files!'
err_msg_1 = c6 + err_msg_1 + cr + "\n"
err_msg_2 = ' "*.feature" files would live in a "features/" folder.'
err_msg_2 = c6 + err_msg_2 + cr + "\n"
err_msg_3 = "Exiting SBase Behave Commander..."
err_msg_3 = c5 + err_msg_3 + cr
error_msg = err_msg_0 + err_msg_1 + err_msg_2 + err_msg_3
print(error_msg)
return
create_tkinter_gui(tests, command_string, t_count, f_count, s_tests)
if __name__ == "__main__":
print('To open SBase Behave Commander, type "sbase behave-gui"')
|
[
"[email protected]"
] | |
183661cb8b23d1df34f583e6b5382ed03c4b6ce8
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqpt/ingrstorm5min.py
|
b3222a2b328d64900531c4742e3aa46ea97d0615
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 |
Python
|
UTF-8
|
Python
| false | false | 20,191 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class IngrStorm5min(Mo):
"""
A class that represents the most current statistics for Ingress Storm Ctrl in a 5 minute sampling interval. This class updates every 10 seconds.
"""
meta = StatsClassMeta("cobra.model.eqpt.IngrStorm5min", "Ingress Storm Ctrl")
counter = CounterMeta("dropBytesRate", CounterCategory.GAUGE, "bytes-per-second", "Storm Ctrl Drop Bytes rate")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "dropBytesRateLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "dropBytesRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "dropBytesRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "dropBytesRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "dropBytesRateSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "dropBytesRateTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "dropBytesRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "dropBytesRateTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "dropBytesRateTr"
meta._counters.append(counter)
counter = CounterMeta("dropBytes", CounterCategory.COUNTER, "bytes", "Storm Ctrl Drop Bytes")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "dropBytesLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "dropBytesCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "dropBytesPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "dropBytesMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "dropBytesMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "dropBytesAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "dropBytesSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "dropBytesBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "dropBytesThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "dropBytesTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "dropBytesTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "dropBytesRate"
meta._counters.append(counter)
meta.moClassName = "eqptIngrStorm5min"
meta.rnFormat = "CDeqptIngrStorm5min"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current Ingress Storm Ctrl stats in 5 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.pc.AggrIf")
meta.parentClasses.add("cobra.model.l1.PhysIf")
meta.superClasses.add("cobra.model.stats.Curr")
meta.superClasses.add("cobra.model.eqpt.IngrStorm")
meta.superClasses.add("cobra.model.stats.Item")
meta.rnPrefixes = [
('CDeqptIngrStorm5min', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "dropBytesAvg", "dropBytesAvg", 18046, PropCategory.IMPLICIT_AVG)
prop.label = "Storm Ctrl Drop Bytes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesAvg", prop)
prop = PropMeta("str", "dropBytesBase", "dropBytesBase", 18041, PropCategory.IMPLICIT_BASELINE)
prop.label = "Storm Ctrl Drop Bytes baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesBase", prop)
prop = PropMeta("str", "dropBytesCum", "dropBytesCum", 18042, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Storm Ctrl Drop Bytes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesCum", prop)
prop = PropMeta("str", "dropBytesLast", "dropBytesLast", 18040, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Storm Ctrl Drop Bytes current value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesLast", prop)
prop = PropMeta("str", "dropBytesMax", "dropBytesMax", 18045, PropCategory.IMPLICIT_MAX)
prop.label = "Storm Ctrl Drop Bytes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesMax", prop)
prop = PropMeta("str", "dropBytesMin", "dropBytesMin", 18044, PropCategory.IMPLICIT_MIN)
prop.label = "Storm Ctrl Drop Bytes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesMin", prop)
prop = PropMeta("str", "dropBytesPer", "dropBytesPer", 18043, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Storm Ctrl Drop Bytes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesPer", prop)
prop = PropMeta("str", "dropBytesRate", "dropBytesRate", 18051, PropCategory.IMPLICIT_RATE)
prop.label = "Storm Ctrl Drop Bytes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesRate", prop)
prop = PropMeta("str", "dropBytesRateAvg", "dropBytesRateAvg", 18064, PropCategory.IMPLICIT_AVG)
prop.label = "Storm Ctrl Drop Bytes rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesRateAvg", prop)
prop = PropMeta("str", "dropBytesRateLast", "dropBytesRateLast", 18061, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Storm Ctrl Drop Bytes rate current value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesRateLast", prop)
prop = PropMeta("str", "dropBytesRateMax", "dropBytesRateMax", 18063, PropCategory.IMPLICIT_MAX)
prop.label = "Storm Ctrl Drop Bytes rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesRateMax", prop)
prop = PropMeta("str", "dropBytesRateMin", "dropBytesRateMin", 18062, PropCategory.IMPLICIT_MIN)
prop.label = "Storm Ctrl Drop Bytes rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesRateMin", prop)
prop = PropMeta("str", "dropBytesRateSpct", "dropBytesRateSpct", 18065, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Storm Ctrl Drop Bytes rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesRateSpct", prop)
prop = PropMeta("str", "dropBytesRateThr", "dropBytesRateThr", 18067, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Storm Ctrl Drop Bytes rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("dropBytesRateThr", prop)
prop = PropMeta("str", "dropBytesRateTr", "dropBytesRateTr", 18069, PropCategory.IMPLICIT_TREND)
prop.label = "Storm Ctrl Drop Bytes rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesRateTr", prop)
prop = PropMeta("str", "dropBytesRateTrBase", "dropBytesRateTrBase", 18068, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Storm Ctrl Drop Bytes rate trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesRateTrBase", prop)
prop = PropMeta("str", "dropBytesRateTtl", "dropBytesRateTtl", 18066, PropCategory.IMPLICIT_TOTAL)
prop.label = "Storm Ctrl Drop Bytes rate total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesRateTtl", prop)
prop = PropMeta("str", "dropBytesSpct", "dropBytesSpct", 18047, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Storm Ctrl Drop Bytes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesSpct", prop)
prop = PropMeta("str", "dropBytesThr", "dropBytesThr", 18048, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Storm Ctrl Drop Bytes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("dropBytesThr", prop)
prop = PropMeta("str", "dropBytesTr", "dropBytesTr", 18050, PropCategory.IMPLICIT_TREND)
prop.label = "Storm Ctrl Drop Bytes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesTr", prop)
prop = PropMeta("str", "dropBytesTrBase", "dropBytesTrBase", 18049, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Storm Ctrl Drop Bytes trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("dropBytesTrBase", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("l1EthIfToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
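
# A minimal read-only usage sketch, not part of the generated file above. It
# assumes a reachable APIC and valid credentials; the URL, user, and password
# are placeholders. MoDirectory.lookupByClass retrieves the current 5-minute
# ingress storm-control counters that this class defines.
if __name__ == '__main__':
    from cobra.mit.access import MoDirectory
    from cobra.mit.session import LoginSession

    md = MoDirectory(LoginSession('https://apic.example.com', 'admin', 'password'))
    md.login()
    for mo in md.lookupByClass('eqptIngrStorm5min'):
        # Print the interface stats DN with the latest and average drop rates
        print(mo.dn, mo.dropBytesRateLast, mo.dropBytesRateAvg)
    md.logout()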
|
[
"[email protected]"
] | |
be1275ae642e75f3b7013809e57c2dd830e8801e
|
f531c56db4cd2776c765b9aca0c4cebaea864ec2
|
/ABC152/b.py
|
67ad0d250e393f4fab6a6c5be90863c24befc29a
|
[] |
no_license
|
SatoKeiju/AtCoder-Python3
|
1c76f8ec5d99470b6e316115f0433b4b3cb64024
|
9c2860e2cfda490d5848b0557876ef616eff01a2
|
refs/heads/master
| 2021-06-23T05:59:46.911733 | 2021-03-30T08:00:34 | 2021-03-30T08:00:34 | 212,088,768 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 128 |
py
|
def main():
    a, b = map(int, input().split())
    print(str(min(a, b)) * max(a, b))


if __name__ == '__main__':
    main()
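
# Worked example (assuming single-digit inputs, as in ABC152 B): the answer is
# the lexicographically smaller of str(a) * b and str(b) * a, which is always
# the smaller digit repeated max(a, b) times, e.g. a=4, b=7 -> '4444444'.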
|
[
"[email protected]"
] | |
7e045832ecae12c48e1e1b63db2b5658de3529ec
|
2293c76c3d18e2fcd44ded90bd40113d26285663
|
/pyeccodes/defs/grib1/localConcepts/eswi/sort_table.py
|
03e1224d57722c17f3827e62f20c2c5f19e1c0d2
|
[
"Apache-2.0"
] |
permissive
|
ecmwf/pyeccodes
|
b1f121dbddf68d176a03805ed5144ba0b37ac211
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
refs/heads/master
| 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,112 |
py
|
def load(h):
    return ({'abbr': 0, 'code': 0, 'title': 'none'},
            {'abbr': 1, 'code': 1, 'title': 'Atm. conc. [g/kg]'},
            {'abbr': 2, 'code': 2, 'title': 'Log Atm. conc. [g/kg]'},
            {'abbr': 3, 'code': 3, 'title': 'Atm. conc. [g/m3]'},
            {'abbr': 3, 'code': 3, 'title': 'Atm. conc. [g/m3]'},
            {'abbr': 4, 'code': 4, 'title': 'Log Atm. conc. [g/m3]'},
            {'abbr': 7, 'code': 7, 'title': 'Atm. conc. [number/m3]'},
            {'abbr': 9, 'code': 9, 'title': 'Atm. conc. [Bq/m3]'},
            {'abbr': 10, 'code': 10, 'title': 'Log Atm. conc. [Bq/m3]'},
            {'abbr': 11, 'code': 11, 'title': 'Atm. conc. at reference height [g/kg]'},
            {'abbr': 12, 'code': 12, 'title': 'Atm. conc. at reference height [g/m3]'},
            {'abbr': 13, 'code': 13, 'title': 'Log Atm. conc. at reference height [g/m3]'},
            {'abbr': 14, 'code': 14, 'title': 'Total column [g/m2]'},
            {'abbr': 14, 'code': 14, 'title': 'Column up to 6000m [g/m2]'},
            {'abbr': 14, 'code': 14, 'title': 'Column up above 6000m [g/m2]'},
            {'abbr': 14, 'code': 14, 'title': 'Max in column up to 6000m [g/m3]'},
            {'abbr': 14, 'code': 14, 'title': 'Max in column above 6000m [g/m3]'},
            {'abbr': 15, 'code': 15, 'title': 'Level at max in column up to 6000m [m]'},
            {'abbr': 15, 'code': 15, 'title': 'Level at max in column above 6000m [m]'},
            {'abbr': 21, 'code': 21, 'title': 'Integrated atm. conc. s [g/kg]'},
            {'abbr': 22, 'code': 22, 'title': 'Log Integrated atm. conc. s [g/kg]'},
            {'abbr': 23, 'code': 23, 'title': 'Integrated atm. conc. s [g/m3]'},
            {'abbr': 24,
             'code': 24,
             'title': 'Logarith of Integrated atm. conc. s [g/m3]'},
            {'abbr': 27, 'code': 27, 'title': 'Integrated atm. conc. s [number/m3]'},
            {'abbr': 29, 'code': 29, 'title': 'Integrated atm. conc. s [Bq/m3]'},
            {'abbr': 30, 'code': 30, 'title': 'Log Integrated atm. conc. s [Bq/m3]'},
            {'abbr': 51, 'code': 51, 'title': 'Conc. in liquid water [g/m3]'},
            {'abbr': 53, 'code': 53, 'title': 'Conc. in liquid water Equivalents/m3'},
            {'abbr': 54, 'code': 54, 'title': 'Conc. in liquid water [number/m3]'},
            {'abbr': 55, 'code': 55, 'title': 'Conc. in liquid water [Bq/m3]'},
            {'abbr': 61, 'code': 61, 'title': 'Conc. in ice water [g/m3]'},
            {'abbr': 63, 'code': 63, 'title': 'Conc. in ice water Equivalents/m3'},
            {'abbr': 64, 'code': 64, 'title': 'Conc. in ice water [number/m3]'},
            {'abbr': 65, 'code': 65, 'title': 'Conc. in ice water [Bq/m3]'},
            {'abbr': 71,
             'code': 71,
             'title': 'Conc. in precipitation [g/m3]',
             'units': 'mg/l'},
            {'abbr': 73, 'code': 73, 'title': 'Conc. in precipitation Equivalents/m3'},
            {'abbr': 74, 'code': 74, 'title': 'Conc. in precipitation [number/m3]'},
            {'abbr': 75, 'code': 75, 'title': 'Conc. in precipitation [Bq/m3]'},
            {'abbr': 81, 'code': 81, 'title': 'Dry deposition [g/m2]'},
            {'abbr': 82, 'code': 82, 'title': 'Log Dry deposition [g/m2]'},
            {'abbr': 84, 'code': 84, 'title': 'Dry deposition [number/m2]'},
            {'abbr': 85, 'code': 85, 'title': 'Dry deposition [Bq/m2]'},
            {'abbr': 91, 'code': 91, 'title': 'Wet deposition [g/m2]'},
            {'abbr': 92, 'code': 92, 'title': 'Log Wet deposition [g/m2]'},
            {'abbr': 94, 'code': 94, 'title': 'Wet deposition [number/m2]'},
            {'abbr': 95, 'code': 95, 'title': 'Wet deposition [Bq/m2]'},
            {'abbr': 101, 'code': 101, 'title': 'Total deposition [g/m2]'},
            {'abbr': 102, 'code': 102, 'title': 'Log Total deposition [g/m2]'},
            {'abbr': 104, 'code': 104, 'title': 'Total deposition [number/m2]'},
            {'abbr': 105, 'code': 105, 'title': 'Total deposition [Bq/m2]'},
            {'abbr': 110, 'code': 110, 'title': 'Emissions [ton]'},
            {'abbr': 111, 'code': 111, 'title': 'Emissions [kg]'},
            {'abbr': 112, 'code': 112, 'title': 'Emissions [g]'},
            {'abbr': 114, 'code': 114, 'title': 'Emissions [number]'},
            {'abbr': 115, 'code': 115, 'title': 'Emissions [Bq]'},
            {'abbr': 121, 'code': 121, 'title': 'Emissions [kg/s]'},
            {'abbr': 122, 'code': 122, 'title': 'Emissions [g/s]'},
            {'abbr': 124, 'code': 124, 'title': 'Emissions [number/s]'},
            {'abbr': 125, 'code': 125, 'title': 'Emissions [Bq/s]'},
            {'abbr': 131, 'code': 131, 'title': 'Emissions [kg/(m2 s)]'},
            {'abbr': 132, 'code': 132, 'title': 'Emissions [g/(m2 s)]'},
            {'abbr': 134, 'code': 134, 'title': 'Emissions [number/(m2 s)]'},
            {'abbr': 135, 'code': 135, 'title': 'Emissions [Bq/(m2 s)]'},
            {'abbr': 136, 'code': 136, 'title': 'Surface emissions [kg/(m2 s)]'},
            {'abbr': 137, 'code': 137, 'title': 'Surface emissions [g/(m2 s)]'},
            {'abbr': 138, 'code': 138, 'title': 'Surface emissions [number/(m2 s)]'},
            {'abbr': 139, 'code': 139, 'title': 'Surface emissions [Bq/(m2 s)]'},
            {'abbr': 150, 'code': 150, 'title': 'Inhalation dose [nSv]'},
            {'abbr': 151, 'code': 151, 'title': 'Ground dose [nSv]'},
            {'abbr': 152, 'code': 152, 'title': 'Infinite cloud dose [nSv]'},
            {'abbr': 153, 'code': 153, 'title': 'Sum of cloud and ground dose [nSv]'},
            {'abbr': 201, 'code': 201, 'title': 'Dry deposition velocity [m/s]'},
            {'abbr': 202, 'code': 202, 'title': 'Settling velocity [m/s]'},
            {'abbr': 203, 'code': 203, 'title': 'Scavenging coefficient [1/s]'},
            {'abbr': 205, 'code': 205, 'title': 'Degree hours or days for last day [K]'},
            {'abbr': 206, 'code': 206, 'title': 'Current degree days [K]'},
            {'abbr': 207, 'code': 207, 'title': 'Critical degree days [K]'},
            {'abbr': 208, 'code': 208, 'title': 'Accum pollen emission [grains/m2]'},
            {'abbr': 209, 'code': 209, 'title': 'Correction factor [fraction]'},
            {'abbr': 210, 'code': 210, 'title': 'Aerosol optical depth []'},
            {'abbr': 240,
             'code': 240,
             'title': 'Deposition arrival since 1 Jan 1971 [days]'},
            {'abbr': 241,
             'code': 241,
             'title': 'Latest deposition since 1 Jan 1971 [days]'},
            {'abbr': 242,
             'code': 242,
             'title': 'Time of max activity since 1 Jan 1971 [days]'},
            {'abbr': 243, 'code': 243, 'title': 'Max radioactive activity [Bq/m2]'},
            {'abbr': 244, 'code': 244, 'title': 'Log Max radioactive activity'},
            {'abbr': 250, 'code': 250, 'title': 'Relative occurrence []'},
            {'abbr': 251, 'code': 251, 'title': 'statistics [kg]'},
            {'abbr': 252, 'code': 252, 'title': 'statistics [mol]'},
            {'abbr': None, 'code': 255, 'title': 'missing value'})
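
# A minimal usage sketch, not part of the generated definition above. `load`
# ignores its argument here, so passing None is assumed safe; note that
# duplicate codes (e.g. 3, 14, 15) collapse to their last entry in the dict.
if __name__ == '__main__':
    titles = {entry['code']: entry['title'] for entry in load(None)}
    print(titles[81])   # -> Dry deposition [g/m2]
    print(titles[255])  # -> missing value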
|
[
"[email protected]"
] |