{
"source": "JiachenRen/wcd-idf-app-prediction",
"score": 3
}
#### File: wcd-idf-app-prediction/vec4ir/combination.py
```python
from sklearn.base import BaseEstimator
from collections import defaultdict, OrderedDict
from sklearn.preprocessing import maxabs_scale
from functools import reduce
from operator import itemgetter
from numpy import prod as product
def aggregate_dicts(dicts, agg_fn=sum):
"""
Aggregates the contents of two dictionaries by key
@param agg_fn is used to aggregate the values (defaults to sum)
>>> dict1 = {'a': 0.8, 'b': 0.4, 'd': 0.4}
>>> dict2 = {'a': 0.7, 'c': 0.3, 'd': 0.3}
>>> agg = aggregate_dicts([dict1, dict2])
>>> OrderedDict(sorted(agg.items(), key=itemgetter(1), reverse=True))
OrderedDict([('a', 1.5), ('d', 0.7), ('b', 0.4), ('c', 0.3)])
"""
acc = defaultdict(list)
for d in dicts:
for k in d:
acc[k].append(d[k])
for key, values in acc.items():
acc[key] = agg_fn(values)
return dict(acc) # no need to default to list anymore
def fuzzy_or(values):
"""
Applies fuzzy-or to a list of values
>>> fuzzy_or([0.5])
0.5
>>> fuzzy_or([0.5, 0.5])
0.75
>>> fuzzy_or([0.5, 0.5, 0.5])
0.875
"""
if min(values) < 0 or max(values) > 1:
raise ValueError("fuzzy_or expects values in [0,1]")
return reduce(lambda x, y: 1 - (1 - x) * (1 - y), values)
class CombinatorMixin(object):
""" Creates a computational tree with retrieval models as leafs
"""
    def __get_weights(self, other):
        if not isinstance(other, CombinatorMixin):
            raise ValueError("other is not Combinable")
        # Use getattr with a default: the original hasattr('__weight') check never
        # matched because double-underscore attributes are name-mangled.
        weight = getattr(self, '_weight', 1.0)
        otherweight = getattr(other, '_weight', 1.0)
        return weight, otherweight
# This is evil since it can exceed [0,1], rescaling at the end would be not
# that beautiful
# def __add__(self, other):
# weights = self.__get_weights(other)
# return Combined([self, other], weights=weights, agg_fn=sum)
    def __and__(self, other):
        weights = self.__get_weights(other)
        return Combined([self, other], weights=weights, aggregation_fn=product)
    def __or__(self, other):
        weights = self.__get_weights(other)
        return Combined([self, other], weights=weights, aggregation_fn=fuzzy_or)
    def __mul__(self, scalar):
        self._weight = scalar
        return self
class Combined(BaseEstimator, CombinatorMixin):
def __init__(self, retrieval_models, weights=None, aggregation_fn=sum):
self.retrieval_models = retrieval_models
self.aggregation_fn = aggregation_fn
if weights is not None:
self.weights = weights
else:
self.weights = [1.0] * len(retrieval_models)
def query(self, query, k=1, sort=True):
models = self.retrieval_models
weights = maxabs_scale(self.weights) # max 1 does not crash [0,1]
agg_fn = self.aggregation_fn
# we only need to sort in the final run
combined = [m.query(query, k=k, sort=False) for m in models]
if weights is not None:
combined = [{k: v * w for k, v in r.items()} for r, w in
zip(combined, weights)]
        combined = aggregate_dicts(combined, agg_fn=agg_fn)
if sort:
# only cut-off at k if this is the final (sorted) output
combined = OrderedDict(sorted(combined.items(), key=itemgetter(1),
reverse=True)[:k])
return combined
```
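A minimal usage sketch of the operators defined above. `ToyModel` and its fixed score dictionaries are hypothetical stand-ins for real retrieval models; the only assumption is that `query` returns a mapping from document ids to scores, as `Combined.query` expects.
```python
# Hypothetical sketch: combine two toy retrieval models with the mixin operators.
# ToyModel is illustrative only and not part of vec4ir; the import path assumes
# the package layout shown in the file header above.
from vec4ir.combination import CombinatorMixin

class ToyModel(CombinatorMixin):
    def __init__(self, scores):
        self.scores = scores

    def query(self, query, k=1, sort=True):
        return dict(self.scores)  # document id -> score

a = ToyModel({'d1': 0.8, 'd2': 0.4})
b = ToyModel({'d1': 0.7, 'd3': 0.3})
combined = (a * 2.0) | b  # weight model a, then fuzzy-or the per-document scores
print(combined.query("some query", k=2))
```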
{
"source": "jiachens/auto_LiRPA",
"score": 2
}
#### File: auto_LiRPA/operators/dropout.py
```python
from .base import *
class BoundDropout(Bound):
def __init__(self, input_name, name, ori_name, attr, inputs, output_index, options, device):
super().__init__(input_name, name, ori_name, attr, inputs, output_index, options, device)
self.dropout = nn.Dropout(p=attr['ratio'])
self.scale = 1 / (1 - attr['ratio'])
@Bound.save_io_shape
def forward(self, x):
res = self.dropout(x)
self.mask = res == 0
return res
def bound_backward(self, last_lA, last_uA, x):
def _bound_oneside(last_A):
if last_A is None:
return None
return torch.where(self.mask.unsqueeze(0), torch.tensor(0).to(last_A), last_A * self.scale)
lA = _bound_oneside(last_lA)
uA = _bound_oneside(last_uA)
return [(lA, uA)], 0, 0
def bound_forward(self, dim_in, x):
assert (torch.min(self.mask) >= 0)
lw = x.lw * self.mask.unsqueeze(1)
lb = x.lb * self.mask
uw = x.uw * self.mask.unsqueeze(1)
ub = x.ub * self.mask
return LinearBound(lw, lb, uw, ub)
def interval_propagate(self, *v):
h_L, h_U = v[0]
if not self.training:
return h_L, h_U
else:
lower = torch.where(self.mask, torch.tensor(0).to(h_L), h_L * self.scale)
upper = torch.where(self.mask, torch.tensor(0).to(h_U), h_U * self.scale)
return lower, upper
```
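A small numeric sketch of the interval rule in `interval_propagate` above, written with plain PyTorch rather than the auto_LiRPA classes: coordinates dropped in the forward pass are pinned to zero, and the surviving ones are scaled by `1 / (1 - p)`.
```python
# Standalone illustration of the dropout interval rule (not the auto_LiRPA API).
import torch

p = 0.5
scale = 1.0 / (1.0 - p)
mask = torch.tensor([True, False, False])   # True = zeroed by this forward pass
h_L = torch.tensor([-1.0, -2.0, 0.5])       # input lower bounds
h_U = torch.tensor([1.0, 3.0, 2.0])         # input upper bounds

lower = torch.where(mask, torch.tensor(0.0), h_L * scale)
upper = torch.where(mask, torch.tensor(0.0), h_U * scale)
print(lower)  # tensor([ 0., -4.,  1.])
print(upper)  # tensor([0., 6., 4.])
```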
#### File: auto_LiRPA/doc/conf.py
```python
import os
import subprocess
import inspect
import sys
from pygit2 import Repository
sys.path.insert(0, '..')
import auto_LiRPA
subprocess.run(['python', 'process.py'])
# -- Project information -----------------------------------------------------
project = 'auto_LiRPA'
author = '<a href="https://github.com/KaidiXu/auto_LiRPA#developers-and-copyright">auto-LiRPA authors</a>'
copyright = f'2021, {author}'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.linkcode',
'm2r2',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'src', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
repo = Repository('../')
branch = repo.head.shorthand
# Resolve function for the linkcode extension.
def linkcode_resolve(domain, info):
def find_source():
obj = auto_LiRPA
parts = info['fullname'].split('.')
if info['module'].endswith(f'.{parts[0]}'):
module = info['module'][:-len(parts[0])-1]
else:
module = info['module']
obj = sys.modules[module]
for part in parts:
obj = getattr(obj, part)
fn = inspect.getsourcefile(obj)
source, lineno = inspect.getsourcelines(obj)
return fn, lineno, lineno + len(source) - 1
fn, lineno_start, lineno_end = find_source()
filename = f'{fn}#L{lineno_start}-L{lineno_end}'
return f"https://github.com/KaidiXu/auto_LiRPA/blob/{branch}/doc/{filename}"
```
#### File: examples/vision/weight_perturbation_training.py
```python
import os
import random
import time
import argparse
import logging
import numpy as np
import torch
import torch.optim as optim
from torch.nn import CrossEntropyLoss
from auto_LiRPA import BoundedModule, CrossEntropyWrapper, BoundDataParallel, BoundedParameter
from auto_LiRPA.bound_ops import BoundExp
from auto_LiRPA.perturbations import *
from auto_LiRPA.utils import MultiAverageMeter, logger, get_spec_matrix
from datasets import mnist_loaders
import torchvision.datasets as datasets
import models
from auto_LiRPA.eps_scheduler import LinearScheduler, AdaptiveScheduler, SmoothedScheduler, FixedScheduler
def get_exp_module(bounded_module):
for _, node in bounded_module.named_modules():
# Find the Exp neuron in computational graph
if isinstance(node, BoundExp):
return node
return None
parser = argparse.ArgumentParser()
parser.add_argument("--verify", action="store_true", help='verification mode, do not train')
parser.add_argument("--load", type=str, default="", help='Load pretrained model')
parser.add_argument("--device", type=str, default="cuda", choices=["cpu", "cuda"], help='use cpu or cuda')
parser.add_argument("--data", type=str, default="MNIST", choices=["MNIST", "FashionMNIST"], help='dataset')
parser.add_argument("--ratio", type=float, default=None, help='percent of training used, None means whole training data')
parser.add_argument("--seed", type=int, default=100, help='random seed')
parser.add_argument("--eps", type=float, default=0.1, help='Target training epsilon for weight perturbations')
parser.add_argument("--norm", type=float, default='inf', help='p norm for epsilon perturbation')
parser.add_argument("--bound_type", type=str, default="CROWN-IBP",
choices=["IBP", "CROWN-IBP", "CROWN"], help='method of bound analysis')
parser.add_argument("--opt", type=str, default='ADAM', choices=["ADAM", "SGD"], help='optimizer')
parser.add_argument("--num_epochs", type=int, default=150, help='number of total epochs')
parser.add_argument("--batch_size", type=int, default=256, help='batch size')
parser.add_argument("--lr", type=float, default=0.001, help='learning rate')
parser.add_argument("--lr_decay_milestones", nargs='+', type=int, default=[120, 140], help='learning rate dacay milestones')
parser.add_argument("--scheduler_name", type=str, default="LinearScheduler",
choices=["LinearScheduler", "AdaptiveScheduler", "SmoothedScheduler"], help='epsilon scheduler')
parser.add_argument("--scheduler_opts", type=str, default="start=10,length=100", help='options for epsilon scheduler')
parser.add_argument("--bound_opts", type=str, default=None, choices=["same-slope", "zero-lb", "one-lb"],
help='bound options')
parser.add_argument('--clip_grad_norm', type=float, default=8.0)
parser.add_argument('--truncate_data', type=int, help='Truncate the training/test batches in unit test')
parser.add_argument('--multigpu', action='store_true', help='MultiGPU training')
num_class = 10
args = parser.parse_args()
exp_name = 'mlp_MNIST'+'_b'+str(args.batch_size)+'_'+str(args.bound_type)+'_epoch'+str(args.num_epochs)+'_'+args.scheduler_opts+'_'+str(args.eps)[:6]
log_file = f'{exp_name}{"_test" if args.verify else ""}.log'
file_handler = logging.FileHandler(log_file)
logger.addHandler(file_handler)
## Training one epoch.
def Train(model, t, loader, eps_scheduler, norm, train, opt, bound_type, method='robust', loss_fusion=True, final_node_name=None):
meter = MultiAverageMeter()
if train:
model.train()
eps_scheduler.train()
eps_scheduler.step_epoch(verbose=False)
eps_scheduler.set_epoch_length(int((len(loader.dataset) + loader.batch_size - 1) / loader.batch_size))
else:
model.eval()
eps_scheduler.eval()
# Used for loss-fusion. Get the exp operation in computational graph.
exp_module = get_exp_module(model)
def get_bound_loss(x=None, c=None):
if loss_fusion:
# When loss fusion is used, we need the upper bound for the final loss function.
bound_lower, bound_upper = False, True
else:
# When loss fusion is not used, we need the lower bound for the logit layer.
bound_lower, bound_upper = True, False
if bound_type == 'IBP':
lb, ub = model(method_opt="compute_bounds", x=x, C=c, method="IBP", final_node_name=final_node_name, no_replicas=True)
elif bound_type == 'CROWN':
lb, ub = model(method_opt="compute_bounds", x=x, C=c, method="backward",
bound_lower=bound_lower, bound_upper=bound_upper)
elif bound_type == 'CROWN-IBP':
# we use a mixed IBP and CROWN-IBP bounds, leading to better performance (Zhang et al., ICLR 2020)
# factor = (eps_scheduler.get_max_eps() - eps_scheduler.get_eps()) / eps_scheduler.get_max_eps()
ilb, iub = model(method_opt="compute_bounds", x=x, C=c, method="IBP", final_node_name=final_node_name, no_replicas=True)
lb, ub = model(method_opt="compute_bounds", C=c, method="CROWN-IBP",
bound_lower=bound_lower, bound_upper=bound_upper, final_node_name=final_node_name, average_A=True, no_replicas=True)
if loss_fusion:
# When loss fusion is enabled, we need to get the common factor before softmax.
if isinstance(model, BoundDataParallel):
max_input = model(get_property=True, node_class=BoundExp, att_name='max_input')
else:
max_input = exp_module.max_input
return None, torch.mean(torch.log(ub) + max_input)
else:
# Pad zero at the beginning for each example, and use fake label '0' for all examples
lb_padded = torch.cat((torch.zeros(size=(lb.size(0), 1), dtype=lb.dtype, device=lb.device), lb), dim=1)
fake_labels = torch.zeros(size=(lb.size(0),), dtype=torch.int64, device=lb.device)
robust_ce = CrossEntropyLoss()(-lb_padded, fake_labels)
return lb, robust_ce
for i, (data, labels) in enumerate(loader):
# For unit test. We only use a small number of batches
if args.truncate_data:
if i >= args.truncate_data:
break
start = time.time()
eps_scheduler.step_batch()
eps = eps_scheduler.get_eps()
# For small eps just use natural training, no need to compute LiRPA bounds
batch_method = method
if eps < 1e-50:
batch_method = "natural"
if train:
opt.zero_grad()
if list(model.parameters())[0].is_cuda:
data, labels = data.cuda(), labels.cuda()
model.ptb.eps = eps
x = data
if loss_fusion:
if batch_method == 'natural' or not train:
output = model(x, labels) # , disable_multi_gpu=True
regular_ce = torch.mean(torch.log(output))
else:
model(x, labels)
regular_ce = torch.tensor(0., device=data.device)
meter.update('CE', regular_ce.item(), x.size(0))
x = (x, labels)
c = None
else:
# Generate speicification matrix (when loss fusion is not used).
c = get_spec_matrix(data, labels, num_class)
x = (x, labels)
output = model(x, final_node_name=final_node_name)
regular_ce = CrossEntropyLoss()(output, labels) # regular CrossEntropyLoss used for warming up
meter.update('CE', regular_ce.item(), x[0].size(0))
meter.update('Err', torch.sum(torch.argmax(output, dim=1) != labels).item() / x[0].size(0), x[0].size(0))
if batch_method == 'robust':
lb, robust_ce = get_bound_loss(x=x, c=c)
loss = robust_ce
elif batch_method == 'natural':
loss = regular_ce
if train:
loss.backward()
if args.clip_grad_norm:
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=args.clip_grad_norm)
meter.update('grad_norm', grad_norm)
if isinstance(eps_scheduler, AdaptiveScheduler):
eps_scheduler.update_loss(loss.item() - regular_ce.item())
opt.step()
meter.update('Loss', loss.item(), data.size(0))
if batch_method != 'natural':
meter.update('Robust_CE', robust_ce.item(), data.size(0))
if not loss_fusion:
# For an example, if lower bounds of margins is >0 for all classes, the output is verifiably correct.
# If any margin is < 0 this example is counted as an error
meter.update('Verified_Err', torch.sum((lb < 0).any(dim=1)).item() / data.size(0), data.size(0))
meter.update('Time', time.time() - start)
if (i + 1) % 50 == 0 and train:
logger.info('[{:2d}:{:4d}]: eps={:.12f} {}'.format(t, i + 1, eps, meter))
logger.info('[{:2d}:{:4d}]: eps={:.12f} {}'.format(t, i + 1, eps, meter))
return meter
def main(args):
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
## Load the model with BoundedParameter for weight perturbation.
model_ori = models.Models['mlp_3layer_weight_perturb']()
epoch = 0
## Load a checkpoint, if requested.
if args.load:
checkpoint = torch.load(args.load)
epoch, state_dict = checkpoint['epoch'], checkpoint['state_dict']
opt_state = None
try:
opt_state = checkpoint['optimizer']
except KeyError:
print('no opt_state found')
for k, v in state_dict.items():
assert torch.isnan(v).any().cpu().numpy() == 0 and torch.isinf(v).any().cpu().numpy() == 0
model_ori.load_state_dict(state_dict)
logger.info('Checkpoint loaded: {}'.format(args.load))
## Step 2: Prepare dataset as usual
dummy_input = torch.randn(2, 1, 28, 28)
train_data, test_data = mnist_loaders(datasets.MNIST, batch_size=args.batch_size, ratio=args.ratio)
train_data.mean = test_data.mean = torch.tensor([0.0])
train_data.std = test_data.std = torch.tensor([1.0])
## Step 3: wrap model with auto_LiRPA
# The second parameter dummy_input is for constructing the trace of the computational graph.
model = BoundedModule(model_ori, dummy_input, bound_opts={'relu':args.bound_opts}, device=args.device)
final_name1 = model.final_name
model_loss = BoundedModule(CrossEntropyWrapper(model_ori), (dummy_input, torch.zeros(1, dtype=torch.long)),
bound_opts= { 'relu': args.bound_opts, 'loss_fusion': True }, device=args.device)
# after CrossEntropyWrapper, the final name will change because of one more input node in CrossEntropyWrapper
final_name2 = model_loss._modules[final_name1].output_name[0]
assert type(model._modules[final_name1]) == type(model_loss._modules[final_name2])
if args.multigpu:
model_loss = BoundDataParallel(model_loss)
model_loss.ptb = model.ptb = model_ori.ptb # Perturbation on the parameters
## Step 4 prepare optimizer, epsilon scheduler and learning rate scheduler
if args.opt == 'ADAM':
opt = optim.Adam(model_loss.parameters(), lr=args.lr, weight_decay=0.01)
elif args.opt == 'SGD':
opt = optim.SGD(model_loss.parameters(), lr=args.lr, weight_decay=0.01)
norm = float(args.norm)
lr_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=args.lr_decay_milestones, gamma=0.1)
eps_scheduler = eval(args.scheduler_name)(args.eps, args.scheduler_opts)
logger.info(str(model_ori))
# Skip epochs if we continue training from a checkpoint.
if epoch > 0:
epoch_length = int((len(train_data.dataset) + train_data.batch_size - 1) / train_data.batch_size)
eps_scheduler.set_epoch_length(epoch_length)
eps_scheduler.train()
for i in range(epoch):
lr_scheduler.step()
eps_scheduler.step_epoch(verbose=True)
for j in range(epoch_length):
eps_scheduler.step_batch()
logger.info('resume from eps={:.12f}'.format(eps_scheduler.get_eps()))
if args.load:
if opt_state:
opt.load_state_dict(opt_state)
logger.info('resume opt_state')
## Step 5: start training.
if args.verify:
eps_scheduler = FixedScheduler(args.eps)
with torch.no_grad():
Train(model, 1, test_data, eps_scheduler, norm, False, None, 'CROWN-IBP', loss_fusion=False, final_node_name=None)
else:
timer = 0.0
best_loss = 1e10
# Main training loop
for t in range(epoch + 1, args.num_epochs+1):
logger.info("Epoch {}, learning rate {}".format(t, lr_scheduler.get_last_lr()))
start_time = time.time()
# Training one epoch
Train(model_loss, t, train_data, eps_scheduler, norm, True, opt, args.bound_type, loss_fusion=True)
lr_scheduler.step()
epoch_time = time.time() - start_time
timer += epoch_time
logger.info('Epoch time: {:.4f}, Total time: {:.4f}'.format(epoch_time, timer))
logger.info("Evaluating...")
torch.cuda.empty_cache()
# remove 'model.' in state_dict (hack for saving models so far...)
state_dict_loss = model_loss.state_dict()
state_dict = {}
for name in state_dict_loss:
assert (name.startswith('model.'))
state_dict[name[6:]] = state_dict_loss[name]
# Test one epoch.
with torch.no_grad():
m = Train(model_loss, t, test_data, eps_scheduler, norm, False, None, args.bound_type,
loss_fusion=False, final_node_name=final_name2)
# Save checkpoints.
save_dict = {'state_dict': state_dict, 'epoch': t, 'optimizer': opt.state_dict()}
if not os.path.exists('saved_models'):
os.mkdir('saved_models')
if t < int(eps_scheduler.params['start']):
torch.save(save_dict, 'saved_models/natural_' + exp_name)
elif t > int(eps_scheduler.params['start']) + int(eps_scheduler.params['length']):
current_loss = m.avg('Loss')
if current_loss < best_loss:
best_loss = current_loss
torch.save(save_dict, 'saved_models/' + exp_name + '_best_' + str(best_loss)[:6])
else:
torch.save(save_dict, 'saved_models/' + exp_name)
else:
torch.save(save_dict, 'saved_models/' + exp_name)
torch.cuda.empty_cache()
if __name__ == "__main__":
main(args)
```
#### File: auto_LiRPA/tests/test_1d_activation.py
```python
import torch
import torch.nn as nn
import os
from testcase import TestCase
from auto_LiRPA import BoundedModule, BoundedTensor
from auto_LiRPA.perturbations import *
from auto_LiRPA.utils import logger
# Wrap the computation with a nn.Module
class test_model(nn.Module):
def __init__(self, act_func):
super().__init__()
self.act_func = act_func
def forward(self, x):
return self.act_func(x)
class Test1DActivation(TestCase):
def __init__(self, methodName='runTest'):
super().__init__(methodName)
def create_test(self, act_func, low, high, ntests=10000, nsamples=1000, method='IBP'):
model = test_model(act_func)
image = torch.zeros(1, ntests)
bounded_model = BoundedModule(model, image)
# Generate randomly bounded inputs.
p = torch.rand(1, ntests) * (high - low ) + low
q = torch.rand(1, ntests) * (high - low ) + low
input_lb = torch.min(p, q)
input_ub = torch.max(p, q)
input_center = (input_lb + input_ub) / 2.0
ptb = PerturbationLpNorm(norm=float("inf"), eps=None, x_L=input_lb, x_U=input_ub)
ptb_data = BoundedTensor(input_center, ptb)
# Generate reference results.
table = act_func(torch.linspace(start=low, end=high, steps=nsamples+1))
def lookup(l, u):
assert torch.all(u <= high)
assert torch.all(l >= low)
shape = l.size()
l = l.squeeze()
u = u.squeeze()
# select all sample points between l and u.
low_index = torch.ceil((l - low) / (high - low) * nsamples).int() # Make sure we do not have index 0.
high_index = torch.floor((u - low) / (high - low) * nsamples).int()
real_lb = torch.empty_like(l)
real_ub = torch.empty_like(u)
for i, (li, hi) in enumerate(zip(low_index, high_index)):
if li == hi + 1:
# Not enough precision. l and u are too close so we cannot tell.
real_lb[i] = float("inf")
real_ub[i] = float("-inf")
else:
selected = table[li : hi+1]
real_lb[i] = torch.min(selected)
real_ub[i] = torch.max(selected)
real_lb = real_lb.view(*shape)
real_ub = real_ub.view(*shape)
return real_lb, real_ub
# These are reference results. IBP results should be very close to these. Linear bound results can be looser than these.
ref_forward = model(input_center)
ref_output_lb, ref_output_ub = lookup(input_lb, input_ub)
# Get bounding results.
forward = bounded_model(ptb_data)
output_lb, output_ub = bounded_model.compute_bounds(x=(ptb_data,), method = method)
# Compare.
assert torch.allclose(forward, ref_forward)
for i in range(ntests):
show = False
if output_ub[0,i] < ref_output_ub[0,i] - 1e-5:
logger.warn(f'upper bound is wrong {ref_output_ub[0,i] - output_ub[0,i]}')
show = True
if output_lb[0,i] > ref_output_lb[0,i] + 1e-5:
logger.warn(f'lower bound is wrong {output_lb[0,i] - ref_output_lb[0,i]}')
show = True
if show:
logger.warn(f'input_lb={input_lb[0,i]:8.3f}, input_ub={input_ub[0,i]:8.3f}, lb={output_lb[0,i]:8.3f}, ref_lb={ref_output_lb[0,i]:8.3f}, ub={output_ub[0,i]:8.3f}, ref_ub={ref_output_ub[0,i]:8.3f}')
assert torch.all(output_ub + 1e-5 >= ref_output_ub)
assert torch.all(output_lb - 1e-5 <= ref_output_lb)
def test_relu(self):
self.create_test(act_func=torch.nn.functional.relu, low=-10, high=10, method='IBP')
self.create_test(act_func=torch.nn.functional.relu, low=-10, high=10, method='CROWN')
def test_exp(self):
self.create_test(act_func=torch.exp, low=-3, high=3, method='IBP')
self.create_test(act_func=torch.exp, low=-3, high=3, method='CROWN')
def test_reciprocal(self):
# So far only positive values are supported.
self.create_test(act_func=torch.reciprocal, low=0.01, high=10, method='IBP')
self.create_test(act_func=torch.reciprocal, low=0.01, high=10, method='CROWN')
def test_tanh(self):
self.create_test(act_func=torch.tanh, low=-5, high=5, method='IBP')
self.create_test(act_func=torch.tanh, low=-5, high=5, method='CROWN')
def test_sin(self):
self.create_test(act_func=torch.sin, low=-10, high=10, method='IBP')
# self.create_test(act_func=torch.sin, low=-10, high=10, method='CROWN')
def test_cos(self):
self.create_test(act_func=torch.cos, low=-10, high=10, method='IBP')
# self.create_test(act_func=torch.cos, low=-10, high=10, method='CROWN')
if __name__ == '__main__':
testcase = Test1DActivation()
testcase.test_relu()
testcase.test_reciprocal()
testcase.test_exp()
testcase.test_tanh()
testcase.test_sin()
testcase.test_cos()
```
#### File: auto_LiRPA/tests/test_examples.py
```python
import pytest
import subprocess
import os
import sys
import shlex
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--test', type=str, default=None)
args = parser.parse_args()
pytest_skip = pytest.mark.skip(
reason="It should be tested on a GPU server and excluded from CI")
if not 'CACHE_DIR' in os.environ:
cache_dir = os.path.join(os.getcwd(), '.cache')
else:
cache_dir = os.environ['CACHE_DIR']
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
def download_data_language():
url = "http://download.huan-zhang.com/datasets/language/data_language.tar.gz"
if not os.path.exists('../examples/language/data/sst'):
subprocess.run(shlex.split(f"wget {url}"), cwd="../examples/language")
subprocess.run(shlex.split(f"tar xvf data_language.tar.gz"),
cwd="../examples/language")
@pytest_skip
def test_transformer():
cmd = f"""python train.py --dir {cache_dir} --robust
--method IBP+backward_train --train --num_epochs 2 --num_epochs_all_nodes 2
--eps_start 2 --eps_length 1 --eps 0.1"""
print(cmd, file=sys.stderr)
download_data_language()
subprocess.run(shlex.split(cmd), cwd='../examples/language')
@pytest_skip
def test_lstm():
cmd = f"""python train.py --dir {cache_dir}
--model lstm --lr 1e-3 --dropout 0.5 --robust
--method IBP+backward_train --train --num_epochs 2 --num_epochs_all_nodes 2
--eps_start 2 --eps_length 1 --eps 0.1
--hidden_size 2 --embedding_size 2 --intermediate_size 2 --max_sent_length 4"""
print(cmd, file=sys.stderr)
download_data_language()
subprocess.run(shlex.split(cmd), cwd='../examples/language')
#FIXME this is broken
@pytest_skip
def test_lstm_seq():
cmd = f"""python train.py --dir {cache_dir}
--hidden_size 2 --num_epochs 2 --num_slices 4"""
print(cmd, file=sys.stderr)
subprocess.run(shlex.split(cmd), cwd='../examples/sequence')
@pytest_skip
def test_simple_verification():
cmd = "python simple_verification.py"
print(cmd, file=sys.stderr)
subprocess.run(shlex.split(cmd), cwd='../examples/vision')
@pytest_skip
def test_simple_training():
cmd = """python simple_training.py
--num_epochs 5 --scheduler_opts start=2,length=2"""
print(cmd, file=sys.stderr)
subprocess.run(shlex.split(cmd), cwd='../examples/vision')
@pytest_skip
def test_cifar_training():
cmd = """python cifar_training.py
--batch_size 64 --model ResNeXt_cifar
--num_epochs 5 --scheduler_opts start=2,length=2"""
print(cmd, file=sys.stderr)
subprocess.run(shlex.split(cmd), cwd='../examples/vision')
@pytest_skip
def test_weight_perturbation():
cmd = """python weight_perturbation_training.py
--norm 2 --bound_type CROWN-IBP
--num_epochs 3 --scheduler_opts start=2,length=1 --eps 0.01"""
print(cmd, file=sys.stderr)
subprocess.run(shlex.split(cmd), cwd='../examples/vision')
@pytest_skip
def test_tinyimagenet():
cmd = f"""python tinyimagenet_training.py
--batch_size 32 --model wide_resnet_imagenet64
--num_epochs 3 --scheduler_opts start=2,length=1 --eps {0.1/255}
--in_planes 2 --widen_factor 2"""
print(cmd, file=sys.stderr)
if not os.path.exists('../examples/vision/data/tinyImageNet/tiny-imagenet-200'):
subprocess.run(shlex.split("bash tinyimagenet_download.sh"),
cwd="../examples/vision/data/tinyImageNet")
subprocess.run(shlex.split(cmd), cwd='../examples/vision')
@pytest_skip
def test_imagenet():
cmd = f"""python imagenet_training.py
--batch_size 32 --model wide_resnet_imagenet64_1000class
--num_epochs 3 --scheduler_opts start=2,length=1 --eps {0.1/255}
--in_planes 2 --widen_factor 2"""
print(cmd)
if (not os.path.exists('../examples/vision/data/ImageNet64/train') or
not os.path.exists('../examples/vision/data/ImageNet64/test')):
print('Error: ImageNet64 dataset is not ready.')
return -1
subprocess.run(shlex.split(cmd), cwd='../examples/vision')
@pytest_skip
def test_release():
if args.test:
# Only run a specified test
eval(f'test_{args.test}')()
else:
# Run all tests
test_simple_training()
test_transformer()
test_lstm()
test_lstm_seq()
test_simple_verification()
test_cifar_training()
test_weight_perturbation()
test_tinyimagenet()
if __name__ == '__main__':
test_release()
```
{
"source": "jiachens/ModelNet40-C",
"score": 2
}
#### File: core/models/curvenet_seg.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from .curvenet_util import *
curve_config = {
'default': [[100, 5], [100, 5], None, None, None]
}
class CurveNet(nn.Module):
def __init__(self, num_classes=50, category=16, k=32, setting='default'):
super(CurveNet, self).__init__()
assert setting in curve_config
additional_channel = 32
self.lpfa = LPFA(9, additional_channel, k=k, mlp_num=1, initial=True)
# encoder
self.cic11 = CIC(npoint=2048, radius=0.2, k=k, in_channels=additional_channel, output_channels=64, bottleneck_ratio=2, curve_config=curve_config[setting][0])
self.cic12 = CIC(npoint=2048, radius=0.2, k=k, in_channels=64, output_channels=64, bottleneck_ratio=4, curve_config=curve_config[setting][0])
self.cic21 = CIC(npoint=512, radius=0.4, k=k, in_channels=64, output_channels=128, bottleneck_ratio=2, curve_config=curve_config[setting][1])
self.cic22 = CIC(npoint=512, radius=0.4, k=k, in_channels=128, output_channels=128, bottleneck_ratio=4, curve_config=curve_config[setting][1])
self.cic31 = CIC(npoint=128, radius=0.8, k=k, in_channels=128, output_channels=256, bottleneck_ratio=2, curve_config=curve_config[setting][2])
self.cic32 = CIC(npoint=128, radius=0.8, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4, curve_config=curve_config[setting][2])
self.cic41 = CIC(npoint=32, radius=1.2, k=31, in_channels=256, output_channels=512, bottleneck_ratio=2, curve_config=curve_config[setting][3])
self.cic42 = CIC(npoint=32, radius=1.2, k=31, in_channels=512, output_channels=512, bottleneck_ratio=4, curve_config=curve_config[setting][3])
self.cic51 = CIC(npoint=8, radius=2.0, k=7, in_channels=512, output_channels=1024, bottleneck_ratio=2, curve_config=curve_config[setting][4])
self.cic52 = CIC(npoint=8, radius=2.0, k=7, in_channels=1024, output_channels=1024, bottleneck_ratio=4, curve_config=curve_config[setting][4])
self.cic53 = CIC(npoint=8, radius=2.0, k=7, in_channels=1024, output_channels=1024, bottleneck_ratio=4, curve_config=curve_config[setting][4])
# decoder
self.fp4 = PointNetFeaturePropagation(in_channel=1024 + 512, mlp=[512, 512], att=[1024, 512, 256])
self.up_cic5 = CIC(npoint=32, radius=1.2, k=31, in_channels=512, output_channels=512, bottleneck_ratio=4)
self.fp3 = PointNetFeaturePropagation(in_channel=512 + 256, mlp=[256, 256], att=[512, 256, 128])
self.up_cic4 = CIC(npoint=128, radius=0.8, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4)
self.fp2 = PointNetFeaturePropagation(in_channel=256 + 128, mlp=[128, 128], att=[256, 128, 64])
self.up_cic3 = CIC(npoint=512, radius=0.4, k=k, in_channels=128, output_channels=128, bottleneck_ratio=4)
self.fp1 = PointNetFeaturePropagation(in_channel=128 + 64, mlp=[64, 64], att=[128, 64, 32])
self.up_cic2 = CIC(npoint=2048, radius=0.2, k=k, in_channels=128+64+64+category+3, output_channels=256, bottleneck_ratio=4)
self.up_cic1 = CIC(npoint=2048, radius=0.2, k=k, in_channels=256, output_channels=256, bottleneck_ratio=4)
self.global_conv2 = nn.Sequential(
nn.Conv1d(1024, 128, kernel_size=1, bias=False),
nn.BatchNorm1d(128),
nn.LeakyReLU(negative_slope=0.2))
self.global_conv1 = nn.Sequential(
nn.Conv1d(512, 64, kernel_size=1, bias=False),
nn.BatchNorm1d(64),
nn.LeakyReLU(negative_slope=0.2))
self.conv1 = nn.Conv1d(256, 256, 1, bias=False)
self.bn1 = nn.BatchNorm1d(256)
self.drop1 = nn.Dropout(0.5)
self.conv2 = nn.Conv1d(256, num_classes, 1)
self.se = nn.Sequential(nn.AdaptiveAvgPool1d(1),
nn.Conv1d(256, 256//8, 1, bias=False),
nn.BatchNorm1d(256//8),
nn.LeakyReLU(negative_slope=0.2),
nn.Conv1d(256//8, 256, 1, bias=False),
nn.Sigmoid())
def forward(self, xyz, l=None):
batch_size = xyz.size(0)
l0_points = self.lpfa(xyz, xyz)
l1_xyz, l1_points = self.cic11(xyz, l0_points)
l1_xyz, l1_points = self.cic12(l1_xyz, l1_points)
l2_xyz, l2_points = self.cic21(l1_xyz, l1_points)
l2_xyz, l2_points = self.cic22(l2_xyz, l2_points)
l3_xyz, l3_points = self.cic31(l2_xyz, l2_points)
l3_xyz, l3_points = self.cic32(l3_xyz, l3_points)
l4_xyz, l4_points = self.cic41(l3_xyz, l3_points)
l4_xyz, l4_points = self.cic42(l4_xyz, l4_points)
l5_xyz, l5_points = self.cic51(l4_xyz, l4_points)
l5_xyz, l5_points = self.cic52(l5_xyz, l5_points)
l5_xyz, l5_points = self.cic53(l5_xyz, l5_points)
# global features
emb1 = self.global_conv1(l4_points)
emb1 = emb1.max(dim=-1, keepdim=True)[0] # bs, 64, 1
emb2 = self.global_conv2(l5_points)
emb2 = emb2.max(dim=-1, keepdim=True)[0] # bs, 128, 1
# Feature Propagation layers
l4_points = self.fp4(l4_xyz, l5_xyz, l4_points, l5_points)
l4_xyz, l4_points = self.up_cic5(l4_xyz, l4_points)
l3_points = self.fp3(l3_xyz, l4_xyz, l3_points, l4_points)
l3_xyz, l3_points = self.up_cic4(l3_xyz, l3_points)
l2_points = self.fp2(l2_xyz, l3_xyz, l2_points, l3_points)
l2_xyz, l2_points = self.up_cic3(l2_xyz, l2_points)
l1_points = self.fp1(l1_xyz, l2_xyz, l1_points, l2_points)
if l is not None:
l = l.view(batch_size, -1, 1)
emb = torch.cat((emb1, emb2, l), dim=1) # bs, 128 + 16, 1
l = emb.expand(-1,-1, xyz.size(-1))
x = torch.cat((l1_xyz, l1_points, l), dim=1)
xyz, x = self.up_cic2(l1_xyz, x)
xyz, x = self.up_cic1(xyz, x)
x = F.leaky_relu(self.bn1(self.conv1(x)), 0.2, inplace=True)
se = self.se(x)
x = x * se
x = self.drop1(x)
x = self.conv2(x)
return x
```
#### File: ModelNet40-C/models/curvenet.py
```python
import torch.nn as nn
import torch.nn.functional as F
from CurveNet.core.models.curvenet_cls import CurveNet as CurveNet_og
from all_utils import DATASET_NUM_CLASS
class CurveNet(nn.Module):
def __init__(self, task, dataset):
super().__init__()
self.task = task
self.dataset = dataset
if task == "cls":
num_classes = DATASET_NUM_CLASS[dataset]
self.model = CurveNet_og(num_classes=num_classes)
else:
assert False
def forward(self, pc, cls=None):
pc = pc.to(next(self.parameters()).device)
pc = pc.permute(0, 2, 1).contiguous()
if self.task == 'cls':
assert cls is None
logit = self.model(pc)
out = {'logit': logit}
else:
assert False
return out
```
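A hedged usage sketch of this wrapper. The dataset key, point count, and import path are assumptions; the `(batch, num_points, 3)` input layout is inferred from the `permute(0, 2, 1)` call in `forward`, and the underlying point-cloud ops may require a GPU build.
```python
# Hypothetical sketch; assumes DATASET_NUM_CLASS contains 'modelnet40' and that
# the CurveNet.core package is importable as laid out in this repo.
import torch
from models.curvenet import CurveNet

model = CurveNet(task='cls', dataset='modelnet40').eval()
pc = torch.rand(2, 1024, 3)   # (batch, num_points, xyz); permuted to (batch, 3, num_points) inside forward
with torch.no_grad():
    out = model(pc)
print(out['logit'].shape)     # expected: (2, num_classes)
```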
{
"source": "jiachens/semseg",
"score": 2
}
#### File: semseg/tool/attack.py
```python
import numpy as np
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
# from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.nn import functional as F
import cv2
import kornia
class TVLoss(nn.Module):
def __init__(self,TVLoss_weight=1):
super(TVLoss,self).__init__()
self.TVLoss_weight = TVLoss_weight
def forward(self,x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self._tensor_size(x[:,:,1:,:])
count_w = self._tensor_size(x[:,:,:,1:])
h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum()
w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum()
return self.TVLoss_weight*2*(h_tv/count_h+w_tv/count_w)/batch_size
def _tensor_size(self,t):
return t.size()[1]*t.size()[2]*t.size()[3]
def pgd_t(model, image, label, mean, std, target_mask, patch_init, patch_orig, step_size = 1, eps=10, iters=10, alpha = 1, beta = 2., restarts=1, target_label=None, rap=False, init_tf_pts=None, patch_mask = None):
images = image.cuda()
t_labels = torch.ones_like(label)
    labels = t_labels.cuda(non_blocking=True)
    patches = patch_init.cuda()
    u_labels = label.cuda(non_blocking=True)
u_labels = torch.autograd.Variable(u_labels)
target_mask = torch.from_numpy(target_mask).cuda()
mean = torch.FloatTensor(mean).cuda().unsqueeze(0)
mean = mean[..., None, None]
std = torch.FloatTensor(std).cuda().unsqueeze(0)
std = std[..., None, None]
loss = nn.CrossEntropyLoss(ignore_index=255)
#loss = nn.NLLLoss2d(ignore_index=255)
tv_loss = TVLoss()
best_adv_img = [torch.zeros_like(images.data), -1e8]
# init transformation matrix
h, w = images.shape[-2:] # destination size
points_src = torch.FloatTensor(init_tf_pts[0]).unsqueeze(0)
# the destination points are the image vertexes
points_dst = torch.FloatTensor(init_tf_pts[1]).unsqueeze(0)
M: torch.tensor = kornia.get_perspective_transform(points_dst, points_src).cuda()
if patch_mask is None:
patch_mask_var = torch.ones_like(patches)
else:
patch_mask_var = patch_mask
t_patch_mask_var = kornia.warp_perspective(patch_mask_var.float(), M, dsize=(h, w))
#print(t_patch_mask_var.clone().cpu().data.numpy().shape)
# cv2.imwrite('mask.png', np.uint8(t_patch_mask_var.clone().squeeze().cpu().data.numpy().transpose((1,2,0))*255))
# cv2.imwrite('mask1.png', np.uint8(patch_mask_var.clone().squeeze().cpu().data.numpy().transpose((1,2,0))*255))
ori_patches = patch_orig.data
best_adv_patches = [torch.zeros_like(patches),-1e8]
for j in range(restarts):
# delta = torch.rand_like(patches, requires_grad=True)
delta = torch.zeros_like(patches, requires_grad=True)
# delta.data = (delta.data * 2 * eps - eps) * perturb_mask
for i in range(iters) :
step_size = np.max([1e-3, step_size * 0.99])
images.requires_grad = False
patches.requires_grad = False
delta.requires_grad = True
patch_mask_var.requires_grad = False
t_patch: torch.tensor = kornia.warp_perspective((patches+delta).float(), M, dsize=(h, w))
adv_images = (torch.clamp(t_patch*t_patch_mask_var+(1-t_patch_mask_var)*(images),min=0, max=255) - mean) / std
outputs = model(adv_images)[0]
model.zero_grad()
# remove attack
# cost = - loss(outputs*target_mask*upper_mask, labels*2*target_mask*upper_mask) - alpha * loss(outputs*perturb_mask[:,0,:,:], u_labels*perturb_mask[:,0,:,:])
# rap attack
if rap:
if target_label != None:
# target attack
# print(outputs.shape,labels.shape)
obj_loss_value = - loss(outputs.unsqueeze(0)*target_mask, labels.long()*target_label*target_mask)
tv_loss_value = - tv_loss((ori_patches + delta) / 255.)
cost = alpha * obj_loss_value + (1-alpha) * tv_loss_value
else:
# untargeted attack
obj_loss_value = loss(outputs.unsqueeze(0)*target_mask, u_labels.long()*target_mask)
tv_loss_value = tv_loss(ori_patches + delta)
cost = alpha * obj_loss_value + (1-alpha) * tv_loss_value
cost.backward()
print(i,cost.data, obj_loss_value.data, tv_loss_value.data)
adv_patches = patches + delta + step_size*eps*delta.grad.sign()
eta = torch.clamp(adv_patches - ori_patches, min=-eps, max=eps)
delta = torch.clamp(ori_patches + eta, min=0, max=255).detach_() - ori_patches
if cost.cpu().data.numpy() > best_adv_patches[1]:
best_adv_patches = [delta.data, cost.cpu().data.numpy()]
t_patch: torch.tensor = kornia.warp_perspective((ori_patches+best_adv_patches[0]).float(), M, dsize=(h, w))
# cv2.imwrite('./test.png',np.uint8(torch.clamp(t_patch*t_patch_mask_var+(1-t_patch_mask_var)*(images),min=0, max=255).clone().squeeze(0).cpu().numpy().transpose((1,2,0))))
adv_images_orig = torch.clamp(t_patch*t_patch_mask_var+(1-t_patch_mask_var)*(images),min=0, max=255)
adv_images = (torch.clamp(t_patch*t_patch_mask_var+(1-t_patch_mask_var)*(images),min=0, max=255)- mean)/std
return adv_images, adv_images_orig, best_adv_patches[0]+ori_patches, t_patch_mask_var.cpu().data.numpy()
```
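The heart of the loop above is a projected gradient step on the patch in pixel space. Below is a minimal, self-contained sketch of just that step in plain PyTorch, with no perspective warp or segmentation model; all tensors are placeholders.
```python
# Minimal sketch of the L-infinity patch update and projection used in pgd_t.
import torch

def pgd_patch_step(ori_patch, delta, grad, step_size=1.0, eps=10.0):
    """One ascent step on the patch, projected back into the eps-ball and [0, 255]."""
    adv = ori_patch + delta + step_size * eps * grad.sign()
    eta = torch.clamp(adv - ori_patch, min=-eps, max=eps)            # stay inside the eps-ball
    return torch.clamp(ori_patch + eta, min=0, max=255) - ori_patch  # keep pixel values valid

ori_patch = torch.rand(1, 3, 64, 64) * 255
delta = torch.zeros_like(ori_patch)
grad = torch.randn_like(ori_patch)   # stands in for delta.grad after cost.backward()
delta = pgd_patch_step(ori_patch, delta, grad)
print(float(delta.abs().max()))      # never exceeds eps
```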
{
"source": "JiaChenwei/OpenTrafficSimulation",
"score": 3
}
#### File: JiaChenwei/OpenTrafficSimulation/moos.py
```python
import copy
import time
from typing import List, Dict
import pandas as pd
from std_interface import *
class Car(CarInfo):
"""
基础车类
"""
def __init__(self, car_name: str, car_type: int, following_model: FollowingModel, expecting_headway: float,
limiting_acceleration: [float, float], limiting_speed: [float, float], stopping_distance: float,
observation_error: float, operation_error: float, response_time_delay: float,
car_color: [float, float, float], car_size: [float, float]):
"""
构造函数
初始化所有静态数据
:param car_type: 车辆类型,str类型
:param following_model: 跟驰模型,引用跟驰模型接口的实例类
:param expecting_headway: 期望车头时距,float类型,单位s
:param limiting_acceleration: 加速度范围,list类型,例如[最大减速度,最大加速度],例如[-6,2],单位m/s**2
:param limiting_speed: 速度范围,list类型,例如[最小速度,最大速度],例如[0,25],单位m/s
:param stopping_distance: 停车间距,float类型,跟驰车车头与前车车尾间距,单位m
:param observation_error: 观测误差,float类型,范围[0,1]
:param operation_error: 操作误差,float类型,范围[0,1]
:param response_time_delay: 反应延迟,float类型,指一秒中的有效响应比例,范围[0,1]
:param car_color: 车辆颜色,list类型,例如[0.5, 0.5, 0.5],rbg颜色,颜色范围[0,1]
:param car_size: 车辆尺寸,list类型,例如[长度,宽度],参考捷达尺寸[4.5,1.7],单位m
"""
super().__init__()
self.name = car_name
self.id = hash(time.time())
time.sleep(0.001)
self.car_type = car_type
self.following_model = following_model
self.init_location = float()
self.init_speed = float()
self.init_acceleration = float()
self.expecting_headway = expecting_headway
self.car_size = car_size # [length, width]
self.limiting_acceleration = limiting_acceleration
self.limiting_speed = limiting_speed
self.stopping_distance = stopping_distance
self.observation_error = observation_error
self.operation_error = operation_error
self.response_time_delay = response_time_delay
self.car_color = car_color
self.real_mileage = 0
self.real_location = self.init_location
self.real_speed = self.init_speed
self.real_acceleration = float()
self.real_headway = float()
self.real_spacing = float()
self.real_speed_difference = float()
self.real_acceleration_difference = float()
self.preceding_car = self
self.following_car = self
self.is_linked = False
self.is_initialized = False
self.road_length = float()
self.time = 0
self._time = self.time
self.real_position = 0
self._real_position = 0
self._real_mileage = 0
self._real_location = self.init_location
self._real_speed = self.init_speed
self._real_acceleration = float()
self._real_headway = float()
self._real_spacing = float()
self._real_speed_difference = float()
self._real_acceleration_difference = float()
self._response_time_delay = np.random.random() * self.response_time_delay
def __call__(self):
self.id = hash(time.time())
time.sleep(0.001)
def __repr__(self):
return self.name
def get_info(self):
ret = CarInfo()
ret.name = self.name
ret.id = self.id
ret.car_type = self.car_type
ret.following_model = self.following_model
ret.init_location = self.init_location
ret.init_speed = self.init_speed
ret.init_acceleration = self.init_acceleration
ret.expecting_headway = self.expecting_headway
ret.car_size = self.car_size
ret.limiting_acceleration = self.limiting_acceleration
ret.limiting_speed = self.limiting_speed
ret.stopping_distance = self.stopping_distance
ret.observation_error = self.observation_error
ret.operation_error = self.operation_error
ret.response_time_delay = self.response_time_delay
ret.car_color = self.car_color
ret.real_mileage = self.real_mileage
ret.real_location = self.real_location
ret.real_speed = self.real_speed
ret.real_acceleration = self.real_acceleration
ret.real_headway = self.real_headway
ret.real_spacing = self.real_spacing
ret.real_speed_difference = self.real_speed_difference
ret.real_acceleration_difference = self.real_acceleration_difference
ret.preceding_car = self.preceding_car
ret.following_car = self.following_car
ret.road_length = self.road_length
ret.time = self.time
ret.real_position = self.real_position
return ret
def initialize(self, loc: float, speed: float, acceleration: float, road_length: float):
"""
动态变量初始化函数
:param road_length:
:param loc: 位置,float类型,单位m
:param speed: 速度,float类型,单位m/s
:param acceleration: 加速度,float类型,单位m/s**2
:return:
"""
self.init_location = loc
self.real_location = loc
self.real_position = loc
self.init_speed = speed
self.real_speed = speed
self.init_acceleration = acceleration
self.real_acceleration = acceleration
self.road_length = road_length
self.is_initialized = True
pass
def count_difference(self, location_correction: float):
"""
差值计算
:param location_correction: 位置修正,float类型,解决首车与尾车的位置计算偏差
:return:
"""
self.real_speed_difference = (
self.preceding_car.real_speed
- self.real_speed
)
self.real_acceleration_difference = (
self.preceding_car.real_acceleration
- self.real_acceleration
)
self.real_spacing = (
self.preceding_car.real_location
- self.preceding_car.car_size[0]
- self.real_location
+ location_correction
)
def count_real_headway(dx: float, dv: float) -> float:
re = float()
if dv == 0:
re = float("inf") * dx
elif dv != 0:
re = -dx / dv
if re < 0:
re = float("inf")
pass
if dx < 0:
re = -float("inf")
pass
else:
pass
return re
self.real_headway = count_real_headway(
self.real_spacing,
self.real_speed_difference
)
pass
def link(self, following_car, preceding_car):
"""
链接函数,链接跟驰车与前车
:param following_car:
:param preceding_car: 前车实例
:return:
"""
self.following_car = following_car
self.preceding_car = preceding_car
self.is_linked = True
def update(self, step: float):
"""
更新动态数据,此方法仅作为更新准备,并不会赋值
:param step: 步长,float类型
:return: 无
"""
if self._response_time_delay > 0:
self._response_time_delay -= 0.001
_a = self._real_acceleration
else:
self._response_time_delay = self.response_time_delay
_a = self.following_model(self)
_a = self._check_acceleration(_a) * np.random.normal(1, self.operation_error)
pass
_a = self._check_acceleration(_a)
_v = self.real_speed + step * _a
_v = self._check_speed(_v)
loc = self.real_location
if _a == 0:
dloc = _v * step
else:
dloc = (_v ** 2 - self.real_speed ** 2) / (2 * _a)
loc = dloc + loc
self._real_acceleration = _a
self._real_speed = _v
self._real_location = loc
self._real_mileage += dloc
self._time += step
self._real_position = loc % self.road_length
pass
def apply(self):
"""
更新赋值,此方法使用前应使用update做更新准备
:return:
"""
self.time = self._time
self.real_acceleration = self._real_acceleration
self.real_speed = self._real_speed
self.real_location = self._real_location
self.real_mileage = self._real_mileage
self.real_position = self._real_position
pass
def switch_following_model(self, following_model: FollowingModel):
self.following_model = following_model
def get_difference(self):
return np.array([self.real_spacing, self.real_speed_difference, self.real_acceleration_difference])
def _check_acceleration(self, acceleration: float) -> float:
if (self.real_speed <= self.limiting_speed[0] and acceleration < 0) \
or \
(self.real_speed >= self.limiting_speed[1] and acceleration > 0):
acceleration = 0
pass
else:
if acceleration < self.limiting_acceleration[0]:
acceleration = self.limiting_acceleration[0]
elif acceleration > self.limiting_acceleration[1]:
acceleration = self.limiting_acceleration[1]
else:
pass
return acceleration
def _check_speed(self, speed: float) -> float:
if speed < self.limiting_speed[0]:
speed = self.limiting_speed[0]
elif speed > self.limiting_speed[1]:
speed = self.limiting_speed[1]
else:
pass
return speed
class FVDModel(FollowingModel):
def __init__(self):
pass
def _run(self, following_car: Car) -> float:
b = 28
alpha = 0.16
beta = 1.1
_lambda = 0.5
optimal_speed = (
0.5 * following_car.limiting_speed[1]
* (np.tanh(following_car.real_spacing / b - beta) - np.tanh(-beta))
)
_a = (
alpha * (optimal_speed - following_car.real_speed)
+ _lambda * following_car.real_speed_difference
)
# print(optimal_speed, _a)
return _a
pass
class GippsModel(FollowingModel):
def _run(self, following_car: Car) -> float:
e = ((following_car.limiting_acceleration[0] * following_car.response_time_delay) ** 2
- following_car.limiting_acceleration[0]
* (2 * (following_car.real_spacing
* np.random.normal(1, following_car.observation_error)
- following_car.stopping_distance)
# - following_car.expecting_headway
# * following_car.limiting_speed[1])
- following_car.real_speed * following_car.response_time_delay
- (following_car.preceding_car.real_speed
* np.random.normal(1, following_car.observation_error)) ** 2
/ following_car.preceding_car.limiting_acceleration[0]))
if e < 0:
e = 0
pass
v1 = (following_car.real_speed
+ 2.5
* following_car.limiting_acceleration[1]
* (1 - following_car.real_speed / following_car.limiting_speed[1])
* (0.0025 + following_car.real_speed / following_car.limiting_speed[1]) ** 0.5)
v2 = (following_car.limiting_acceleration[0] * following_car.response_time_delay + e ** 0.5)
v = min(v1, v2)
_a = (v - following_car.real_speed) / following_car.response_time_delay
return _a
pass
class IDMModel(FollowingModel):
def __init__(self, beta=4):
self.beta = beta
def _run(self, following_car: Car) -> float:
exp_spacing = (
following_car.stopping_distance
+ following_car.real_speed
* following_car.expecting_headway
+ following_car.real_speed
* following_car.real_speed_difference * 0.5
/ abs(following_car.limiting_acceleration[1]
* following_car.limiting_acceleration[0]) ** 0.5
)
_a = float()
if following_car.real_spacing > 0:
_a = (
abs(following_car.limiting_acceleration[1])
* (1
- abs(following_car.real_speed
/ following_car.limiting_speed[1]) ** self.beta
- abs(exp_spacing
/ following_car.real_spacing
* np.random.normal(1, following_car.observation_error)) ** 2
)
)
elif following_car.real_spacing <= 0:
_a = -float('inf')
return _a
pass
class PATHModelACC(FollowingModel):
def __init__(self, k1=0.23, k2=0.07):
self.k1 = k1
self.k2 = k2
def _run(self, following_car: Car) -> float:
e = (
following_car.real_spacing
* np.random.normal(1, following_car.observation_error)
- following_car.stopping_distance
- following_car.expecting_headway
* following_car.real_speed
)
_a = (self.k1 * e
+ self.k2
* following_car.real_speed_difference
* np.random.normal(1, following_car.observation_error))
return _a
pass
class PATHModelCACC(FollowingModel):
def __init__(self, k1=1.1, k2=0.23, k3=0.07):
self.k1 = k1
self.k2 = k2
self.k3 = k3
def _run(self, following_car: Car) -> float:
e = (
following_car.real_spacing
* np.random.normal(1, following_car.observation_error)
- following_car.stopping_distance
- following_car.expecting_headway
* following_car.real_speed
)
_a = (self.k1
* following_car.preceding_car.real_acceleration
* np.random.normal(1, following_car.observation_error)
+ self.k2 * e
+ self.k3
* following_car.real_speed_difference
* np.random.normal(1, following_car.observation_error))
return _a
pass
class IDMWithGipps(FollowingModel):
def __init__(self, beta=4):
self.beta = beta
self.idm = IDMModel(beta)
self.gipps = GippsModel()
def _run(self, following_car) -> float:
re = 0
a1 = self.idm(following_car)
a2 = self.gipps(following_car)
t = np.min((0, a1, a2))
if t == 0:
re = a1
elif t != 0:
re = t
else:
pass
return re
class PATHModelACCWithGipps(FollowingModel):
def __init__(self, k1=0.23, k2=0.07):
self.k1 = k1
self.k2 = k2
self.path_acc = PATHModelACC(k1, k2)
self.gipps = GippsModel()
def _run(self, following_car) -> float:
re = 0
a1 = self.path_acc(following_car)
a2 = self.gipps(following_car)
t = np.min((0, a1, a2))
if t == 0:
re = a1
elif t != 0:
re = t
else:
pass
return re
class PATHModelCACCWithGipps(FollowingModel):
def __init__(self, k1=1.1, k2=0.23, k3=0.07):
self.k1 = k1
self.k2 = k2
self.k3 = k3
self.path_cacc = PATHModelCACC(k1, k2, k3)
self.gipps = GippsModel()
def _run(self, following_car) -> float:
re = 0
a1 = self.path_cacc(following_car)
a2 = self.gipps(following_car)
t = np.min((0, a1, a2))
if t == 0:
re = a1
elif t != 0:
re = t
else:
pass
return re
class IntelligentDrivingCarModel(FollowingModel):
dict_mode = {0: 'head', 1: 'body', 2: 'tail'}
def __init__(self, model: FollowingModel, alpha: float = 0.5, beta: float = 1, gamma: float = 1, name='IDC',
max_search_index=5):
self.model = model
self.name = name
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.max_search_index = max_search_index
pass
def _run(self, following_car: Car) -> float:
_following_car = following_car.get_info()
a = 0
cars = []
tmp_car = following_car
for i in range(self.max_search_index + 1):
cars.append(tmp_car)
tmp_car = tmp_car.preceding_car
pass
head = []
body = []
tail = []
tag = 2
for c in cars[:-1]:
if tag == 2 and type(c.preceding_car.following_model) == type(self):
t = c.get_difference()
tail.append(t + np.sum(tail, 0))
continue
elif tag == 2 and type(c.preceding_car.following_model) != type(self):
tag = 1
t = c.get_difference()
body.append(t + np.sum(tail, 0))
continue
elif tag == 1 and type(c.preceding_car.following_model) != type(self):
t = c.get_difference()
body.append(t + np.sum(tail, 0) + np.sum(body, 0))
continue
elif tag == 1 and type(c.preceding_car.following_model) == type(self):
tag = 0
t = c.get_difference()
head.append(t + np.sum(tail, 0) + np.sum(body, 0))
continue
else:
pass
pass
if len(body) == 0 or len(head) == 0:
a = self.model(_following_car)
pass
elif type(_following_car.following_car.following_model) is not type(self):
# head
if len(tail) != 0:
vector_pre_car = tail[0]
else:
vector_pre_car = body[0]
vector_mean_body = np.mean(body, 0)
vector_head = head[0]
dx = np.max((
vector_pre_car[0]
+ self.gamma
* np.min((0, vector_pre_car[1], vector_mean_body[1], vector_head[1]))
, 0
))
dv = vector_pre_car[1] + self.gamma * np.min(
(0, vector_pre_car[2], vector_mean_body[2], vector_head[2])
)
da = np.min((vector_pre_car[2], vector_mean_body[2], vector_head[2]))
_following_car.real_spacing = dx
_following_car.real_speed_difference = dv
_following_car.real_acceleration_difference = da
a = self.model(_following_car)
pass
elif type(_following_car.following_car.following_model) is type(self):
# tail
if len(tail) != 0:
vector_pre_car = tail[0]
else:
vector_pre_car = body[0]
vector_mean_body = np.mean(body, 0)
vector_head = head[0]
t = _following_car.expecting_headway
t = t - self.alpha * t * np.tanh(self.beta * (np.max((vector_pre_car[1],
vector_mean_body[1],
vector_head[1]))
+ self.gamma
* np.max((vector_pre_car[2],
vector_mean_body[2],
vector_head[2]))))
_following_car.expecting_headway = t
a = self.model(_following_car)
pass
else:
print("ERROR")
exit()
return a
SET_INIT_LOC_TYPE = {"L", "U", "R"}
SET_INIT_CARS_TYPE = {"U", "R"}
class Fleet:
def __init__(self,
car_num: int,
init_type_car: str,
proportion: Dict,
road_length: float,
init_type_loc: str):
self.car_num = car_num
self.init_type_car = init_type_car
self.proportion = proportion
self.road_length = road_length
self.init_type_loc = init_type_loc
self.cars = self._init_cars()
self._make_cars_link()
self._init_loc()
self._index = np.linspace(1, self.car_num, self.car_num)
def update(self, step):
for c in self.cars:
c.update(step)
pass
for c in self.cars:
c.apply()
pass
for c in self.cars[:-1]:
c.count_difference(location_correction=0)
pass
self.cars[-1].count_difference(location_correction=self.road_length)
pass
def _init_cars(self) -> List[Car]:
proportion_keys = list(self.proportion.keys())
proportion_values = list(self.proportion.values())
re = []
        if self.init_type_car == "U":
            pass
        elif self.init_type_car == "R":
list_rand_num = []
num_and_car = []
list_num = list(
np.array(np.array(proportion_values)
* self.car_num
/ np.sum(proportion_values),
dtype=int)
)
for ln in list_num:
list_rand_num.append(list(np.random.rand(ln)))
pass
for lrn, c in zip(list_rand_num, proportion_keys):
for u in lrn:
num_and_car.append([u, copy.deepcopy(c)])
pass
pass
num_and_car = np.array(num_and_car)
index = np.argsort(num_and_car[:, 0])
for _i in index:
re.append(num_and_car[_i, 1])
else:
pass
for r in re:
r()
return re
def _make_cars_link(self):
index = len(self.cars)
for x in range(index - 1):
self.cars[x].link(self.cars[x - 1], self.cars[x + 1])
pass
self.cars[-1].link(self.cars[-2], self.cars[0])
pass
def _init_loc(self) -> None:
        if self.init_type_loc == "L":
num = len(self.cars)
self.cars[0].initialize(0, 0, 0, self.road_length)
for _i in range(1, num):
self.cars[_i].initialize(
self.cars[_i - 1].init_location
+ self.cars[_i - 1].stopping_distance
+ self.cars[_i].car_size[0],
0,
0,
self.road_length
)
pass
pass
        elif self.init_type_loc == "U":
num = len(self.cars)
loc = np.linspace(0, self.road_length, num)
for cl, car in zip(loc, self.cars):
car.initialize(cl, 0, 0, self.road_length)
pass
pass
        elif self.init_type_loc == "R":
pass
else:
pass
for c in self.cars[:-1]:
c.count_difference(location_correction=0)
pass
self.cars[-1].count_difference(location_correction=self.road_length)
pass
def get_cars_location(self) -> List:
re = []
for c in self.cars:
re.append(c.real_location)
pass
return re
def get_cars_speed(self) -> List:
re = []
for c in self.cars:
re.append(c.real_speed)
pass
return re
def get_cars_acceleration(self) -> List:
re = []
for c in self.cars:
re.append(c.real_acceleration)
pass
return re
def get_cars_headway(self) -> List:
re = []
for c in self.cars:
re.append(c.real_headway)
pass
return re
def get_cars_spacing(self) -> List:
re = []
for c in self.cars:
re.append(c.real_spacing)
pass
return re
def get_cars_type(self) -> List:
re = []
for c in self.cars:
re.append(c.car_type)
pass
return re
def get_data(self) -> pd.DataFrame:
re = []
        rows = np.linspace(1, self.car_num, self.car_num, dtype=int)
cols = ['sub_index', 'time', 'type', 'pos', 'v', 'a', 'dl', 'dv', 'da', 'hw']
for c in self.cars:
tmp = [c.time, c.car_type, c.real_position, c.real_speed, c.real_acceleration, c.real_spacing,
c.real_speed_difference, c.real_acceleration_difference, c.real_headway]
re.append(tmp)
pass
re = np.c_[np.array([self._index]).T, re]
re = pd.DataFrame(re, index=rows, columns=cols, dtype='double')
re.index.name = 'index'
return re
def get_data_by_list(self) -> List:
re = []
for c in self.cars:
tmp = [c.time, c.id, c.car_type, c.real_position, c.real_speed, c.real_acceleration, c.real_spacing,
c.real_speed_difference, c.real_acceleration_difference, c.real_headway]
re.append(tmp)
pass
re = np.c_[np.array([self._index]).T, re]
return re
```
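A hypothetical driver for the classes above. `std_interface` is not included in this record, so the `Car` constructor arguments follow the docstring and may need adjustment.
```python
# Hypothetical simulation driver (assumes moos.py and std_interface are importable).
from moos import Car, Fleet, IDMModel

idm_car = Car(car_name='idm', car_type=0, following_model=IDMModel(),
              expecting_headway=1.5, limiting_acceleration=[-6.0, 2.0],
              limiting_speed=[0.0, 25.0], stopping_distance=2.0,
              observation_error=0.0, operation_error=0.0, response_time_delay=0.5,
              car_color=[0.5, 0.5, 0.5], car_size=[4.5, 1.7])

fleet = Fleet(car_num=20, init_type_car='R', proportion={idm_car: 1.0},
              road_length=1000.0, init_type_loc='U')

step = 0.1                      # simulation time step in seconds
for _ in range(1000):
    fleet.update(step)
print(fleet.get_data().head())  # per-car state as a pandas DataFrame
```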
{
"source": "Jiachen-Zhang/DNS_Proxy",
"score": 2
}
#### File: DNS_Proxy/test/test_tun.py
```python
from src.core.sys_manage import TunManager
def test_create_tun():
    '''
    Test creating a Tun device (not brought up).
    '''
tun_name = 'tun2'
_tun_fn, name = TunManager.create_tunnel('tun2')
assert name == tun_name
def test_close_tun():
'''
    Test closing the Tun device
'''
TunManager.del_tunnel(2)
``` |
{
"source": "Jiachen-Zhang/esp32_client",
"score": 3
} |
#### File: Jiachen-Zhang/esp32_client/socketio_client.py
```python
import asyncio
import socketio
sio = socketio.AsyncClient()
@sio.event
async def connect():
"""
    Handle a successful connection to the server
:return:
"""
print('connection established')
await sio.emit('csi_amplitude', 'ping')
@sio.event
async def chat_message(data):
"""
聊天消息
:param data:
:return:
"""
print('message received with ', data)
# await sio.emit('chat_message', 'ping')
@sio.event
async def csi_amplitude(data):
    """
    Handle incoming CSI amplitude data
    :param data:
    :return:
    """
    print('received: {}'.format(data))
@sio.event
async def disconnect():
"""
    Handle disconnection from the server
:return:
"""
print('disconnected from server')
async def main():
"""
    Main entry point
:return:
"""
await sio.connect('http://localhost:5005')
await sio.wait()
if __name__ == '__main__':
asyncio.run(main())
```
#### File: Jiachen-Zhang/esp32_client/tcp_client.py
```python
import time
from queue import Full
from socket import socket, AF_INET, SOCK_STREAM
from utils.logger import log
from utils.channel import SERIAL_QUEUE
HOST = '127.0.0.1' # The server's hostname or IP address
PORT = 5000 # The port used by the server
class TCPClient:
"""
    TCP client object
"""
def __init__(self, server_host: str = '127.0.0.1', server_port: int = 5000):
self.server_host: str = server_host
self.server_port: int = server_port
def __try_connect(self):
"""
        Try to connect the socket, ignoring connection-refused errors
:return:
"""
_s = socket(AF_INET, SOCK_STREAM)
try:
_s.connect((self.server_host, self.server_port))
except ConnectionRefusedError:
pass
return _s
def send_serial_data(self):
"""
        Read assembled serial data from the queue and send it to the server over the TCP socket
:return:
"""
_s = self.__try_connect()
i = 0
while True:
i += 1
serial_data = SERIAL_QUEUE.get()
assert isinstance(serial_data, str), 'wrong type of data read from SERIAL_QUEUE'
_data: bytes = serial_data.encode('utf-8')
assert _data.endswith(b'\n')
if i % 2 == 0:
continue
try:
_s.sendall(_data)
except BrokenPipeError:
log.warning('BrokenPipeError when sending to server SERIAL_QUEUE.qsize() = %d',
SERIAL_QUEUE.qsize())
_s = self.__try_connect()
@staticmethod
def flush_queue():
"""
        Flush all buffered messages from the queue
:return:
"""
while not SERIAL_QUEUE.empty():
SERIAL_QUEUE.get_nowait()
return True
if __name__ == '__main__':
import _thread
from threading import Timer
data: str = 'CSI_DATA,AP,3C:71:BF:6D:2A:78,-73,11,1,0,1,1,1,0,0,0,0,-93,0,1,1,80272146,0,101,' \
'0,0,80.363225,384,[99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 ' \
'99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 ' \
'99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 ' \
'99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 99 0 ' \
'99 0 99 0 99 0 99 0], {}\n '
tcp_client = TCPClient()
_thread.start_new_thread(tcp_client.send_serial_data, ())
def produce_data(_num: int, _batch: int = 50):
"""
        Simulate data production by writing 50 records into the queue per call
:return:
"""
log.info('produce_data %f', time.time())
for _ in range(_batch):
try:
_num += 1
val: str = str(_num) # str(int(sin(num/30.0) * 50 + 50))
_data = data.replace('99', val)
SERIAL_QUEUE.put_nowait(_data.format(time.time()))
time.sleep(0.015)
log.info('val = %s', val)
except Full:
log.warning('Write Failed to SERIAL_QUEUE [Full]')
NUM = 0
BATCH = 50
while producer_timer := Timer(interval=1, function=produce_data, args=(NUM, BATCH)):
producer_timer.start()
producer_timer.join()
NUM += BATCH
```
#### File: esp32_client/utils/logger.py
```python
import logging
def get_logger(name: str):
"""
    Create a logger with a custom name for the caller
:param name:
:return:
"""
print('init logger')
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s %(filename)s[:%(lineno)d]',
datefmt='%H:%M:%S')
return logging.getLogger(name)
log = get_logger(__name__)
``` |
{
"source": "jiachien1206/stanCode-projects",
"score": 4
} |
#### File: stanCode_projects/boggle_game/boggle.py
```python
FILE = 'dictionary.txt'
dictionary = []
def main():
"""
    This is a Boggle game program.
    The user inputs a 4*4 grid of letters, one row at a time.
    The program finds every dictionary word that can be formed from sequences of adjacent letters.
"""
read_dictionary()
# list to save the letters input
all_list = [0, 0, 0, 0]
answers = []
# answers have printed
for i in range(4):
character = input(str(i + 1) + ' row of letters: ')
character = character.lower()
if len(character) < 7 or character[1] != ' ' or character[3] != ' ' or character[5] != ' ' or\
character[0].isalpha() is False or character[2].isalpha() is False or character[4].isalpha() is False or\
character[6].isalpha() is False:
            print('Illegal input')
            return
else:
character += ' '
all_list[i] = character.split()
# letters may be able to spell a word
s = ''
for y in range(4):
for x in range(4):
# used coordinate
used = []
# add first word into string
s += all_list[y][x]
# add the position to used list
used.append((x, y))
# look for words in sequences of adjacent letters
boggle(all_list, answers, s, used, x, y)
# clear the letters to start a new round
s = ''
print('There are', len(answers), 'words in total.')
def boggle(all_list, answers, s, used, x, y):
# find word and has not been printed yet
if s in dictionary and len(s) >= 4 and s not in answers:
answers.append(s)
print('Found "'+s+'"')
# look for longer words with the beginning of the previous word
boggle(all_list, answers, s, used, x, y)
else:
# look for neighbors
for i in range(-1, 2, 1):
for j in range(-1, 2, 1):
if 0 <= y + i <= 3 and 0 <= x + j <= 3 and (x + j, y + i) not in used:
used.append((x + j, y + i))
s += all_list[y + i][x + j]
if has_prefix(s):
boggle(all_list, answers, s, used, x + j, y + i)
                    # backtrack: remove the last letter
                    s = s[:len(s)-1]
                    # backtrack: remove the last coordinate
                    used.pop()
def read_dictionary():
"""
This function reads file "dictionary.txt" stored in FILE
and appends words in each line into a Python list
"""
with open(FILE, 'r') as f:
for word in f:
word = word.strip('\n')
dictionary.append(word)
def has_prefix(sub_s):
"""
:param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
    :return: (bool) Whether any word in the dictionary starts with sub_s
    """
    for word in dictionary:
        if word.startswith(sub_s):
            return True
    return False
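# Illustrative run (results depend on the words in dictionary.txt): with the rows
# "f y c l", "i o m g", "o r i l", "h j h u", a word such as "foil" would be found
# by tracing orthogonally or diagonally adjacent cells.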
if __name__ == '__main__':
main()
``` |
{
"source": "jiachiun/opensea-python-wrapper",
"score": 3
} |
#### File: responses/tests/_response_helpers.py
```python
from typing import Type
from unittest import TestCase
from open_sea_v1.endpoints.abc import BaseEndpoint
from open_sea_v1.responses.abc import BaseResponse
class ResponseTestHelper(TestCase):
@classmethod
def create_and_get(cls, endpoint_client: Type[BaseEndpoint], **kwargs) -> list[list[BaseResponse]]:
"""Shortcut"""
client = endpoint_client(**kwargs) # type: ignore
flattened = client.get_parsed_pages(flat=True)
return flattened
@staticmethod
def assert_attributes_do_not_raise_unexpected_exceptions(target_obj):
attrs = [n for n in dir(target_obj) if not n.startswith('__')]
for a in attrs:
getattr(target_obj, a)
@staticmethod
def assert_no_missing_class_attributes_from_original_json_keys(response_obj, json):
for key in json:
getattr(response_obj, key)
``` |
{
"source": "JiacongXu666/lalala",
"score": 2
} |
#### File: lalala/datasets/camvid.py
```python
import os
import cv2
import numpy as np
from PIL import Image
import torch
from torch.nn import functional as F
from .base_dataset import BaseDataset
class CamVid(BaseDataset):
def __init__(self,
root,
list_path,
num_samples=None,
num_classes=11,
multi_scale=True,
flip=True,
ignore_label=255,
base_size=960,
crop_size=(720, 960),
downsample_rate=1,
scale_factor=16,
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
bd_dilate_size=4):
super(CamVid, self).__init__(ignore_label, base_size,
crop_size, downsample_rate, scale_factor, mean, std,)
self.root = root
self.list_path = list_path
self.num_classes = num_classes
self.multi_scale = multi_scale
self.flip = flip
self.img_list = [line.strip().split() for line in open(root+list_path)]
self.files = self.read_files()
if num_samples:
self.files = self.files[:num_samples]
self.ignore_label = ignore_label
self.color_list = [[0, 128, 192], [128, 0, 0], [64, 0, 128],
[192, 192, 128], [64, 64, 128], [64, 64, 0],
[128, 64, 128], [0, 0, 192], [192, 128, 128],
[128, 128, 128], [128, 128, 0]]
self.class_weights = torch.FloatTensor([1.87037042, 0.19432342, 0.86141965, 1.51101858, 1.26610983,
1.76284923, 0.16097535, 0.52201845, 2.20493199, 0.27010653,
0.37587654]).cuda()
self.bd_dilate_size = bd_dilate_size
def read_files(self):
files = []
for item in self.img_list:
image_path, label_path = item
name = os.path.splitext(os.path.basename(label_path))[0]
files.append({
"img": image_path,
"label": label_path,
"name": name,
"weight": 1
})
return files
def color2label(self, color_map):
label = np.ones(color_map.shape[:2])*self.ignore_label
#rgb_color_map = cv2.cvtColor(color_map, cv2.COLOR_BGR2RGB)
for i, v in enumerate(self.color_list):
label[(color_map == v).sum(2)==3] = i
return label.astype(np.uint8)
def label2color(self, label):
color_map = np.zeros(label.shape+(3,))
for i, v in enumerate(self.color_list):
color_map[label==i] = self.color_list[i]
return color_map.astype(np.uint8)
def __getitem__(self, index):
item = self.files[index]
name = item["name"]
image = Image.open(os.path.join(self.root,'camvid',item["img"]))
image = np.array(image)
size = image.shape
color_map = Image.open(os.path.join(self.root,'camvid',item["label"])).convert('RGB')
color_map = np.array(color_map)
label = self.color2label(color_map)
image, label, edge = self.gen_sample(image, label,
self.multi_scale, self.flip, edge_pad=False,
edge_size=self.bd_dilate_size)
return image.copy(), label.copy(), edge.copy(), np.array(size), name
def multi_scale_inference(self, config, model, image, scales=[1], flip=False):
batch, _, ori_height, ori_width = image.size()
assert batch == 1, "only supporting batchsize 1."
image = image.numpy()[0].transpose((1,2,0)).copy()
        stride_h = int(self.crop_size[0] * 1.0)
        stride_w = int(self.crop_size[1] * 1.0)
final_pred = torch.zeros([1, self.num_classes,
ori_height,ori_width]).cuda()
for scale in scales:
new_img = self.multi_scale_aug(image=image,
rand_scale=scale,
rand_crop=False)
height, width = new_img.shape[:-1]
if scale <= 1.0:
new_img = new_img.transpose((2, 0, 1))
new_img = np.expand_dims(new_img, axis=0)
new_img = torch.from_numpy(new_img).cuda()
preds = self.inference(config, model, new_img, flip)
preds = preds[:, :, 0:height, 0:width]
else:
new_h, new_w = new_img.shape[:-1]
                rows = int(np.ceil(1.0 * (new_h -
                                    self.crop_size[0]) / stride_h)) + 1
                cols = int(np.ceil(1.0 * (new_w -
                                    self.crop_size[1]) / stride_w)) + 1
preds = torch.zeros([1, self.num_classes,
new_h,new_w]).cuda()
count = torch.zeros([1,1, new_h, new_w]).cuda()
for r in range(rows):
for c in range(cols):
h0 = r * stride_h
w0 = c * stride_w
h1 = min(h0 + self.crop_size[0], new_h)
w1 = min(w0 + self.crop_size[1], new_w)
h0 = max(int(h1 - self.crop_size[0]), 0)
w0 = max(int(w1 - self.crop_size[1]), 0)
crop_img = new_img[h0:h1, w0:w1, :]
crop_img = crop_img.transpose((2, 0, 1))
crop_img = np.expand_dims(crop_img, axis=0)
crop_img = torch.from_numpy(crop_img).cuda()
pred = self.inference(config, model, crop_img, flip)
preds[:,:,h0:h1,w0:w1] += pred[:,:, 0:h1-h0, 0:w1-w0]
count[:,:,h0:h1,w0:w1] += 1
preds = preds / count
preds = preds[:,:,:height,:width]
preds = F.interpolate(
preds, (ori_height, ori_width),
mode='bilinear', align_corners=config.MODEL.ALIGN_CORNERS
)
final_pred += preds
return final_pred
def save_pred(self, preds, sv_path, name):
preds = np.asarray(np.argmax(preds.cpu(), axis=1), dtype=np.uint8)
for i in range(preds.shape[0]):
pred = self.label2color(preds[i])
save_img = Image.fromarray(pred)
save_img.save(os.path.join(sv_path, name[i]+'.png'))
```
#### File: lalala/utils/criterion.py
```python
import torch
import torch.nn as nn
from torch.nn import functional as F
import logging
from configs import config
class CrossEntropy(nn.Module):
def __init__(self, ignore_label=-1, weight=None):
super(CrossEntropy, self).__init__()
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(
weight=weight,
ignore_index=ignore_label
)
def _forward(self, score, target):
loss = self.criterion(score, target)
return loss
def forward(self, score, target):
if config.MODEL.NUM_OUTPUTS == 1:
score = [score]
#weights = config.LOSS.BALANCE_WEIGHTS
balance_weights = config.LOSS.BALANCE_WEIGHTS
sb_weights = config.LOSS.SB_WEIGHTS
if len(balance_weights) == len(score):
return sum([w * self._forward(x, target) for (w, x) in zip(balance_weights, score)])
elif len(score) == 1:
return sb_weights * self._forward(score[0], target)
else:
raise ValueError("lengths of prediction and target are not identical!")
class OhemCrossEntropy(nn.Module):
def __init__(self, ignore_label=-1, thres=0.7,
min_kept=100000, weight=None):
super(OhemCrossEntropy, self).__init__()
self.thresh = thres
self.min_kept = max(1, min_kept)
self.ignore_label = ignore_label
self.criterion = nn.CrossEntropyLoss(
weight=weight,
ignore_index=ignore_label,
reduction='none'
)
def _ce_forward(self, score, target):
loss = self.criterion(score, target)
return loss
def _ohem_forward(self, score, target, **kwargs):
pred = F.softmax(score, dim=1)
pixel_losses = self.criterion(score, target).contiguous().view(-1)
mask = target.contiguous().view(-1) != self.ignore_label
tmp_target = target.clone()
tmp_target[tmp_target == self.ignore_label] = 0
pred = pred.gather(1, tmp_target.unsqueeze(1))
pred, ind = pred.contiguous().view(-1,)[mask].contiguous().sort()
min_value = pred[min(self.min_kept, pred.numel() - 1)]
threshold = max(min_value, self.thresh)
pixel_losses = pixel_losses[mask][ind]
pixel_losses = pixel_losses[pred < threshold]
return pixel_losses.mean()
def forward(self, score, target):
if not (isinstance(score, list) or isinstance(score, tuple)):
score = [score]
balance_weights = config.LOSS.BALANCE_WEIGHTS
sb_weights = config.LOSS.SB_WEIGHTS
if len(balance_weights) == len(score):
functions = [self._ce_forward] * \
(len(balance_weights) - 1) + [self._ohem_forward]
return sum([
w * func(x, target)
for (w, x, func) in zip(balance_weights, score, functions)
])
elif len(score) == 1:
return sb_weights * self._ohem_forward(score[0], target)
else:
raise ValueError("lengths of prediction and target are not identical!")
def dice_loss_func(bd_pre, target):
smooth = 1.
n = bd_pre.size(0)
iflat = bd_pre.view(n, -1)
tflat = target.view(n, -1)
intersection = (iflat * tflat).sum(1)
loss = 1 - ((2. * intersection + smooth) /
((iflat**2).sum(1) + (tflat**2).sum(1) + smooth))
return loss.mean()
def weighted_bce(bd_pre, target):
n, c, h, w = bd_pre.size()
log_p = bd_pre.permute(0,2,3,1).contiguous().view(1, -1)
target_t = target.view(1, -1)
pos_index = (target_t == 1)
neg_index = (target_t == 0)
weight = torch.zeros_like(log_p)
pos_num = pos_index.sum()
neg_num = neg_index.sum()
sum_num = pos_num + neg_num
weight[pos_index] = neg_num * 1.0 / sum_num
weight[neg_index] = pos_num * 1.0 / sum_num
loss = F.binary_cross_entropy_with_logits(log_p, target_t, weight, reduction='mean')
return loss
class JointBondaryLoss(nn.Module):
def __init__(self, coeff_bce = 20.0, coeff_dice = 1.0):
super(JointBondaryLoss, self).__init__()
#self.coeff_dice = coeff_dice
self.coeff_bce = coeff_bce
def forward(self, bd_pre, bd_gt):
bce_loss = self.coeff_bce * weighted_bce(bd_pre, bd_gt)
#dice_loss = self.coeff_dice * dice_loss_func(torch.sigmoid(bd_pre), bd_gt.float())
loss = bce_loss #+ dice_loss
return loss
if __name__ == '__main__':
a = torch.zeros(2,64,64)
a[:,5,:] = 1
    # The prediction must match the label's spatial size, and forward() returns a
    # single weighted-BCE loss tensor (the dice term is commented out above).
    pre = torch.randn(2, 1, 64, 64)
    Loss_fc = JointBondaryLoss()
    loss = Loss_fc(pre, a)
    print("boundary bce loss: {}".format(loss.item()))
``` |
{
"source": "jiadaizhao/LeetCode",
"score": 3
} |
#### File: 0001-0100/0006-ZigZag Conversion/0006-ZigZag Conversion.py
```python
class Solution:
def convert(self, s: str, numRows: int) -> str:
if numRows == 1:
return s
result = ['']*numRows
step = 1
index = 0
for c in s:
result[index] += c
if index == 0:
step = 1
elif index == numRows - 1:
step = -1
index += step
return ''.join(result)
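# Example from the problem statement:
# Solution().convert("PAYPALISHIRING", 3) == "PAHNAPLSIIGYIR"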
```
#### File: 0001-0100/0020-Valid Parentheses/0020-Valid Parentheses.py
```python
class Solution:
def isValid(self, s: str) -> bool:
St = []
table = {')':'(', ']':'[', '}':'{'}
for c in s:
if c in table:
if not St or St[-1] != table[c]:
return False
St.pop()
else:
St.append(c)
return not St
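# Examples: Solution().isValid("()[]{}") is True, Solution().isValid("(]") is False.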
```
#### File: 0001-0100/0032-Longest Valid Parentheses/0032-Longest Valid Parentheses.py
```python
class Solution:
def longestValidParentheses(self, s: str) -> int:
left = right = maxLen = 0
for c in s:
if c == '(':
left += 1
else:
right += 1
if left == right:
maxLen = max(maxLen, 2*left)
elif right > left:
left = right = 0
left = right = 0
for c in s[::-1]:
if c == ')':
right += 1
else:
left += 1
if left == right:
maxLen = max(maxLen, 2*left)
elif left > right:
left = right = 0
return maxLen
```
#### File: 0001-0100/0038-Count and Say/0038-Count and Say.py
```python
class Solution:
def countAndSay(self, n: int) -> str:
result = '1'
for _ in range(n - 1):
curr, count = '', 1
for j in range(1, len(result)):
if result[j] != result[j - 1]:
curr += str(count) + result[j - 1]
count = 1
else:
count += 1
curr += str(count) + result[-1]
result = curr
return result
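# Example: countAndSay(4) == "1211"  ("1" -> "11" -> "21" -> "1211").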
```
#### File: 0001-0100/0042-Trapping Rain Water/Trapping Rain Water.py
```python
class Solution:
def trap(self, height: List[int]) -> int:
left = 0
right = len(height) - 1
maxLeft = maxRight = total = 0
while left < right:
if height[left] <= height[right]:
total += max(maxLeft - height[left], 0)
maxLeft = max(maxLeft, height[left])
left += 1
else:
total += max(maxRight - height[right], 0)
maxRight = max(maxRight, height[right])
right -= 1
return total
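# Classic example: Solution().trap([0,1,0,2,1,0,1,3,2,1,2,1]) == 6.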
```
#### File: 0001-0100/0047-Permutations II/0047-Permutations II.py
```python
class Solution:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
result = []
used = [False]*len(nums)
path = []
nums.sort()
def dfs():
if len(path) == len(nums):
result.append(path[:])
return
for i in range(len(nums)):
if used[i]:
continue
if i > 0 and nums[i] == nums[i - 1] and not used[i - 1]:
continue
used[i] = True
path.append(nums[i])
dfs()
path.pop()
used[i] = False
dfs()
return result
```
#### File: 0001-0100/0059-Spiral Matrix II/0059-Spiral Matrix II.py
```python
class Solution:
def generateMatrix(self, n: int) -> List[List[int]]:
result = [[0]*n for _ in range(n)]
rowStart = colStart = 0
rowEnd = colEnd = n - 1
num = 1
while rowStart <= rowEnd and colStart <= colEnd:
for j in range(colStart, colEnd + 1):
result[rowStart][j] = num
num += 1
rowStart += 1
for i in range(rowStart, rowEnd + 1):
result[i][colEnd] = num
num += 1
colEnd -= 1
if rowStart <= rowEnd:
for j in range(colEnd, colStart - 1, -1):
result[rowEnd][j] = num
num += 1
rowEnd -= 1
if colStart <= colEnd:
for i in range(rowEnd, rowStart - 1, -1):
result[i][colStart] = num
num += 1
colStart += 1
return result
```
#### File: 0001-0100/0060-Permutation Sequence/0060-Permutation Sequence.py
```python
import math
class Solution:
def getPermutation(self, n: int, k: int) -> str:
fac = math.factorial(n - 1)
nums = [i for i in range(1, 1+n)]
base = n - 1
k -= 1
result = [0] * n
for i in range(n):
index = k // fac
k %= fac
result[i] = str(nums[index])
nums.pop(index)
if base:
fac //= base
base -= 1
return ''.join(result)
```
#### File: 0001-0100/0070-Climbing Stairs/0070-Climbing Stairs.py
```python
class Solution:
def climbStairs(self, n: int) -> int:
dp0 = dp1 = 1
for i in range(n - 1):
dp2 = dp1 + dp0
dp0 = dp1
dp1 = dp2
return dp1
class Solution2:
def climbStairs(self, n: int) -> int:
base = [[1, 1], [1, 0]]
def multiply(a, b):
c = [[0, 0], [0, 0]]
for i in range(2):
for j in range(2):
c[i][j] = a[i][0]*b[0][j] + a[i][1]*b[1][j]
return c
def pow(a, n):
result = [[1, 0], [0, 1]]
while n > 0:
if n&1:
result = multiply(result, a)
n >>= 1
a = multiply(a, a)
return result
return pow(base, n)[0][0]
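# Both solutions evaluate the Fibonacci-style recurrence f(n) = f(n-1) + f(n-2),
# e.g. climbStairs(5) == 8; Solution2 does it in O(log n) via matrix exponentiation.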
```
#### File: 0001-0100/0073-Set Matrix Zeroes/0073-Set Matrix Zeroes.py
```python
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
if len(matrix) == 0 or len(matrix[0]) == 0:
return
firstRowHasZero = any(num == 0 for num in matrix[0])
firstColHasZero = any(matrix[i][0] == 0 for i in range(len(matrix)))
for i in range(1, len(matrix)):
for j in range(1, len(matrix[0])):
if matrix[i][j] == 0:
matrix[i][0] = matrix[0][j] = 0
for i in range(1, len(matrix)):
for j in range(1, len(matrix[0])):
if matrix[i][0] == 0 or matrix[0][j] == 0:
matrix[i][j] = 0
if firstRowHasZero:
for j in range(len(matrix[0])):
matrix[0][j] = 0
if firstColHasZero:
for i in range(len(matrix)):
matrix[i][0] = 0
```
#### File: 0001-0100/0079-Word Search/0079-Word Search.py
```python
class Solution:
def exist(self, board: List[List[str]], word: str) -> bool:
if len(board) == 0 or len(board[0]) == 0:
return word == ''
def dfs(start, row, col):
if start == len(word):
return True
if row < 0 or row >= len(board) or col < 0 or col >= len(board[0]) or \
board[row][col] == '#' or board[row][col] != word[start]:
return False
c = board[row][col]
board[row][col] = '#'
result = dfs(start+1, row-1, col) or dfs(start+1, row+1, col) or dfs(start+1, row, col-1) or dfs(start+1, row, col+1)
board[row][col] = c
return result
return any(dfs(0, row, col) for row in range(len(board)) for col in range(len(board[0])))
```
#### File: 0001-0100/0087-Scramble String/0087-Scramble String.py
```python
import collections
class Solution:
def isScramble(self, s1: str, s2: str) -> bool:
if len(s1) != len(s2):
return False
if s1 == s2:
return True
if collections.Counter(s1) != collections.Counter(s2):
return False
return any((self.isScramble(s1[:k], s2[:k]) and self.isScramble(s1[k:], s2[k:]))
or (self.isScramble(s1[:k], s2[-k:]) and self.isScramble(s1[k:], s2[:-k]))
for k in range(1, len(s1)))
```
#### File: 0101-0200/0113-Path Sum II/0113-Path Sum II.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:
result = []
path = []
def dfs(root, sum):
if root is None:
return
sum -= root.val
path.append(root.val)
if sum == 0 and root.left is None and root.right is None:
result.append(path[:])
else:
dfs(root.left, sum)
dfs(root.right, sum)
path.pop()
dfs(root, sum)
return result
```
#### File: 0101-0200/0120-Triangle/0120-Triangle.py
```python
class Solution:
def minimumTotal(self, triangle: List[List[int]]) -> int:
minSum = triangle[-1]
for i in range(len(triangle) - 2, -1, -1):
for j in range(i + 1):
minSum[j] = min(minSum[j], minSum[j + 1]) + triangle[i][j]
return minSum[0]
```
#### File: 0101-0200/0123-Best Time to Buy and Sell Stock III/0123-Best Time to Buy and Sell Stock III.py
```python
import math
class Solution:
def maxProfit(self, prices: List[int]) -> int:
buy1 = buy2 = -math.inf
sell1 = sell2 = 0
for p in prices:
buy1 = max(buy1, -p)
sell1 = max(sell1, buy1 + p)
buy2 = max(buy2, sell1 - p)
sell2 = max(sell2, buy2 + p)
return sell2
```
#### File: 0101-0200/0125-Valid Palindrome/0125-Valid Palindrome.py
```python
class Solution:
def isPalindrome(self, s: str) -> bool:
start = 0
end = len(s) - 1
while start < end:
while start < end and not s[start].isalnum():
start += 1
while start < end and not s[end].isalnum():
end -= 1
if start < end:
if s[start].lower() != s[end].lower():
return False
start += 1
end -= 1
return True
```
#### File: 0101-0200/0127-Word Ladder/0127-Word Ladder.py
```python
import collections
class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
table = collections.defaultdict(list)
L = len(beginWord)
for word in wordList:
for i in range(L):
table[word[:i] + '*' + word[i + 1:]].append(word)
Q = collections.deque([(beginWord, 1)])
visited = set([beginWord])
while Q:
currw, step = Q.popleft()
for i in range(L):
nextw = currw[:i] + '*' + currw[i+1:]
for word in table[nextw]:
if word == endWord:
return step + 1
if word not in visited:
Q.append((word, step + 1))
visited.add(word)
return 0
# Bidirection
class Solution2:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
if endWord not in wordList:
return 0
table = collections.defaultdict(list)
L = len(beginWord)
for word in wordList:
for i in range(L):
table[word[:i] + '*' + word[i + 1:]].append(word)
qBegin = collections.deque([(beginWord, 1)])
visitedBegin = {beginWord: 1}
qEnd = collections.deque([(endWord, 1)])
visitedEnd = {endWord: 1}
def visitWord(Q, visited1, visited2):
currw, step = Q.popleft()
for i in range(L):
nextw = currw[:i] + '*' + currw[i+1:]
for word in table[nextw]:
if word in visited2:
return step + visited2[word]
if word not in visited1:
visited1[word] = step + 1
Q.append((word, step + 1))
return None
while qBegin and qEnd:
step = visitWord(qBegin, visitedBegin, visitedEnd)
if step:
return step
step = visitWord(qEnd, visitedEnd, visitedBegin)
if step:
return step
return 0
# Faster bidirection with swap
class Solution3:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
if endWord not in wordList:
return 0
table = collections.defaultdict(list)
L = len(beginWord)
for word in wordList:
for i in range(L):
table[word[:i] + '*' + word[i + 1:]].append(word)
step = 1
visitedBegin = set([beginWord])
visitedEnd = set([endWord])
visited = set([beginWord, endWord])
while visitedBegin and visitedEnd:
if len(visitedBegin) > len(visitedEnd):
visitedBegin, visitedEnd = visitedEnd, visitedBegin
step += 1
temp = set()
for w in visitedBegin:
for i in range(L):
nw = w[:i] + '*' + w[i+1:]
for word in table[nw]:
if word in visitedEnd:
return step
if word not in visited:
temp.add(word)
visited.add(word)
visitedBegin = temp
return 0
```
#### File: 0101-0200/0130-Surrounded Regions/0130-Surrounded Regions.py
```python
import collections
class Solution:
def solve(self, board: List[List[str]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
Q = collections.deque()
for i in range(len(board)):
for j in range(len(board[i])):
if (i in (0, len(board) - 1) or j in (0, len(board[i]) - 1)) and board[i][j] == 'O':
Q.append((i, j))
board[i][j] = '#'
while Q:
row, col = Q.popleft()
for nr, nc in ((row-1, col), (row+1, col), (row, col-1), (row, col+1)):
if 0 <= nr < len(board) and 0 <= nc < len(board[0]) and board[nr][nc] == 'O':
Q.append((nr, nc))
board[nr][nc] = '#'
for i in range(len(board)):
for j in range(len(board[i])):
if board[i][j] == 'O':
board[i][j] = 'X'
elif board[i][j] == '#':
board[i][j] = 'O'
```
#### File: 0101-0200/0132-Palindrome Partitioning II/0132-Palindrome Partitioning II.py
```python
class Solution:
def minCut(self, s: str) -> int:
dp = [i - 1 for i in range(len(s) + 1)]
for i in range(len(s)):
j = 0
while i - j >= 0 and i + j < len(s) and s[i - j] == s[i + j]:
dp[i + j + 1] = min(dp[i + j + 1], 1 + dp[i - j])
j += 1
j = 1
while i - j + 1 >= 0 and i + j < len(s) and s[i - j + 1] == s[i + j]:
dp[i + j + 1] = min(dp[i + j + 1], 1 + dp[i - j + 1])
j += 1
return dp[-1]
```
#### File: 0101-0200/0149-Max Points on a Line/0149-Max Points on a Line.py
```python
import collections
class Solution:
def maxPoints(self, points: List[List[int]]) -> int:
def gcd(a, b):
while b:
a, b = b, a%b
return a
maxNum = 0
for i in range(len(points)):
same = localMax = 0
table = collections.Counter()
for j in range(i):
if points[i][0] == points[j][0] and points[i][1] == points[j][1]:
same += 1
else:
dx = points[i][0] - points[j][0]
dy = points[i][1] - points[j][1]
gc = gcd(dx, dy)
dx //= gc
dy //= gc
table[(dx, dy)] += 1
localMax = max(localMax, table[(dx, dy)])
maxNum = max(maxNum, localMax + same + 1)
return maxNum
```
#### File: 0101-0200/0156-Binary Tree Upside Down/0156-Binary Tree Upside Down.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def upsideDownBinaryTree(self, root: TreeNode) -> TreeNode:
if root is None:
return root
curr = root
prev = prevRight = None
while curr:
next = curr.left
curr.left = prevRight
prevRight = curr.right
curr.right = prev
prev = curr
curr = next
return prev
```
#### File: 0101-0200/0165-Compare Version Numbers/0165-Compare Version Numbers.py
```python
from itertools import zip_longest
class Solution:
def compareVersion(self, version1: str, version2: str) -> int:
v1 = map(int, version1.split('.'))
v2 = map(int, version2.split('.'))
v1, v2 = zip(*zip_longest(v1, v2, fillvalue=0))
return (v1 > v2) - (v1 < v2)
```
#### File: 0101-0200/0173-Binary Search Tree Iterator/0173-Binary Search Tree Iterator.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class BSTIterator:
def __init__(self, root: TreeNode):
self.St = []
while root:
self.St.append(root)
root = root.left
def next(self) -> int:
"""
@return the next smallest number
"""
node = self.St.pop()
result = node.val
node = node.right
while node:
self.St.append(node)
node = node.left
return result
def hasNext(self) -> bool:
"""
@return whether we have a next smallest number
"""
return len(self.St) > 0
# Your BSTIterator object will be instantiated and called as such:
# obj = BSTIterator(root)
# param_1 = obj.next()
# param_2 = obj.hasNext()
```
#### File: 0201-0300/0203-Remove Linked List Elements/0203-Remove Linked List Elements.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
p = dummy = ListNode(-1)
while head:
if head.val != val:
p.next = head
p = p.next
head = head.next
p.next = None
return dummy.next
```
#### File: 0201-0300/0209-Minimum Size Subarray Sum/0209-Minimum Size Subarray Sum.py
```python
class Solution:
def minSubArrayLen(self, s: int, nums: List[int]) -> int:
minLen = len(nums) + 1
start = sum = 0
for i in range(len(nums)):
sum += nums[i]
while sum >= s:
minLen = min(minLen, i - start + 1)
sum -= nums[start]
start += 1
return minLen if minLen != len(nums) + 1 else 0
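# Sliding-window example: minSubArrayLen(7, [2,3,1,2,4,3]) == 2 (the subarray [4,3]).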
```
#### File: 0201-0300/0213-House Robber II/0213-House Robber II.py
```python
class Solution:
def rob(self, nums: List[int]) -> int:
def rob(start, end):
dp0 = dp1 = 0
for i in range(start, end + 1):
dp2 = max(dp1, dp0 + nums[i])
dp0 = dp1
dp1 = dp2
return dp1
if len(nums) == 0:
return 0
elif len(nums) == 1:
return nums[0]
else:
return max(rob(0, len(nums) - 2), rob(1, len(nums) - 1))
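# Example: rob([2,3,2]) == 3; the houses form a circle, so take the better of
# robbing indices [0..n-2] or [1..n-1].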
```
#### File: 0201-0300/0219-Contains Duplicate II/0219-Contains Duplicate II.py
```python
class Solution:
def containsNearbyDuplicate(self, nums: List[int], k: int) -> bool:
table = {}
for i, num in enumerate(nums):
if num in table and i - table[num] <= k:
return True
else:
table[num] = i
return False
```
#### File: 0201-0300/0223-Rectangle Area/0223-Rectangle Area.py
```python
class Solution:
def computeArea(self, A: int, B: int, C: int, D: int, E: int, F: int, G: int, H: int) -> int:
left = max(A, E)
right = max(min(C, G), left)
bottom = max(B, F)
up = max(min(D, H), bottom)
return (C - A)*(D - B) + (G - E)*(H - F) - (right - left)*(up - bottom)
```
#### File: 0201-0300/0251-Flatten 2D Vector/0251-Flatten 2D Vector.py
```python
class Vector2D:
def __init__(self, v: List[List[int]]):
def getIt():
for row in v:
for val in row:
yield val
self.it = iter(getIt())
self.val = next(self.it, None)
def next(self) -> int:
result = self.val
self.val = next(self.it, None)
return result
def hasNext(self) -> bool:
return self.val is not None
# Your Vector2D object will be instantiated and called as such:
# obj = Vector2D(v)
# param_1 = obj.next()
# param_2 = obj.hasNext()
```
#### File: 0201-0300/0259-3Sum Smaller/0259-3Sum Smaller.py
```python
class Solution:
def threeSumSmaller(self, nums: List[int], target: int) -> int:
nums.sort()
count = 0
for i in range(len(nums) - 2):
start = i + 1
end = len(nums) - 1
t = target - nums[i]
while start < end:
if nums[start] + nums[end] < t:
count += end - start
start += 1
else:
end -= 1
return count
```
#### File: 0201-0300/0264-Ugly Number II/0264-Ugly Number II.py
```python
class Solution:
def nthUglyNumber(self, n: int) -> int:
p2 = p3 = p5 = 0
ugly = [1] * n
for i in range(1, n):
ugly[i] = min(ugly[p2]*2, ugly[p3]*3, ugly[p5]*5)
if ugly[p2]*2 == ugly[i]:
p2 += 1
if ugly[p3]*3 == ugly[i]:
p3 += 1
if ugly[p5]*5 == ugly[i]:
p5 += 1
return ugly[-1]
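# Example: nthUglyNumber(10) == 12 (sequence: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12).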
```
#### File: 0201-0300/0270-Closest Binary Search Tree Value/0270-Closest Binary Search Tree Value.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def closestValue(self, root: TreeNode, target: float) -> int:
result = root.val
while root:
if abs(root.val - target) < abs(result - target):
result = root.val
if abs(result - target) <= 0.5:
break
root = root.right if root.val < target else root.left
return result
```
#### File: 0201-0300/0271-Encode and Decode Strings/0271-Encode and Decode Strings.py
```python
class Codec:
def encode(self, strs: [str]) -> str:
"""Encodes a list of strings to a single string.
"""
return ''.join(str(len(s)) + '@' + s for s in strs)
def decode(self, s: str) -> [str]:
"""Decodes a single string to a list of strings.
"""
result = []
i = 0
while i < len(s):
j = s.find('@', i)
i = j + 1 + int(s[i: j])
result.append(s[j + 1: i])
return result
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(strs))
```
#### File: 0201-0300/0290-Word Pattern/0290-Word Pattern.py
```python
class Solution:
def wordPattern(self, pattern: str, str: str) -> bool:
table = {}
mapped = set()
words = str.split()
if len(words) != len(pattern):
return False
for i, word in enumerate(words):
if word in table:
if table[word] != pattern[i]:
return False
else:
if pattern[i] in mapped:
return False
table[word] = pattern[i]
mapped.add(pattern[i])
return True
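# Examples: wordPattern("abba", "dog cat cat dog") is True,
#           wordPattern("abba", "dog cat cat fish") is False.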
```
#### File: 0201-0300/0296-Best Meeting Point/0296-Best Meeting Point.py
```python
class Solution:
def minTotalDistance(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
rows = []
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
rows.append(i)
cols = []
for j in range(n):
for i in range(m):
if grid[i][j] == 1:
cols.append(j)
def getDist(nums):
total = start = 0
end = len(nums) - 1
while start < end:
total += nums[end] - nums[start]
start += 1
end -= 1
return total
return getDist(rows) + getDist(cols)
```
#### File: 0201-0300/0298-Binary Tree Longest Consecutive Sequence/0298-Binary Tree Longest Consecutive Sequence.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def longestConsecutive(self, root: TreeNode) -> int:
maxLen = 0
def dfs(root):
if root is None:
return 0
currLen = 1
leftLen = dfs(root.left)
rightLen = dfs(root.right)
if root.left and root.left.val == root.val + 1:
currLen += leftLen
if root.right and root.right.val == root.val + 1:
currLen = max(currLen, 1 + rightLen)
nonlocal maxLen
maxLen = max(maxLen, currLen)
return currLen
dfs(root)
return maxLen
```
#### File: 0301-0400/0304-Range Sum Query 2D - Immutable/0304-Range Sum Query 2D - Immutable.py
```python
class NumMatrix:
def __init__(self, matrix: List[List[int]]):
if not matrix:
return
self.presum = [[0] * (1 + len(matrix[0])) for _ in range(1 + len(matrix))]
for i in range(len(matrix)):
for j in range(len(matrix[0])):
self.presum[i + 1][j + 1] = self.presum[i][j + 1] + self.presum[i + 1][j] - self.presum[i][j] + matrix[i][j]
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
return self.presum[row2 + 1][col2 + 1] - self.presum[row1][col2 + 1] - self.presum[row2 + 1][col1] + self.presum[row1][col1]
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
```
#### File: 0301-0400/0309-Best Time to Buy and Sell Stock with Cooldown/0309-Best Time to Buy and Sell Stock with Cooldown.py
```python
class Solution:
def maxProfit(self, prices: List[int]) -> int:
if not prices:
return 0
buy = [0] * (1 + len(prices))
sell = [0] * (1 + len(prices))
buy[1] = -prices[0]
for i in range(2, 1 + len(prices)):
sell[i] = max(sell[i - 1], buy[i - 1] + prices[i - 1])
buy[i] = max(buy[i - 1], sell[i - 2] - prices[i - 1])
return sell[-1]
class Solution2:
def maxProfit(self, prices: List[int]) -> int:
if not prices:
return 0
buy = -prices[0]
sell1 = sell2 = 0
for i in range(2, 1 + len(prices)):
temp = sell2
sell2 = max(sell2, buy + prices[i - 1])
buy = max(buy, sell1 - prices[i - 1])
sell1 = temp
return sell2
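# Example: maxProfit([1,2,3,0,2]) == 3 (buy 1, sell 2, cooldown, buy 0, sell 2).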
```
#### File: 0301-0400/0338-Counting Bits/0338-Counting Bits.py
```python
class Solution:
def countBits(self, num: int) -> List[int]:
result = [0] * (1 + num)
for i in range(1, 1 + num):
result[i] = result[i&(i - 1)] + 1
return result
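# Example: countBits(5) == [0, 1, 1, 2, 1, 2]; i & (i - 1) clears the lowest set bit,
# so each popcount reuses a previously computed value.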
```
#### File: 0301-0400/0341-Flatten Nested List Iterator/0341-Flatten Nested List Iterator.py
```python
class NestedIterator(object):
def __init__(self, nestedList):
"""
Initialize your data structure here.
:type nestedList: List[NestedInteger]
"""
self.St = [[nestedList, 0]]
def next(self):
"""
:rtype: int
"""
nl, index = self.St[-1]
self.St[-1][1] += 1
return nl[index].getInteger()
def hasNext(self):
"""
:rtype: bool
"""
while self.St:
nl, index = self.St[-1]
if index == len(nl):
self.St.pop()
else:
ni = nl[index]
if ni.isInteger():
return True
self.St[-1][1] += 1
self.St.append([ni.getList(), 0])
return False
# Your NestedIterator object will be instantiated and called as such:
# i, v = NestedIterator(nestedList), []
# while i.hasNext(): v.append(i.next())
class NestedIterator(object):
def __init__(self, nestedList):
"""
Initialize your data structure here.
:type nestedList: List[NestedInteger]
"""
self.St = nestedList[::-1]
def next(self):
"""
:rtype: int
"""
return self.St.pop().getInteger()
def hasNext(self):
"""
:rtype: bool
"""
while self.St:
ni = self.St[-1]
if ni.isInteger():
return True
self.St.pop()
self.St += ni.getList()[::-1]
return False
```
#### File: 0301-0400/0342-Power of Four/0342-Power of Four.py
```python
class Solution:
def isPowerOfFour(self, num: int) -> bool:
return num > 0 and num&(num - 1) == 0 and (num - 1)%3 == 0
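# Example: isPowerOfFour(16) is True, isPowerOfFour(8) is False; the
# (num - 1) % 3 == 0 test rejects powers of two with an odd exponent such as 8.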
```
#### File: 0301-0400/0358-Rearrange String k Distance Apart/0358-Rearrange String k Distance Apart.py
```python
import collections
import heapq
class Solution:
def rearrangeString(self, s: str, k: int) -> str:
if not s or k == 0:
return s
table = collections.Counter(s)
pq = [(-val, key) for key, val in table.items()]
heapq.heapify(pq)
result = []
left = len(s)
while pq:
dist = min(left, k)
if dist > len(pq):
return ''
temp = []
for i in range(dist):
negCount, c = heapq.heappop(pq)
result.append(c)
left -= 1
if negCount != -1:
temp.append((negCount + 1, c))
pq += temp
heapq.heapify(pq)
return ''.join(result)
```
#### File: 0301-0400/0365-Water and Jug Problem/0365-Water and Jug Problem.py
```python
import math
class Solution:
def canMeasureWater(self, x: 'int', y: 'int', z: 'int') -> 'bool':
if x + y < z:
return False
if x == 0 and y == 0:
return z == 0
return z % math.gcd(x, y) == 0
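# Example: canMeasureWater(3, 5, 4) is True; by Bezout's identity a target is
# reachable iff it is a multiple of gcd(x, y) and does not exceed x + y.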
```
#### File: 0301-0400/0372-Super Pow/372. Super Pow.py
```python
from functools import reduce
class Solution:
def superPow(self, a: 'int', b: 'List[int]') -> 'int':
p = reduce(lambda x, y: (10*x + y)%1140, b)
return pow(a, p, 1337)
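# 1140 is Euler's totient of 1337 (1337 = 7 * 191); the digit fold reduces the huge
# exponent modulo phi(1337) before calling pow(a, p, 1337).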
```
#### File: 0301-0400/0388-Longest Absolute File Path/0388-Longest Absolute File Path.py
```python
class Solution:
def lengthLongestPath(self, input: str) -> int:
lens = [0]
maxLen = 0
for line in input.splitlines():
name = line.lstrip('\t')
level = len(line) - len(name)
if '.' in name:
maxLen = max(maxLen, lens[level] + len(name))
else:
if level + 1 == len(lens):
lens.append(lens[-1] + 1 + len(name))
else:
lens[level + 1] = lens[level] + 1 + len(name)
return maxLen
```
#### File: 0301-0400/0394-Decode String/0394-Decode String.py
```python
class Solution:
def decodeString(self, s: str) -> str:
St = []
num = 0
curr = ''
for c in s:
if c.isdigit():
num = num*10 + int(c)
elif c == '[':
St.append([num, curr])
num = 0
curr = ''
elif c == ']':
count, prev = St.pop()
curr = prev + count*curr
else:
curr += c
return curr
class Solution2:
def decodeString(self, s: str) -> str:
i = 0
def decode(s):
nonlocal i
result = []
while i < len(s) and s[i] != ']':
if s[i].isdigit():
num = 0
while i < len(s) and s[i].isdigit():
num = num*10 + int(s[i])
i += 1
i += 1
temp = decode(s)
i += 1
result += temp*num
else:
result.append(s[i])
i += 1
return result
return ''.join(decode(s))
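# Examples: decodeString("3[a]2[bc]") == "aaabcbc", decodeString("3[a2[c]]") == "accaccacc".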
```
#### File: 0301-0400/0397-Integer Replacement/0397-Integer Replacement.py
```python
class Solution:
def integerReplacement(self, n: 'int') -> 'int':
table = {1:0}
def dfs(n):
if n in table:
return table[n]
if n & 1:
result = 1 + min(dfs(n + 1), dfs(n - 1))
else:
result = 1 + dfs(n // 2)
table[n] = result
return result
return dfs(n)
```
#### File: 0401-0500/0408-Valid Word Abbreviation/0408-Valid Word Abbreviation.py
```python
class Solution:
def validWordAbbreviation(self, word: str, abbr: str) -> bool:
i = num = 0
for c in abbr:
if c.isdigit():
num = num * 10 + ord(c) - ord('0')
if num == 0:
return False
else:
i += num
if i >= len(word) or word[i] != c:
return False
num = 0
i += 1
i += num
return i == len(word)
```
#### File: 0401-0500/0409-Longest Palindrome/0409-Longest Palindrome.py
```python
import collections
class Solution:
def longestPalindrome(self, s: str) -> int:
odds = sum(v&1 for v in collections.Counter(s).values())
return len(s) - odds + bool(odds)
```
#### File: 0401-0500/0414-Third Maximum Number/0414-Third Maximum Number.py
```python
import math
class Solution:
def thirdMax(self, nums: List[int]) -> int:
maxNums = [-math.inf] * 3
for num in nums:
if num > maxNums[0]:
maxNums = [num, maxNums[0], maxNums[1]]
elif num != maxNums[0] and num > maxNums[1]:
maxNums[1], maxNums[2] = num, maxNums[1]
elif num != maxNums[0] and num != maxNums[1] and num > maxNums[2]:
maxNums[2] = num
return maxNums[2] if -math.inf not in maxNums else maxNums[0]
```
#### File: 0401-0500/0416-Partition Equal Subset Sum/0416-Partition Equal Subset Sum.py
```python
class Solution:
def canPartition(self, nums: List[int]) -> bool:
total = sum(nums)
if total & 1:
return False
target = total // 2
dp = [False] * (1 + target)
dp[0] = True
for num in nums:
for j in range(target, num - 1, -1):
if dp[j - num]:
dp[j] = True
return dp[target]
```
#### File: 0401-0500/0426-Convert Binary Search Tree to Sorted Doubly Linked List/0426-Convert Binary Search Tree to Sorted Doubly Linked List.py
```python
class Node:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def treeToDoublyList(self, root: 'Node') -> 'Node':
if root is None:
return None
St = []
head = tail = prev = None
curr = root
while St or curr:
if curr:
St.append(curr)
curr = curr.left
else:
curr = St.pop()
if prev is None:
head = curr
else:
curr.left = prev
prev.right = curr
tail = curr
prev = curr
curr = curr.right
head.left = tail
tail.right = head
return head
```
#### File: 0401-0500/0443-String Compression/0443-String Compression.py
```python
class Solution:
def compress(self, chars: List[str]) -> int:
count = start = 0
for i in range(len(chars)):
if i == len(chars) - 1 or chars[i] != chars[i + 1]:
chars[count] = chars[i]
count += 1
if i > start:
for c in str(i - start + 1):
chars[count] = c
count += 1
start = i + 1
return count
```
#### File: 0401-0500/0444-Sequence Reconstruction/0444-Sequence Reconstruction.py
```python
import collections
class Solution:
def sequenceReconstruction(self, org: List[int], seqs: List[List[int]]) -> bool:
n = len(org)
validInput = False
graph = collections.defaultdict(set)
degrees = [0] * (n + 1)
for seq in seqs:
if not seq:
continue
validInput = True
for i in range(len(seq)):
if seq[i] > n or seq[i] < 1:
return False
if i < len(seq) - 1 and 0 < seq[i + 1] <= n and seq[i + 1] not in graph[seq[i]]:
graph[seq[i]].add(seq[i + 1])
degrees[seq[i + 1]] += 1
if not validInput:
return False
count = 0
Q = collections.deque([i for i in range(1, n + 1) if degrees[i] == 0])
while len(Q) == 1:
curr = Q.popleft()
if org[count] != curr:
return False
count += 1
for neighbor in graph[curr]:
degrees[neighbor] -= 1
if degrees[neighbor] == 0:
Q.append(neighbor)
return count == n
```
#### File: 0401-0500/0447-Number of Boomerangs/0447-Number of Boomerangs.py
```python
import collections
class Solution:
def numberOfBoomerangs(self, points: List[List[int]]) -> int:
count = 0
for p in points:
table = collections.Counter()
for q in points:
diff = (p[0] - q[0])**2 + (p[1] - q[1])**2
count += table[diff]*2
table[diff] += 1
return count
```
#### File: 0401-0500/0449-Serialize and Deserialize BST/0449-Serialize and Deserialize BST.py
```python
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
import collections
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if root is None:
return '[]'
nodes = [root]
i = 0
while i < len(nodes):
if nodes[i]:
nodes.append(nodes[i].left)
nodes.append(nodes[i].right)
i += 1
while nodes[-1] is None:
nodes.pop()
return '[' + ','.join(str(node.val) if node else 'null' for node in nodes) +']'
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
if data == '[]':
return None
strs = data[1:-1].split(',')
root = TreeNode(int(strs[0]))
Q = collections.deque([root])
leftChild = True
for s in strs[1:]:
if s != 'null':
node = TreeNode(int(s))
if leftChild:
Q[0].left = node
else:
Q[0].right = node
Q.append(node)
leftChild = not leftChild
if leftChild:
Q.popleft()
return root
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
```
#### File: 0401-0500/0464-Can I Win/0464-Can I Win.py
```python
class Solution:
def canIWin(self, maxChoosableInteger: int, desiredTotal: int) -> bool:
if maxChoosableInteger >= desiredTotal:
return True
if (1 + maxChoosableInteger)*maxChoosableInteger//2 < desiredTotal:
return False
used = 0
cache = {}
def dfs(used, target):
if target <= 0:
return False
if used in cache:
return cache[used]
result = False
key = used
for bit in range(1, maxChoosableInteger + 1):
mask = 1 << bit
if used & mask == 0:
used |= mask
if not dfs(used, target - bit):
result = True
break
used &= (~mask)
cache[key] = result
return result
return dfs(0, desiredTotal)
```
#### File: 0401-0500/0470-Implement Rand10() Using Rand7()/0470-Implement Rand10() Using Rand7().py
```python
class Solution:
def rand10(self):
"""
:rtype: int
"""
while True:
row = rand7()
col = rand7()
index = (row - 1) * 7 + col
if index <= 40:
break
return 1 + (index - 1) % 10
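# Rejection sampling: (row - 1) * 7 + col is uniform over 1..49; samples above 40 are
# rejected, so the remaining 40 outcomes map evenly onto 1..10 (about 2.45 rand7()
# calls per rand10() on average).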
```
#### File: 0401-0500/0492-Construct the Rectangle/0492-Construct the Rectangle.py
```python
import math
class Solution:
def constructRectangle(self, area: int) -> List[int]:
W = int(math.sqrt(area))
while area % W:
W -= 1
return [area//W, W]
```
#### File: 0401-0500/0495-Teemo Attacking/0495-Teemo Attacking.py
```python
class Solution:
def findPoisonedDuration(self, timeSeries: List[int], duration: int) -> int:
if not timeSeries:
return 0
total = duration
for i in range(1, len(timeSeries)):
total += min(duration, timeSeries[i] - timeSeries[i - 1])
return total
```
#### File: 0401-0500/0496-Next Greater Element I/0496-Next Greater Element I.py
```python
class Solution:
def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
St = []
table = {}
for num in nums2:
while St and St[-1] < num:
table[St.pop()] = num
St.append(num)
return [table[num] if num in table else -1 for num in nums1]
```
#### File: 0501-0600/0503-Next Greater Element II/0503-Next Greater Element II.py
```python
class Solution:
def nextGreaterElements(self, nums: List[int]) -> List[int]:
St = []
n = len(nums)
result = [-1] * n
for i in range(2 * n - 1):
while St and nums[St[-1]] < nums[i % n]:
result[St.pop()] = nums[i % n]
St.append(i % n)
return result
```
#### File: 0501-0600/0508-Most Frequent Subtree Sum/0508-Most Frequent Subtree Sum.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
import collections
class Solution:
def findFrequentTreeSum(self, root: TreeNode) -> List[int]:
table = collections.Counter()
maxCount = 0
def treeSum(root):
nonlocal maxCount
if root is None:
return 0
currSum = treeSum(root.left) + root.val + treeSum(root.right)
table[currSum] += 1
if table[currSum] > maxCount:
maxCount = table[currSum]
return currSum
treeSum(root)
return [k for k, v in table.items() if v == maxCount]
```
#### File: 0501-0600/0509-Fibonacci Number/0509-Fibonacci Number.py
```python
class Solution:
def fib(self, N: int) -> int:
def multiply(A, B):
result = [[0] * len(B[0]) for _ in range(len(A))]
for i in range(len(A)):
for j in range(len(B[0])):
for k in range(len(A[0])):
result[i][j] += A[i][k]*B[k][j]
return result
def pow(A, N):
result = [[1, 0], [0, 1]]
while N:
if N&1:
result = multiply(result, A)
A = multiply(A, A)
N >>= 1
return result
A = [[1, 1], [1, 0]]
result = multiply(pow(A, N), [[0], [1]])
return result[0][0]
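# Example: fib(10) == 55. pow(A, N) equals [[F(N+1), F(N)], [F(N), F(N-1)]], so
# multiplying by the column vector [[0], [1]] leaves F(N) in the top entry.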
```
#### File: 0501-0600/0530-Minimum Absolute Difference in BST/0530-Minimum Absolute Difference in BST.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
import math
class Solution:
def getMinimumDifference(self, root: TreeNode) -> int:
St = []
minDiff = math.inf
prev = None
node = root
while St or node:
if node:
St.append(node)
node = node.left
else:
node = St.pop()
if prev:
minDiff = min(minDiff, node.val - prev.val)
prev = node
node = node.right
return minDiff
```
#### File: 0501-0600/0539-Minimum Time Difference/0539-Minimum Time Difference.py
```python
class Solution:
def findMinDifference(self, timePoints: List[str]) -> int:
bucket = [0] * 1440
for time in timePoints:
index = int(time[:2]) * 60 + int(time[3:])
bucket[index] += 1
if bucket[index] > 1:
return 0
first = last = -1
minDiff = 1440
for i, b in enumerate(bucket):
if b:
if first == -1:
first = i
else:
minDiff = min(minDiff, i - last)
last = i
return min(minDiff, 1440 - (last - first))
```
#### File: 0501-0600/0556-Next Greater Element III/0556-Next Greater Element III.py
```python
class Solution:
def nextGreaterElement(self, n: int) -> int:
num = list(str(n))
i = len(num) - 2
while i >= 0:
if num[i] < num[i + 1]:
break
i -= 1
if i == -1:
return -1
j = len(num) - 1
while num[j] <= num[i]:
j -= 1
num[i], num[j] = num[j], num[i]
result = int(''.join(num[:i + 1] + num[i + 1:][::-1]))
return result if result < (1 << 31) else -1
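# Examples: nextGreaterElement(12) == 21, nextGreaterElement(21) == -1 (already the
# largest permutation of its digits).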
```
#### File: 0501-0600/0560-Subarray Sum Equals K/0560-Subarray Sum Equals K.py
```python
import collections
class Solution:
def subarraySum(self, nums: List[int], k: int) -> int:
presum = count = 0
table = collections.Counter([0])
for num in nums:
presum += num
count += table[presum - k]
table[presum] += 1
return count
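# Example: subarraySum([1, 1, 1], 2) == 2; the counter tracks how many earlier
# prefix sums equal presum - k.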
```
#### File: 0501-0600/0590-N-ary Tree Postorder Traversal/0590-N-ary Tree Postorder Traversal.py
```python
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
class Solution:
def postorder(self, root: 'Node') -> List[int]:
if root is None:
return []
St = [root]
result = []
while St:
node = St.pop()
result.append(node.val)
St += node.children
return result[::-1]
```
#### File: 0601-0700/0634-Find the Derangement of An Array/0634-Find the Derangement of An Array.py
```python
class Solution:
def findDerangement(self, n: int) -> int:
if n == 1:
return 0
MOD = 10 ** 9 + 7
dp1 = 0
dp2 = 1
for i in range(3, n + 1):
dp3 = (i - 1) * (dp1 + dp2) % MOD
dp1 = dp2
dp2 = dp3
return dp2
```
#### File: 0601-0700/0636-Exclusive Time of Functions/0636-Exclusive Time of Functions.py
```python
class Solution:
def exclusiveTime(self, n: int, logs: List[str]) -> List[int]:
time = [0] * n
prev = 0
St = []
for log in logs:
fid, start, timestamp = log.split(':')
fid = int(fid)
timestamp = int(timestamp)
if start == 'start':
if St:
time[St[-1]] += timestamp - prev
St.append(fid)
prev = timestamp
else:
time[St.pop()] += timestamp - prev + 1
prev = timestamp + 1
return time
```
#### File: 0601-0700/0638-Shopping Offers/0638-Shopping Offers.py
```python
from functools import reduce, lru_cache
class Solution:
def shoppingOffers(self, price: List[int], special: List[List[int]], needs: List[int]) -> int:
key = reduce(lambda x, y : 10 * x + y, needs)
base = [10 ** (len(price) - 1 - i) for i in range(len(price))]
@lru_cache(None)
def dfs(key):
total = 0
curr = [key // base[i] % 10 for i in range(len(price))]
for i in range(len(price)):
total += curr[i] * price[i]
for s in special:
nextKey = 0
for i in range(len(price)):
if curr[i] >= s[i]:
nextKey = nextKey * 10 + curr[i] - s[i]
else:
break
else:
total = min(total, s[-1] + dfs(nextKey))
return total
return dfs(key)
```
#### File: 0601-0700/0647-Palindromic Substrings/0647-Palindromic Substrings.py
```python
class Solution:
def countSubstrings(self, s: str) -> int:
count = 0
for center in range(len(s)*2 - 1):
left = center // 2
right = left + (center&1)
while left >= 0 and right < len(s) and s[left] == s[right]:
count += 1
left -= 1
right += 1
return count
class Solution2:
def countSubstrings(self, s: str) -> int:
T = '#'.join('^{}$'.format(s))
n = len(T)
P = [0] * n
C = R = 0
for i in range(1, n - 1):
if R > i:
P[i] = min(R - i, P[2*C - i])
while T[i + 1 + P[i]] == T[i - 1 - P[i]]:
P[i] += 1
if i + P[i] > R:
C, R = i, i + P[i]
return sum((l + 1) // 2 for l in P)
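# Example: countSubstrings("aaa") == 6 ("a" x 3, "aa" x 2, "aaa"); Solution2 uses
# Manacher's algorithm and runs in O(n).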
```
#### File: 0601-0700/0657-Robot Return to Origin/0657-Robot Return to Origin.py
```python
class Solution:
def judgeCircle(self, moves: str) -> bool:
x = y = 0
for m in moves:
if m == 'R':
x += 1
elif m == 'L':
x -= 1
elif m == 'U':
y -= 1
else:
y += 1
return x == 0 and y == 0
```
#### File: 0601-0700/0658-Find K Closest Elements/0658-Find K Closest Elements.py
```python
import bisect
class Solution:
def findClosestElements(self, arr: List[int], k: int, x: int) -> List[int]:
right = bisect.bisect_left(arr, x)
left = right - 1
for _ in range(k):
if left >= 0 and (right == len(arr) or x - arr[left] <= arr[right] - x):
left -= 1
else:
right += 1
return arr[left + 1: right]
```
#### File: 0601-0700/0672-Bulb Switcher II/0672-Bulb Switcher II.py
```python
class Solution:
def flipLights(self, n: int, m: int) -> int:
if n == 0 or m == 0:
return 1
if n == 1:
return 2
if n == 2:
return 3 if m == 1 else 4
if m == 1:
return 4
return 7 if m == 2 else 8
```
#### File: 0601-0700/0673-Number of Longest Increasing Subsequence/0673-Number of Longest Increasing Subsequence.py
```python
class Solution:
def findNumberOfLIS(self, nums: List[int]) -> int:
if len(nums) <= 1:
return len(nums)
dp = [1] * len(nums)
count = [1] * len(nums)
maxLen = total = 1
for j in range(1, len(nums)):
for i in range(j):
if nums[j] > nums[i]:
if 1 + dp[i] > dp[j]:
dp[j] = 1 + dp[i]
count[j] = count[i]
elif 1 + dp[i] == dp[j]:
count[j] += count[i]
if dp[j] > maxLen:
maxLen = dp[j]
total = count[j]
elif dp[j] == maxLen:
total += count[j]
return total
```
#### File: 0601-0700/0674-Longest Continuous Increasing Subsequence/0674-Longest Continuous Increasing Subsequence.py
```python
class Solution:
def findLengthOfLCIS(self, nums: List[int]) -> int:
start = maxLen = 0
for i in range(1, len(nums) + 1):
if i == len(nums) or nums[i] <= nums[i - 1]:
maxLen = max(maxLen, i - start)
start = i
return maxLen
```
#### File: 0601-0700/0678-Valid Parenthesis String/0678-Valid Parenthesis String.py
```python
class Solution:
def checkValidString(self, s: str) -> bool:
low = high = 0
for c in s:
low += 1 if c == '(' else -1
high += 1 if c != ')' else -1
if high < 0:
return False
low = max(low, 0)
return low == 0
```
#### File: 0601-0700/0679-24 Game/0679-24 Game.py
```python
class Solution:
def judgePoint24(self, nums: List[int]) -> bool:
used = 0
def dfs(used, curr, target):
if used == 15:
return abs(curr - target) < 0.001
for bit in range(4):
mask = 1 << bit
if (used & mask) == 0:
used |= mask
if (dfs(used, curr + nums[bit], target) or dfs(used, curr - nums[bit], target) or
dfs(used, nums[bit] - curr, target) or dfs(used, curr*nums[bit], target) or
dfs(used, curr / nums[bit], target)):
return True
if (dfs(used, nums[bit], target - curr) or dfs(used, nums[bit], curr - target) or
dfs(used, nums[bit], target + curr)):
return True
if curr != 0 and (dfs(used, nums[bit], target/curr) or dfs(used, nums[bit], target*curr)):
return True
if target != 0 and curr != 0 and dfs(used, nums[bit], curr/target):
return True
used &= ~mask
return False
return any(dfs(1 << bit, nums[bit], 24) for bit in range(4))
```
#### File: 0601-0700/0682-Baseball Game/0682-Baseball Game.py
```python
class Solution:
def calPoints(self, ops: List[str]) -> int:
St = []
for op in ops:
if op == '+':
St.append(St[-1] + St[-2])
elif op == 'D':
St.append(St[-1]*2)
elif op == 'C':
St.pop()
else:
St.append(int(op))
return sum(St)
```
#### File: 0601-0700/0685-Redundant Connection II/0685-Redundant Connection II.py
```python
class Solution:
def findRedundantDirectedConnection(self, edges: List[List[int]]) -> List[int]:
parent = [i for i in range(len(edges) + 1)]
def findParent(x):
while parent[x] != x:
parent[x] = parent[parent[x]]
x = parent[x]
return x
visited = [-1] * (len(edges) + 1)
first = second = last = -1
for i, edge in enumerate(edges):
n1 = edge[0]
n2 = edge[1]
if visited[n2] != -1:
first = visited[n2]
second = i
continue
visited[n2] = i
p1 = findParent(n1)
if p1 == n2:
last = i
else:
parent[n2] = p1
if last == -1:
return edges[second]
if second == -1:
return edges[last]
return edges[first]
```
#### File: 0601-0700/0692-Top K Frequent Words/0692-Top K Frequent Words.py
```python
import heapq
import collections
class Cell:
def __init__(self, count, word):
self.count = count
self.word = word
def __lt__(self, other):
if self.count == other.count:
return self.word > other.word
return self.count < other.count
class Solution:
def topKFrequent(self, words: List[str], k: int) -> List[str]:
table = collections.Counter(words)
pq = []
for word, count in table.items():
heapq.heappush(pq, Cell(count, word))
if len(pq) > k:
heapq.heappop(pq)
return [heapq.heappop(pq).word for _ in range(k)][::-1]
```
#### File: 0601-0700/0695-Max Area of Island/0695-Max Area of Island.py
```python
class Solution:
def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
maxArea = 0
m, n = len(grid), len(grid[0])
def dfs(r, c):
if grid[r][c] == 0:
return 0
total = 1
grid[r][c] = 0
for nr, nc in (r-1, c), (r+1, c), (r, c-1), (r, c+1):
if 0 <= nr < m and 0 <= nc < n:
total += dfs(nr, nc)
return total
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
maxArea = max(maxArea, dfs(i, j))
return maxArea
```
#### File: 0601-0700/0696-Count Binary Substrings/0696-Count Binary Substrings.py
```python
class Solution:
def countBinarySubstrings(self, s: str) -> int:
count, prev, curr = 0, 0, 1
for i in range(1, len(s)):
if s[i] != s[i - 1]:
count += min(prev, curr)
prev = curr
curr = 1
else:
curr += 1
return count + min(prev, curr)
```
#### File: 0701-0800/0701-Insert into a Binary Search Tree/0701-Insert into a Binary Search Tree.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
inode = TreeNode(val)
if root is None:
return inode
node = root
while node:
prev = node
if node.val < val:
node = node.right
else:
node = node.left
if prev.val < val:
prev.right = inode
else:
prev.left = inode
return root
```
#### File: 0701-0800/0709-To Lower Case/0709-To Lower Case.py
```python
class Solution:
def toLowerCase(self, str: str) -> str:
return ''.join(chr(ord(c) + 32) if 'A' <= c <= 'Z' else c for c in str)
```
#### File: 0701-0800/0710-Random Pick with Blacklist/0710-Random Pick with Blacklist.py
```python
import random
class Solution:
def __init__(self, N: int, blacklist: List[int]):
self.wlen = N - len(blacklist)
self.table = {b: -1 for b in blacklist}
mapped = N - 1
for b in blacklist:
if b < self.wlen:
while mapped in self.table:
mapped -= 1
self.table[b] = mapped
mapped -= 1
def pick(self) -> int:
index = random.randrange(self.wlen)
return self.table.get(index, index)
# Your Solution object will be instantiated and called as such:
# obj = Solution(N, blacklist)
# param_1 = obj.pick()
```
#### File: 0701-0800/0713-Subarray Product Less Than K/0713-Subarray Product Less Than K.py
```python
class Solution:
def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:
if k <= 1:
return 0
start = count = 0
product = 1
for i, num in enumerate(nums):
product *= num
while product >= k:
product //= nums[start]
start += 1
count += i - start + 1
return count
```
#### File: 0701-0800/0730-Count Different Palindromic Subsequences/0730-Count Different Palindromic Subsequences.py
```python
class Solution:
def countPalindromicSubsequences(self, S: str) -> int:
n = len(S)
MOD = 1000000007
dp = [[[0]*n for _ in range(n)] for _ in range(4)]
for i in range(n):
for k in range(4):
if ord(S[i]) == ord('a') + k:
dp[k][i][i] = 1
for l in range(2, 1 + n):
for i in range(n - l + 1):
j = i + l - 1
for k in range(4):
c = chr(ord('a') + k)
if S[i] != c:
dp[k][i][j] = dp[k][i + 1][j]
elif S[j] != c:
dp[k][i][j] = dp[k][i][j - 1]
else:
dp[k][i][j] = 2
if l != 2:
for m in range(4):
dp[k][i][j] = (dp[k][i][j] + dp[m][i + 1][j - 1]) % MOD
return sum(dp[k][0][n - 1] for k in range(4)) % MOD
# With preprocess
class Solution2:
def countPalindromicSubsequences(self, S: str) -> int:
n = len(S)
MOD = 1000000007
prev = [[-1]*4 for _ in range(n)]
next = [[-1]*4 for _ in range(n)]
dp = [[0]*n for _ in range(n)]
last = [-1]*4
for i in range(n):
last[ord(S[i]) - ord('a')] = i
for j in range(4):
prev[i][j] = last[j]
last = [-1]*4
for i in range(n - 1, -1, -1):
last[ord(S[i]) - ord('a')] = i
for j in range(4):
next[i][j] = last[j]
def dfs(start, end):
if start > end:
return 0
if dp[start][end]:
return dp[start][end]
count = 0
for k in range(4):
i = next[start][k]
j = prev[end][k]
if i == -1 or i > j:
continue
if i == j:
count = (count + 1) % MOD
else:
count = (count + 2 + dfs(i + 1, j - 1)) % MOD
dp[start][end] = count
return count
return dfs(0, n - 1)
```
#### File: 0701-0800/0732-My Calendar III/0732-My Calendar III.py
```python
import collections
class MyCalendarThree:
def __init__(self):
self.table = collections.Counter()
def book(self, start: int, end: int) -> int:
self.table[start] += 1
self.table[end] -= 1
curr = maxCount = 0
for k in sorted(self.table):
curr += self.table[k]
maxCount = max(maxCount, curr)
return maxCount
# Your MyCalendarThree object will be instantiated and called as such:
# obj = MyCalendarThree()
# param_1 = obj.book(start,end)
```
#### File: 0701-0800/0746-Min Cost Climbing Stairs/0746-Min Cost Climbing Stairs.py
```python
class Solution:
def minCostClimbingStairs(self, cost: List[int]) -> int:
dp0 = cost[0]
dp1 = cost[1]
for i in range(2, len(cost)):
dp2 = min(dp0, dp1) + cost[i]
dp0 = dp1
dp1 = dp2
return min(dp0, dp1)
```
#### File: 0701-0800/0761-Special Binary String/0761-Special Binary String.py
```python
class Solution:
def makeLargestSpecial(self, S: str) -> str:
strs = []
start = bal = 0
for i, c in enumerate(S):
if c == '1':
bal += 1
else:
bal -= 1
if bal == 0:
strs.append('1{}0'.format(self.makeLargestSpecial(S[start+1 : i])))
start = i + 1
return ''.join(sorted(strs, reverse=True))
```
#### File: 0701-0800/0768-Max Chunks To Make Sorted II/0768-Max Chunks To Make Sorted II.py
```python
class Solution:
def maxChunksToSorted(self, arr: List[int]) -> int:
maxLeft = [arr[0]] * len(arr)
for i in range(1, len(arr)):
maxLeft[i] = max(maxLeft[i - 1], arr[i])
minRight = arr[-1]
count = 1
for i in range(len(arr) - 2, -1, -1):
minRight = min(minRight, arr[i + 1])
if maxLeft[i] <= minRight:
count += 1
return count
```
#### File: 0701-0800/0769-Max Chunks To Make Sorted/0769-Max Chunks To Make Sorted.py
```python
class Solution:
def maxChunksToSorted(self, arr: List[int]) -> int:
maxNum = count = 0
for i, a in enumerate(arr):
maxNum = max(maxNum, a)
if maxNum == i:
count += 1
return count
```
#### File: 0701-0800/0772-Basic Calculator III/0772-Basic Calculator III.py
```python
class Solution:
def calculate(self, s: str) -> int:
vals = []
ops = []
sign = 1
def apply(a, b, op):
if op == '+':
return a + b
elif op == '-':
return a - b
elif op == '*':
return a * b
elif op == '/':
return a // b
def precedence(op):
if op == '+' or op == '-':
return 1
else:
return 2
i = 0
while i < len(s):
if s[i] == ' ':
i += 1
continue
if s[i].isdigit():
num = 0
while i < len(s) and s[i].isdigit():
num = num * 10 + int(s[i])
i += 1
vals.append(sign * num)
sign = 1
else:
if s[i] == '(':
ops.append(s[i])
elif s[i] == ')':
while ops[-1] != '(':
b = vals.pop()
a = vals.pop()
op = ops.pop()
vals.append(apply(a, b, op))
ops.pop()
else:
if s[i] == '-' and (i == 0 or s[i - 1] == '('):
sign = -1
else:
while ops and ops[-1] != '(' and precedence(s[i]) <= precedence(ops[-1]):
b = vals.pop()
a = vals.pop()
op = ops.pop()
vals.append(apply(a, b, op))
ops.append(s[i])
i += 1
while ops:
b = vals.pop()
a = vals.pop()
op = ops.pop()
vals.append(apply(a, b, op))
return vals[-1]
```
#### File: 0701-0800/0790-Domino and Tromino Tiling/0790-Domino and Tromino Tiling.py
```python
class Solution:
def numTilings(self, N: int) -> int:
MOD = 1000000007
dp = [0] * 1001
'''
dp[i] = dp[i - 1] + dp[i - 2] + 2 * (dp[i - 3] + dp[i - 4] + ... + dp[0])
dp[i - 1] = dp[i - 2] + dp[i - 3] + 2 * (dp[i - 4] + dp[i - 5] + ... + dp[0])
dp[i] - dp[i - 1] = dp[i - 1] + dp[i - 3]
dp[i] = dp[i - 1]*2 + dp[i - 3]
'''
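# Sanity check of the recurrence: dp[3] = dp[2]*2 + dp[0] = 5 and dp[4] = dp[3]*2 + dp[1] = 11,
# matching the expected tiling counts for N = 3 and N = 4.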
dp[0] = 1
dp[1] = 1
dp[2] = 2
for i in range(3, N + 1):
dp[i] = (dp[i - 1] * 2 + dp[i - 3]) % MOD
return dp[N]
```
#### File: 0801-0900/0804-Unique Morse Code Words/0804-Unique Morse Code Words.py
```python
class Solution:
def uniqueMorseRepresentations(self, words: List[str]) -> int:
table = [".-","-...","-.-.","-..",".","..-.","--.","....","..",".---","-.-",".-..","--","-.","---",".--.","--.-",".-.","...","-","..-","...-",".--","-..-","-.--","--.."]
return len({''.join(table[ord(c) - ord('a')] for c in word) for word in words})
```
#### File: 0801-0900/0805-Split Array With Same Average/0805-Split Array With Same Average.py
```python
class Solution:
def splitArraySameAverage(self, A: List[int]) -> bool:
total = sum(A)
n = len(A)
if all(total*i%n for i in range(1, n//2 + 1)):
return False
sums = [set() for _ in range(1 + n // 2)]
sums[0].add(0)
for num in A:
for i in range(n//2, 0, -1):
for s in sums[i - 1]:
sums[i].add(s + num)
return any(total*i%n == 0 and total*i//n in sums[i] for i in range(1, 1 + n//2))
```
#### File: 0801-0900/0813-Largest Sum of Averages/0813-Largest Sum of Averages.py
```python
class Solution:
def largestSumOfAverages(self, A: List[int], K: int) -> float:
presum = [0] * (1 + len(A))
for i in range(1, len(presum)):
presum[i] = presum[i - 1] + A[i - 1]
dp = [0] + [presum[j] / j for j in range(1, len(presum))]
result = dp[-1]
for k in range(2, K + 1):
for j in range(len(A), 0, -1):
for i in range(k - 1, j):
dp[j] = max(dp[j], dp[i] + (presum[j] - presum[i]) / (j - i))
result = max(result, dp[-1])
return result
```
#### File: 0801-0900/0817-Linked List Components/0817-Linked List Components.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def numComponents(self, head: ListNode, G: List[int]) -> int:
table = set(G)
curr = head
count = 0
while curr:
if curr.val in table and (curr.next is None or curr.next.val not in table):
count += 1
curr = curr.next
return count
```
#### File: 0801-0900/0818-Race Car/0818-Race Car.py
```python
class Solution:
def racecar(self, target: int) -> int:
memo = {}
def dfs(target):
if target not in memo:
n = target.bit_length()
if (1 << n) - 1 == target:
result = n
else:
result = dfs((1 << n) - 1 - target) + n + 1
for m in range(n - 1):
result = min(result, dfs(target - (1 << (n - 1)) + (1 << m)) + n + m + 1)
memo[target] = result
return memo[target]
return dfs(target)
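# Examples from the problem statement: racecar(3) -> 2 ("AA") and racecar(6) -> 5
# (one optimal sequence is "AAARA").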
```
#### File: 0801-0900/0821-Shortest Distance to a Character/0821-Shortest Distance to a Character.py
```python
class Solution:
def shortestToChar(self, S: str, C: str) -> List[int]:
prev = -len(S)
result = [0] * len(S)
for i, c in enumerate(S):
if c == C:
prev = i
else:
result[i] = i - prev
prev = len(S) * 2
for i in range(len(S) - 1, -1, -1):
if S[i] == C:
prev = i
else:
result[i] = min(result[i], prev - i)
return result
```
#### File: 0801-0900/0837-New 21 Game/0837-New 21 Game.py
```python
class Solution:
def new21Game(self, N: int, K: int, W: int) -> float:
if K == 0:
return 1
presum = [0] * (1 + N)
presum[0] = 1
for i in range(1, 1 + N):
presum[i] = presum[i - 1]
if i > W:
presum[i] += (presum[i - 1] - presum[i - 1 - W]) / W
else:
presum[i] += presum[i - 1] / W
if i > K:
presum[i] -= (presum[i - 1] - presum[K - 1]) / W
return presum[N] - presum[K - 1]
class Solution2:
def new21Game(self, N: int, K: int, W: int) -> float:
if K == 0:
return 1
dp = [0] * (1 + N)
dp[0] = 1
Wsum = 1
total = 0
for i in range(1, 1 + N):
dp[i] = Wsum / W
if i < K:
Wsum += dp[i]
else:
total += dp[i]
if i >= W:
Wsum -= dp[i - W]
return total
```
#### File: 0801-0900/0839-Similar String Groups/0839-Similar String Groups.py
```python
class Solution:
def numSimilarGroups(self, A: List[str]) -> int:
m, n = len(A), len(A[0])
def isSimilar(X, Y):
diff = 0
for x, y in zip(X, Y):
if x != y:
diff += 1
if diff > 2:
return False
return True
def findParent(i):
while parent[i] != i:
parent[i] = parent[parent[i]]
i = parent[i]
return i
if m < n:
count = m
parent = [i for i in range(m)]
for i in range(m):
for j in range(i):
if isSimilar(A[i], A[j]):
pa = findParent(i)
pb = findParent(j)
if pa != pb:
parent[pb] = pa
count -= 1
else:
A = set(A)
count = len(A)
parent = {x: x for x in A}
for x in A:
for j in range(n):
for i in range(j):
y = x[:i] + x[j] + x[i+1:j] + x[i] + x[j+1:]
if y in parent:
pa = findParent(x)
pb = findParent(y)
if pa != pb:
parent[pb] = pa
count -= 1
return count
```
#### File: 0801-0900/0848-Shifting Letters/0848-Shifting Letters.py
```python
class Solution:
def shiftingLetters(self, S: str, shifts: List[int]) -> str:
result = []
s = 0
for i in range(len(S) - 1, -1, -1):
s = (s + shifts[i]) % 26
result.append(chr(ord('a') + (ord(S[i]) - ord('a') + s) % 26))
return ''.join(result[::-1])
```
#### File: 0801-0900/0856-Score of Parentheses/0856-Score of Parentheses.py
```python
class Solution:
def scoreOfParentheses(self, S: str) -> int:
St = [0]
for c in S:
if c == '(':
St.append(0)
else:
v = St.pop()
St[-1] += max(v*2, 1)
return St.pop()
class Solution2:
def scoreOfParentheses(self, S: str) -> int:
score = bal = 0
for i in range(len(S)):
if S[i] == '(':
bal += 1
else:
bal -= 1
if S[i - 1] == '(':
score += (1 << bal)
return score
```
#### File: 0801-0900/0858-Mirror Reflection/0858-Mirror Reflection.py
```python
import math
class Solution:
def mirrorReflection(self, p: int, q: int) -> int:
g = math.gcd(p, q)
a = p // g
if a & 1:
return 1 if q//g&1 else 0
else:
return 2
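# After dividing out g = gcd(p, q), p//g and q//g cannot both be even; the receptor is
# 1 if both are odd, 0 if only p//g is odd, and 2 if p//g is even (e.g. p=2, q=1 -> 2).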
```
#### File: 0801-0900/0862-Shortest Subarray with Sum at Least K/0862-Shortest Subarray with Sum at Least K.py
```python
import collections
class Solution:
def shortestSubarray(self, A: List[int], K: int) -> int:
presum = [0] * (1 + len(A))
for i in range(1, len(presum)):
presum[i] = presum[i - 1] + A[i - 1]
dq = collections.deque()
minLen = len(presum)
for i in range(len(presum)):
while dq and presum[i] - presum[dq[0]] >= K:
minLen = min(minLen, i - dq.popleft())
while dq and presum[i] <= presum[dq[-1]]:
dq.pop()
dq.append(i)
return minLen if minLen <= len(A) else -1
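# Examples: shortestSubarray([2, -1, 2], 3) -> 3 and shortestSubarray([1, 2], 4) -> -1.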
```
#### File: 0801-0900/0863-All Nodes Distance K in Binary Tree/0863-All Nodes Distance K in Binary Tree.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
import collections
class Solution:
def distanceK(self, root: TreeNode, target: TreeNode, K: int) -> List[int]:
parents = {}
def dfs(root, parent):
if root is None:
return
parents[root] = parent
dfs(root.left, root)
dfs(root.right, root)
dfs(root, None)
Q = collections.deque([target])
dist = 0
visited = set([target])
while Q:
if dist == K:
return [node.val for node in Q]
for _ in range(len(Q)):
node = Q.popleft()
if node.left and node.left not in visited:
Q.append(node.left)
visited.add(node.left)
if node.right and node.right not in visited:
Q.append(node.right)
visited.add(node.right)
if node != root and parents[node] not in visited:
Q.append(parents[node])
visited.add(parents[node])
dist += 1
return []
```
#### File: 0801-0900/0868-Binary Gap/0868-Binary Gap.py
```python
class Solution:
def binaryGap(self, N: int) -> int:
maxDist = curr = 0
prev = -1
while N:
if N & 1:
if prev != -1:
maxDist = max(maxDist, curr - prev)
prev = curr
curr += 1
N >>= 1
return maxDist
```
#### File: 0801-0900/0887-Super Egg Drop/0887-Super Egg Drop.py
```python
class Solution:
def superEggDrop(self, K: int, N: int) -> int:
dp = [0] * (K + 1)
m = 0
while dp[K] < N:
for k in range(K, 0, -1):
dp[k] = dp[k - 1] + dp[k] + 1
m += 1
return m
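# Here dp[k] is the number of floors distinguishable with k eggs in m moves
# (dp[k] = dp[k-1] + dp[k] + 1), e.g. superEggDrop(1, 2) -> 2 and superEggDrop(2, 6) -> 3.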
```
#### File: 0901-1000/0906-Super Palindromes/0906-Super Palindromes.py
```python
class Solution:
def superpalindromesInRange(self, L: str, R: str) -> int:
L, R = int(L), int(R)
MAGIC = 100000
count = 0
for i in range(MAGIC):
left = str(i)
candidate = int(left + left[-2::-1]) ** 2
if candidate > R:
break
cs = str(candidate)
if candidate >= L and cs == cs[::-1]:
count += 1
for i in range(MAGIC):
left = str(i)
candidate = int(left + left[::-1]) ** 2
if candidate > R:
break
cs = str(candidate)
if candidate >= L and cs == cs[::-1]:
count += 1
return count
```
#### File: 0901-1000/0907-Sum of Subarray Minimums/0907-Sum of Subarray Minimums.py
```python
class Solution:
def sumSubarrayMins(self, A: List[int]) -> int:
A.append(0)
St = []
total = 0
MOD = 10 ** 9 + 7
for i, a in enumerate(A):
while St and A[St[-1]] > a:
index = St.pop()
left = St[-1] if St else -1
total = (total + A[index] * (i - index) * (index - left)) % MOD
St.append(i)
return total
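# Each popped index contributes A[index] * (choices of right end) * (choices of left end),
# e.g. sumSubarrayMins([3, 1, 2, 4]) -> 17.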
```
#### File: 0901-1000/0912-Sort an Array/0912-Sort an Array.py
```python
class Solution:
def sortArray(self, nums: List[int]) -> List[int]:
temp = [0] * len(nums)
def mergeSort(start, end):
if start < end:
mid = (start + end) // 2
mergeSort(start, mid)
mergeSort(mid + 1, end)
i = k = start
j = mid + 1
while i <= mid:
while j <= end and nums[j] < nums[i]:
temp[k] = nums[j]
j += 1
k += 1
temp[k] = nums[i]
i += 1
k += 1
while j <= end:
temp[k] = nums[j]
j += 1
k += 1
nums[start: end + 1] = temp[start: end + 1]
mergeSort(0, len(nums) - 1)
return nums
```
#### File: 0901-1000/0913-Cat and Mouse/0913-Cat and Mouse.py
```python
import collections
class Solution:
def catMouseGame(self, graph: List[List[int]]) -> int:
N = len(graph)
# What nodes could play their turn to
# arrive at node (m, c, t) ?
def parents(m, c, t):
if t == 2:
for m2 in graph[m]:
yield m2, c, 3-t
else:
for c2 in graph[c]:
if c2:
yield m, c2, 3-t
DRAW, MOUSE, CAT = 0, 1, 2
color = collections.defaultdict(int)
# degree[node] : the number of neutral children of this node
degree = {}
for m in range(N):
for c in range(N):
degree[m,c,1] = len(graph[m])
degree[m,c,2] = len(graph[c]) - (0 in graph[c])
# enqueued : all nodes that are colored
queue = collections.deque([])
for i in range(N):
for t in range(1, 3):
color[0, i, t] = MOUSE
queue.append((0, i, t, MOUSE))
if i > 0:
color[i, i, t] = CAT
queue.append((i, i, t, CAT))
# percolate
while queue:
# for nodes that are colored :
i, j, t, c = queue.popleft()
# for every parent of this node i, j, t :
for i2, j2, t2 in parents(i, j, t):
# if this parent is not colored :
if color[i2, j2, t2] is DRAW:
# if the parent can make a winning move (ie. mouse to MOUSE), do so
if t2 == c: # winning move
color[i2, j2, t2] = c
queue.append((i2, j2, t2, c))
# else, this parent has degree[parent]--, and enqueue if all children
# of this parent are colored as losing moves
else:
degree[i2, j2, t2] -= 1
if degree[i2, j2, t2] == 0:
color[i2, j2, t2] = 3 - t2
queue.append((i2, j2, t2, 3 - t2))
return color[1, 2, 1]
```
#### File: 0901-1000/0921-Minimum Add to Make Parentheses Valid/0921-Minimum Add to Make Parentheses Valid.py
```python
class Solution:
def minAddToMakeValid(self, S: str) -> int:
bal = left = 0
for c in S:
bal += 1 if c == '(' else -1
if bal < 0:
bal += 1
left += 1
return bal + left
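# bal counts unmatched '(' and left counts unmatched ')', e.g. minAddToMakeValid("())") -> 1
# and minAddToMakeValid("(((") -> 3.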
```
#### File: 0901-1000/0926-Flip String to Monotone Increasing/0926-Flip String to Monotone Increasing.py
```python
class Solution:
def minFlipsMonoIncr(self, S: str) -> int:
minFlip = count1 = 0
for c in S:
if c == '1':
count1 += 1
else:
minFlip = min(minFlip + 1, count1)
return minFlip
```
#### File: 0901-1000/0932-Beautiful Array/0932-Beautiful Array.py
```python
class Solution:
def beautifulArray(self, N: int) -> List[int]:
result = [1]
while len(result) < N:
result = [i * 2 - 1 for i in result] + [i * 2 for i in result]
return [i for i in result if i <= N]
```
#### File: 0901-1000/0933-Number of Recent Calls/0933-Number of Recent Calls.py
```python
import collections
class RecentCounter:
def __init__(self):
self.Q = collections.deque()
def ping(self, t: int) -> int:
self.Q.append(t)
while self.Q[0] < t - 3000:
self.Q.popleft()
return len(self.Q)
# Your RecentCounter object will be instantiated and called as such:
# obj = RecentCounter()
# param_1 = obj.ping(t)
```
#### File: 0901-1000/0946-Validate Stack Sequences/0946-Validate Stack Sequences.py
```python
class Solution:
def validateStackSequences(self, pushed: 'List[int]', popped: 'List[int]') -> 'bool':
St = []
j = 0
for i in pushed:
St.append(i)
while St and St[-1] == popped[j]:
St.pop()
j += 1
return len(St) == 0
```
#### File: 0901-1000/0954-Array of Doubled Pairs/0954-Array of Doubled Pairs.py
```python
import collections
class Solution:
def canReorderDoubled(self, A):
"""
:type A: List[int]
:rtype: bool
"""
table = collections.Counter(A)
keys = list(table.keys())
keys.sort(key=abs)
for key in keys:
if table[key] > table[key * 2]:
return False
table[key * 2] -= table[key]
return True
```
#### File: 0901-1000/0956-Tallest Billboard/0956-Tallest Billboard.py
```python
import collections
class Solution:
def tallestBillboard(self, rods: List[int]) -> int:
maxH = sum(rods) // 2
dp = [[-1] * (maxH * 2 + 1) for _ in range(2)]
dp[0][maxH] = 0
for i in range(len(rods)):
for j in range(len(dp[0])):
if j - rods[i] >= 0 and dp[i&1][j - rods[i]] != -1:
dp[(i + 1) & 1][j] = max(dp[(i + 1) & 1][j], dp[i&1][j - rods[i]] + rods[i])
if j + rods[i] < len(dp[0]) and dp[i&1][j + rods[i]] != -1:
dp[(i + 1) & 1][j] = max(dp[(i + 1) & 1][j], dp[i&1][j + rods[i]])
dp[(i + 1) & 1][j] = max(dp[(i + 1) & 1][j], dp[i&1][j])
return dp[len(rods) & 1][maxH]
class Solution2:
def tallestBillboard(self, rods: List[int]) -> int:
dp = {0:0}
for rod in rods:
curr = collections.defaultdict(int)
for h in dp:
curr[h + rod] = max(curr[h + rod], dp[h] + rod)
curr[h] = max(curr[h], dp[h])
curr[h - rod] = max(curr[h - rod], dp[h])
dp = curr
return dp[0]
```
#### File: 0901-1000/0965-Univalued Binary Tree/0965-Univalued Binary Tree.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def isUnivalTree(self, root: TreeNode) -> bool:
if root is None:
return True
if root.left and root.val != root.left.val:
return False
if root.right and root.val != root.right.val:
return False
return self.isUnivalTree(root.left) and self.isUnivalTree(root.right)
```
#### File: 0901-1000/0966-Vowel Spellchecker/0966-Vowel Spellchecker.py
```python
class Solution:
def spellchecker(self, wordlist: List[str], queries: List[str]) -> List[str]:
raw = set(wordlist)
cap = {word.lower() : word for word in wordlist[::-1]}
vowel = {}
for word in wordlist[::-1]:
vowel[''.join(['#' if c in 'aeiou' else c for c in word.lower()])] = word
result = [''] * len(queries)
for i, query in enumerate(queries):
if query in raw:
result[i] = query
elif query.lower() in cap:
result[i] = cap[query.lower()]
else:
key = ''.join(['#' if c in 'aeiou' else c for c in query.lower()])
if key in vowel:
result[i] = vowel[key]
return result
```
#### File: 0901-1000/0971-Flip Binary Tree To Match Preorder Traversal/0971-Flip Binary Tree To Match Preorder Traversal.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def flipMatchVoyage(self, root: TreeNode, voyage: List[int]) -> List[int]:
result = []
i = 0
def dfs(root):
if root is None:
return True
nonlocal i
if root.val != voyage[i]:
return False
i += 1
if root.left and root.left.val != voyage[i]:
result.append(root.val)
return dfs(root.right) and dfs(root.left)
return dfs(root.left) and dfs(root.right)
return result if dfs(root) else [-1]
```
#### File: 0901-1000/0980-Unique Paths III/0980-Unique Paths III.py
```python
class Solution:
def uniquePathsIII(self, grid: List[List[int]]) -> int:
empty = 1
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] == 1:
sr = i
sc = j
elif grid[i][j] == 0:
empty += 1
total = 0
def dfs(row, col):
nonlocal empty, total
if grid[row][col] == 2:
if empty == 0:
total += 1
return
grid[row][col] = -1
empty -= 1
for nr, nc in (row-1, col), (row+1, col), (row, col-1), (row, col+1):
if 0 <= nr < len(grid) and 0 <= nc < len(grid[0]) and grid[nr][nc] != -1:
dfs(nr, nc)
empty += 1
grid[row][col] = 0
dfs(sr, sc)
return total
```
#### File: 0901-1000/0983-Minimum Cost For Tickets/0983-Minimum Cost For Tickets.py
```python
class Solution:
def mincostTickets(self, days: List[int], costs: List[int]) -> int:
dp = [0] * (days[-1] + 1)
table = set(days)
for i in range(1, len(dp)):
if i in table:
dp[i] = min(dp[i - 1] + costs[0], dp[max(0, i - 7)] + costs[1], dp[max(0, i - 30)] + costs[2])
else:
dp[i] = dp[i - 1]
return dp[-1]
```
#### File: 0901-1000/0985-Sum of Even Numbers After Queries/0985-Sum of Even Numbers After Queries.py
```python
class Solution:
def sumEvenAfterQueries(self, A: List[int], queries: List[List[int]]) -> List[int]:
s = sum(a for a in A if (a & 1) == 0)
result = [0] * len(queries)
for i, (val, index) in enumerate(queries):
if A[index] & 1:
if val & 1:
s += A[index] + val
else:
if val & 1:
s -= A[index]
else:
s += val
A[index] += val
result[i] = s
return result
```
#### File: 0901-1000/0987-Vertical Order Traversal of a Binary Tree/0987-Vertical Order Traversal of a Binary Tree.py
```python
import collections
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def verticalTraversal(self, root: TreeNode) -> List[List[int]]:
table = collections.defaultdict(lambda : collections.defaultdict(list))
def dfs(node, x, y):
if node:
table[x][y].append(node.val)
dfs(node.left, x - 1, y + 1)
dfs(node.right, x + 1, y + 1)
dfs(root, 0, 0)
result = []
for x in sorted(table):
col = []
for y in sorted(table[x]):
col.extend(sorted(val for val in table[x][y]))
result.append(col)
return result
```
#### File: 0901-1000/0996-Number of Squareful Arrays/0996-Number of Squareful Arrays.py
```python
import collections
class Solution:
def numSquarefulPerms(self, A: List[int]) -> int:
table = collections.Counter(A)
nextNum = {x : {y for y in table if int((x + y) ** 0.5) ** 2 == x + y} for x in table}
count = 0
def dfs(x, num):
if num == len(A):
nonlocal count
count += 1
return
table[x] -= 1
for y in nextNum[x]:
if table[y] > 0:
dfs(y, num + 1)
table[x] += 1
for x in table:
dfs(x, 1)
return count
from functools import lru_cache
import math
class Solution2:
def numSquarefulPerms(self, A: List[int]) -> int:
graph = [[] for _ in range(len(A))]
for i in range(len(A)):
for j in range(i):
if int((A[i] + A[j]) ** 0.5) ** 2 == A[i] + A[j]:
graph[i].append(j)
graph[j].append(i)
@lru_cache(None)
def dfs(node, visited):
if visited == (1 << len(A)) - 1:
return 1
count = 0
for nei in graph[node]:
if (1 << nei) & visited == 0:
count += dfs(nei, visited | (1 << nei))
return count
total = sum(dfs(i, 1 << i) for i in range(len(A)))
table = collections.Counter(A)
for v in table.values():
total //= math.factorial(v)
return total
```
#### File: 1001-1100/1005-Maximize Sum Of Array After K Negations/1005-Maximize Sum Of Array After K Negations.py
```python
import heapq
import math
class Solution:
def largestSumAfterKNegations(self, A: List[int], K: int) -> int:
s = sneg = neg = 0
absmin = math.inf
negpq = []
for a in A:
absmin = min(absmin, abs(a))
if a >= 0:
s += a
else:
heapq.heappush(negpq, a)
neg += 1
sneg += a
if K < neg:
while K:
s -= heapq.heappop(negpq)
K -= 1
while negpq:
s += heapq.heappop(negpq)
else:
s -= sneg
if (K - neg) & 1:
s -= absmin * 2
return s
```
#### File: 1001-1100/1011-Capacity To Ship Packages Within D Days/1011-Capacity To Ship Packages Within D Days.py
```python
class Solution:
def shipWithinDays(self, weights: List[int], D: int) -> int:
low = max(weights)
high = sum(weights)
while low < high:
mid = (low + high) // 2
count = 1
s = 0
for w in weights:
s += w
if s > mid:
count += 1
s = w
if count <= D:
high = mid
else:
low = mid + 1
return low
```
#### File: 1001-1100/1015-Smallest Integer Divisible by K/1015-Smallest Integer Divisible by K.py
```python
class Solution:
def smallestRepunitDivByK(self, K: int) -> int:
if K % 2 == 0 or K % 5 == 0:
return -1
curr = 0
visited = set()
for i in range(1, K + 1):
curr = (curr * 10 + 1) % K
if curr == 0:
return i
if curr in visited:
return -1
visited.add(curr)
return -1
class Solution2:
def smallestRepunitDivByK(self, K: int) -> int:
if K % 2 == 0 or K % 5 == 0:
return -1
curr = 0
for i in range(1, K + 1):
curr = (curr * 10 + 1) % K
if curr == 0:
return i
return -1
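# By the pigeonhole principle the remainders of 1, 11, 111, ... (mod K) repeat within K steps,
# so K iterations suffice; e.g. K=1 -> 1, K=2 -> -1, K=3 -> 3.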
```
#### File: 1001-1100/1020-Number of Enclaves/1020-Number of Enclaves.py
```python
import collections
class Solution:
def numEnclaves(self, A: List[List[int]]) -> int:
Q = collections.deque()
total = 0
for i in range(len(A)):
for j in range(len(A[0])):
if A[i][j] == 1:
if i == 0 or i == len(A) - 1 or j == 0 or j == len(A[0]) - 1:
A[i][j] = 0
Q.append((i, j))
else:
total += 1
while Q:
row, col = Q.popleft()
for nr, nc in (row-1, col), (row+1, col), (row, col-1), (row, col+1):
if 0 <= nr < len(A) and 0 <= nc < len(A[0]) and A[nr][nc] == 1:
A[nr][nc] = 0
Q.append((nr, nc))
total -= 1
return total
```
#### File: 1001-1100/1023-Camelcase Matching/1023-Camelcase Matching.py
```python
class Solution:
def camelMatch(self, queries: List[str], pattern: str) -> List[bool]:
def match(query, pattern):
j = 0
for c in query:
if j < len(pattern) and c == pattern[j]:
j += 1
elif not c.islower():
return False
return j == len(pattern)
return [match(query, pattern) for query in queries]
```
#### File: 1001-1100/1025-Divisor Game/1025-Divisor Game.py
```python
class Solution:
def divisorGame(self, N: int) -> bool:
return (N & 1) == 0
```
#### File: 1001-1100/1036-Escape a Large Maze/1036-Escape a Large Maze.py
```python
import collections
class Solution:
def isEscapePossible(self, blocked: List[List[int]], source: List[int], target: List[int]) -> bool:
def bfs(source, target):
block = set((row, col) for row, col in blocked)
limit = len(blocked) * (len(blocked) - 1) // 2
Q = collections.deque([source])
area = 0
block.add((source[0], source[1]))
while Q:
row, col = Q.popleft()
area += 1
if area > limit:
return True
for nr, nc in (row-1, col), (row+1, col), (row, col-1), (row, col+1):
if 0 <= nr < 10**6 and 0 <= nc < 10**6 and (nr, nc) not in block:
if nr == target[0] and nc == target[1]:
return True
Q.append([nr, nc])
block.add((nr, nc))
return False
return bfs(source, target) and bfs(target, source)
```
#### File: 1001-1100/1039-Minimum Score Triangulation of Polygon/1039-Minimum Score Triangulation of Polygon.py
```python
from functools import lru_cache
import math
class Solution:
def minScoreTriangulation(self, A: List[int]) -> int:
@lru_cache(None)
def dfs(start, end):
if end - start < 2:
return 0
minScore = math.inf
for i in range(start + 1, end):
minScore = min(minScore, A[start] * A[i] * A[end] + dfs(start, i) + dfs(i, end))
return minScore
return dfs(0, len(A) - 1)
```
#### File: 1001-1100/1040-Moving Stones Until Consecutive II/1040-Moving Stones Until Consecutive II.py
```python
class Solution:
def numMovesStonesII(self, stones: List[int]) -> List[int]:
stones.sort()
maxMove = max(stones[-2] - stones[0] - len(stones) + 2, stones[-1] - stones[1] - len(stones) + 2)
minMove = len(stones)
start = 0
for i in range(len(stones)):
while stones[i] - stones[start] >= len(stones):
start += 1
if i - start + 1 == len(stones) - 1 and stones[i] - stones[start] == len(stones) - 2:
minMove = min(minMove, 2)
else:
minMove = min(minMove, len(stones) - (i - start + 1))
return [minMove, maxMove]
```
#### File: 1001-1100/1046-Last Stone Weight/1046-Last Stone Weight.py
```python
import heapq
class Solution:
def lastStoneWeight(self, stones: List[int]) -> int:
pq = [-stone for stone in stones]
heapq.heapify(pq)
while len(pq) > 1:
y = heapq.heappop(pq)
x = heapq.heappop(pq)
if x != y:
heapq.heappush(pq, y - x)
return -pq[0] if pq else 0
```
#### File: 1001-1100/1052-Grumpy Bookstore Owner/1052-Grumpy Bookstore Owner.py
```python
class Solution:
def maxSatisfied(self, customers: List[int], grumpy: List[int], X: int) -> int:
curr = sum(customers[i] for i in range(len(customers)) if grumpy[i] == 0)
for i in range(X):
if grumpy[i]:
curr += customers[i]
maxNum = curr
for i in range(X, len(customers)):
if grumpy[i]:
curr += customers[i]
if grumpy[i - X]:
curr -= customers[i - X]
maxNum = max(maxNum, curr)
return maxNum
# One pass
class Solution2:
def maxSatisfied(self, customers: List[int], grumpy: List[int], X: int) -> int:
satisfied = make_satisfied = max_make_satisfied = 0
for i in range(len(customers)):
if grumpy[i] == 0:
satisfied += customers[i]
else:
make_satisfied += customers[i]
if i >= X and grumpy[i - X]:
make_satisfied -= customers[i - X]
max_make_satisfied = max(max_make_satisfied, make_satisfied)
return satisfied + max_make_satisfied
```
#### File: 1001-1100/1061-Lexicographically Smallest Equivalent String/1061-Lexicographically Smallest Equivalent String.py
```python
import string
class Solution:
def smallestEquivalentString(self, A: str, B: str, S: str) -> str:
parent = {c: c for c in string.ascii_lowercase}
def findParent(x):
while parent[x] != x:
parent[x] = parent[parent[x]]
x = parent[x]
return x
for c1, c2 in zip(A, B):
p1 = findParent(c1)
p2 = findParent(c2)
if p1 < p2:
parent[p2] = p1
elif p1 > p2:
parent[p1] = p2
return ''.join(findParent(c) for c in S)
```
#### File: 1001-1100/1071-Greatest Common Divisor of Strings/1071-Greatest Common Divisor of Strings.py
```python
class Solution:
def gcdOfStrings(self, str1: str, str2: str) -> str:
if len(str1) < len(str2):
str1, str2 = str2, str1
if not str2:
return str1
if str1[:len(str2)] != str2:
return ''
return self.gcdOfStrings(str1[len(str2):], str2)
```
#### File: 1001-1100/1072-Flip Columns For Maximum Number of Equal Rows/1072-Flip Columns For Maximum Number of Equal Rows.py
```python
import collections
class Solution:
def maxEqualRowsAfterFlips(self, matrix: List[List[int]]) -> int:
return max(collections.Counter(tuple(x ^ row[0] for x in row) for row in matrix).values())
```
#### File: 1001-1100/1074-Number of Submatrices That Sum to Target/1074-Number of Submatrices That Sum to Target.py
```python
import collections
class Solution:
def numSubmatrixSumTarget(self, matrix: List[List[int]], target: int) -> int:
presum = [[0] * (len(matrix[0]) + 1) for _ in range(len(matrix))]
for i in range(len(matrix)):
for j in range(len(matrix[0])):
presum[i][j + 1] = presum[i][j] + matrix[i][j]
total = 0
for j in range(len(matrix[0])):
for i in range(j + 1):
curr = 0
table = collections.Counter([0])
for k in range(len(matrix)):
curr += presum[k][j + 1] - presum[k][i]
total += table[curr - target]
table[curr] += 1
return total
```
#### File: 1001-1100/1078-Occurrences After Bigram/1078-Occurrences After Bigram.py
```python
class Solution:
def findOcurrences(self, text: str, first: str, second: str) -> List[str]:
words = text.split()
result = []
for i in range(len(words) - 2):
if words[i] == first and words[i + 1] == second:
result.append(words[i + 2])
return result
```
#### File: 1001-1100/1081-Smallest Subsequence of Distinct Characters/1081-Smallest Subsequence of Distinct Characters.py
```python
import collections
class Solution:
def smallestSubsequence(self, text: str) -> str:
table = collections.Counter(text)
visited = set()
result = []
for c in text:
table[c] -= 1
if c in visited:
continue
while result and c < result[-1] and table[result[-1]] > 0:
visited.remove(result.pop())
result.append(c)
visited.add(c)
return ''.join(result)
```
#### File: 1001-1100/1096-Brace Expansion II/1096-Brace Expansion II.py
```python
import itertools
class Solution:
def braceExpansionII(self, expression: str) -> List[str]:
group = [[]]
bal = 0
for i, c in enumerate(expression):
if c == '{':
if bal == 0:
start = i + 1
bal += 1
elif c == '}':
bal -= 1
if bal == 0:
group[-1].append(self.braceExpansionII(expression[start: i]))
elif bal == 0:
if c == ',':
group.append([])
else:
group[-1].append([c])
word = set()
for g in group:
word |= set(map(''.join, itertools.product(*g)))
return sorted(word)
```
#### File: 1101-1200/1103-Distribute Candies to People/1103-Distribute Candies to People.py
```python
class Solution:
def distributeCandies(self, candies: int, num_people: int) -> List[int]:
result = [0] * num_people
curr = 1
while candies > 0:
for i in range(num_people):
if candies >= curr + i:
result[i] += curr + i
candies -= curr + i
else:
result[i] += candies
candies = 0
break
curr += num_people
return result
class Solution2:
def distributeCandies(self, candies: int, num_people: int) -> List[int]:
result = [0] * num_people
curr = 0
while candies > 0:
result[curr % num_people] += min(candies, curr + 1)
curr += 1
candies -= curr
return result
```
#### File: 1101-1200/1116-Print Zero Even Odd/1116-Print Zero Even Odd.py
```python
from threading import Semaphore
class ZeroEvenOdd:
def __init__(self, n):
self.n = n
self.zero_gate = Semaphore(1)
self.even_gate = Semaphore(0)
self.odd_gate = Semaphore(0)
# printNumber(x) outputs "x", where x is an integer.
def zero(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(self.n):
self.zero_gate.acquire()
printNumber(0)
if i & 1:
self.even_gate.release()
else:
self.odd_gate.release()
def even(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(2, self.n + 1, 2):
self.even_gate.acquire()
printNumber(i)
self.zero_gate.release()
def odd(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(1, self.n + 1, 2):
self.odd_gate.acquire()
printNumber(i)
self.zero_gate.release()
```
#### File: 1101-1200/1121-Divide Array Into Increasing Sequences/1121-Divide Array Into Increasing Sequences.py
```python
import collections
class Solution:
def canDivideIntoSubsequences(self, nums: List[int], K: int) -> bool:
return len(nums) >= K * max(v for v in collections.Counter(nums).values())
```
#### File: 1101-1200/1133-Largest Unique Number/1133-Largest Unique Number.py
```python
import collections
class Solution:
def largestUniqueNumber(self, A: List[int]) -> int:
table = collections.Counter(A)
return max([k for k, v in table.items() if v == 1] or [-1])
```
#### File: 1101-1200/1135-Connecting Cities With Minimum Cost/1135-Connecting Cities With Minimum Cost.py
```python
class Solution:
def minimumCost(self, N: int, connections: List[List[int]]) -> int:
parent = [i for i in range(1 + N)]
def findParent(x):
while parent[x] != x:
parent[x] = parent[parent[x]]
x = parent[x]
return x
connections.sort(key=lambda x: x[2])
total = num = 0
for city1, city2, cost in connections:
p1 = findParent(city1)
p2 = findParent(city2)
if p1 != p2:
parent[p2] = p1
total += cost
num += 1
if num == N - 1:
return total
return -1
```
#### File: 1101-1200/1139-Largest 1-Bordered Square/1139-Largest 1-Bordered Square.py
```python
class Solution:
def largest1BorderedSquare(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
left = [[0] * n for _ in range(m)]
top = [[0] * n for _ in range(m)]
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
left[i][j] = left[i][j - 1] + 1 if j > 0 else 1
top[i][j] = top[i - 1][j] + 1 if i > 0 else 1
maxLen = 0
for i in range(m - 1, -1, -1):
for j in range(n - 1, -1, -1):
candidate = min(left[i][j], top[i][j])
while candidate > maxLen:
if (left[i - candidate + 1][j] >= candidate and
top[i][j - candidate + 1] >= candidate):
maxLen = candidate
break
candidate -= 1
return maxLen * maxLen
```
#### File: 1101-1200/1152-Analyze User Website Visit Pattern/1152-Analyze User Website Visit Pattern.py
```python
import collections
from itertools import combinations
from collections import Counter
class Solution:
def mostVisitedPattern(self, username: List[str], timestamp: List[int], website: List[str]) -> List[str]:
visit = collections.defaultdict(list)
for t, u, w in sorted(zip(timestamp, username, website)):
visit[u].append(w)
table = sum([Counter(set(combinations(w, 3))) for w in visit.values()], Counter())
return list(min(table, key=lambda k: (-table[k], k)))
```
#### File: 1101-1200/1155-Number of Dice Rolls With Target Sum/1155-Number of Dice Rolls With Target Sum.py
```python
class Solution:
def numRollsToTarget(self, d: int, f: int, target: int) -> int:
dp = [[0] * (1 + target) for _ in range(1 + d)]
dp[0][0] = 1
MOD = 10 ** 9 + 7
for i in range(1, 1 + d):
for j in range(1, 1 + target):
for k in range(1, 1 + min(f, j)):
dp[i][j] = (dp[i][j] + dp[i - 1][j - k]) % MOD
return dp[d][target]
class Solution2:
def numRollsToTarget(self, d: int, f: int, target: int) -> int:
dp = [0] * (1 + target)
dp[0] = 1
MOD = 10 ** 9 + 7
for i in range(1, 1 + d):
temp = [0] * (1 + target)
for j in range(1, 1 + target):
for k in range(1, 1 + min(f, j)):
temp[j] = (temp[j] + dp[j - k]) % MOD
dp = temp
return dp[target]
```
#### File: 1101-1200/1162-As Far from Land as Possible/1162-As Far from Land as Possible.py
```python
import collections
class Solution:
def maxDistance(self, grid: List[List[int]]) -> int:
N = len(grid)
Q = collections.deque()
visited = set()
for i in range(N):
for j in range(N):
if grid[i][j] == 1:
Q.append((i, j))
visited.add(i * N + j)
maxD = -1
while Q:
for _ in range(len(Q)):
row, col = Q.popleft()
for nr, nc in (row - 1, col), (row + 1, col), (row, col - 1), (row, col + 1):
if 0 <= nr < N and 0 <= nc < N and (nr * N + nc) not in visited:
Q.append((nr, nc))
visited.add(nr * N + nc)
maxD += 1
return -1 if maxD <= 0 else maxD
```
#### File: 1101-1200/1168-Optimize Water Distribution in a Village/1168-Optimize Water Distribution in a Village.py
```python
class Solution:
def minCostToSupplyWater(self, n: int, wells: List[int], pipes: List[List[int]]) -> int:
parent = [i for i in range(1 + n)]
def findParent(x):
while parent[x] != x:
parent[x] = parent[parent[x]]
x = parent[x]
return x
edges = [(c, 0, i) for i, c in enumerate(wells, 1)]
edges += [(c, i, j) for i, j, c in pipes]
cost = 0
for c, x, y in sorted(edges):
px = findParent(x)
py = findParent(y)
if px != py:
cost += c
parent[py] = px
n -= 1
if n == 0:
break
return cost
```
#### File: 1101-1200/1182-Shortest Distance to Target Color/1182-Shortest Distance to Target Color.py
```python
import collections
import bisect
class Solution:
def shortestDistanceColor(self, colors: List[int], queries: List[List[int]]) -> List[int]:
table = collections.defaultdict(list)
for i, c in enumerate(colors):
table[c].append(i)
result = []
for i, c in queries:
if c in table:
index = bisect.bisect_left(table[c], i)
if index == 0:
result.append(table[c][0] - i)
elif index == len(table[c]):
result.append(i - table[c][-1])
else:
result.append(min(table[c][index] - i, i - table[c][index - 1]))
else:
result.append(-1)
return result
class Solution2:
def shortestDistanceColor(self, colors: List[int], queries: List[List[int]]) -> List[int]:
dist = [[-1] * 3 for _ in range(len(colors))]
dist[0][colors[0] - 1] = 0
for i in range(1, len(colors)):
for c in range(3):
if dist[i - 1][c] != -1:
dist[i][c] = dist[i - 1][c] + 1
dist[i][colors[i] - 1] = 0
for i in range(len(colors) - 2, -1, -1):
for c in range(3):
if dist[i + 1][c] != -1 and (dist[i][c] == -1 or dist[i + 1][c] + 1 < dist[i][c]):
dist[i][c] = dist[i + 1][c] + 1
return [dist[i][c - 1] for i, c in queries]
```
#### File: 1101-1200/1190-Reverse Substrings Between Each Pair of Parentheses/1190-Reverse Substrings Between Each Pair of Parentheses.py
```python
class Solution:
def reverseParentheses(self, s: str) -> str:
s = list(s)
St = []
for i, c in enumerate(s):
if c == '(':
St.append(i)
elif c == ')':
left = St.pop()
s[left + 1 : i] = s[left + 1 : i][::-1]
return ''.join(c if c.islower() else '' for c in s)
```
#### File: 1201-1300/1207-Unique Number of Occurrences/1207-Unique Number of Occurrences.py
```python
import collections
class Solution:
def uniqueOccurrences(self, arr: List[int]) -> bool:
table = collections.Counter(arr)
return len(table) == len(set(table.values()))
```
#### File: 1201-1300/1220-Count Vowels Permutation/1220-Count Vowels Permutation.py
```python
class Solution:
def countVowelPermutation(self, n: int) -> int:
dp = [1] * 5
MOD = 10 ** 9 + 7
for _ in range(n - 1):
temp = [0] * 5
temp[0] = (dp[1] + dp[2] + dp[4]) % MOD
temp[1] = (dp[0] + dp[2]) % MOD
temp[2] = (dp[1] + dp[3]) % MOD
temp[3] = dp[2]
temp[4] = (dp[2] + dp[3]) % MOD
dp = temp
return sum(dp) % MOD
```
#### File: 1201-1300/1226-The Dining Philosophers/1226-The Dining Philosophers.py
```python
from threading import Semaphore, Lock
class DiningPhilosophers:
def __init__(self):
self.sem = Semaphore(4)
self.locks = [Lock() for _ in range(5)]
def pickFork(self, id, fun):
self.locks[id].acquire()
fun()
def putFork(self, id, fun):
fun()
self.locks[id].release()
# call the functions directly to execute, for example, eat()
def wantsToEat(self,
philosopher: int,
pickLeftFork: 'Callable[[], None]',
pickRightFork: 'Callable[[], None]',
eat: 'Callable[[], None]',
putLeftFork: 'Callable[[], None]',
putRightFork: 'Callable[[], None]') -> None:
left = philosopher
right = (philosopher + 4) % 5
self.sem.acquire()
self.pickFork(left, pickLeftFork)
self.pickFork(right, pickRightFork)
eat()
self.putFork(right, putRightFork)
self.putFork(left, putLeftFork)
self.sem.release()
```
#### File: 1201-1300/1232-Check If It Is a Straight Line/1232-Check If It Is a Straight Line.py
```python
class Solution:
def checkStraightLine(self, coordinates: List[List[int]]) -> bool:
difx1 = coordinates[1][0] - coordinates[0][0]
dify1 = coordinates[1][1] - coordinates[0][1]
for i in range(2, len(coordinates)):
difx2 = coordinates[i][0] - coordinates[0][0]
dify2 = coordinates[i][1] - coordinates[0][1]
if difx2 * dify1 != difx1 * dify2:
return False
return True
```
#### File: 1201-1300/1240-Tiling a Rectangle with the Fewest Squares/1240-Tiling a Rectangle with the Fewest Squares.py
```python
import heapq
class Solution:
def tilingRectangle(self, n: int, m: int) -> int:
total_area = n * m
dp = [0] * (total_area + 1)
for i in range(1, total_area):
dp[i] = 1 + min(dp[i - k * k] for k in range(1, int(i ** 0.5) + 1))
height = [0] * m
pq = []
for i in range(1, min(n, m) + 1):
h = height[:]
for j in range(i):
h[j] = i
heapq.heappush(pq, (1 + dp[total_area - i * i], 1, h))
while pq:
guess, count, height = heapq.heappop(pq)
if all(h == n for h in height):
return count
minHeight = min(height)
minIdx = height.index(minHeight)
rIdx = minIdx + 1
while rIdx < m and height[rIdx] == minHeight and rIdx - minIdx + 1 + minHeight <= n:
rIdx += 1
for i in range(1, rIdx - minIdx + 1):
h = height[:]
for j in range(i):
h[minIdx + j] += i
guess = count + 1 + dp[total_area - sum(h)]
heapq.heappush(pq, (guess, count + 1, h))
return -1
```
#### File: 1201-1300/1254-Number of Closed Islands/1254-Number of Closed Islands.py
```python
class Solution:
def closedIsland(self, grid: List[List[int]]) -> int:
def dfs(r, c, val):
grid[r][c] = val
for nr, nc in (r-1, c), (r+1, c), (r, c-1), (r, c+1):
if 0 <= nr < len(grid) and 0 <= nc < len(grid[0]) and grid[nr][nc] != val:
dfs(nr, nc, val)
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] == 0 and (i == 0 or i == len(grid) - 1 or j == 0 or j == len(grid[i]) - 1):
dfs(i, j, 1)
count = 0
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] == 0:
count += 1
dfs(i, j, 1)
return count
```
#### File: 1201-1300/1257-Smallest Common Region/1257-Smallest Common Region.py
```python
class Solution:
def findSmallestRegion(self, regions: List[List[str]], region1: str, region2: str) -> str:
parent = {r : p for p, *rr in regions for r in rr}
path = set([region1])
while region1 in parent:
region1 = parent[region1]
path.add(region1)
while region2 not in path:
region2 = parent[region2]
return region2
```
#### File: 1201-1300/1276-Number of Burgers with No Waste of Ingredients/1276-Number of Burgers with No Waste of Ingredients.py
```python
class Solution:
def numOfBurgers(self, tomatoSlices: int, cheeseSlices: int) -> List[int]:
if (tomatoSlices < cheeseSlices * 2 or
(tomatoSlices & 1) or tomatoSlices > cheeseSlices * 4):
return []
'''
4 * x + 2 * y = t
x + y = c
'''
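# Solving the system gives x = tomatoSlices//2 - cheeseSlices and y = 2*cheeseSlices - tomatoSlices//2,
# e.g. tomatoSlices=16, cheeseSlices=7 -> [1, 6] (4*1 + 2*6 = 16 and 1 + 6 = 7).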
x = tomatoSlices // 2 - cheeseSlices
y = cheeseSlices * 2 - tomatoSlices // 2
return [x, y]
```
#### File: 1201-1300/1284-Minimum Number of Flips to Convert Binary Matrix to Zero Matrix/1284-Minimum Number of Flips to Convert Binary Matrix to Zero Matrix.py
```python
import collections
class Solution:
def minFlips(self, mat: List[List[int]]) -> int:
m, n = len(mat), len(mat[0])
state = sum(cell << (i * n + j) for i, row in enumerate(mat) for j, cell in enumerate(row))
Q = collections.deque([state])
visited = set([state])
step = 0
while Q:
for _ in range(len(Q)):
curr = Q.popleft()
if curr == 0:
return step
for r in range(m):
for c in range(n):
state = curr ^ (1 << (r * n + c))
for nr, nc in (r-1, c), (r+1, c), (r, c-1), (r, c+1):
if 0 <= nr < m and 0 <= nc < n:
state ^= 1 << (nr * n + nc)
if state not in visited:
Q.append(state)
visited.add(state)
step += 1
return -1
```
#### File: 1201-1300/1288-Remove Covered Intervals/1288-Remove Covered Intervals.py
```python
class Solution:
def removeCoveredIntervals(self, intervals: List[List[int]]) -> int:
intervals.sort(key=lambda x: (x[0], -x[1]))
count = 0
end = -1
for a, b in intervals:
if b > end:
count += 1
end = b
return count
```
#### File: 1201-1300/1289-Minimum Falling Path Sum II/1289-Minimum Falling Path Sum II.py
```python
class Solution:
def minFallingPathSum(self, arr: List[List[int]]) -> int:
min1 = min2 = -1
for j in range(len(arr[0])):
if min1 == -1 or arr[0][j] < arr[0][min1]:
min2 = min1
min1 = j
elif min2 == -1 or arr[0][j] < arr[0][min2]:
min2 = j
for i in range(1, len(arr)):
currMin1 = currMin2 = -1
for j in range(len(arr[i])):
if j == min1:
arr[i][j] += arr[i - 1][min2]
else:
arr[i][j] += arr[i - 1][min1]
if currMin1 == -1 or arr[i][j] < arr[i][currMin1]:
currMin2 = currMin1
currMin1 = j
elif currMin2 == -1 or arr[i][j] < arr[i][currMin2]:
currMin2 = j
min1, min2 = currMin1, currMin2
return arr[-1][min1]
```
#### File: 1201-1300/1297-Maximum Number of Occurrences of a Substring/1297-Maximum Number of Occurrences of a Substring.py
```python
import collections
class Solution:
def maxFreq(self, s: str, maxLetters: int, minSize: int, maxSize: int) -> int:
table = collections.Counter()
letter = start = maxCount = 0
count = collections.Counter()
for i, c in enumerate(s):
table[c] += 1
if table[c] == 1:
letter += 1
if i - start + 1 > minSize:
table[s[start]] -= 1
if table[s[start]] == 0:
letter -= 1
start += 1
if i - start + 1 == minSize and letter <= maxLetters:
count[s[start: i+1]] += 1
maxCount = max(maxCount, count[s[start: i+1]])
return maxCount
```
#### File: 1301-1400/1301-Number of Paths with Max Score/1301-Number of Paths with Max Score.py
```python
class Solution:
def pathsWithMaxScore(self, board: List[str]) -> List[int]:
n = len(board)
MOD = 10 ** 9 + 7
maxSum1 = [0] * n
count1 = [0] * n
count1[-1] = 1
for j in range(n - 2, -1, -1):
if board[-1][j] == 'X':
break
else:
maxSum1[j] = maxSum1[j + 1] + int(board[-1][j])
count1[j] = 1
for i in range(n - 2, -1, -1):
maxSum2 = [0] * n
count2 = [0] * n
if count1[-1] > 0 and board[i][-1] != 'X':
maxSum2[-1] = maxSum1[-1] + int(board[i][-1])
count2[-1] = 1
for j in range(n - 2, -1, -1):
if board[i][j] != 'X' and (count2[j + 1] or count1[j] or count1[j + 1]):
maxSum2[j] = maxSum2[j + 1]
count2[j] = count2[j + 1]
for nj in (j, j + 1):
if maxSum1[nj] > maxSum2[j]:
maxSum2[j] = maxSum1[nj]
count2[j] = count1[nj]
elif maxSum1[nj] == maxSum2[j]:
count2[j] = (count2[j] + count1[nj]) % MOD
if board[i][j] != 'E':
maxSum2[j] += int(board[i][j])
maxSum1 = maxSum2
count1 = count2
return [maxSum1[0], count1[0]]
```
#### File: 1301-1400/1324-Print Words Vertically/1324-Print Words Vertically.py
```python
class Solution:
def printVertically(self, s: str) -> List[str]:
words = s.split()
maxLen = max(len(word) for word in words)
result = []
for i in range(maxLen):
result.append(''.join(word[i] if i < len(word) else ' ' for word in words))
result[-1] = result[-1].rstrip()
return result
import itertools
class Solution2:
def printVertically(self, s: str) -> List[str]:
return [''.join(c).rstrip() for c in itertools.zip_longest(*s.split(), fillvalue=' ')]
```
#### File: 1301-1400/1326-Minimum Number of Taps to Open to Water a Garden/1326-Minimum Number of Taps to Open to Water a Garden.py
```python
class Solution:
def minTaps(self, n: int, ranges: List[int]) -> int:
area = sorted((i - r, i + r) for i, r in enumerate(ranges) if r > 0)
left = right = count = i = 0
while i < len(area):
if area[i][0] > left:
return -1
right = max(right, area[i][1])
count += 1
while i < len(area) and area[i][0] <= left:
right = max(right, area[i][1])
i += 1
if right >= n:
return count
left = right
return -1
```
#### File: 1301-1400/1334-Find the City With the Smallest Number of Neighbors at a Threshold Distance/1334-Find the City With the Smallest Number of Neighbors at a Threshold Distance.py
```python
import math
class Solution:
def findTheCity(self, n: int, edges: List[List[int]], distanceThreshold: int) -> int:
dist = [[math.inf] * n for _ in range(n)]
for i, j, w in edges:
dist[i][j] = dist[j][i] = w
for i in range(n):
dist[i][i] = 0
for k in range(n):
for i in range(n):
for j in range(n):
dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
counts = [sum(d <= distanceThreshold for d in dist[i]) for i in range(n)]
minIndex = 0
for i in range(1, n):
if counts[i] <= counts[minIndex]:
minIndex = i
return minIndex
```
#### File: 1301-1400/1360-Number of Days Between Two Dates/1360-Number of Days Between Two Dates.py
```python
from datetime import date
class Solution:
def daysBetweenDates(self, date1: str, date2: str) -> int:
d1 = date(*map(int, date1.split('-')))
d2 = date(*map(int, date2.split('-')))
return abs((d1 - d2).days)
```
#### File: 1301-1400/1363-Largest Multiple of Three/1363-Largest Multiple of Three.py
```python
import collections
class Solution:
def largestMultipleOfThree(self, digits: List[int]) -> str:
count = collections.Counter(digits)
remain1Count = count[1] + count[4] + count[7]
remain2Count = count[2] + count[5] + count[8]
total = sum(digits)
if total % 3 == 1:
if remain1Count > 0:
remain1Count -= 1
else:
remain2Count -= 2
elif total % 3 == 2:
if remain2Count > 0:
remain2Count -= 1
else:
remain1Count -= 2
result = []
for d in range(9, -1, -1):
num = count[d]
if d % 3 == 1:
num = min(num, remain1Count)
remain1Count -= num
elif d % 3 == 2:
num = min(num, remain2Count)
remain2Count -= num
result.append(str(d) * num)
result = ''.join(result)
return '0' if result and result[0] == '0' else result
```
#### File: 1301-1400/1367-Linked List in Binary Tree/1367-Linked List in Binary Tree.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def isSubPath(self, head: ListNode, root: TreeNode) -> bool:
if root is None:
return False
def dfs(head, root):
if head is None:
return True
if root is None:
return False
if head.val != root.val:
return False
return dfs(head.next, root.left) or dfs(head.next, root.right)
return dfs(head, root) or self.isSubPath(head, root.left) or self.isSubPath(head, root.right)
class Solution2:
def isSubPath(self, head: ListNode, root: TreeNode) -> bool:
lps = [0]
vals = [head.val]
l = 0
node = head.next
while node:
while l and node.val != vals[l]:
l = lps[l - 1]
l += node.val == vals[l]
lps.append(l)
vals.append(node.val)
node = node.next
def dfs(root, l):
if root is None:
return False
while l and root.val != vals[l]:
l = lps[l - 1]
l += root.val == vals[l]
if l == len(vals):
return True
return dfs(root.left, l) or dfs(root.right, l)
return dfs(root, 0)
```
#### File: 1301-1400/1368-Minimum Cost to Make at Least One Valid Path in a Grid/1368-Minimum Cost to Make at Least One Valid Path in a Grid.py
```python
import collections
class Solution:
def minCost(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
Q = collections.deque()
visited = [[False] * n for _ in range(m)]
def addQ(r, c):
while 0 <= r < m and 0 <= c < n and not visited[r][c]:
Q.append((r, c))
visited[r][c] = True
if grid[r][c] == 1:
c += 1
elif grid[r][c] == 2:
c -= 1
elif grid[r][c] == 3:
r += 1
else:
r -= 1
addQ(0, 0)
step = 0
while Q:
for _ in range(len(Q)):
r, c = Q.popleft()
if r == m - 1 and c == n - 1:
return step
for nr, nc in (r-1, c), (r+1, c), (r, c-1), (r, c+1):
if 0 <= nr < m and 0 <= nc < n and not visited[nr][nc]:
addQ(nr, nc)
step += 1
return -1
```
#### File: 1301-1400/1373-Maximum Sum BST in Binary Tree/1373-Maximum Sum BST in Binary Tree.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def maxSumBST(self, root: TreeNode) -> int:
maxSum = 0
def dfs(root):
if root is None:
return 0, True, None, None
lSum, lBST, lMinVal, lMaxVal = dfs(root.left)
rSum, rBST, rMinVal, rMaxVal = dfs(root.right)
if (lBST and (lMaxVal is None or root.val > lMaxVal) and
rBST and (rMinVal is None or root.val < rMinVal)):
currSum = root.val + lSum + rSum
nonlocal maxSum
maxSum = max(maxSum, currSum)
                return currSum, True, root.val if lMinVal is None else lMinVal, root.val if rMaxVal is None else rMaxVal
else:
return 0, False, None, None
dfs(root)
return maxSum
```
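A small hand-worked check, assuming the `TreeNode` and `Solution` classes above (the tree here is constructed purely for illustration): the best BST is the subtree rooted at 10, giving -5 + 10 + 20 = 25.

```python
root = TreeNode(1)
root.right = TreeNode(10)
root.right.left = TreeNode(-5)
root.right.right = TreeNode(20)
print(Solution().maxSumBST(root))  # 25
```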
#### File: 1301-1400/1375-Bulb Switcher III/1375-Bulb Switcher III.py
```python
class Solution:
def numTimesAllBlue(self, light: List[int]) -> int:
right = num = 0
for i, l in enumerate(light):
right = max(right, l)
if right == i + 1:
num += 1
return num
```
#### File: 1301-1400/1387-Sort Integers by The Power Value/1387-Sort Integers by The Power Value.py
```python
from functools import lru_cache
import heapq
class Solution:
def getKth(self, lo: int, hi: int, k: int) -> int:
@lru_cache(None)
def computeStep(x):
step = 0
while x != 1:
if x & 1:
x = 3 * x + 1
else:
x >>= 1
step += 1
return step
pq = [(computeStep(x), x) for x in range(lo, hi + 1)]
heapq.heapify(pq)
for _ in range(k):
result = heapq.heappop(pq)[1]
return result
```
#### File: 1301-1400/1391-Check if There is a Valid Path in a Grid/1391-Check if There is a Valid Path in a Grid.py
```python
import collections
class Solution:
def hasValidPath(self, grid: List[List[int]]) -> bool:
m, n = len(grid), len(grid[0])
Q = collections.deque([(0, 0)])
visited = [[False] * n for _ in range(m)]
visited[0][0] = True
direction = {1: [(0, -1), (0, 1)],
2: [(-1, 0), (1, 0)],
3: [(0, -1), (1, 0)],
4: [(0, 1), (1, 0)],
5: [(-1, 0), (0, -1)],
6: [(-1, 0), (0, 1)]}
while Q:
r, c = Q.popleft()
if r == m - 1 and c == n - 1:
return True
for dr, dc in direction[grid[r][c]]:
nr = r + dr
nc = c + dc
if 0 <= nr < m and 0 <= nc < n and not visited[nr][nc]:
for bdr, bdc in direction[grid[nr][nc]]:
if nr + bdr == r and nc + bdc == c:
Q.append((nr, nc))
visited[nr][nc] = True
break
return False
```
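A minimal usage sketch, assuming the `Solution` class above and a LeetCode-style environment with `List` imported from `typing`; the grid is the problem's public example, where the streets connect (0,0) down and across to the bottom-right cell:

```python
print(Solution().hasValidPath([[2, 4, 3], [6, 5, 2]]))  # True
```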
#### File: 1301-1400/1395-Count Number of Teams/1395-Count Number of Teams.py
```python
class Solution:
def numTeams(self, rating: List[int]) -> int:
n = len(rating)
inc1 = [0] * n
inc2 = [0] * n
dec1 = [0] * n
dec2 = [0] * n
total = 0
for j in range(1, n):
for i in range(j):
if rating[i] < rating[j]:
inc1[j] += 1
inc2[j] += inc1[i]
elif rating[i] > rating[j]:
dec1[j] += 1
dec2[j] += dec1[i]
total += inc2[j] + dec2[j]
return total
```
#### File: 1301-1400/1400-Construct K Palindrome Strings/1400-Construct K Palindrome Strings.py
```python
import collections
class Solution:
def canConstruct(self, s: str, k: int) -> bool:
if k > len(s):
return False
table = collections.Counter(s)
oddCount = sum(v & 1 for v in table.values())
return oddCount <= k
```
#### File: 1401-1500/1413-Minimum Value to Get Positive Step by Step Sum/1413-Minimum Value to Get Positive Step by Step Sum.py
```python
class Solution:
def minStartValue(self, nums: List[int]) -> int:
total = minSum = 0
for num in nums:
total += num
minSum = min(minSum, total)
return 1 - minSum
```
#### File: 1401-1500/1414-Find the Minimum Number of Fibonacci Numbers Whose Sum Is K/1414-Find the Minimum Number of Fibonacci Numbers Whose Sum Is K.py
```python
class Solution:
def findMinFibonacciNumbers(self, k: int) -> int:
a, b = 1, 1
while b < k:
a, b = b, a + b
count = 0
while k > 0:
if b <= k:
k -= b
count += 1
a, b = b - a, a
return count
```
#### File: 1401-1500/1423-Maximum Points You Can Obtain from Cards/1423-Maximum Points You Can Obtain from Cards.py
```python
class Solution:
def maxScore(self, cardPoints: List[int], k: int) -> int:
n = len(cardPoints)
total = sum(cardPoints)
sz = n - k
curr = minSum = sum(cardPoints[:sz])
for i in range(sz, n):
curr += cardPoints[i] - cardPoints[i - sz]
minSum = min(minSum, curr)
return total - minSum
```
#### File: 1401-1500/1424-Diagonal Traverse II/1424-Diagonal Traverse II.py
```python
class Solution:
def findDiagonalOrder(self, nums: List[List[int]]) -> List[int]:
table = sorted((r + c, -r, val) for r, row in enumerate(nums) for c, val in enumerate(row))
return [val for _, _, val in table]
```
#### File: 1401-1500/1427-Perform String Shifts/1427-Perform String Shifts.py
```python
class Solution:
def stringShift(self, s: str, shift: List[List[int]]) -> str:
total = sum(amount if direction == 0 else -amount for direction, amount in shift)
total %= len(s)
return s[total:] + s[:total]
```
#### File: 1401-1500/1432-Max Difference You Can Get From Changing an Integer/1432-Max Difference You Can Get From Changing an Integer.py
```python
class Solution:
def maxDiff(self, num: int) -> int:
a = b = str(num)
for d in a:
if d != '9':
a = a.replace(d, '9')
break
if b[0] == '1':
for d in b:
if d not in '01':
b = b.replace(d, '0')
break
else:
b = b.replace(b[0], '1')
return int(a) - int(b)
```
#### File: 1401-1500/1447-Simplified Fractions/1447-Simplified Fractions.py
```python
import math
class Solution:
def simplifiedFractions(self, n: int) -> List[str]:
result = []
for d in range(2, n + 1):
            for num in range(1, d):
                if math.gcd(d, num) == 1:
                    result.append("{}/{}".format(num, d))
return result
```
#### File: 1401-1500/1457-Pseudo-Palindromic Paths in a Binary Tree/1457-Pseudo-Palindromic Paths in a Binary Tree.py
```python
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def pseudoPalindromicPaths (self, root: Optional[TreeNode]) -> int:
def dfs(curr, parity):
if curr is None:
return 0
parity ^= (1 << curr.val)
result = 0
if curr.left is None and curr.right is None:
if parity & (parity - 1) == 0:
result += 1
else:
result += dfs(curr.left, parity) + dfs(curr.right, parity)
return result
return dfs(root, 0)
```
#### File: 1401-1500/1462-Course Schedule IV/1462-Course Schedule IV.py
```python
class Solution1:
def checkIfPrerequisite(self, numCourses: int, prerequisites: List[List[int]], queries: List[List[int]]) -> List[bool]:
r = [[False] * numCourses for _ in range(numCourses)]
for u, v in prerequisites:
r[u][v] = True
for k in range(numCourses):
for i in range(numCourses):
for j in range(numCourses):
r[i][j] = r[i][j] or (r[i][k] and r[k][j])
return [r[u][v] for u, v in queries]
import collections
class Solution2:
def checkIfPrerequisite(self, numCourses: int, prerequisites: List[List[int]], queries: List[List[int]]) -> List[bool]:
graph = collections.defaultdict(list)
for u, v in prerequisites:
graph[u].append(v)
table = {}
def dfs(u):
if u in table:
return table[u]
table[u] = set()
for v in graph[u]:
table[u].add(v)
table[u].update(dfs(v))
return table[u]
return [v in dfs(u) for u, v in queries]
```
#### File: 1401-1500/1489-Find Critical and Pseudo-Critical Edges in Minimum Spanning Tree/1489-Find Critical and Pseudo-Critical Edges in Minimum Spanning Tree.py
```python
import math
class Solution:
def findCriticalAndPseudoCriticalEdges(self, n: int, edges: List[List[int]]) -> List[List[int]]:
for i in range(len(edges)):
edges[i].append(i)
edges.sort(key=lambda e: e[2])
def findParent(parent, x):
while parent[x] != x:
parent[x] = parent[parent[x]]
x = parent[x]
return x
def getMST(block, prev):
parent = [i for i in range(n)]
weight = count = 0
if prev != -1:
a, b, w, _ = edges[prev]
parent[b] = a
count = 1
weight = w
for i in range(len(edges)):
if i == block or i == prev:
continue
a, b, w, _ = edges[i]
pa = findParent(parent, a)
pb = findParent(parent, b)
if pa != pb:
parent[pb] = pa
count += 1
weight += w
if count == n - 1:
break
return weight if count == n - 1 else math.inf
original = getMST(-1, -1)
critical = []
pseudoCritical = []
for i in range(len(edges)):
if getMST(i, -1) > original:
critical.append(edges[i][3])
elif getMST(-1, i) == original:
pseudoCritical.append(edges[i][3])
return [critical, pseudoCritical]
```
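A minimal usage sketch, assuming the `Solution` class above and `List` from `typing`; the graph is the problem's public example, where edges 0 and 1 are critical and edges 2 through 5 are pseudo-critical:

```python
edges = [[0, 1, 1], [1, 2, 1], [2, 3, 2], [0, 3, 2], [0, 4, 3], [3, 4, 3], [0, 2, 1]]
print(Solution().findCriticalAndPseudoCriticalEdges(5, edges))  # [[0, 1], [2, 3, 4, 5]]
```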
#### File: 1401-1500/1497-Check If Array Pairs Are Divisible by k/1497-Check If Array Pairs Are Divisible by k.py
```python
import collections
class Solution:
def canArrange(self, arr: List[int], k: int) -> bool:
table = collections.Counter([a % k for a in arr])
if table[0] & 1:
return False
for key in range(1, k // 2 + 1):
if table[key] != table[k - key]:
return False
return True
```
#### File: 1501-1600/1509-Minimum Difference Between Largest and Smallest Value in Three Moves/1509-Minimum Difference Between Largest and Smallest Value in Three Moves.py
```python
import heapq
class Solution:
def minDifference(self, nums: List[int]) -> int:
return min(a - b for a, b in zip(heapq.nlargest(4, nums), heapq.nsmallest(4, nums)[::-1]))
```
#### File: 1501-1600/1518-Water Bottles/1518-Water Bottles.py
```python
class Solution1:
def numWaterBottles(self, numBottles: int, numExchange: int) -> int:
result = numBottles
empty = numBottles
while empty >= numExchange:
full, empty = divmod(empty, numExchange)
result += full
empty = full + empty
return result
class Solution2:
def numWaterBottles(self, numBottles: int, numExchange: int) -> int:
return numBottles + (numBottles - 1) // (numExchange - 1)
```
#### File: 1501-1600/1529-Bulb Switcher IV/1529-Bulb Switcher IV.py
```python
class Solution:
def minFlips(self, target: str) -> int:
count = 0
flag = 1
for b in target:
if b == "01"[flag]:
count += 1
flag = 1 - flag
return count
```
#### File: 1501-1600/1536-Minimum Swaps to Arrange a Binary Grid/1536-Minimum Swaps to Arrange a Binary Grid.py
```python
class Solution:
def minSwaps(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
zero = [0] * m
for i in range(m):
for j in range(n - 1, -1, -1):
if grid[i][j] == 0:
zero[i] += 1
else:
break
count = 0
for i in range(m):
if zero[i] < n - 1 - i:
j = i + 1
while j < n and zero[j] < n - 1 - i:
j += 1
if j == n:
return -1
while j > i:
zero[j], zero[j - 1] = zero[j - 1], zero[j]
j -= 1
count += 1
return count
```
#### File: 1501-1600/1537-Get the Maximum Score/1537-Get the Maximum Score.py
```python
class Solution:
def maxSum(self, nums1: List[int], nums2: List[int]) -> int:
m, n = len(nums1), len(nums2)
i = j = sum1 = sum2 = 0
while i < m or j < n:
if i < m and (j == n or nums1[i] < nums2[j]):
sum1 += nums1[i]
i += 1
elif j < n and (i == m or nums1[i] > nums2[j]):
sum2 += nums2[j]
j += 1
else:
sum1 = sum2 = max(sum1, sum2) + nums1[i]
i += 1
j += 1
return max(sum1, sum2) % (10**9 + 7)
```
#### File: 1501-1600/1541-Minimum Insertions to Balance a Parentheses String/1541-Minimum Insertions to Balance a Parentheses String.py
```python
class Solution:
def minInsertions(self, s: str) -> int:
right = count = 0
for c in s:
if c == '(':
if right & 1:
right -= 1
count += 1
right += 2
else:
right -= 1
if right < 0:
right = 1
count += 1
return right + count
```
#### File: 1501-1600/1542-Find Longest Awesome Substring/1542-Find Longest Awesome Substring.py
```python
class Solution:
def longestAwesome(self, s: str) -> int:
table = {0 : -1}
mask = maxLen = 0
for i, c in enumerate(s):
mask ^= (1 << int(c))
for j in range(10):
mask2 = mask ^ (1 << j)
if mask2 in table:
maxLen = max(maxLen, i - table[mask2])
if mask in table:
maxLen = max(maxLen, i - table[mask])
else:
table[mask] = i
return maxLen
```
#### File: 1501-1600/1551-Minimum Operations to Make Array Equal/1551-Minimum Operations to Make Array Equal.py
```python
class Solution:
def minOperations(self, n: int) -> int:
return (n * n - 1) // 4 if n & 1 else n * n // 4
```
#### File: 1501-1600/1552-Magnetic Force Between Two Balls/1552-Magnetic Force Between Two Balls.py
```python
class Solution:
def maxDistance(self, position: List[int], m: int) -> int:
position.sort()
n = len(position)
left, right = 1, position[-1] - position[0]
while left < right:
mid = (left + right + 1) // 2
count = 1
prev = position[0]
for i in range(1, n):
if position[i] - prev >= mid:
count += 1
prev = position[i]
if count >= m:
left = mid
else:
right = mid - 1
return left
```
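A minimal usage sketch for the binary-search solution above, assuming the `Solution` class and `List` from `typing`; this is the problem's public example, where placing balls at positions 1, 4 and 7 gives a minimum gap of 3:

```python
print(Solution().maxDistance([1, 2, 3, 4, 7], 3))  # 3
```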
#### File: 1501-1600/1557-Minimum Number of Vertices to Reach All Nodes/1557-Minimum Number of Vertices to Reach All Nodes.py
```python
class Solution:
def findSmallestSetOfVertices(self, n: int, edges: List[List[int]]) -> List[int]:
degree = [0] * n
for u, v in edges:
degree[v] = 1
return [i for i, d in enumerate(degree) if d == 0]
```
#### File: 1501-1600/1559-Detect Cycles in 2D Grid/1559-Detect Cycles in 2D Grid.py
```python
class Solution:
def containsCycle(self, grid: List[List[str]]) -> bool:
m, n = len(grid), len(grid[0])
visited = [[False] * n for _ in range(m)]
def dfs(r, c, pr, pc):
visited[r][c] = True
for nr, nc in (r-1, c), (r+1, c), (r, c-1), (r, c+1):
if 0 <= nr < m and 0 <= nc < n and (nr != pr or nc != pc) and grid[nr][nc] == grid[r][c]:
if visited[nr][nc]:
return True
if dfs(nr, nc, r, c):
return True
return False
for r in range(m):
for c in range(n):
if not visited[r][c] and dfs(r, c, -1, -1):
return True
return False
```
#### File: 1501-1600/1560-Most Visited Sector in a Circular Track/1560-Most Visited Sector in a Circular Track.py
```python
class Solution:
def mostVisited(self, n: int, rounds: List[int]) -> List[int]:
start, end = rounds[0], rounds[-1]
if end >= start:
return list(range(start, end + 1))
else:
return list(range(1, end + 1)) + list(range(start, n + 1))
```
#### File: 1501-1600/1563-Stone Game V/1563-Stone Game V.py
```python
class Solution:
def stoneGameV(self, stoneValue: List[int]) -> int:
n = len(stoneValue)
dp = [[0] * n for _ in range(n)]
mx = [[0] * n for _ in range(n)]
for i in range(n):
mx[i][i] = stoneValue[i]
for j in range(1, n):
mid = j
s = stoneValue[j]
rightHalf = 0
for i in range(j - 1, -1, -1):
s += stoneValue[i]
while (rightHalf + stoneValue[mid]) * 2 <= s:
rightHalf += stoneValue[mid]
mid -= 1
if rightHalf * 2 == s:
dp[i][j] = mx[i][mid]
else:
dp[i][j] = (0 if mid == i else mx[i][mid - 1])
if mid != j:
dp[i][j] = max(dp[i][j], mx[j][mid + 1])
mx[i][j] = max(mx[i][j - 1], dp[i][j] + s)
mx[j][i] = max(mx[j][i + 1], dp[i][j] + s)
return dp[0][n - 1]
```
#### File: 1501-1600/1572-Matrix Diagonal Sum/1572-Matrix Diagonal Sum.py
```python
class Solution:
def diagonalSum(self, mat: List[List[int]]) -> int:
sum = 0
n = len(mat)
for i in range(n):
sum += mat[i][i]
if i != n - 1 - i:
sum += mat[i][n - 1 - i]
return sum
```
#### File: 1501-1600/1575-Count All Possible Routes/1575-Count All Possible Routes.py
```python
from functools import lru_cache
class Solution:
def countRoutes(self, locations: List[int], start: int, finish: int, fuel: int) -> int:
MOD = 10 ** 9 + 7
@lru_cache(None)
def dfs(curr, fuel):
if fuel < 0:
return 0
result = 1 if curr == finish else 0
for next, location in enumerate(locations):
if next != curr:
result = (result + dfs(next, fuel - abs(locations[curr] - location))) % MOD
return result
return dfs(start, fuel)
```
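A minimal usage sketch, assuming the `Solution` class above and `List` from `typing`; the input is the problem's public example, with four routes from city 1 to city 3 within 5 units of fuel:

```python
print(Solution().countRoutes([2, 3, 6, 8, 4], 1, 3, 5))  # 4
```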
#### File: 1501-1600/1595-Minimum Cost to Connect Two Groups of Points/1595-Minimum Cost to Connect Two Groups of Points.py
```python
import math
from functools import lru_cache
class Solution:
def connectTwoGroups(self, cost: List[List[int]]) -> int:
size1, size2 = len(cost), len(cost[0])
minCost2 = [min(cost[i][j] for i in range(size1)) for j in range(size2)]
@lru_cache(None)
def dfs(start, mask):
if start == size1:
result = 0
for j in range(size2):
if mask & (1 << j) == 0:
result += minCost2[j]
else:
result = math.inf
for j in range(size2):
result = min(result, cost[start][j] + dfs(start + 1, mask | (1 << j)))
return result
return dfs(0, 0)
```
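A minimal usage sketch for the bitmask DP above, assuming the `Solution` class and `List` from `typing`; connecting the first left point to the first right point (cost 15) and the second left point to the second right point (cost 2) covers both groups for a total of 17:

```python
print(Solution().connectTwoGroups([[15, 96], [36, 2]]))  # 17
```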
#### File: 1601-1700/1605-Find Valid Matrix Given Row and Column Sums/1605-Find Valid Matrix Given Row and Column Sums.py
```python
class Solution:
def restoreMatrix(self, rowSum: List[int], colSum: List[int]) -> List[List[int]]:
m, n = len(rowSum), len(colSum)
result = [[0] * n for _ in range(m)]
for i in range(m):
for j in range(n):
result[i][j] = min(rowSum[i], colSum[j])
rowSum[i] -= result[i][j]
colSum[j] -= result[i][j]
return result
```
#### File: 1601-1700/1614-Maximum Nesting Depth of the Parentheses/1614-Maximum Nesting Depth of the Parentheses.py
```python
class Solution:
def maxDepth(self, s: str) -> int:
bal = depth = 0
for c in s:
if c == '(':
bal += 1
depth = max(depth, bal)
elif c == ')':
bal -= 1
return depth
```
#### File: 1601-1700/1617-Count Subtrees With Max Distance Between Cities/1617-Count Subtrees With Max Distance Between Cities.py
```python
import collections
class Solution:
def countSubgraphsForEachDiameter(self, n: int, edges: List[List[int]]) -> List[int]:
graph = collections.defaultdict(list)
for u, v in edges:
graph[u - 1].append(v - 1)
graph[v - 1].append(u - 1)
def treeDiameter(cities):
maxLen = 0
visited = 0
def dfs(curr, prev):
maxLen1 = maxLen2 = 0
nonlocal visited
visited += 1
for next in graph[curr]:
if next != prev and next in cities:
nextLen = dfs(next, curr)
if nextLen > maxLen1:
maxLen2 = maxLen1
maxLen1 = nextLen
elif nextLen > maxLen2:
maxLen2 = nextLen
nonlocal maxLen
maxLen = max(maxLen, maxLen1 + maxLen2)
return 1 + maxLen1
start = cities.pop()
cities.add(start)
dfs(start, -1)
return maxLen if visited == len(cities) else 0
def maxDistance(state):
cities = set()
for i in range(n):
if state & (1 << i):
cities.add(i)
return treeDiameter(cities)
result = [0] * (n - 1)
for state in range(1, 1 << n):
d = maxDistance(state)
if d != 0:
result[d - 1] += 1
return result
```
#### File: 1601-1700/1631-Path With Minimum Effort/1631-Path With Minimum Effort.py
```python
class Solution:
def minimumEffortPath(self, heights: List[List[int]]) -> int:
m, n = len(heights), len(heights[0])
def dfs(r, c, threshold):
if r == m - 1 and c == n - 1:
return True
for nr, nc in (r-1,c),(r+1,c),(r,c-1),(r,c+1):
if 0 <= nr < m and 0 <= nc < n and not visited[nr][nc] and abs(heights[nr][nc] - heights[r][c]) <= threshold:
visited[nr][nc] = True
if dfs(nr, nc, threshold):
return True
return False
low, high = 0, max(map(max, heights)) - min(map(min, heights))
while low < high:
mid = (low + high) // 2
visited = [[False] * n for _ in range(m)]
if dfs(0, 0, mid):
high = mid
else:
low = mid + 1
return low
```
#### File: 1601-1700/1637-Widest Vertical Area Between Two Points Containing No Points/1637-Widest Vertical Area Between Two Points Containing No Points.py
```python
class Solution:
def maxWidthOfVerticalArea(self, points: List[List[int]]) -> int:
xs = sorted(x for x, y in points)
return max(xs[i] - xs[i - 1] for i in range(1, len(xs)))
```
#### File: 1601-1700/1639-Number of Ways to Form a Target String Given a Dictionary/1639-Number of Ways to Form a Target String Given a Dictionary.py
```python
from functools import lru_cache
class Solution:
def numWays(self, words: List[str], target: str) -> int:
MOD = 10 ** 9 + 7
table = [[0] * len(words[0]) for _ in range(26)]
for word in words:
for i, c in enumerate(word):
table[ord(c) - ord('a')][i] += 1
@lru_cache(None)
def dfs(start, wi):
if start == len(target):
return 1
if wi == len(words[0]):
return 0
result = dfs(start, wi + 1)
c = target[start]
if table[ord(c) - ord('a')][wi] > 0:
result = (result + dfs(start + 1, wi + 1) * table[ord(c) - ord('a')][wi]) % MOD
return result
return dfs(0, 0)
```
#### File: 1601-1700/1646-Get Maximum in Generated Array/1646-Get Maximum in Generated Array.py
```python
class Solution:
def getMaximumGenerated(self, n: int) -> int:
if n <= 1:
return n
nums = [0] * (n + 1)
nums[1] = maxNum = 1
for i in range(2, n + 1):
if i & 1:
nums[i] = nums[i // 2] + nums[i // 2 + 1]
else:
nums[i] = nums[i // 2]
maxNum = max(maxNum, nums[i])
return maxNum
```
#### File: 1601-1700/1653-Minimum Deletions to Make String Balanced/1653-Minimum Deletions to Make String Balanced.py
```python
class Solution:
def minimumDeletions(self, s: str) -> int:
minDel = countb = 0
for c in s:
if c == 'b':
countb += 1
else:
minDel = min(minDel + 1, countb)
return minDel
```
#### File: 1601-1700/1654-Minimum Jumps to Reach Home/1654-Minimum Jumps to Reach Home.py
```python
import math
import collections
class Solution:
def minimumJumps(self, forbidden: List[int], a: int, b: int, x: int) -> int:
g = math.gcd(a, b)
if x % g:
return -1
maxVal = a + b + max([x] + forbidden)
jumps = [0] + [math.inf] * maxVal
for pos in forbidden:
jumps[pos] = -1
Q = collections.deque([0])
while Q:
pos = Q.popleft()
if pos + a <= maxVal and jumps[pos + a] > jumps[pos] + 1:
Q.append(pos + a)
jumps[pos + a] = jumps[pos] + 1
if pos - b > 0 and jumps[pos - b] > jumps[pos] + 1:
jumps[pos - b] = jumps[pos] + 1
if pos - b + a <= maxVal and jumps[pos - b + a] > jumps[pos] + 2:
Q.append(pos - b + a)
jumps[pos - b + a] = jumps[pos] + 2
return jumps[x] if jumps[x] < math.inf else -1
```
#### File: 1601-1700/1657-Determine if Two Strings Are Close/1657-Determine if Two Strings Are Close.py
```python
import collections
class Solution:
def closeStrings(self, word1: str, word2: str) -> bool:
set1 = set(word1)
set2 = set(word2)
if set1 != set2:
return False
table1 = collections.Counter(word1)
table2 = collections.Counter(word2)
return sorted(table1.values()) == sorted(table2.values())
```
#### File: 1601-1700/1668-Maximum Repeating Substring/1668-Maximum Repeating Substring.py
```python
class Solution:
def maxRepeating(self, sequence: str, word: str) -> int:
k = 0
s = word
while s in sequence:
k += 1
s += word
return k
```
#### File: 1601-1700/1675-Minimize Deviation in Array/1675-Minimize Deviation in Array.py
```python
import heapq
import math
class Solution:
def minimumDeviation(self, nums: List[int]) -> int:
pq = []
minNum = math.inf
for num in nums:
if num & 1:
num *= 2
heapq.heappush(pq, -num)
minNum = min(minNum, num)
minDev = math.inf
while pq[0] & 1 == 0:
top = heapq.heappop(pq)
val = -top
minDev = min(minDev, val - minNum)
heapq.heappush(pq, top // 2)
minNum = min(minNum, val // 2)
return min(minDev, -pq[0] - minNum)
```
#### File: 1601-1700/1690-Stone Game VII/1690-Stone Game VII.py
```python
class Solution:
def stoneGameVII(self, stones: List[int]) -> int:
n = len(stones)
presum = [0] * (n + 1)
for i, s in enumerate(stones):
presum[i + 1] = presum[i] + s
dp = [[0] * n for _ in range(n)]
for l in range(2, n + 1):
for i in range(n - l + 1):
j = i + l - 1
dp[i][j] = max(presum[j + 1] - presum[i + 1] - dp[i + 1][j],
presum[j] - presum[i] - dp[i][j - 1])
return dp[0][n - 1]
```
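A minimal usage sketch, assuming the `Solution` class above and `List` from `typing`; with these stones the optimal score difference works out to 6:

```python
print(Solution().stoneGameVII([5, 3, 1, 4, 2]))  # 6
```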
#### File: 1601-1700/1696-Jump Game VI/1696-Jump Game VI.py
```python
import collections
class Solution:
def maxResult(self, nums: List[int], k: int) -> int:
maxScore = 0
dq = collections.deque()
n = len(nums)
dp = [0] * n
for i in range(n - 1, -1, -1):
while dq and dq[0] > i + k:
dq.popleft()
maxScore = nums[i] + (dp[dq[0]] if dq else 0)
dp[i] = maxScore
while dq and maxScore > dp[dq[-1]]:
dq.pop()
dq.append(i)
return maxScore
``` |
{
"source": "jiadaizhao/LintCode",
"score": 3
} |
#### File: 0101-0200/0193-Longest Valid Parentheses/0193-Longest Valid Parentheses.py
```python
class Solution:
"""
@param s: a string
@return: return a integer
"""
def longestValidParentheses(self, s):
# write your code here
left = right = maxLen = 0
for c in s:
if c == '(':
left += 1
else:
right += 1
if left == right:
maxLen = max(maxLen, left * 2)
if right > left:
left = right = 0
left = right = 0
for c in s[::-1]:
if c == '(':
left += 1
else:
right += 1
if left == right:
maxLen = max(maxLen, left * 2)
if left > right:
left = right = 0
return maxLen
```
#### File: 0501-0600/0506-Movie Recommendation/0506-Movie Recommendation.py
```python
import collections
class Solution:
def minMalwareSpread(self, graph):
mt = collections.defaultdict(list)
for i, movies in enumerate(graph):
for m in movies:
mt[m].append(i)
result = []
for i, movies in enumerate(graph):
recommend = collections.Counter()
for m in movies:
for user in mt[m]:
if user == i:
continue
for nm in graph[user]:
if nm not in graph[i]:
recommend[nm] += 1
if len(recommend) > 0:
result.append(list(list(zip(*sorted(recommend.items(), key = lambda x: (-x[1], x[0]))[:5]))[0]))
else:
result.append([])
return result
```
#### File: 0701-0800/0773-Valid Anagram/0773-Valid Anagram.py
```python
import collections
class Solution:
"""
@param s: string s
@param t: string t
@return: Given two strings s and t, write a function to determine if t is an anagram of s.
"""
def isAnagram(self, s, t):
# write your code here
return collections.Counter(s) == collections.Counter(t)
```
#### File: 0801-0900/0859-Max Stack/0859-Max Stack.py
```python
class MaxStack:
def __init__(self):
# do intialization if necessary
self.St = []
self.maxSt = []
"""
@param: number: An integer
@return: nothing
"""
def push(self, x):
# write your code here
self.St.append(x)
if not self.maxSt or x >= self.maxSt[-1]:
self.maxSt.append(x)
"""
@return: An integer
"""
def pop(self):
# write your code here
val = self.St.pop()
if val == self.maxSt[-1]:
self.maxSt.pop()
return val
"""
@return: An integer
"""
def top(self):
# write your code here
return self.St[-1]
"""
@return: An integer
"""
def peekMax(self):
# write your code here
return self.maxSt[-1]
"""
@return: An integer
"""
    def popMax(self):
        # write your code here
        val = self.maxSt[-1]
        temp = []
        # Re-push the elements above the max through push() so maxSt stays consistent
        while self.St[-1] != val:
            temp.append(self.pop())
        self.pop()
        while temp:
            self.push(temp.pop())
        return val
```
#### File: 0901-1000/0905-Nested List Weight Sum II/0905-Nested List Weight Sum II.py
```python
class Solution:
"""
@param nestedList: a list of NestedInteger
@return: the sum
"""
def depthSumInverse(self, nestedList):
# Write your code here.
unweighted = weighted = 0
while nestedList:
nextLevel = []
for ni in nestedList:
if ni.isInteger():
unweighted += ni.getInteger()
else:
for l in ni.getList():
nextLevel.append(l)
weighted += unweighted
nestedList = nextLevel
return weighted
```
#### File: 0901-1000/0931-Median of K Sorted Arrays/0931-Median of K Sorted Arrays.py
```python
import bisect
class Solution:
"""
@param nums: the given k sorted arrays
@return: the median of the given k sorted arrays
"""
def findMedian(self, nums):
# write your code here
n = sum(len(arr) for arr in nums)
if n == 0:
return 0
def findKth(k):
result = low = 0
high = (1 << 31) - 1
while low <= high:
mid = (low + high) // 2
count = sum(len(arr) - bisect.bisect_left(arr, mid) for arr in nums)
if count >= k:
result = mid
low = mid + 1
else:
high = mid - 1
return result
if n & 1:
return findKth(n // 2 + 1)
else:
return (findKth(n // 2) + findKth(n // 2 + 1)) / 2
```
#### File: 1001-1100/1016-Minimum Swaps To Make Sequences Increasing/1016-Minimum Swaps To Make Sequences Increasing.py
```python
class Solution:
"""
@param A: an array
@param B: an array
@return: the minimum number of swaps to make both sequences strictly increasing
"""
def minSwap(self, A, B):
# Write your code here
preserve0, swap0 = 0, 1
for i in range(1, len(A)):
if A[i - 1] < A[i] and B[i - 1] < B[i]:
preserve1 = preserve0
swap1 = swap0 + 1
else:
preserve1 = len(A)
swap1 = len(A)
if A[i - 1] < B[i] and B[i - 1] < A[i]:
preserve1 = min(preserve1, swap0)
swap1 = min(swap1, preserve0 + 1)
preserve0 = preserve1
swap0 = swap1
return min(preserve0, swap0)
```
#### File: 1001-1100/1024-Number of Matching Subsequences/1024-Number of Matching Subsequences.py
```python
import collections
class Solution:
"""
@param S: a string
@param words: a dictionary of words
@return: the number of words[i] that is a subsequence of S
"""
def numMatchingSubseq(self, S, words):
# Write your code here
waiting = collections.defaultdict(list)
for it in map(iter, words):
waiting[next(it)].append(it)
for c in S:
for it in waiting.pop(c, ()):
waiting[next(it, None)].append(it)
return len(waiting[None])
```
#### File: 1001-1100/1026-Domino and Tromino Tiling/1026-Domino and Tromino Tiling.py
```python
class Solution:
"""
@param N: a integer
@return: return a integer
"""
def numTilings(self, N):
# write your code here
MOD = 1000000007
dp = [0] * 1001
dp[0] = 1
dp[1] = 1
dp[2] = 2
for i in range(3, N + 1):
dp[i] = (dp[i - 1] * 2 + dp[i - 3]) % MOD
return dp[N]
```
#### File: 1001-1100/1030-K-th Smallest Prime Fraction/1030-K-th Smallest Prime Fraction.py
```python
class Solution1:
"""
@param A: a list of integers
@param K: a integer
@return: return two integers
"""
def kthSmallestPrimeFraction(self, A, K):
# write your code here
from fractions import Fraction
def under(x):
count = res = left = 0
for right in range(1, len(A)):
while A[left] < x * A[right]:
left += 1
count += left
if left > 0:
res = max(res, Fraction(A[left - 1], A[right]))
return count, res
low, high = 0.0, 1.0
while high - low > 1e-8:
mid = (low + high) / 2
count, res = under(mid)
if count < K:
low = mid
else:
ans = res
high = mid
return ans.numerator, ans.denominator
class Solution2:
"""
@param A: a list of integers
@param K: a integer
@return: return two integers
"""
def kthSmallestPrimeFraction(self, A, K):
# write your code here
import heapq
pq = [(A[0] / A[i], 0, i) for i in range(len(A) - 1, 0, -1)]
for _ in range(K - 1):
frac, p, q = heapq.heappop(pq)
p += 1
if p < q:
heapq.heappush(pq, (A[p] / A[q], p, q))
return A[pq[0][1]], A[pq[0][2]]
```
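A minimal usage sketch for the heap-based `Solution2` above (the LintCode signature takes plain Python lists, so nothing beyond the class is assumed); with A = [1, 2, 3, 5] the three smallest fractions are 1/5, 1/3 and 2/5, so the 3rd is 2/5:

```python
print(Solution2().kthSmallestPrimeFraction([1, 2, 3, 5], 3))  # (2, 5)
```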
#### File: 1001-1100/1032-Letter Case Permutation/1032-Letter Case Permutation.py
```python
class Solution:
"""
@param S: a string
@return: return a list of strings
"""
def letterCasePermutation(self, S):
# write your code here
upper = sum(c.isalpha() for c in S)
result = []
for i in range(1 << upper):
b = 0
word = []
for c in S:
if c.isalpha():
if (i >> b) & 1:
word.append(c.upper())
else:
word.append(c.lower())
b += 1
else:
word.append(c)
result.append(''.join(word))
return result
```
#### File: 1001-1100/1041-Reorganize String/1041-Reorganize String.py
```python
import collections
import heapq
class Solution:
"""
@param S: a string
@return: return a string
"""
def reorganizeString(self, S):
# write your code here
pq = [(-v, k) for k, v in collections.Counter(S).items()]
heapq.heapify(pq)
result = []
prevCount = -1
prevChar = ' '
while pq:
count, c = heapq.heappop(pq)
result.append(c)
count = -count
if prevCount > 0:
heapq.heappush(pq, (-prevCount, prevChar))
prevCount = count - 1
prevChar = c
return ''.join(result) if len(result) == len(S) else ''
```
#### File: 1001-1100/1043-Couples Holding Hands/1043-Couples Holding Hands.py
```python
class Solution:
"""
@param row: the couples' initial seating
@return: the minimum number of swaps
"""
def minSwapsCouples(self, row):
# Write your code here
count = 0
table = {}
for i, r in enumerate(row):
table[r] = i
for i in range(0, len(row), 2):
couple = row[i] ^ 1
if i + 1 == table[couple]:
continue
count += 1
table[row[i + 1]] = table[couple]
row[i + 1], row[table[couple]] = row[table[couple]], row[i + 1]
table[couple] = i + 1
return count
```
#### File: 1001-1100/1052-Shortest Completing Word/1052-Shortest Completing Word.py
```python
class Solution:
"""
@param licensePlate: a string
@param words: List[str]
@return: return a string
"""
def shortestCompletingWord(self, licensePlate, words):
# write your code here
table = [0] * 26
count = 0
for c in licensePlate:
if c.isalpha():
table[ord(c.lower()) - ord('a')] += 1
count += 1
minIndex = -1
for i, word in enumerate(words):
need = table[:]
num = count
for c in word:
if need[ord(c) - ord('a')] > 0:
need[ord(c) - ord('a')] -= 1
num -= 1
if num == 0:
if minIndex == -1 or len(word) < len(words[minIndex]):
minIndex = i
break
return words[minIndex]
```
#### File: 1001-1100/1072-Find K-th Smallest Pair Distance/1072-Find K-th Smallest Pair Distance.py
```python
class Solution:
"""
@param nums: a list of integers
@param k: a integer
@return: return a integer
"""
def smallestDistancePair(self, nums, k):
# write your code here
nums.sort()
low = 0
high = nums[-1] - nums[0]
while low < high:
mid = (low + high) // 2
count = left = 0
for right in range(1, len(nums)):
while nums[right] - nums[left] > mid:
left += 1
count += right - left
if count >= k:
high = mid
else:
low = mid + 1
return low
```
#### File: 1001-1100/1080-Max Area of Island/1080-Max Area of Island.py
```python
class Solution:
"""
@param grid: a 2D array
@return: the maximum area of an island in the given 2D array
"""
def maxAreaOfIsland(self, grid):
# Write your code here
maxArea = 0
m = len(grid)
n = len(grid[0])
def dfs(r, c):
if grid[r][c] == 0:
return 0
total = 1
grid[r][c] = 0
for nr, nc in ((r-1, c), (r+1, c), (r, c-1), (r, c+1)):
if 0 <= nr < m and 0 <= nc < n:
total += dfs(nr, nc)
return total
for i, row in enumerate(grid):
for j, val in enumerate(row):
if val == 1:
maxArea = max(maxArea, dfs(i, j))
return maxArea
```
#### File: 1001-1100/1094-Second Minimum Node In a Binary Tree/1094-Second Minimum Node In a Binary Tree.py
```python
class Solution:
"""
@param root: the root
@return: the second minimum value in the set made of all the nodes' value in the whole tree
"""
def findSecondMinimumValue(self, root):
# Write your code here
def helper(root, smallest):
if root is None:
return -1
if root.val != smallest:
return root.val
left = helper(root.left, smallest)
right = helper(root.right, smallest)
if left == -1:
return right
elif right == -1:
return left
else:
return min(left, right)
return helper(root, root.val)
```
#### File: 1101-1200/1122-Add One Row to Tree/1122-Add One Row to Tree.py
```python
class Solution:
"""
@param root: the root of binary tree
@param v: a integer
@param d: a integer
@return: return a TreeNode
"""
def addOneRow(self, root, v, d):
# write your code here
if d == 1:
node = TreeNode(v)
node.left = root
return node
def helper(root, v, d):
if root is None:
return
if d == 2:
left = TreeNode(v)
right = TreeNode(v)
left.left = root.left
right.right = root.right
root.left = left
root.right = right
else:
helper(root.left, v, d - 1)
helper(root.right, v, d - 1)
helper(root, v, d)
return root
```
#### File: 1101-1200/1134-Find Duplicate File in System/1134-Find Duplicate File in System.py
```python
import collections
class Solution:
"""
@param paths: a list of string
@return: all the groups of duplicate files in the file system in terms of their paths
"""
def findDuplicate(self, paths):
# Write your code here
table = collections.defaultdict(list)
for path in paths:
dir, *files = path.split()
for file in files:
name, _, content = file.partition('(')
table[content[:-1]].append(dir + '/' + name)
return [v for v in table.values() if len(v) > 1]
```
#### File: 1101-1200/1149-Valid Square/1149-Valid Square.py
```python
class Solution:
"""
@param p1: the first point
@param p2: the second point
@param p3: the third point
@param p4: the fourth point
@return: whether the four points could construct a square
"""
def validSquare(self, p1, p2, p3, p4):
# Write your code here
def distanceSq(p1, p2):
return (p1[0] - p2[0]) * (p1[0] - p2[0]) + (p1[1] - p2[1]) * (p1[1] - p2[1])
table = {distanceSq(p1, p2), distanceSq(p1, p3), distanceSq(p1, p4), distanceSq(p2, p3), distanceSq(p2, p4), distanceSq(p3, p4)}
return 0 not in table and len(table) == 2
```
#### File: 1101-1200/1150-Fraction Addition and Subtraction/1150-Fraction Addition and Subtraction.py
```python
import re
import math
class Solution:
"""
@param expression: a string
@return: return a string
"""
def fractionAddition(self, expression):
# write your code here
nums = map(int, re.findall(r'[+-]?\d+', expression))
A, B = 0, 1
for a in nums:
b = next(nums)
A = A * b + B * a
B *= b
g = math.gcd(A, B)
A //= g
B //= g
return '{}/{}'.format(A, B)
```
#### File: 1101-1200/1151-Tag Validator/1151-Tag Validator.py
```python
import re
class Solution:
"""
@param code: the given code
@return: whether it is valid
"""
def isValid(self, code):
# Write your code here
code = re.sub(r'<!\[CDATA\[.*?\]\]>|t', '-', code)
prev = None
while code != prev:
prev = code
code = re.sub(r'<([A-Z]{1,9})>[^<]*</\1>', 't', code)
return code == 't'
```
#### File: 1101-1200/1163-Distribute Candies/1163-Distribute Candies.py
```python
class Solution:
"""
@param candies: a list of integers
@return: return a integer
"""
def distributeCandies(self, candies):
# write your code here
return min(len(set(candies)), len(candies) // 2)
```
#### File: 1101-1200/1173-Reverse Words in a String III/1173-Reverse Words in a String III.py
```python
class Solution:
"""
@param s: a string
@return: reverse the order of characters in each word within a sentence while still preserving whitespace and initial word order
"""
def reverseWords(self, s):
# Write your code here
return ' '.join(word[::-1] for word in s.split())
```
#### File: 1101-1200/1182-Reverse String II/1182-Reverse String II.py
```python
class Solution:
"""
@param s: the string
@param k: the integer k
@return: the answer
"""
def reverseStringII(self, s, k):
# Write your code here.
l = list(s)
for i in range(0, len(l), k * 2):
l[i: i + k] = l[i: i + k][::-1]
return ''.join(l)
```
#### File: 1101-1200/1184-Minimum Time Difference/1184-Minimum Time Difference.py
```python
class Solution:
"""
@param timePoints: a list of 24-hour clock time points
@return: the minimum minutes difference between any two time points in the list
"""
def findMinDifference(self, timePoints):
# Write your code here
bucket = [0] * 1440
for time in timePoints:
index = int(time[:2]) * 60 + int(time[3:])
bucket[index] += 1
if bucket[index] > 1:
return 0
first = last = -1
minDiff = 1440
for i, b in enumerate(bucket):
if b:
if first == -1:
first = i
else:
minDiff = min(minDiff, i - last)
last = i
return min(minDiff, 1440 - (last - first))
```
#### File: 1101-1200/1185-Complex Number Multiplication/1185-Complex Number Multiplication.py
```python
class Solution:
"""
@param a: a string
@param b: a string
@return: a string representing their multiplication
"""
def complexNumberMultiply(self, a, b):
# Write your code here
ra, ia = map(int, a[:-1].split('+'))
rb, ib = map(int, b[:-1].split('+'))
return '{}+{}i'.format(ra * rb - ia * ib, ra * ib + rb * ia)
```
#### File: 1101-1200/1186-Encode and Decode TinyURL/1186-Encode and Decode TinyURL.py
```python
import random
class Solution:
def __init__(self):
self.seed = '<KEY>'
self.table = {}
def getKey(self):
return ''.join(random.choice(self.seed) for _ in range(6))
def encode(self, longUrl):
# Encodes a URL to a shortened URL.
while True:
key = self.getKey()
if key not in self.table:
break
self.table[key] = longUrl
return key
def decode(self, shortUrl):
# Decodes a shortened URL to its original URL.
return self.table[shortUrl]
# Your Codec object will be instantiated and called as such:
# Codec codec = new Codec();
# codec.decode(codec.encode(url));
```
#### File: 1101-1200/1189-Minesweeper/1189-Minesweeper.py
```python
import collections
class Solution:
"""
@param board: a board
@param click: the position
@return: the new board
"""
def updateBoard(self, board, click):
# Write your code here
b = []
for s in board:
temp = []
for c in s:
temp.append(c)
b.append(temp)
row, col = click
if b[row][col] == 'M':
b[row][col] = 'X'
else:
m, n = len(board), len(board[0])
Q = collections.deque([(row, col)])
b[row][col] = 'B'
while Q:
r, c = Q.popleft()
count = 0
for nr, nc in (r-1, c-1), (r-1, c), (r-1, c+1), (r, c-1), (r, c+1), (r+1, c-1), (r+1, c), (r+1, c+1):
if 0 <= nr < m and 0 <= nc < n and b[nr][nc] == 'M':
count += 1
if count > 0:
b[r][c] = str(count)
else:
for nr, nc in (r-1, c-1), (r-1, c), (r-1, c+1), (r, c-1), (r, c+1), (r+1, c-1), (r+1, c), (r+1, c+1):
if 0 <= nr < m and 0 <= nc < n and b[nr][nc] == 'E':
Q.append((nr, nc))
b[nr][nc] = 'B'
return [''.join(row) for row in b]
```
#### File: 1101-1200/1193-Detect Capital/1193-Detect Capital.py
```python
class Solution:
"""
@param word: a string
@return: return a boolean
"""
def detectCapitalUse(self, word):
# write your code here
return word.isupper() or word.islower() or word.istitle()
```
#### File: 1201-1300/1201-Next Greater Element II/1201-Next Greater Element II.py
```python
class Solution:
"""
@param nums: an array
@return: the Next Greater Number for every element
"""
def nextGreaterElements(self, nums):
# Write your code here
St = []
n = len(nums)
result = [-1] * n
for i in range(2 * n - 1):
while St and nums[St[-1]] < nums[i % n]:
result[St.pop()] = nums[i % n]
St.append(i % n)
return result
```
#### File: 1201-1300/1202-IPO/1202-IPO.py
```python
import heapq
class Solution:
"""
@param k: an integer
@param W: an integer
@param Profits: an array
@param Capital: an array
@return: final maximized capital
"""
def findMaximizedCapital(self, k, W, Profits, Capital):
# Write your code here
cappq = [(cap, i) for i, cap in enumerate(Capital)]
heapq.heapify(cappq)
profitpq = []
for _ in range(k):
while cappq and cappq[0][0] <= W:
cap, index = heapq.heappop(cappq)
heapq.heappush(profitpq, -Profits[index])
if profitpq:
W -= heapq.heappop(profitpq)
else:
break
return W
```
#### File: 1201-1300/1207-Teemo Attacking/1207-Teemo Attacking.py
```python
class Solution:
"""
@param timeSeries: the Teemo's attacking ascending time series towards Ashe
@param duration: the poisoning time duration per Teemo's attacking
@return: the total time that Ashe is in poisoned condition
"""
def findPoisonedDuration(self, timeSeries, duration):
# Write your code here
return sum(min(duration, curr - prev) for prev, curr in zip(timeSeries, timeSeries[1:])) + duration if timeSeries else 0
```
#### File: 1201-1300/1212-Max Consecutive Ones/1212-Max Consecutive Ones.py
```python
class Solution:
"""
@param nums: a binary array
@return: the maximum number of consecutive 1s
"""
def findMaxConsecutiveOnes(self, nums):
# Write your code here
count = maxCount = 0
for num in nums:
if num == 1:
count += 1
maxCount = max(maxCount, count)
else:
count = 0
return maxCount
```
#### File: 1201-1300/1215-Magical String/1215-Magical String.py
```python
class Solution:
"""
@param n: an integer
@return: the number of '1's in the first N number in the magical string S
"""
def magicalString(self, n):
# write your code here
if n == 0:
return 0
seed = list('122')
count = 1
i = 2
while len(seed) < n:
num = int(seed[i])
if seed[-1] == '1':
seed += ['2'] * num
else:
seed += ['1'] * num
count += (num if len(seed) <= n else 1)
i += 1
return count
```
#### File: 1201-1300/1230-Assign Cookies/1230-Assign Cookies.py
```python
class Solution:
"""
@param g: children's greed factor
@param s: cookie's size
@return: the maximum number
"""
def findContentChildren(self, g, s):
# Write your code here
g.sort()
s.sort()
i = 0
j = 0
while i < len(g) and j < len(s):
if s[j] >= g[i]:
i += 1
j += 1
return i
```
#### File: 1201-1300/1240-Path Sum III/1240-Path Sum III.py
```python
import collections
class Solution:
"""
@param root:
@param sum:
@return: the num of sum path
"""
def pathSum(self, root, sum):
# write your code here
table = collections.Counter([0])
count = 0
def dfs(root, curr):
if root is None:
return
curr += root.val
nonlocal count
count += table[curr - sum]
table[curr] += 1
dfs(root.left, curr)
dfs(root.right, curr)
table[curr] -= 1
dfs(root, 0)
return count
```
#### File: 1201-1300/1243-Number of Segments in a String/1243-Number of Segments in a String.py
```python
class Solution:
"""
@param s: a string
@return: the number of segments in a string
"""
def countSegments(self, s):
# write yout code here
return len(s.split())
```
#### File: 1201-1300/1250-Third Maximum Number/1250-Third Maximum Number.py
```python
import math
class Solution:
"""
@param nums: the array
@return: the third maximum number in this array
"""
def thirdMax(self, nums):
# Write your code here.
maxNums = [-math.inf] * 3
for num in nums:
if num > maxNums[0]:
maxNums = [num, maxNums[0], maxNums[1]]
elif num != maxNums[0] and num > maxNums[1]:
maxNums[1:3] = [num, maxNums[1]]
elif num != maxNums[0] and num != maxNums[1] and num > maxNums[2]:
maxNums[2] = num
return maxNums[0] if -math.inf in maxNums else maxNums[2]
```
#### File: 1201-1300/1255-Remove K Digits/1255-Remove K Digits.py
```python
class Solution:
"""
@param num: a string
@param k: an integer
@return: return a string
"""
def removeKdigits(self, num, k):
# write your code here
St = []
for c in num:
while St and c < St[-1] and k > 0:
St.pop()
k -= 1
St.append(c)
return ''.join(St[:len(St) - k]).lstrip('0') or '0'
```
#### File: 1201-1300/1261-Longest Substring with At Least K Repeating Characters/1261-Longest Substring with At Least K Repeating Characters.py
```python
import collections
class Solution:
"""
@param s: a string
@param k: an integer
@return: return an integer
"""
def longestSubstring(self, s, k):
# write your code here
maxLen = 0
for i in range(1, len(set(s)) + 1):
start = 0
table = collections.Counter()
unique = 0
noLessK = 0
for j in range(len(s)):
table[s[j]] += 1
if table[s[j]] == 1:
unique += 1
if table[s[j]] == k:
noLessK += 1
while unique > i:
table[s[start]] -= 1
if table[s[start]] == 0:
unique -= 1
if table[s[start]] == k - 1:
noLessK -= 1
start += 1
if unique == i and noLessK == i:
maxLen = max(maxLen, j - start + 1)
return maxLen
class Solution2:
"""
@param s: a string
@param k: an integer
@return: return an integer
"""
def longestSubstring(self, s, k):
# write your code here
for c in set(s):
if s.count(c) < k:
return max(self.longestSubstring(t, k) for t in s.split(c))
return len(s)
```
#### File: 1201-1300/1265-Elimination Game/1265-Elimination Game.py
```python
class Solution:
"""
@param n: a integer
@return: return a integer
"""
def lastRemaining(self, n):
# write your code here
left = True
head = 1
step = 1
while n > 1:
if left or (n&1):
head += step
step *= 2
n //= 2
left = not left
return head
```
#### File: 1201-1300/1279-Count Numbers with Unique Digits/1279-Count Numbers with Unique Digits.py
```python
class Solution:
"""
@param n: a non-negative integer
@return: number of numbers with unique digits
"""
def countNumbersWithUniqueDigits(self, n):
# Write your code here
count = 1
base = 9
product = 9
for _ in range(1, min(n+1, 11)):
count += product
product *= base
base -= 1
return count
```
#### File: 1201-1300/1285-Power of Four/1285-Power of Four.py
```python
class Solution:
"""
@param num: an integer
@return: whether the integer is a power of 4
"""
def isPowerOfFour(self, num):
# Write your code here
return num > 0 and num&(num - 1) == 0 and (num - 1)%3 == 0
```
#### File: 1301-1400/1311-Lowest Common Ancestor of a Binary Search Tree/1311-Lowest Common Ancestor of a Binary Search Tree.py
```python
class Solution:
"""
@param root: root of the tree
@param p: the node p
@param q: the node q
@return: find the LCA of p and q
"""
def lowestCommonAncestor(self, root, p, q):
# write your code here
if root is None or root == p or root == q:
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left and right:
return root
else:
return left or right
```
#### File: 1301-1400/1314-Power of Two/1314-Power of Two.py
```python
class Solution:
"""
@param n: an integer
@return: if n is a power of two
"""
def isPowerOfTwo(self, n):
# Write your code here
return n > 0 and n&(n - 1) == 0
```
#### File: 1301-1400/1327-delete string/1327-delete string.py
```python
class Solution:
"""
@param str: the str
@return: the delete positon
"""
def deleteString(self, str):
# Write your code here.
for i in range(len(str) - 1):
if str[i] > str[i + 1]:
return str[:i] + str[i + 1:]
return str[:-1]
```
#### File: 1301-1400/1334-Rotate Array/1334-Rotate Array.py
```python
class Solution:
"""
@param nums: an array
@param k: an integer
@return: rotate the array to the right by k steps
"""
def rotate(self, nums, k):
# Write your code here
n = len(nums)
if n == 0:
return
def reverse(nums, start, end):
while start < end:
nums[start], nums[end] = nums[end], nums[start]
start += 1
end -= 1
k %= n
reverse(nums, 0, n - 1)
reverse(nums, 0, k - 1)
reverse(nums, k, n - 1)
return nums
```
#### File: 1301-1400/1344-Counter Diagonal Sort/1344-Counter Diagonal Sort.py
```python
class Solution:
"""
@param grids: a maxtrix with alphabet
@return: return sorted lists
"""
def CounterDiagonalSort(self, grids):
# write your code here
m = len(grids)
n = len(grids[0])
table = []
for i in range(m):
temp = []
row = i
col = 0
while row >= 0 and col < n:
temp.append(grids[row][col])
row -= 1
col += 1
table.append(temp)
for i in range(m, m + n - 1):
temp = []
row = m - 1
col = i - m + 1
while row >= 0 and col < n:
temp.append(grids[row][col])
row -= 1
col += 1
table.append(temp)
l = min(m, n)
result = []
for t in table:
temp = []
i = 0
while len(temp) < l:
temp.append(t[i % len(t)])
i += 1
result.append(temp)
return sorted(result)
```
#### File: 1301-1400/1348-Excel Sheet Column Number/1348-Excel Sheet Column Number.py
```python
class Solution:
"""
@param s: a string
@return: return a integer
"""
def titleToNumber(self, s):
# write your code here
num = 0
for c in s:
num = num * 26 + ord(c) - ord('A') + 1
return num
```
#### File: 1301-1400/1354-Pascal's Triangle II/1354-Pascal's Triangle II.py
```python
class Solution:
"""
@param rowIndex: a non-negative index
@return: the kth index row of the Pascal's triangle
"""
def getRow(self, rowIndex):
# write your code here
row = [1] + [0] * rowIndex
for i in range(1, rowIndex + 1):
for j in range(i, 0, -1):
row[j] += row[j - 1]
return row
```
#### File: 1301-1400/1357-Path Sum II/1357-Path Sum II.py
```python
class Solution:
"""
@param root: a binary tree
@param sum: the sum
@return: the scheme
"""
def pathSum(self, root, sum):
# Write your code here.
result = []
path = []
def dfs(root, sum):
if root is None:
return
path.append(root.val)
sum -= root.val
if sum == 0 and root.left is None and root.right is None:
result.append(path[:])
else:
dfs(root.left, sum)
dfs(root.right, sum)
path.pop()
dfs(root, sum)
return result
```
#### File: 1301-1400/1359-Convert Sorted Array to Binary Search Tree/1359-Convert Sorted Array to Binary Search Tree.py
```python
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param nums: the sorted array
@return: the root of the tree
"""
def convertSortedArraytoBinarySearchTree(self, nums):
# Write your code here.
def dfs(start, end):
if start > end:
return None
mid = (start + end) // 2
root = TreeNode(nums[mid])
root.left = dfs(start, mid - 1)
root.right = dfs(mid + 1, end)
return root
return dfs(0, len(nums) - 1)
```
#### File: 1301-1400/1363-ZigZag Conversion/1363-ZigZag Conversion.py
```python
class Solution:
"""
@param s: the given string
@param numRows: the number of rows
@return: the string read line by line
"""
def convert(self, s, numRows):
# Write your code here
if numRows == 1:
return s
result = [''] * numRows
index = 0
step = 1
for c in s:
result[index] += c
if index == 0:
step = 1
elif index == numRows - 1:
step = -1
index += step
return ''.join(result)
```
#### File: 1301-1400/1385-Lucky Number Eight/1385-Lucky Number Eight.py
```python
class Solution:
"""
@param n: count lucky numbers from 1 ~ n
@return: the numbers of lucky number
"""
def luckyNumber(self, n):
# Write your code here
return sum('8' in str(i) for i in range(1, n + 1))
```
#### File: 1301-1400/1387-Binary Trees With Factors/1387-Binary Trees With Factors.py
```python
class Solution:
"""
@param A:
@return: nothing
"""
def numFactoredBinaryTrees(self, A):
A.sort()
MOD = 10 ** 9 + 7
dp = {}
for j in range(len(A)):
dp[A[j]] = 1
for i in range(j):
if A[j] % A[i] == 0:
num = A[j] // A[i]
if num in dp:
dp[A[j]] = (dp[A[j]] + dp[A[i]] * dp[num]) % MOD
return sum(dp.values()) % MOD
```
#### File: 1301-1400/1390-Short Encoding of Words/1390-Short Encoding of Words.py
```python
import collections
class TrieNode:
def __init__(self):
self.next = collections.defaultdict(TrieNode)
class Solution:
"""
@param words:
@return: nothing
"""
def minimumLengthEncoding(self, words):
root = TrieNode()
table = []
for word in set(words):
node = root
for c in word[::-1]:
node = node.next[c]
table.append([node, len(word) + 1])
return sum(l for node, l in table if not node.next)
```
#### File: 1301-1400/1391-Making A Large Island/1391-Making A Large Island.py
```python
class Solution:
"""
@param grid:
@return: nothing
"""
def largestIsland(self, grid):
m = len(grid)
n = len(grid[0])
parents = [i for i in range(m*n)]
def findParent(i):
while parents[i] != i:
parents[i] = parents[parents[i]]
i = parents[i]
return i
areas = [0] * (m*n)
maxArea = 0
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
p1 = i * n + j
areas[p1] = 1
if i > 0 and grid[i - 1][j] == 1:
p2 = findParent((i - 1)*n + j)
parents[p2] = p1
areas[p1] += areas[p2]
if j > 0 and grid[i][j - 1] == 1:
p2 = findParent(i*n + j - 1)
if p1 != p2:
parents[p2] = p1
areas[p1] += areas[p2]
maxArea = max(maxArea, areas[p1])
for i in range(m):
for j in range(n):
if grid[i][j] == 0:
visited = set()
curr = i*n + j
areas[curr] = 1
for r, c in (i-1, j), (i+1, j), (i, j-1), (i, j+1):
if r >= 0 and r < m and c >= 0 and c < n and grid[r][c] == 1:
p = findParent(r*n + c)
if p not in visited:
visited.add(p)
areas[curr] += areas[p]
maxArea = max(maxArea, areas[curr])
return maxArea
```
#### File: 1301-1400/1393-Friends Of Appropriate Ages/1393-Friends Of Appropriate Ages.py
```python
class Solution:
"""
@param ages:
@return: nothing
"""
def numFriendRequests(self, ages):
counts = [0] * 121
presum = [0] * 121
for age in ages:
counts[age] += 1
for i in range(1, 121):
presum[i] = presum[i - 1] + counts[i]
total = 0
for A in range(15, 121):
countB = presum[A] - presum[A // 2 + 7]
total += (countB - 1) * counts[A]
return total
```
#### File: 1401-1500/1433-Image Overlap/1433-Image Overlap.py
```python
import collections
class Solution:
"""
@param A: the matrix A
@param B: the matrix B
@return: maximum possible overlap
"""
def largestOverlap(self, A, B):
# Write your code here.
A1 = [i * len(A) * 2 + j for i in range(len(A)) for j in range(len(A)) if A[i][j]]
B1 = [i * len(A) * 2 + j for i in range(len(A)) for j in range(len(A)) if B[i][j]]
table = collections.Counter(i - j for i in A1 for j in B1)
return max(table.values() or [0])
```
#### File: 1401-1500/1435-Find And Replace in String/1435-Find And Replace in String.py
```python
class Solution:
"""
@param S: a string
@param indexes: the index array
@param sources: the source array
@param targets: the target array
@return: the string after operation
"""
def findReplaceString(self, S, indexes, sources, targets):
# Write your code here.
sl = list(S)
for i, source, target in sorted(zip(indexes, sources, targets), reverse=True):
if S[i:i+len(source)] == source:
sl[i:i+len(source)] = target
return ''.join(sl)
```
#### File: 1401-1500/1477-Car Fleet/1477-Car Fleet.py
```python
class Solution:
"""
@param target: the target
@param position: the initial position
@param speed: the speed
@return: How many car fleets will arrive at the destination
"""
def carFleet(self, target, position, speed):
# Write your code here
time = [(target - p)/s for p, s in sorted(zip(position, speed))]
count = curr = 0
for t in time[::-1]:
if t > curr:
count += 1
curr = t
return count
```
#### File: 1401-1500/1495-Leaf-Similar Trees/1495-Leaf-Similar Trees.py
```python
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root1: the first tree
@param root2: the second tree
@return: returns whether the leaf sequence is the same
"""
def leafSimilar(self, root1, root2):
# write your code here.
def dfs(root):
if root:
if root.left is None and root.right is None:
yield root.val
else:
yield from dfs(root.left)
yield from dfs(root.right)
return list(dfs(root1)) == list(dfs(root2))
```
#### File: 1401-1500/1499-Reordered Power of 2/1499-Reordered Power of 2.py
```python
import collections
class Solution:
"""
@param N:
@return: return true or false
"""
def reorderedPowerOf2(self, N):
# write your code here
curr = collections.Counter(str(N))
return any(curr == collections.Counter(str(1 << i)) for i in range(31))
```
#### File: 1501-1600/1504-Shortest Path to Get All Keys/1504-Shortest Path to Get All Keys.py
```python
import collections
import heapq
class Solution:
"""
@param grid:
@return: The lowest number of moves to acquire all keys
"""
def shortestPathAllKeys(self, grid):
# write your code here
m = len(grid)
n = len(grid[0])
def neighbor(r, c):
for nr, nc in (r-1, c), (r+1, c), (r, c-1), (r, c+1):
if 0 <= nr < m and 0 <= nc < n:
yield nr, nc
def bfs(source, row, col):
visited = [[False] * n for _ in range(m)]
visited[row][col] = True
Q = collections.deque([(row, col, 0)])
dist = {}
while Q:
r, c, d = Q.popleft()
if grid[r][c] != source and grid[r][c] != '.':
dist[grid[r][c]] = d
continue
for nr, nc in neighbor(r, c):
if grid[nr][nc] != '#' and not visited[nr][nc]:
Q.append((nr, nc, d + 1))
visited[nr][nc] = True
return dist
numKey = 0
dists = collections.defaultdict(dict)
for i in range(m):
for j in range(n):
if grid[i][j] not in '.#':
if grid[i][j].islower():
numKey += 1
dists[grid[i][j]] = bfs(grid[i][j], i, j)
target = 2 ** numKey - 1
pq = [(0, '@', 0)]
final_dist = {}
while pq:
d, place, state = heapq.heappop(pq)
if (place, state) in final_dist:
continue
if state == target:
return d
final_dist[(place, state)] = d
for destination, d2 in dists[place].items():
state2 = state
if destination.islower():
state2 |= (1 << (ord(destination) - ord('a')))
elif destination.isupper():
if not (state & (1 << (ord(destination) - ord('A')))):
continue
if (destination, state2) not in final_dist:
heapq.heappush(pq, (d + d2, destination, state2))
return -1
```
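The solution precomputes pairwise BFS distances between the start `@`, every key, and every lock, then runs Dijkstra over `(place, key_bitmask)` states so a locked door is only crossed once its key bit is set. Two tiny grids, assuming the `Solution` class above is in scope, illustrate the behaviour:
```python
sol = Solution()
print(sol.shortestPathAllKeys(["@a"]))   # key 'a' is one step away -> expected: 1
print(sol.shortestPathAllKeys(["@Aa"]))  # 'a' sits behind its own lock -> expected: -1
```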
#### File: 1501-1600/1523-Partitioning Array/1523-Partitioning Array.py
```python
import collections
class Solution:
"""
@param A: Integer array
@param k: a integer
@return: return is possible to partition the array satisfying the above conditions
"""
def PartitioningArray(self, A, k):
# write your code here
if not A:
return True
if len(A) % k:
return False
group = len(A) // k
return max(collections.Counter(A).values()) <= group
```
#### File: 1501-1600/1530-Encode N-ary Tree to Binary Tree/1530-Encode N-ary Tree to Binary Tree.py
```python
class UndirectedGraphNode:
def __init__(self, x):
self.label = x
self.neighbors = []
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param root: binary tree
@return: N-ary tree
"""
def decode(self, root):
# write your code here
if root is None:
return None
result = UndirectedGraphNode(root.val)
curr = root.left
while curr:
result.neighbors.append(self.decode(curr))
curr = curr.right
return result
"""
@param root: N-ary tree
@return: binary tree
"""
def encode(self, root):
# write your code here
if root is None:
return None
result = TreeNode(root.label)
if root.neighbors:
result.left = self.encode(root.neighbors[0])
curr = result.left
for neighbor in root.neighbors[1:]:
curr.right = self.encode(neighbor)
curr = curr.right
return result
```
#### File: 1501-1600/1533-N-ary Tree Level Order Traversal/1533-N-ary Tree Level Order Traversal.py
```python
class Solution:
def levelOrder(self, root):
if not root:
return []
level = [root[0]]
result = []
while level:
result.append([node.label for node in level])
level = [neighbor for node in level for neighbor in node.neighbors]
return result
```
#### File: 1501-1600/1562-Number of restaurants/1562-Number of restaurants.py
```python
import heapq
class Solution:
"""
@param restaurant:
@param n:
@return: nothing
"""
def nearestRestaurant(self, restaurant, n):
# Write your code here
if len(restaurant) < n or n == 0:
return []
pq = []
for x, y in restaurant:
heapq.heappush(pq, -(x*x + y*y))
if len(pq) > n:
heapq.heappop(pq)
m = -pq[0]
result = []
for x, y in restaurant:
if x*x + y*y <= m:
result.append([x, y])
if len(result) == n:
break
return result
```
#### File: 1601-1700/1621-Cut Connection/1621-Cut Connection.py
```python
class Solution:
"""
@param matrix:
@param x:
@param y:
@return: return the matrix
"""
def removeOne(self, matrix, x, y):
# Write your code here
while x < len(matrix):
matrix[x][y] = 0
x += 1
return matrix
```
#### File: 1701-1800/1704-Range Sum of BST/1704-Range Sum of BST.py
```python
class Solution:
"""
@param root: the root node
@param L: an integer
@param R: an integer
@return: the sum
"""
def rangeSumBST(self, root, L, R):
# write your code here.
if root is None:
return 0
if root.val < L:
return self.rangeSumBST(root.right, L, R)
elif root.val > R:
return self.rangeSumBST(root.left, L, R)
else:
return root.val + self.rangeSumBST(root.left, L, R) + self.rangeSumBST(root.right, L, R)
```
#### File: 1701-1800/1715-Three Equal Parts/1715-Three Equal Parts.py
```python
class Solution:
"""
@param A: an array
@return: divide the array into 3 non-empty parts
"""
def threeEqualParts(self, A):
total = A.count(1)
if total == 0:
return [0, 2]
if total % 3:
return [-1, -1]
k = total // 3
count = 0
for i, a in enumerate(A):
if a == 1:
count += 1
if count == 1:
start = i
elif count == k + 1:
mid = i
elif count == k * 2 + 1:
end = i
break
while end < len(A) and A[start] == A[mid] == A[end]:
start += 1
mid += 1
end += 1
if end == len(A):
return [start - 1, mid]
else:
return [-1, -1]
```
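The idea above: each third must hold the same number of ones (k), alignment only matters from the first one of each block onward, and the trailing zeros are forced by the last block. A quick check, assuming the `Solution` class above is in scope:
```python
sol = Solution()
print(sol.threeEqualParts([1, 0, 1, 0, 1]))  # expected: [0, 3]
print(sol.threeEqualParts([1, 1, 0, 1, 1]))  # 4 ones cannot split evenly -> expected: [-1, -1]
```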
#### File: 1701-1800/1716-Flip String to Monotone Increasing/1716-Flip String to Monotone Increasing.py
```python
class Solution:
"""
@param S: a string
@return: the minimum number
"""
def minFlipsMonoIncr(self, S):
# Write your code here.
minFlip = count1 = 0
for c in S:
if c == '1':
count1 += 1
else:
minFlip = min(minFlip + 1, count1)
return minFlip
```
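Here `minFlip` tracks the cheapest way to keep the prefix monotone: on each '0' it either flips that '0' (previous best + 1) or flips every '1' seen so far (`count1`). Assuming the `Solution` class above is in scope:
```python
sol = Solution()
print(sol.minFlipsMonoIncr("00110"))   # flip the trailing '0' -> expected: 1
print(sol.minFlipsMonoIncr("010110"))  # expected: 2
```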
#### File: 1701-1800/1728-X of a Kind in a Deck of Cards/1728-X of a Kind in a Deck of Cards.py
```python
import collections
import math
import functools
class Solution:
"""
@param deck: a integer array
@return: return a value of bool
"""
def hasGroupsSizeX(self, deck):
# write your code here
table = collections.Counter(deck)
return functools.reduce(math.gcd, table.values()) >= 2
```
#### File: 1701-1800/1781-Reverse ASCII Encoded Strings/1781-Reverse ASCII Encoded Strings.py
```python
class Solution:
"""
@param encodeString: an encode string
@return: a reversed decoded string
"""
def reverseAsciiEncodedString(self, encodeString):
# Write your code here
return ''.join(chr(int(encodeString[i:i + 2])) for i in range(len(encodeString) - 2, -1, -2))
```
#### File: 1701-1800/1792-Cut the sticks/1792-Cut the sticks.py
```python
class Solution:
"""
@param sticks: the length of each stick
@return: return a Integer array
"""
def CutTheSticks(self, sticks):
# write your code here
if not sticks:
return []
sticks.sort()
result = [len(sticks)]
i = 0
while i < len(sticks):
curr = i
while i + 1 < len(sticks) and sticks[i] == sticks[i + 1]:
i += 1
i += 1
if i != len(sticks):
result.append(result[-1] - (i - curr))
return result
```
#### File: 1801-1900/1807-Fibonacci easy/1807-Fibonacci easy.py
```python
class Solution:
"""
@param n: an integer
@return: an ineger f(n)
"""
def fibonacci(self, n):
# write your code here
if n == 1:
return 0
dp = [0] * n
dp[1] = 1
for i in range(2, len(dp)):
dp[i] = dp[i - 1] + dp[i - 2]
return dp[n - 1]
```
#### File: 1801-1900/1834-Grouping Options/1834-Grouping Options.py
```python
class Solution:
"""
@param n: the number of people
@param m: the number of groups
@return: the number of grouping options
"""
def groupingOptions(self, n, m):
# write your code here
if m > n:
return 0
dp = [[0] * (n + 1) for _ in range(n + 1)]
for i in range(1, n + 1):
dp[i][i] = 1
for i in range(2, n + 1):
for j in range(1, i):
for k in range(1, min(i - j, j) + 1):
dp[i][j] = dp[i][j] + dp[i - j][k]
return dp[n][m]
``` |
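The recurrence above is the classic partition identity: removing one person from each of the j groups turns a partition of i into exactly j parts into a partition of i - j into at most j parts, hence the sum over k up to min(i - j, j). A quick check, assuming the `Solution` class above is in scope:
```python
sol = Solution()
# 4 people into 2 groups: sizes {1, 3} or {2, 2}
print(sol.groupingOptions(4, 2))  # expected: 2
print(sol.groupingOptions(2, 3))  # more groups than people -> expected: 0
```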
{
"source": "Jiadalee/swmm_mpc",
"score": 2
} |
#### File: swmm_mpc/swmm_mpc/swmm_mpc.py
```python
import os
import datetime
import random
from shutil import copyfile
import shutil
import pandas as pd
import pyswmm
from pyswmm import Simulation, Links
import update_process_model_input_file as up
import evaluate as ev
import run_ea as ra
import json
import run_baeopt as bo
run = None
def get_global_run(config_file):
global run
run = swmm_mpc_run(config_file)
class swmm_mpc_run(object):
def __init__(self, config_file):
with open(config_file, 'r') as f:
config_dict = json.load(f)
self.inp_file_path = os.path.abspath(config_dict['inp_file_path'])
self.ctl_horizon = config_dict['ctl_horizon']
self.ctl_time_step = config_dict['ctl_time_step']
self.ctl_str_ids = config_dict['ctl_str_ids']
self.work_dir = os.path.abspath(config_dict['work_dir'])
self.results_dir = os.path.abspath(config_dict['results_dir'])
self.opt_method = config_dict['opt_method']
self.optimization_params = config_dict.get('optimization_params', {})
if 'num_cores' in self.optimization_params:
if type(self.optimization_params['num_cores']) != int:
self.optimization_params['num_cores'] = 1
else:
self.optimization_params['num_cores'] = 1
self.run_suffix = config_dict['run_suffix']
self.target_depth_dict = config_dict.get('target_depth_dict', None)
self.node_flood_weight_dict = config_dict.get('node_flood_weight_dict',
None)
self.flood_weight = config_dict.get('flood_weight', 1)
if self.target_depth_dict:
self.dev_weight = config_dict.get('dev_weight', 1)
else:
self.dev_weight = config_dict.get('dev_weight', 0)
self.log_file = os.path.join(self.results_dir,
'log_{}'.format(self.run_suffix))
# check ctl_str_ids
validate_ctl_str_ids(self.ctl_str_ids)
# the input directory and the file name
self.inp_file_dir, inp_file_name = os.path.split(self.inp_file_path)
# the process file name with no extension
inp_process_file_base = inp_file_name.replace('.inp', '_process')
# the process .inp file name
inp_process_file_inp = inp_process_file_base + '.inp'
self.inp_process_file_path = os.path.join(self.work_dir,
inp_process_file_inp)
# copy input file to process file name
copyfile(self.inp_file_path, self.inp_process_file_path)
self.n_ctl_steps = int(self.ctl_horizon*3600/self.ctl_time_step)
def run_swmm_mpc(config_file):
'''
config_file: [string] path to config file. config file is a JSON file that
contains the following key value pairs:
inp_file_path: [string] path to .inp file relative to config file
ctl_horizon: [number] ctl horizon in hours
ctl_time_step: [number] control time step in seconds
ctl_str_ids: [list of strings] ids of control structures for which
controls policies will be found. Each should start with
one of the key words ORIFICE, PUMP, or WEIR
e.g., [ORIFICE R1, ORIFICE R2]
work_dir: [string] directory relative to config file where the temporary
files will be created
results_dir: [string] directory relative to config file where the results
will be written
opt_method: [string] optimization method. Currently supported methods are
'genetic_algorithm', and 'bayesian_opt'
optimization_params: [dict] dictionary with key values that will be passed
to the optimization function
for GA this includes
* ngen: [int] number of generations for GA
* nindividuals: [int] number of individuals for
initial generation in GA
run_suffix: [string] will be added to the results filename
flood_weight: [number] overall weight for the sum of all flooding relative
to the overall weight for the sum of the absolute deviations
from target depths (dev_weight). Default: 1
dev_weight: [number] overall weight for the sum of the absolute deviations
from target depths. This weight is relative to the flood_weight
Default: 0
target_depth_dict: [dict] dictionary where the keys are the nodeids and
the values are a dictionary. The inner dictionary has
two keys, 'target', and 'weight'. These values specify
the target depth for the nodeid and the weight given
to that in the cost function. Default: None
e.g., {'St1': {'target': 1, 'weight': 0.1}}
node_flood_weight_dict: [dict] dictionary where the keys are the node ids
and the values are the relative weights for
weighting the amount of flooding for a given node.
e.g., {'st1': 10, 'J3': 1}. Default: None
'''
# save params to file
get_global_run(config_file)
print(vars(run))
with open(run.log_file, 'w') as f:
f.write(str(vars(run)))
f.write('\n')
pyswmm.lib.use('libswmm5_hs.so') # To use this lib, we need to install the modified pyswmm by 'pip install git+git://github.com/uva-hydroinformatics/pyswmm.git@feature_save_hotstart'
# record when simulation begins
beg_time = datetime.datetime.now()
run_beg_time_str = beg_time.strftime('%Y.%m.%d.%H.%M')
print("Simulation start: {}".format(run_beg_time_str))
best_policy_ts = []
# make sure there is no control rules in inp file
up.remove_control_section(run.inp_file_path)
# run simulation
with Simulation(run.inp_file_path) as sim:
sim.step_advance(run.ctl_time_step)
sim_start_time = sim.start_time
for step in sim:
# get most current system states
current_dt = sim.current_time
dt_hs_file = 'tmp_hsf.hsf'
print(current_dt)
dt_hs_path = os.path.join(os.getcwd(), dt_hs_file)
sim.save_hotstart(dt_hs_path)
link_obj = Links(sim)
# update the process model with the current states
up.update_process_model_file(run.inp_process_file_path,
current_dt, dt_hs_path)
if run.opt_method == 'genetic_algorithm':
best_policy, cost = ra.run_ea(run.work_dir, config_file,
run.optimization_params)
elif run.opt_method == 'bayesian_opt':
best_policy, cost = bo.run_baeopt(run.optimization_params)
initial_guess = get_initial_guess(best_policy, run.ctl_str_ids)
run.optimization_params['initial_guess'] = initial_guess
else:
raise ValueError(
'{} not valid opt method'.format(run.opt_method)
)
# print (best_policy, cost)
best_policy_fmt = ev.format_policies(best_policy,
run.ctl_str_ids,
run.n_ctl_steps,
run.opt_method)
best_policy_ts = update_policy_ts_list(best_policy_fmt,
current_dt,
run.ctl_time_step,
best_policy_ts,
cost)
results_file = save_results_file(best_policy_ts, run.ctl_str_ids,
run.results_dir, sim_start_time,
run_beg_time_str, run.run_suffix)
implement_control_policy(link_obj, best_policy_fmt)
# if we are getting a policy with no cost then it's perfect
if cost == 0:
break
end_time = datetime.datetime.now()
print('simulation end: {}'.format(end_time.strftime('%Y.%m.%d.%H.%M')))
elapsed_time = end_time - beg_time
elapsed_time_str = 'elapsed time: {}'.format(elapsed_time.seconds)
print(elapsed_time_str)
# write the elapsed time to the end of the log file
with open(run.log_file, 'a') as f:
f.write(elapsed_time_str)
# update original inp file with found control policy
up.update_controls_with_policy(run.inp_file_path, results_file)
# remove all files in 'work'
delete_files_in_dir(run.work_dir)
def update_policy_ts_list(fmtd_policy, current_dt, ctl_time_step,
best_policy_ts, cost):
# record the rest of the control policy
for ctl_id, policy in fmtd_policy.iteritems():
# first setting has already been recorded, so disregard
for i, setting in enumerate(policy):
# increase time step
inc_seconds = i * ctl_time_step
inc_time = datetime.timedelta(seconds=inc_seconds)
setting_dt = current_dt + inc_time
# append to list
best_policy_ts.append({'setting_{}'.format(ctl_id):
setting,
'datetime': setting_dt})
# if cost is not zero only do the first one
# this should be the case for all but the last case
if cost != 0:
break
return best_policy_ts
def implement_control_policy(link_obj, best_policy_fmt):
for ctl_id, policy in best_policy_fmt.iteritems():
next_setting = policy[0]
# from for example "ORIFICE R1" to "R1"
ctl_id_short = ctl_id.split()[-1]
# implement best policy
if next_setting == 'ON':
next_setting = 1
elif next_setting == 'OFF':
next_setting = 0
link_obj[ctl_id_short].target_setting = next_setting
def save_results_file(best_policy_ts, ctl_str_ids, results_dir,
sim_start_time, run_beg_time_str, run_suffix):
"""
Convert policy time series to dataframe and save to csv
Parameters
----------
best_policy_ts : list of dicts
list of dicts where the key/values are "setting_{ctl id}"/{setting}
and "datetime"/{datetime}
ctl_str_ids : list of str
see documentation in "run_swmm_mpc"
results_dir : str
the directory where the csv will be saved
sim_start_time : datetime object
the datetime of the start time in the simulation
run_beg_time_str : str
the real time when the swmm_mpc run started
run_suffix : str
the run suffix that will be appended to the csv file name
"""
# consolidate ctl settings and save to csv file
ctl_settings_df = pd.DataFrame(best_policy_ts)
ctl_settings_df = ctl_settings_df.groupby('datetime').first()
ctl_settings_df.index = pd.DatetimeIndex(ctl_settings_df.index)
# add a row at the beginning of the policy since controls start open
sim_start_dt = pd.to_datetime(sim_start_time)
initial_states = get_initial_states(ctl_str_ids)
ctl_settings_df.loc[sim_start_dt] = initial_states
ctl_settings_df.sort_index(inplace=True)
results_file = 'ctl_results_{}{}.csv'.format(run_beg_time_str, run_suffix)
results_path = os.path.join(results_dir, results_file)
ctl_settings_df.to_csv(results_path)
return results_path
def get_initial_states(ctl_str_ids):
"""
Get list of initial states. ASSUME initial states for ORIFICE/WEIR is 1
(open) and for PUMPS is "OFF"
"""
initial_states = []
for ctl in ctl_str_ids:
ctl_type = ctl.split()[0]
if ctl_type == 'ORIFICE' or ctl_type == 'WEIR':
initial_states.append(1)
elif ctl_type == 'PUMP':
initial_states.append('OFF')
return initial_states
def validate_ctl_str_ids(ctl_str_ids):
"""
make sure the ids are ORIFICE, PUMP, or WEIR
"""
valid_structure_types = ['ORIFICE', 'PUMP', 'WEIR']
for ctl_id in ctl_str_ids:
ctl_type = ctl_id.split()[0]
if ctl_type not in valid_structure_types:
raise ValueError(
'{} not valid ctl type. should be one of {}'.format(
ctl_id, valid_structure_types))
def get_initial_guess(best_pol, ctl_str_ids):
best_pol = best_pol.tolist()
split_by_ctl = ev.split_list(best_pol, len(ctl_str_ids))
new_guess = []
for pol in split_by_ctl:
if len(pol) == 1:
return best_pol
else:
# take out first setting
new_pol = pol[1:]
# add random setting at end
new_pol.append(random.random())
new_guess.extend(new_pol)
return new_guess
def delete_files_in_dir(folder):
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print(e)
``` |
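The docstring of `run_swmm_mpc` above spells out the expected JSON keys; the sketch below writes a minimal config of that shape. The paths, structure id, and weights are made-up placeholders for illustration, not values from the original project:
```python
import json
example_config = {
    "inp_file_path": "model/example.inp",   # hypothetical SWMM .inp file
    "ctl_horizon": 1,                       # hours
    "ctl_time_step": 900,                   # seconds
    "ctl_str_ids": ["ORIFICE R1"],
    "work_dir": "work",
    "results_dir": "results",
    "opt_method": "genetic_algorithm",
    "optimization_params": {"ngen": 8, "nindividuals": 120, "num_cores": 1},
    "run_suffix": "demo",
    "flood_weight": 1,
    "dev_weight": 0.5,
    "target_depth_dict": {"St1": {"target": 1, "weight": 0.1}},
    "node_flood_weight_dict": {"St1": 10, "J3": 1}
}
with open("config_demo.json", "w") as f:
    json.dump(example_config, f, indent=4)
# run_swmm_mpc("config_demo.json")  # would start the MPC loop described above
```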
{
"source": "jiadiyao/price_monitor",
"score": 2
} |
#### File: jiadiyao/price_monitor/monitor.py
```python
import urllib2
import re
from time import sleep
from urlparse import urlsplit,urlparse
import urlparse as urlp
import time
import os
import subprocess
import Image
import random
import sys
import MySQLdb
from datetime import datetime
#from BeautifulSoup import BeautifulSoup
from bs4 import BeautifulSoup
from cache import CacheHandler
from os.path import basename
import string
#from html2text import html2text
from send_mail import send_mail
forceredo = True
parent_folder = 'psdchest'
proxy=False
#con_rsn = MySQLdb.connect (
# host = "localhost",
# user = "root",
# passwd = "<PASSWORD>",
# db = "uielements",
# use_unicode = True,
# charset = "utf8",)
#cursor = con_rsn.cursor()
##record process start time:
timestarted = datetime.now()
print "Process Started: "+str(timestarted)
#############################
#######utilities###########
##########################
def get_status_code(url):
try:
connection = urllib2.urlopen(url)
return connection.getcode()
connection.close()
except urllib2.HTTPError, e:
return e.getcode()
def isRejected(page):
return False
##take url, return page
## trys -- number of trys before give up
##
def opener(url,trys=10,ran_begin=2,ran_end=10):
path = os.path.abspath('cache')
proxy_handler = urllib2.ProxyHandler({'http': 'http://127.0.0.1:8118/'})
if proxy:
##opener with cache
urlopener = urllib2.build_opener(CacheHandler(path, max_age = 604800),proxy_handler)
urlopener.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux x86_64; en-GB; rv:1.9.1.9) Gecko/20100402 Ubuntu/9.10 (karmic) Firefox/3.5.9')]
##opener skip cache:
urlopener_nocache = urllib2.build_opener(proxy_handler)
urlopener_nocache.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux x86_64; en-GB; rv:1.9.1.9) Gecko/20100402 Ubuntu/9.10 (karmic) Firefox/3.5.9')]
else:
##opener with cache
urlopener = urllib2.build_opener(CacheHandler(path, max_age = 604800))
urlopener.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux x86_64; en-GB; rv:1.9.1.9) Gecko/20100402 Ubuntu/9.10 (karmic) Firefox/3.5.9')]
##opener skip cache:
urlopener_nocache = urllib2.build_opener()
urlopener_nocache.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux x86_64; en-GB; rv:1.9.1.9) Gecko/20100402 Ubuntu/9.10 (karmic) Firefox/3.5.9')]
##page -- null page means run the retry loop
page=''
##toretry -- set to 1 to run the retry loop
toretry=0
##isRejected -- page reject means retry loop (waiting and ip renew should be done while in the retry loop)
firstrun = 1
while ((not page) or toretry or isRejected(page)) and trys!=0:
try:
request = urllib2.Request(url)
failed = 1
while failed:
try:
##only first time open the url use the cached version.
if firstrun:
resp = urlopener.open(request,None,30)
firstrun=0
else:
resp = urlopener_nocache.open(request,None,30)
failed = 0
except urllib2.URLError, e:
if hasattr(e, 'reason'):
print 'We failed to reach a server.'
print 'Reason: ', e.reason
elif hasattr(e, 'code'):
print 'rejected'
print 'Error code: ', e.code
print 'sleep 600'
sleep(600)
realurl = resp.url
if realurl != url:
print "[ERROR] been redirected. Sleep 600s to wait ip renewal"
toretry=1
sleep(600)
page = resp.read()
page = page.decode('utf8', 'ignore')
if not 'x-cache' in resp.headers:
t = random.randint(ran_begin, ran_end)
print 'not from cache, sleeping...',t
sleep(t)
# resp = urlopener.open(url,None,30)
# page = resp.read()
# page = page.decode('utf8', 'ignore')
# pageincache = 'x-cache' in resp.headers
# #print 'Page is from cache:', pageincache
#
#
# if pageincache and (not page):
# print "cache corrupted, reopen... ", url
# ##if cache is courrpted
# resp = urlopener_nocache.open(url,None,30)
# page = resp.read()
# page = page.decode('utf8', 'ignore')
#
#
#
# ##if not from cache, sleep
# if not 'x-cache' in resp.headers:
# t = random.randint(ran_begin, ran_end)
# print 'not from cache, sleeping...',t
# sleep(t)
# #sleep(5)
except:
page=''
t = 60
print 'Waiting to retry...',t
sleep(t)
trys=trys-1
if not page:
print '~~ERROR: Opening url ',url
return page
def url2name(url):
return basename(urlsplit(url)[2])
##Take a url, save the file pointed by the url.
## handles redirections
def download(url, path, localFileName = None):
try:
localName = url2name(url)
req = urllib2.Request(url)
r = urllib2.urlopen(req)
if r.info().has_key('Content-Disposition'):
# If the response has Content-Disposition, we take file name from it
localName = r.info()['Content-Disposition'].split('filename=')[1]
if localName[0] == '"' or localName[0] == "'":
localName = localName[1:-1]
elif r.url != url:
# if we were redirected, the real file name we take from the final URL
localName = url2name(r.url)
if localFileName:
# we can force to save the file as specified name
localName = localFileName
f = open(path+'/'+localName, 'wb')
f.write(r.read())
f.close()
return localName
except:
return False
#######################################################
###########getting various parts:
#################################
def get_title(soup):
title = ''
#if 1:
try:
titleTag = soup.html.head.title
title = titleTag.string.split(' - ')[0]
title = html2text(title)
#print title
except:
print "[title failed]"
return title.strip()
##find images on the post, save the images to the post folder. returns the foldername for the post
##
def find_images(soup):
##mkdir:
folder = get_title(soup)
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
foldername = ''.join(c for c in folder if c in valid_chars)
foldername = foldername.replace(' ','_')
folderpath = parent_folder+'/'+foldername
print 'download images...'
##create preview folder
prevfolderpath = folderpath+'/previews'
try:
os.makedirs(prevfolderpath)
except OSError:
pass
###############################
##find the THUMBNAIL image:
try:
os.makedirs(folderpath)
except OSError:
pass
imgurl = soup.find('div',{'class':'pic2 fl'}).findAll('img')[-1]['src']
print imgurl
###save image binary data in a dictionary
imgname = url2name(imgurl)
# data = opener(imgurl)
# rtn[imgname] = data
##download the image using the download function
saved_name = download(imgurl,folderpath)
download(imgurl,prevfolderpath)
##############################
## resize the thumbnail to 200x144
size = 200,144
ratio = 1.*200/144
try:
im = Image.open(folderpath+'/'+saved_name)
(width, height) = im.size
if width<=size[0] and height<=size[1]:
print 'no resize'
im.save(folderpath+'/thumbnail.jpg', "JPEG",quality=100)
else:
if width > height * ratio:
# crop the image on the left and right side
newwidth = int(height * ratio)
#left = width / 2 - newwidth / 2
left = 0
right = newwidth
# keep the height of the image
top = 0
bottom = height
elif width < height * ratio:
# crop the image on the top and bottom
newheight = int(width / ratio)
top = 0#height / 2 - newheight / 2
bottom = newheight
# keep the width of the impage
left = 0
right = width
if width != height * ratio:
im = im.crop((left, top, right, bottom))
out = im.resize(size, Image.ANTIALIAS)
out.save(folderpath+'/thumbnail.jpg', "JPEG",quality=100)
except IOError:
print "cannot create thumbnail for '%s'" % saved_name
################################
#print rtn.keys()
#raw_input('sdf')
#########################
##### MORE PREVIEW IMAGES
# imgs = content.find('div',{'class':'entry'}).findAll('img')
# for im in imgs:
# imgurl = im['src']
# download(imgurl,prevfolderpath)
return foldername
##get the free download, save to the correct folder.
##return filename when successfully saved file, return False when not
def get_downloadfile(soup,url=''):
downloadurl= soup.find('img',src = 'http://www.shegy.nazwa.pl/psdchest/wp-content/plugins/smartcounter/download_button.png' ).parent['href']
print downloadurl
##adding baseurl to reletive url:
# if not downloadurl.startswith('http'):
# downloadurl = 'http://designmoo.com'+downloadurl
#print downloadurl
#print 'download url:',downloadurl
#if not downloadurl.startswith('http'):
#u = urlparse(downloadurl)
#print u.query
#downloadurl = 'http://freepsdfiles.net/get-file/?'+u.query
#print downloadurl
folder = get_title(soup)
# print folder
# exit()
valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
foldername = ''.join(c for c in folder if c in valid_chars)
foldername = foldername.replace(' ','_')
folderpath = parent_folder+'/'+foldername
print 'download files...:',downloadurl
try:
os.makedirs(folderpath)
except OSError:
pass
rst = download(downloadurl,folderpath)
if rst:
print 'downloaded file:', rst
return rst
else:
return False
##get tags for the post:
def get_tags(soup):
rtn = []
try:
tags = soup.find('p',{'class':'meta-nfo'}).findAll('a')
for tag in tags:
#print tag.string
rtn.append(tag.string)
except:
pass
##remove last item as it is not tag:
rtn = rtn[0:-1]
return rtn
def get_desc(soup):
rtn = ''
try:
desc = soup.find('div',{'class':'single-post'}).find('h6')
desc = str(desc).decode('utf8')
###filter out unwanted tags:
desc = desc.replace('<h6>','',1)
desc = re.sub('</h6>.*$','',desc)
#rtn = html2text(desc)
rtn = desc
except:
pass
return rtn
def get_creator(soup):
creator = 'Shegy'
creator_url = 'http://www.psdchest.com'
# try:
# cont = soup.find('div',{'class':'user_meta'}).find('a')
# creator = cont.string
# creator_url = 'http://designmoo.com'+cont['href']
# #print creator, creator_url
#
#
# except:
# pass
return creator, creator_url
##function to process a post
##url, url to a single post
##if download the source file fail, will just return without saving to the database
def post_process(soup,url=''):
##check for post existance:
post_exist = cursor.execute('SELECT id FROM post where source_url=%s',(url))
if post_exist == 0 or forceredo:
downloadname = get_downloadfile(soup,url)
if not downloadname:
print '[download file failed] skip'
return
#
# exit()
title = get_title(soup)
tags = get_tags(soup)
# print tags
# exit()
desc = get_desc(soup)
# print desc
# exit()
creator,creator_url = get_creator(soup)
# print creator
# print creator_url
# exit()
foldername = find_images(soup)
##insert current post:
print '[insert post]', title.encode('utf8')
cursor.execute('insert into post (title,description,html,creator,creator_url,source_url,foldername,downloadname,parentfolder,date_created,date_modified) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,now(),now())',(title,desc,soup,creator,creator_url,url,foldername,downloadname,parent_folder))
id = int(cursor.lastrowid)
##check and insert tags:
for t in tags:
###insert keyword to keyword table if not exist:
query = 'select id from tag where tag=%s'
kwexist = cursor.execute(query,(t))
keywords = cursor.fetchone()
if kwexist==0:
##insert:
query = 'INSERT INTO tag (tag) VALUES (%s)'
cursor.execute(query,(t))
kwid = cursor.lastrowid
print '[insert tag] tag insert at id :',kwid
else:
kwid = keywords[0]
print '[nothing] keyword id retrived:',kwid
##insert tag relations:
query = "INSERT INTO post_tag (post_id,tag_id) VALUES (%s,%s)"
cursor.execute(query,(id,kwid))
print '[insert link tag-post] linked between post and tag:',id,'--',kwid
n=5
print 'post download and insert successfully, sleep',n
sleep(n)
else:
print '[POST EXIST] SKIP', url
#page = opener('http://freepsdfiles.net/web-elements/free-web-ui-set-navigations-buttons-circles-and-ribbons/')
#soup = BeautifulSoup(page)
#post_process(soup,'http://freepsdfiles.net/web-elements/free-web-ui-set-navigations-buttons-circles-and-ribbons/')
#exit()
##take a start page, process all the post listed on the start page
def post_warpper(url):
###########process the posts listed on this page:
page = opener(url)
postlistsoup = BeautifulSoup(page)
print '[post list]:',url
#try:
if 1:
items = postlistsoup.findAll('div',{'class':'post'})
for i in items:
##process each individual post:
#print i
url = i.find('a')['href']
print '[POST]',url
page = opener(url)
soup = BeautifulSoup(page)
post_process(soup,url)
#raw_input('page 1 done')
#except:
# print '[POST Fail]',url
################
##baseurl for pagination
##get url programmingly
##baseurl = postlistsoup.find('div',{'class':'navigation'}).find('span', {'class':'older'}).find('a')['href'].rsplit('/',2)[0]+'/'
##set base url manually
baseurl = 'http://www.psdchest.com/page/'
print baseurl
#raw_input('___')
##process each pages
for p in range(2,4):
#print p
pageurl = baseurl+str(p)+'/'
print '#################\n[post list url]:', pageurl
st = str(get_status_code(pageurl))
print 'Status code:', st
if st == '404':
print 'break out pagenation loop now!'
break
page = opener(pageurl)
postlistsoup = BeautifulSoup(page)
try:
items = postlistsoup.findAll('div',{'class':'post'})
for i in items:
##process each individual post:
#print i
url = i.find('a')['href']
print '[POST]',url
page = opener(url)
soup = BeautifulSoup(page)
post_process(soup,url)
#raw_input('here')
except:
print 'Failed:', url
##get data.
##given a soup for university box on the university list,
##return a list of data
##[university name, description, image_url, location, fee, times rank,satisfaction,int% ,id_code_for_course]
def get_data(soup):
# print soup
name = soup.select(".t1")[0].string
# print name
desc = soup.select(".summary")[0].p.string
# print desc
imageurl = soup.select(".col01")[0].img['src']
# print imageurl
attrs = soup.select(".cols02")[0].select(".col01")
for attr in attrs:
if re.search("Satisfaction",attr.img['alt']):
sat = attr.img.next_sibling.strip()
# print sat
elif re.search("Location",attr.img['alt']):
loc = attr.img.next_sibling.strip()
# print loc
elif re.search("Tuition",attr.img['alt']):
fee = attr.img.next_sibling.strip()
# print fee
elif re.search("Rank",attr.img['alt']):
rank = attr.img.next_sibling.strip()
# print rank
else:
international = attr.img.next_sibling.strip()
# print international
idcode = soup.select(".add")[0].a['href'].split('/')[4].split('?')[0]
# print idcode
return (name, desc, imageurl,loc ,fee,rank,sat,international,idcode)
def isemail(log,price,n=1):
stdin,stdout = os.popen2("tail -n "+str(n)+" "+log)
stdin.close()
lines = stdout.readlines(); stdout.close()
lastprice = lines[0].split('\t')[2]
# print "cur:",price
# print 'last:',lastprice
if float(lastprice)!=float(price):
return 1
else:
return 0
url = "https://www.thomasexchange.co.uk/currency.asp"
page = opener(url)
soup = BeautifulSoup(page)
block = soup.find(id='comparison_rates_tbl').find_all('tr')[2].find_all('td')
eb = block[3].text
es = block[4].text
if not (eb and es):
print "ERROR, buy or sell value not get."
exit()
out = str(datetime.now())+'\t' + str(eb)+'\t'+str(es)+"\n"
toemail = isemail('./pricelog.txt',es)
f1=open('./pricelog.txt', 'a+')
f1.write(out)
f1.close()
mailcontent = "Euro now selling at: "+es+" at Thomas Exchange.\n\n Online reserve: https://www.thomasexchange.co.uk/currency_collect.asp \n\n\n Jiadi "
if float(es) > 1.36 and toemail:
send_mail(mailcontent.encode("utf8") , es)
exit()
#post_warpper('http://freepsdfiles.net/category/web-elements/')
#post_warpper('http://freepsdfiles.net/category/buttons/')
#page = opener('http://designmoo.com/4101/purse-icon/')
#p = BeautifulSoup(page)
#post_process(p)
#post_warpper('http://www.psdchest.com')
##time finished:
timefinished = datetime.now()
print "Time finished:"+str(timefinished)
print "Time taken: "+ str(timefinished-timestarted)
print "--------------------------------\n\n\n"
exit(0)
``` |
{
"source": "jiadonglee/laspec",
"score": 2
} |
#### File: laspec/laspec/download.py
```python
import os, sys, io
import warnings
import numpy as np
import pandas as pd
from astropy.io import fits, ascii
from astropy.table import Table
import scipy.signal
import urllib.request
import urllib.parse
import requests
'''
Pipeline created by <NAME>
https://github.com/fandongwei/pylamost
Edited by <NAME>
Email
-----
<EMAIL>
Created on
----------
- Mon Jul 27 18:29:06 2020
LAMOST dataset Python Interface
Aims
----
- Download LAMOST spectra by obsid
'''
class LAMOST():
email=None
token=None
dataset=None
version=None
__isdev=False
def __init__(self, isdev=False, dataset=5):
self.__isdev=isdev
self.dataset=dataset
def __getDataset(self):
prefix='dr5'
if self.dataset is not None:
prefix = 'dr%d'%self.dataset
if self.__isdev: return 'l'+prefix
else: return prefix
def __getVersion(self):
if self.version is not None:
return '/v%d'%self.version
return ''
__config=None
__config_file_path=os.path.expanduser('~')+'/pylamost.ini'
def __getConfig(self, reload=False):
if not os.path.exists(self.__config_file_path): return None
if not reload and None!=self.__config: return self.__config
with open(self.__config_file_path) as fh:
self.__config={}
for line in fh:
if line.startswith('#'):continue
k,v=line.split("=")
self.__config[k.strip()]=v.strip()
return self.__config
def __detectToken(self):
if self.token is not None: return True
cf = self.__getConfig()
if cf is None or 'token' not in cf.keys():
print('please set your token')
return False
self.token=cf['token']
return True
def download(self, url, savedir='./'):
response = urllib.request.urlopen(url)
data = response.read()
savefile=savedir+'/'+response.getheader("Content-disposition").split('=')[1]
with open(savefile, 'wb') as fh:
fh.write(data)
return savefile
def getUrl(self, url, params=None):
if params is None:
response = urllib.request.urlopen(url)
else:
response = urllib.request.urlopen(url, urllib.parse.urlencode(params).encode('utf-8'))
chrset = response.headers.get_content_charset()
if chrset is None: chrset='utf-8'
data = response.read().decode(chrset)
return data
    def downloadCatalog(self, catname, savedir='./'):
        if not self.__detectToken(): return
        caturl='http://{0}.lamost.org{1}/catdl?name={2}&token={3}'.format(self.__getDataset(), self.__getVersion(), catname, self.token)
        self.download(caturl, savedir)
def downloadFits(self, obsid, savedir='./'):
if not self.__detectToken(): return
fitsurl='http://{0}.lamost.org{1}/spectrum/fits/{2}?token={3}'.format(self.__getDataset(), self.__getVersion(), obsid, self.token)
self.download(fitsurl, savedir)
def downloadPng(self, obsid, savedir='./'):
if not self.__detectToken(): return
pngurl='http://{0}.lamost.org{1}/spectrum/png/{2}?token={3}'.format(self.__getDataset(), self.__getVersion(), obsid, self.token)
self.download(pngurl, savedir)
def getFitsCsv(self, obsid):
if not self.__detectToken(): return None
url='http://{0}.lamost.org{1}/spectrum/fits2csv/{2}?token={3}'.format(self.__getDataset(), self.__getVersion(), obsid, self.token)
return self.getUrl(url)
def getInfo(self, obsid):
if not self.__detectToken(): return None
#url='http://{0}.lamost.org{1}/spectrum/info/{2}?token={3}'.format(self.__getDataset(), self.__getVersion(), obsid, self.token)
#return self.getUrl(url, params)
url='http://{0}.lamost.org{1}/spectrum/info/{2}'.format(self.__getDataset(), self.__getVersion(), obsid)
return self.getUrl(url, {'token':self.token})
#Cone Search Protocol
def conesearch(self, ra, dec, radius):
if not self.__detectToken(): return
conesearchurl='http://{0}.lamost.org{1}/voservice/conesearch?ra={2}&dec={3}&sr={4}&token={5}'.format(self.__getDataset(), self.__getVersion(), ra, dec, radius, self.token)
return self.getUrl(conesearchurl)
#Simple Spectral Access Protocol
def ssap(self, ra, dec, radius):
if not self.__detectToken(): return
ssapurl='http://{0}.lamost.org{1}/voservice/ssap?pos={2},{3}&size={4}&token={5}'.format(self.__getDataset(), self.__getVersion(), ra, dec, radius, self.token)
return self.getUrl(ssapurl)
def sql(self, sql):
if not self.__detectToken(): return
sqlurl='http://{0}.lamost.org{1}/sql/q?&token={2}'.format(self.__getDataset(), self.__getVersion(), self.token)
return self.getUrl(sqlurl, {'output.fmt':'csv', 'sql':sql})
def query(self, params):
if not self.__detectToken(): return
qurl='http://{0}.lamost.org{1}/q?token={2}'.format(self.__getDataset(), self.__getVersion(), self.token)
return self.getUrl(qurl, params)
def query2(self, params, files):
if not self.__detectToken(): return
qurl='http://{0}.lamost.org{1}/q?token={2}'.format(self.__getDataset(), self.__getVersion(),self.token)
r=requests.post(qurl, data=params, files=files)
return str(r.text)
def readFits(self, filename):
hdulist = fits.open(filename)
head = hdulist[0].header
scidata = hdulist[0].data
# coeff0 = head['COEFF0']
# coeff1 = head['COEFF1']
# pixel_num = head['NAXIS1']
flux = scidata[0,]
invar = scidata[1,]
# wavelength=np.linspace(0,pixel_num,pixel_num)
# wavelength=np.power(10,(coeff0+wavelength*coeff1))
wavelength = scidata[2,]
hdulist.close()
# spec_smooth_7=scipy.signal.medfilt(specflux,7)
# spec_smooth_15=scipy.signal.medfilt(specflux,15)
return (wavelength, flux, invar)
```
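A minimal sketch of how the `LAMOST` helper above might be used to pull and read one spectrum, assuming a valid data-release token (the obsid, token, and downloaded filename below are placeholders):
```python
lm = LAMOST(dataset=5)
lm.token = "your-token-here"            # or set token=... in ~/pylamost.ini
lm.downloadFits(obsid=101005, savedir="./spectra")              # placeholder obsid
wave, flux, invar = lm.readFits("./spectra/spec-xxxx.fits.gz")  # placeholder filename
```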
#### File: laspec/laspec/optimize.py
```python
import numpy as np
from astropy import table
import matplotlib.pyplot as plt
class RandomWalkMinimizer:
""" Random Walk Minimizer """
def __init__(self, fun, x0, dx, maxiter=20, args=[], kwargs={}, optind=None,
verbose=False, random="normal"):
""" multiple loops over different dx
Parameters
----------
fun:
objective function
x0: array like
initial guess of x
dx:
a list of different scales. e.g. []
maxiter:
number of max iterations
args, kwargs:
fun(x, *args, **kwargs)
optind:
a subset ind of parameters
verbose: bool
if True, print verbose
random: [uniform, normal]
type of random number generator of
"""
self.fun = fun
self.x0 = x0
self.dx = dx
self.maxiter = maxiter
self.args = args
self.kwargs = kwargs
self.optind = optind
self.verbose = verbose
assert random in ["uniform", "normal"]
self.random = random
self.xhist = []
def __call__(self, x):
return np.float(self.fun(np.array(x), *self.args, **self.kwargs))
def run(self, fun=None, x0=None, dx=None, maxiter=None, args=None,
kwargs=None, optind=None, verbose=None, random=None):
x0 = x0 if x0 is not None else self.x0
xhist = []
opthist = []
cloop = 0
for _dx in dx if dx is not None else self.dx:
for _optind in optind if optind is not None else self.optind:
cloop += 1
info = cloop
_result = self.minimize(
fun=fun if fun is not None else self.fun,
x0=x0,
dx=_dx,
maxiter=maxiter if maxiter is not None else self.maxiter,
args=args if args is not None else self.args,
kwargs=kwargs if kwargs is not None else self.kwargs,
optind=_optind,
verbose=verbose if verbose is not None else self.verbose,
                    info=info,
random=random if random is not None else self.random,
)
opthist.append(_result)
xhist.append(x0)
return dict(x=_result["x"],
nfev=np.sum([_["nfev"]for _ in opthist]),
niter=np.sum([_["niter"] for _ in opthist]),
msg=table.vstack([table.Table(_["msg"]) for _ in opthist]))
@staticmethod
def minimize(fun, x0, dx, maxiter=10, args=None, kwargs={},
optind=None, verbose=False, info="", random="normal"):
""" a single
Parameters
----------
fun:
objective function
x0: array like
initial guess of x
dx:
a list of different scales. e.g. []
maxiter:
number of max iterations
args, kwargs:
fun(x, *args, **kwargs)
optind:
a subset ind of parameters, e.g., [5, 7]
verbose: bool
if True, print verbose
random: [uniform, normal]
type of random number generator
info:
additional info appended in msg
"""
if args is None:
args = []
# evaluate cost0
x0 = np.asarray(x0, float)
dx = np.asarray(dx, float)
ndim = len(x0)
cost0 = fun(x0, *args, **kwargs)
# opt ind
if optind is None:
optmask = np.ones(ndim, dtype=bool)
elif len(optind) == ndim:
optmask = np.asarray(optind, dtype=bool)
else:
optmask = np.zeros(ndim, dtype=bool)
optmask[optind] = True
# max number of iterations --> maxiter
niter = 0
nfev = 0
iiter = 0
if verbose:
print("X{} = {}, cost={}".format(niter, x0, cost0))
# messages
this_msg = dict(x=x0,
cost=cost0,
accept=True,
info=info,
nfev=0,
niter=0)
msg = [this_msg]
while iiter < maxiter:
# random step
if random == "normal":
x1 = x0 + np.random.normal(loc=0, scale=dx,
size=x0.shape) * optmask
elif random == "uniform":
x1 = x0 + np.random.uniform(low=-dx, high=dx,
size=x0.shape) * optmask
else:
raise ValueError("Invalid random type! [{}]".format(random))
cost1 = fun(x1, *args, **kwargs)
nfev += 1
if cost1 <= cost0:
x0 = np.copy(x1)
cost0 = cost1
iiter = 0
niter += 1
if verbose:
print("X{} = {}, cost={}".format(niter, x0, cost0), iiter)
# messages
this_msg = dict(x=x1,
cost=cost1,
accept=True,
info=info,
nfev=nfev,
niter=niter)
msg.append(this_msg)
else:
iiter += 1
# messages
this_msg = dict(x=x1,
cost=cost1,
accept=False,
info=info,
nfev=nfev,
niter=niter)
msg.append(this_msg)
return dict(x=x0, nfev=nfev, niter=niter, msg=table.Table(msg))
def test1():
def fun(x, asin=1):
return asin * np.sin(x) + x ** 2
xx = np.linspace(-10, 10, 1000)
plt.figure()
plt.plot(xx, fun(xx, 10))
res = RandomWalkMinimizer.minimize(fun, x0=[10], dx=10, maxiter=10,
args=[], optind=None, verbose=True, )
print("")
print(res)
def test2():
def fun(x, asin=1):
return np.sum(asin * np.sin(x) + x ** 2)
res = RandomWalkMinimizer.minimize(fun, x0=[-2, -1], dx=1, maxiter=20,
args=[], optind=[0], verbose=True,
random="uniform")
print("")
print(res)
def test3():
def fun(x, asin=1):
return np.sum(asin * np.sin(x) + x ** 2)
rwm = RandomWalkMinimizer(fun, x0=[10, 10], dx=[[10, 10], [2, 2]],
maxiter=20, args=[], optind=[[0], [1]],
verbose=True, random="uniform")
res = rwm.run()
print("")
print(res)
if __name__ == "__main__":
test3()
```
#### File: laspec/laspec/read_spectrum.py
```python
import os
from collections import OrderedDict
import numpy as np
from astropy.io import fits
from astropy.table import Table, Column
from .lamost import lamost_filepath
from .spec import Spec
def reconstruct_wcs_coord_from_fits_header(hdr, dim=1):
""" reconstruct wcs coordinates (e.g., wavelength array) """
# assert dim is not larger than limit
assert dim <= hdr['NAXIS']
# get keywords
crval = hdr['CRVAL%d' % dim]
cdelt = hdr['CDELT%d' % dim]
try:
crpix = hdr['CRPIX%d' % dim]
except KeyError:
crpix = 1
# length of the current dimension
naxis_ = hdr['NAXIS%d' % dim]
# reconstruct wcs coordinates
coord = np.arange(1 - crpix, naxis_ + 1 - crpix) * cdelt + crval
return coord
def read_spectrum_phoenix_r10000(fp):
""" read spectrum from PHOENIX R10000 """
hl = fits.open(fp)
wave = np.e ** reconstruct_wcs_coord_from_fits_header(hl[0].header)
flux = hl[0].data
return Table(data=[wave, flux], names=['wave', 'flux'])
def read_spectrum_elodie_r42000(fp):
""" read spectrum from ELODIE library (R42000) """
# assert the file exists
assert os.path.exists(fp)
# read fits
hl = fits.open(fp)
# reconstruct wave array
wave = reconstruct_wcs_coord_from_fits_header(hl[0].header, dim=1)
# flux
flux = hl[0].data
# flux err
flux_err = hl[2].data
# flux ivar
flux_ivar = 1 / flux_err ** 2.
# reconstruct spec
sp = Spec(data=[wave, flux, flux_ivar, flux_err],
names=['wave', 'flux', 'flux_ivar', 'flux_err'])
return sp
def read_spectrum(filepath, filesource='auto'):
"""read SDSS/LAMOST spectrum
Parameters
----------
filepath: string
input file path
filesource: string
{'sdss_dr12' / 'lamost_dr2' / 'lamost_dr3' / 'lamost_dr6'}
Returns
-------
specdata: astropy.table.Table
spectra as a table
"""
# auto-identify the spectrum origination
if filesource == 'auto':
telescope = fits.open(filepath)[0].header['TELESCOP']
if telescope == 'SDSS 2.5-M':
return read_spectrum(filepath, filesource='sdss_dr12')
if telescope == 'LAMOST':
return read_spectrum(filepath, filesource='lamost_dr3')
# SDSS DR7 spectrum
if filesource == 'sdss_dr7':
hdulist = fits.open(filepath)
# 1. flux, flux_err, mask
data = hdulist[0].data # 5 rows
flux = data[0][:]
flux_err = data[2][:]
mask = data[3][:]
# 2. wave
# http://iraf.net/irafdocs/specwcs.php
# wi = CRVALi + CDi_i * (li - CRPIXi)
CRVAL1 = hdulist[0].header['CRVAL1']
CD1_1 = hdulist[0].header['CD1_1']
CRPIX1 = hdulist[0].header['CRPIX1']
Npix = len(flux)
wavelog = CRVAL1 + (np.arange(Npix) + 1 - CRPIX1) * CD1_1
wave = np.power(10, wavelog)
spec = Table([wave, flux, flux_err, mask],
names=['wave', 'flux', 'flux_err', 'mask'])
return spec
# SDSS DR10/DR12 spectrum
if filesource == 'sdss_dr10' or filesource == 'sdss_dr12':
data = fits.open(filepath)
specdata = Table(data[1].data)
wave = Column(name='wave', data=np.power(10., specdata['loglam']))
flux_err = Column(name='flux_err', data=(specdata['ivar']) ** -0.5)
specdata.add_columns([wave, flux_err])
return specdata
# LAMOST DR2/DR3 spectrum
if filesource == 'lamost_dr3' or filesource == 'lamost_dr2' or filesource == 'lamost_dr1':
data = fits.open(filepath)
head = data[0].header
obsid = head['OBSID']
specdata = Table(data[0].data.T)
# flux = Column(name='flux', data=specdata['col0'])
# ivar = Column(name='ivar', data=specdata['col1'])
# flux_err = Column(name='flux_err', data=(specdata['col1']) ** -0.5)
# wave = Column(name='wave', data=specdata['col2'])
# and_mask = Column(name='and_mask', data=specdata['col3'])
# or_mask = Column(name='or_mask', data=specdata['col4'])
# for flux_err, convert inf to nan
# flux_err = specdata['col1'] ** -0.5
# flux_err[np.isinf(flux_err)] = np.nan
# return Table([wave, flux, flux_err, ivar, and_mask, or_mask])
return {'wave':specdata['col2'],
'flux':specdata['col0'],
# 'flux_err':flux_err,
'ivar':specdata['col1'],
'and_mask':specdata['col3'],
'or_mask':specdata['col4'],
'obsid':obsid}
return None
def _test_read_spectrum():
fp = '/home/cham/PycharmProjects/bopy/bopy/data/test_spectra/lamost_dr3/'\
+ lamost_filepath('GAC_061N46_V3', 55939, 7, 78)
print(fp)
sp = read_spectrum(fp)
sp.pprint()
class MedSpec(OrderedDict):
""" for Median Resolution Spectrum """
meta = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __repr__(self):
s = "No. Name Dimensions\n"
for i, (k,v) in enumerate(self.items()):
s += "{} {} {}Rx{}C\n".format(i, k, len(v), len(v.colnames))
return s
@staticmethod
def read(fp):
return read_lamostms(fp)
def read_lamostms(fp):
# read fits
hl = fits.open(fp)
# initialize MS
ms = MedSpec()
# set meta
ms.meta = OrderedDict(hl[0].header)
# set spec
for i, data in enumerate(hl[1:]):
ms[data.name] = Table(data=data.data, meta=OrderedDict(data.header))
return ms
if __name__ == '__main__':
print('')
print('@Cham: testing ''read_spectrum'' ...')
_test_read_spectrum()
"""
=======================================
filesource: 'lamost_dr2'
=======================================
documents of data structures (LAMOST and SDSS spectra)
http://dr2.lamost.org/doc/data-production-description#toc_3
RowNumber Data Type
1 Flux float
2 Inverse Variance float
3 WaveLength float
4 Andmask float
5 Ormask float
=======================================
filesource: 'sdss_dr7'
=======================================
http://classic.sdss.org/dr7/dm/flatFiles/spSpec.html
=======================================
filesource: 'sdss_dr10' / 'sdss_dr12'
=======================================
http://data.sdss3.org/datamodel/files/BOSS_SPECTRO_REDUX/RUN2D/spectra/PLATE4/spec.html
HDU 0 : Header info from spPlate
HDU 1 : Coadded spectrum from spPlate --> use this
HDU 2 : Summary metadata copied from spAll
HDU 3 : Line fitting metadata from spZline
HDU 4+ : [Optional] Individual spCFrame spectra [B, R for each exposure]
HDU 0: Header keywords only
Copied from spPlate with the following additions/modifications:
PLUG_RA, PLUG_DEC, THING_ID, FIBERID: added from spAll
NEXP and EXPID*: modified to just include the frames which contributed to this fiber
Removed keywords which apply only to single exposures
HDU 1 (extname COADD): Coadded Spectrum from spPlate
Binary table with columns:
Required Columns
Col Name Type Comment
1 flux float32 coadded calibrated flux [10-17 ergs/s/cm2/Å]
2 loglam float32 log10(wavelength [Å])
3 ivar float32 inverse variance of flux
4 and_mask int32 AND mask
5 or_mask int32 OR mask
6 wdisp float32 wavelength dispersion in pixel=dloglam units
7 sky float32 subtracted sky flux [10-17 ergs/s/cm2/Å]
8 model float32 pipeline best model fit used for classification and redshift
"""
``` |
{
"source": "jiadongy/spark-1.3.1.rebuild",
"score": 3
} |
#### File: spark-1.3.1.rebuild/trace-analysis/all_utilization.py
```python
import numpy
import os
import sys
import parse_logs
def plot_cdf(values, filename):
    # Unused stub: main() below writes the percentile file directly.
    f = open(filename, "w")
def write_data_to_file(data, file_handle):
    # Write one tab-separated row terminated by a newline.
    stringified_data = [str(x) for x in data]
    file_handle.write("\t".join(stringified_data) + "\n")
def main(argv):
disk_utilizations = []
cpu_utilizations = []
network_utilizations = []
dirname = argv[0]
for filename in os.listdir(dirname):
full_name = os.path.join(dirname, filename)
if os.path.isfile(full_name) and filename.endswith("job_log"):
print "Reading %s" % filename
analyzer = parse_logs.Analyzer(full_name)
for (id, stage) in analyzer.stages.iteritems():
for task in stage.tasks:
cpu_utilizations.append(task.total_cpu_utilization / 8.)
network_utilizations.append(task.network_bytes_transmitted_ps / (1000*1000*1000))
network_utilizations.append(task.network_bytes_received_ps / (1000*1000*1000))
for name, block_device_numbers in task.disk_utilization.iteritems():
if name in ["xvdb", "xvdf"]:
disk_utilizations.append(block_device_numbers[0])
output_filename = os.path.join(dirname, "cpu_disk_utilization_cdf")
f = open(output_filename, "w")
print max(network_utilizations)
for percent in range(100):
f.write("%s\t%s\t%s\t%s\n" % (percent / 100., numpy.percentile(cpu_utilizations, percent),
numpy.percentile(disk_utilizations, percent),
numpy.percentile(network_utilizations, percent)))
f.close()
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: spark-1.3.1.rebuild/trace-analysis/task.py
```python
import numpy
import logging
class Task:
def __init__(self, data, is_json):
if is_json:
self.initialize_from_json(data)
else:
self.initialize_from_job_logger(data)
self.scheduler_delay = (self.finish_time - self.executor_run_time -
self.executor_deserialize_time - self.result_serialization_time - self.start_time)
# Should be set to true if this task is a straggler and we know the cause of the
# straggler behavior.
self.straggler_behavior_explained = False
def initialize_from_json(self, json_data):
self.logger = logging.getLogger("Task")
task_info = json_data["Task Info"]
task_metrics = json_data["Task Metrics"]
self.start_time = task_info["Launch Time"]
self.finish_time = task_info["Finish Time"]
self.executor = task_info["Host"]
self.executor_run_time = task_metrics["Executor Run Time"]
self.executor_deserialize_time = task_metrics["Executor Deserialize Time"]
self.result_serialization_time = task_metrics["Result Serialization Time"]
self.gc_time = task_metrics["JVM GC Time"]
# TODO: looks like this is never used.
self.executor_id = task_info["Executor ID"]
# TODO: add utilization to task metrics output by JSON.
self.disk_utilization = {}
self.network_bytes_transmitted_ps = 0
self.network_bytes_received_ps = 0
self.process_cpu_utilization = 0
self.total_cpu_utilization = 0
self.shuffle_write_time = 0
self.shuffle_mb_written = 0
SHUFFLE_WRITE_METRICS_KEY = "Shuffle Write Metrics"
if SHUFFLE_WRITE_METRICS_KEY in task_metrics:
shuffle_write_metrics = task_metrics[SHUFFLE_WRITE_METRICS_KEY]
# Convert to milliseconds (from nanoseconds).
self.shuffle_write_time = shuffle_write_metrics["Shuffle Write Time"] / 1.0e6
OPEN_TIME_KEY = "Shuffle Open Time"
if OPEN_TIME_KEY in shuffle_write_metrics:
shuffle_open_time = shuffle_write_metrics[OPEN_TIME_KEY] / 1.0e6
print "Shuffle open time: ", shuffle_open_time
self.shuffle_write_time += shuffle_open_time
CLOSE_TIME_KEY = "Shuffle Close Time"
if CLOSE_TIME_KEY in shuffle_write_metrics:
shuffle_close_time = shuffle_write_metrics[CLOSE_TIME_KEY] / 1.0e6
print "Shuffle close time: ", shuffle_close_time
self.shuffle_write_time += shuffle_close_time
self.shuffle_mb_written = shuffle_write_metrics["Shuffle Bytes Written"] / 1048576.
# TODO: print warning when non-zero disk bytes spilled??
# TODO: are these accounted for in shuffle metrics?
INPUT_METRICS_KEY = "Input Metrics"
self.input_read_time = 0
self.input_read_method = "unknown"
self.input_mb = 0
if INPUT_METRICS_KEY in task_metrics:
input_metrics = task_metrics[INPUT_METRICS_KEY]
self.input_read_time = 0 # TODO: fill in once input time has been added.
self.input_read_method = input_metrics["Data Read Method"]
self.input_mb = input_metrics["Bytes Read"] / 1048576.
# TODO: add write time and MB.
self.output_write_time = 0 #int(items_dict["OUTPUT_WRITE_BLOCKED_NANOS"]) / 1.0e6
self.output_mb = 0
#if "OUTPUT_BYTES" in items_dict:
# self.output_mb = int(items_dict["OUTPUT_BYTES"]) / 1048576.
self.has_fetch = True
# False if the task was a map task that did not run locally with its input data.
self.data_local = True
SHUFFLE_READ_METRICS_KEY = "Shuffle Read Metrics"
if SHUFFLE_READ_METRICS_KEY not in task_metrics:
if task_info["Locality"] != "NODE_LOCAL":
self.data_local = False
self.has_fetch = False
return
shuffle_read_metrics = task_metrics[SHUFFLE_READ_METRICS_KEY]
self.fetch_wait = shuffle_read_metrics["Fetch Wait Time"]
self.local_blocks_read = shuffle_read_metrics["Local Blocks Fetched"]
self.remote_blocks_read = shuffle_read_metrics["Remote Blocks Fetched"]
self.remote_mb_read = shuffle_read_metrics["Remote Bytes Read"] / 1048576.
self.local_mb_read = 0
LOCAL_BYTES_READ_KEY = "Local Bytes Read"
if LOCAL_BYTES_READ_KEY in shuffle_read_metrics:
self.local_mb_read = shuffle_read_metrics[LOCAL_BYTES_READ_KEY] / 1048576.
# The local read time is not included in the fetch wait time: the task blocks
# on reading data locally in the BlockFetcherIterator.initialize() method.
self.local_read_time = 0
LOCAL_READ_TIME_KEY = "Local Read Time"
if LOCAL_READ_TIME_KEY in shuffle_read_metrics:
self.local_read_time = shuffle_read_metrics[LOCAL_READ_TIME_KEY]
self.total_time_fetching = shuffle_read_metrics["Fetch Wait Time"]
def initialize_from_job_logger(self, line):
self.logger = logging.getLogger("Task")
items = line.split(" ")
items_dict = {}
for pair in items:
if pair.find("=") == -1:
continue
key, value = pair.split("=")
items_dict[key] = value
self.start_time = int(items_dict["START_TIME"])
self.finish_time = int(items_dict["FINISH_TIME"])
self.executor = items_dict["HOST"]
self.executor_run_time = int(items_dict["EXECUTOR_RUN_TIME"])
self.executor_deserialize_time = int(items_dict["EXECUTOR_DESERIALIZE_TIME"])
# TODO: Add result serialization time to job logger metrics.
self.result_serialization_time = 0
self.gc_time = int(items_dict["GC_TIME"])
self.executor_id = int(items_dict["EXECUTOR_ID"])
# Estimate serialization and deserialization time based on samples.
# TODO: Report an estimated error here based on variation in samples?
self.estimated_serialization_millis = 0
if "SERIALIZATED_ITEMS" in items_dict:
serialized_items = int(items_dict["SERIALIZATED_ITEMS"])
# Samples are times in nanoseconds.
serialized_samples = [int(sample) for sample in items_dict["SERIALIZED_SAMPLES"].split(",")]
print "Serialized %s items, sampled %s" % (serialized_items, len(serialized_samples))
self.estimated_serialization_millis = serialized_items * numpy.mean(serialized_samples[0::10]) / 1e6
self.estimated_deserialization_millis = 0
if "DESERIALIZED_ITEMS" in items_dict:
deserialized_items = int(items_dict["DESERIALIZED_ITEMS"])
deserialized_samples = [
int(sample) for sample in items_dict["DESERIALIZATION_TIME_NANOS"].split(",")]
print "Deserialized %s items, sampled %s" % (deserialized_items, len(deserialized_samples))
self.estimated_deserialization_millis = (
deserialized_items * numpy.median(deserialized_samples[0::1]) / 1e6)
# Utilization metrics.
# Map of device name to (utilization, read throughput, write throughout).
self.disk_utilization = {}
utilization_str = items_dict["DISK_UTILIZATION"]
for block_utilization_str in utilization_str.split(";"):
if block_utilization_str:
device_name, numbers = block_utilization_str.split(":")
self.disk_utilization[device_name] = [float(x) for x in numbers.split(",")]
network_throughput_items = [
float(x.split(":")[1]) for x in items_dict["NETWORK_UTILIZATION"].split(",")]
self.network_bytes_transmitted_ps = network_throughput_items[1]
self.network_bytes_received_ps = network_throughput_items[0]
cpu_utilization_numbers = [
float(x.split(":")[1]) for x in items_dict["CPU_UTILIZATION"].split(",")]
# Record the total CPU utilization as the total system CPU use + total user CPU use.
self.process_cpu_utilization = cpu_utilization_numbers[0] + cpu_utilization_numbers[1]
self.total_cpu_utilization = cpu_utilization_numbers[2] + cpu_utilization_numbers[3]
# Should be set to true if this task is a straggler and we know the cause of the
# straggler behavior.
self.straggler_behavior_explained = False
self.shuffle_write_time = 0
self.shuffle_mb_written = 0
SHUFFLE_WRITE_TIME_KEY = "SHUFFLE_WRITE_TIME"
if SHUFFLE_WRITE_TIME_KEY in items_dict:
# Convert to milliseconds (from nanoseconds).
self.shuffle_write_time = int(items_dict[SHUFFLE_WRITE_TIME_KEY]) / 1.0e6
self.shuffle_mb_written = int(items_dict["SHUFFLE_BYTES_WRITTEN"]) / 1048576.
INPUT_METHOD_KEY = "READ_METHOD"
self.input_read_time = 0
self.input_read_method = "unknown"
self.input_mb = 0
if INPUT_METHOD_KEY in items_dict:
self.input_read_time = int(items_dict["READ_TIME_NANOS"]) / 1.0e6
self.input_read_method = items_dict["READ_METHOD"]
self.input_mb = float(items_dict["INPUT_BYTES"]) / 1048576.
self.output_write_time = int(items_dict["OUTPUT_WRITE_BLOCKED_NANOS"]) / 1.0e6
self.output_mb = 0
if "OUTPUT_BYTES" in items_dict:
self.output_mb = int(items_dict["OUTPUT_BYTES"]) / 1048576.
self.has_fetch = True
# False if the task was a map task that did not run locally with its input data.
self.data_local = True
if line.find("FETCH") < 0:
if "LOCALITY" in items_dict and items_dict["LOCALITY"] != "NODE_LOCAL":
self.data_local = False
self.has_fetch = False
return
self.fetch_wait = int(items_dict["REMOTE_FETCH_WAIT_TIME"])
self.local_blocks_read = int(items_dict["BLOCK_FETCHED_LOCAL"])
self.remote_blocks_read = int(items_dict["BLOCK_FETCHED_REMOTE"])
self.remote_mb_read = int(items_dict["REMOTE_BYTES_READ"]) / 1048576.
self.local_mb_read = int(items_dict["LOCAL_READ_BYTES"]) / 1048576.
# The local read time is not included in the fetch wait time: the task blocks
# on reading data locally in the BlockFetcherIterator.initialize() method.
self.local_read_time = int(items_dict["LOCAL_READ_TIME"])
self.total_time_fetching = int(items_dict["REMOTE_FETCH_TIME"])
def input_size_mb(self):
if self.has_fetch:
return self.remote_mb_read + self.local_mb_read
else:
return self.input_mb
def compute_time_without_gc(self):
""" Returns the time this task spent computing.
Assumes shuffle writes don't get pipelined with task execution (TODO: verify this).
Does not include GC time.
"""
compute_time = (self.runtime() - self.scheduler_delay - self.gc_time -
self.shuffle_write_time - self.input_read_time - self.output_write_time)
if self.has_fetch:
# Subtract off of the time to read local data (which typically comes from disk) because
# this read happens before any of the computation starts.
compute_time = compute_time - self.fetch_wait - self.local_read_time
return compute_time
def compute_time(self):
""" Returns the time this task spent computing (potentially including GC time).
The reason we don't subtract out GC time here is that garbage collection may happen
during fetch wait.
"""
return self.compute_time_without_gc() + self.gc_time
def runtime_no_compute(self):
""" Returns how long the task would have run for had it not done any computation. """
# Time the task spent reading data over the network or from disk for the shuffle.
# Computation happens during this time, but if the computation were infinitely fast,
# this phase wouldn't have sped up because it was ultimately waiting on the network.
# This is an approximation because tasks don't currently log the amount of time where
# the network is stopped, waiting for the computation to speed up.
# We're also approximating because there's some disk writing that happens in parallel
# via the OS buffer cache. It's basically impossible for us to account for that so
# we ignore it.
# The final reason that this is an approximation is that the shuffle write time could overlap with
# the shuffle time (if a task is both reading shuffle inputs and writing shuffle outputs).
# We should be able to fix the logging to correct this issue.
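        # compute_wait_time is what remains of the task after removing the non-compute
        # phases (shuffle writes, scheduler delay, GC, input reads and, below, fetch waits);
        # subtracting it from the runtime leaves the time the task spent not computing.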
compute_wait_time = self.finish_time - self.start_time - self.shuffle_write_time - self.scheduler_delay - self.gc_time - self.input_read_time
if self.has_fetch:
#compute_wait_time = compute_wait_time - shuffle_time
compute_wait_time = compute_wait_time - self.fetch_wait
return self.runtime() - compute_wait_time
def runtime_no_disk(self):
""" Returns a lower bound on what the runtime would have been without disk IO.
Includes shuffle read time, which is partially spent using the network and partially spent
using disk.
"""
disk_time = self.output_write_time + self.shuffle_write_time + self.input_read_time
if self.has_fetch:
disk_time += self.local_read_time + self.fetch_wait
return self.runtime() - disk_time
def disk_time(self):
""" Returns the time writing shuffle output.
Ignores disk time taken to read shuffle input as part of a transfer over the network because in
the traces we've seen so far, it's a very small percentage of the network time.
"""
if self.has_fetch:
return self.shuffle_write_time + self.local_read_time
return self.shuffle_write_time
def __str__(self):
if self.has_fetch:
base = self.start_time
# Print times relative to the start time so that they're easier to read.
desc = (("Start time: %s, local read time: %s, " +
"fetch wait: %s, compute time: %s, gc time: %s, shuffle write time: %s, " +
"result ser: %s, finish: %s, shuffle bytes: %s, input bytes: %s") %
(self.start_time, self.local_read_time,
self.fetch_wait, self.compute_time(), self.gc_time,
self.shuffle_write_time, self.result_serialization_time, self.finish_time - base,
self.local_mb_read + self.remote_mb_read, self.input_mb))
else:
desc = ("Start time: %s, finish: %s, scheduler delay: %s, input read time: %s, gc time: %s, shuffle write time: %s" %
(self.start_time, self.finish_time, self.scheduler_delay, self.input_read_time, self.gc_time, self.shuffle_write_time))
return desc
def log_verbose(self):
self.logger.debug(str(self))
def runtime(self):
return self.finish_time - self.start_time
def runtime_no_input(self):
new_finish_time = self.finish_time - self.input_read_time
return new_finish_time - self.start_time
def runtime_no_output(self):
new_finish_time = self.finish_time - self.output_write_time
return new_finish_time - self.start_time
def runtime_no_shuffle_write(self):
return self.finish_time - self.shuffle_write_time - self.start_time
def runtime_no_shuffle_read(self):
if self.has_fetch:
return self.finish_time - self.fetch_wait - self.local_read_time - self.start_time
else:
return self.runtime()
def runtime_no_remote_shuffle_read(self):
if self.has_fetch:
return self.finish_time - self.fetch_wait - self.start_time
else:
return self.runtime()
def runtime_no_output(self):
new_finish_time = self.finish_time - self.output_write_time
return new_finish_time - self.start_time
def runtime_no_input_or_output(self):
new_finish_time = self.finish_time - self.input_read_time - self.output_write_time
return new_finish_time - self.start_time
def runtime_no_network(self):
runtime_no_in_or_out = self.runtime_no_output()
if not self.data_local:
runtime_no_in_or_out -= self.input_read_time
if self.has_fetch:
return runtime_no_in_or_out - self.fetch_wait
else:
return runtime_no_in_or_out
```
#### File: spark-1.3.1.rebuild/trace-analysis/utilization_scatter.py
```python
import sys
import parse_logs
def write_data_to_file(data, file_handle):
stringified_data = [str(x) for x in data]
stringified_data += "\n"
file_handle.write("\t".join(stringified_data))
def main(argv):
filename = argv[0]
analyzer = parse_logs.Analyzer(filename)
start_time = min([x.start_time for x in analyzer.stages.values()])
for (id, stage) in analyzer.stages.iteritems():
stage_filename = "%s_%s_utilization" % (filename, id)
f = open(stage_filename, "w")
for task in stage.tasks:
items = [task.start_time, task.executor_run_time, task.total_cpu_utilization]
for block_device_numbers in task.disk_utilization.values():
items.extend(block_device_numbers)
items.append(task.network_bytes_transmitted_ps / 125000000)
items.append(task.network_bytes_received_ps / 125000000)
write_data_to_file(items, f)
f.close()
plot_base_file = open("utilization_scatter_base.gp", "r")
plot_file = open("%s_%s_utilization.gp" % (filename, id), "w")
for line in plot_base_file:
plot_file.write(line)
plot_base_file.close()
plot_file.write("set output \"%s_%s_utilization.pdf\"\n" % (filename, id))
plot_file.write("plot \"%s\" using ($1-%s):4 with p title \"Disk1\",\\\n" %
(stage_filename, start_time))
plot_file.write("\"%s\" using ($1-%s):7 with p title \"Disk2\",\\\n" %
(stage_filename, start_time))
plot_file.write("\"%s\" using ($1-%s):13 with p title\"Network T\",\\\n" %
(stage_filename, start_time))
plot_file.write("\"%s\" using ($1-%s):14 with p title\"Network R\",\\\n" %
(stage_filename, start_time))
plot_file.write("\"%s\" using ($1-%s):($3/8) with p title \"CPU\"\n" %
(stage_filename, start_time))
plot_file.close()
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "jiady/htdb",
"score": 3
} |
#### File: crawler/crawler/HttpStatusMiddleware.py
```python
from scrapy.exceptions import IgnoreRequest
import redis_const
class HttpStatusMiddleware(object):
def process_response(self, request, response, spider):
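        # Pass 200s through; drop requests for profiles that are gone (400/404/410) and
        # record the failure in redis; alert by mail on 403s, which usually mean the
        # login cookie has expired.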
if response.status == 200:
pass
elif response.status == 404 or response.status == 410 or response.status == 400:
if "com/people" in response.url:
url_name = response.url.split("/")[-1]
spider.flog.warning("get invalid status:@"+response.url+" give up")
spider.rclient.smove(spider.seen_S, spider.fail_S, url_name)
raise IgnoreRequest()
elif response.status == 403:
spider.send_mail("http status 403", "see if cookie invalide:" + response.body)
raise IgnoreRequest()
return response
```
#### File: crawler/offlineTask/ZhihuDownloadTask.py
```python
from myTask import Task
import requests
import json
import abc
import time
class ZhihuDownloadTask(Task):
header = None
http_people_base = "https://api.zhihu.com/people/"
topic_tail = "/following_topic?offset=0"
answers_tail = "/answers?order_by=created&offset=0&limit=20"
def __init__(self, name, logger, mail, retryFail=False, pipeline_key=[]):
Task.__init__(self, name, logger, mail, retryFail=retryFail, pipeline_key=pipeline_key)
Task.task_sleep = 7
def getInitQueue(self):
return Task.getInitQueueBySetNames(self, "target-has-face")
def filter_out(self, hash_id):
r = Task.rclient.get("people/" + hash_id)
return json.loads(r)["school"] == "not-found"
def get(self, url):
time.sleep(Task.task_sleep)
self.logger.info("getting " + url)
response = requests.get(url, cookies=self.cookie, headers=self.header)
self.logger.info("response:" + str(response.status_code))
return response
def post(self, url, payload):
return requests.post(url, cookies=self.cookie, headers=self.header, data=payload)
def init(self):
auth = Task.rclient.get("crawler/auth")
auth_info = json.loads(auth)
self.access_token = auth_info["access_token"]
self.cookie = auth_info["cookie"]
self.auth_type = auth_info["token_type"]
self.header = {
"Authorization": self.auth_type.capitalize() + " " + self.access_token,
"x-api-version": "3.0.23",
"x-app-version": "4.4.0",
"x-app-za": "OS=Android&Release=6.0.1&Model=MI+NOTE+LTE&VersionName=4.4.0&VersionCode=413&Width=1080&Height=1920&Installer=%E5%BA%94%E7%94%A8%E5%86%85%E5%8D%87%E7%BA%A7",
"x-app-build": "release",
"Connection": "Keep-Alive",
"Host": "api.zhihu.com",
"User-Agent": "Futureve/4.4.0 Mozilla/5.0 (Linux; Android 6.0.1; MI NOTE LTE Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/46.0.2490.76 Mobile Safari/537.36 Google-HTTP-Java-Client/1.20.0 (gzip)",
"Accept-Encoding": "gzip",
}
@abc.abstractmethod
def taskOperation(self, hash):
pass
```
#### File: crawler/spiders/MyDupeFilter.py
```python
from scrapy.dupefilters import RFPDupeFilter
import redis
from crawler import redis_const
class MyDupeFilter(RFPDupeFilter):
def __init__(self, path=None, debug=False):
        RFPDupeFilter.__init__(self, path=path, debug=debug)  # forward the constructor arguments instead of discarding them
self.rclient = redis.StrictRedis(host="localhost", port=6379, db=0)
def request_seen(self, request):
return False
```
#### File: crawler/spiders/zhihu.py
```python
import scrapy
import json
import os
from crawler import items
from scrapy import Selector
import logging
import logging.handlers
from crawler.mail import Mail
from crawler import redis_const
import redis
import time
import ConfigParser
def _monkey_patching_HTTPClientParser_statusReceived():
"""
monkey patching for scrapy.xlib.tx._newclient.HTTPClientParser.statusReceived
"""
from twisted.web._newclient import HTTPClientParser, ParseError
old_sr = HTTPClientParser.statusReceived
def statusReceived(self, status):
try:
return old_sr(self, status)
except ParseError, e:
if e.args[0] == 'wrong number of parts':
return old_sr(self, status + ' OK')
raise
    statusReceived.__doc__ = old_sr.__doc__  # copy the docstring over (assignment, not comparison)
HTTPClientParser.statusReceived = statusReceived
class filterConditon:
# propogation node limit condition
school_target = [u"复旦大学", u"华东师范大学", u"上海交通大学", u"同济大学", u"SJTU", u"sjtu", u"fdu", u"FDU", u"ECNU", u"ecnu",
u"上海财经大学", u"上海外国语大学", u"浙江大学", u"南京大学", u"ZJU", u"zju", u"清华大学", u"北京大学", u"THU", u"PKU"]
followee_min = 0
followee_max = 1000
# target node limitation
location_target = u"上海"
gender_target = "female"
follower_min = 0
follower_max = 3000
# ranking dict
topic_score = {}
stop = False
class ZhihuSpider(scrapy.Spider):
name = "zhihu"
allowed_domains = ["zhihu.com"]
base = "http://www.zhihu.com"
spider_filter = filterConditon()
headers = {}
sub_name = ""
user = ""
pwd = ""
sub_type = ""
pending_Q = ""
pending_S = ""
seen_S = ""
success_S = ""
cookies = dict()
# not add header will lead to getting 500 internal error.
def __init__(self, user=None, pwd=None, sub_name=None, start_from_redis=False,
start_from_file=None, sub_type=redis_const.TYPE_NO_ACCOUNT, master=False):
if sub_type == redis_const.TYPE_HAS_ACCOUNT:
if user is None or pwd is None or sub_name is None:
self.logger.error("an sub_config must be given")
exit(0)
scrapy.Spider.__init__(self)
self.master = master
self.user = user
        self.pwd = pwd
if self.master:
self.sub_name = "master." + sub_type + "." + sub_name
else:
self.sub_name = "slave." + sub_type + "." + sub_name
self.sub_type = sub_type
self.start_from_redis = start_from_redis
self.start_from_file = start_from_file
self.pending_Q = redis_const.PENDING_QUEUE + self.sub_type
self.pending_S = redis_const.PENDING_SET + self.sub_type
self.seen_S = redis_const.SET_SEEN + self.sub_type
self.success_S = redis_const.SET_SUCCESS + self.sub_type
self.fail_S = redis_const.SET_FAIL + self.sub_type
self.rclient = redis.StrictRedis(host="localhost", port=6379, db=0)
handler = logging.handlers.TimedRotatingFileHandler(
"htdb_" + self.sub_name + ".log",
when='D',
backupCount=7)
fmt = '%(asctime)s|%(filename)s:%(funcName)s:%(lineno)s:[%(name)s]:%(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
self.flog = logging.getLogger('htdb')
self.flog.addHandler(handler)
self.flog.setLevel(logging.DEBUG)
self.myMail = Mail(self.flog)
self.headers = {
'Accept': '*/*',
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'Keep-Alive',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-TW;q=0.2',
'Accept-Encoding': 'gzip, deflate, br',
'Referer': 'https://www.zhihu.com/',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0',
}
self.stop = False
def send_mail(self, subject, text, lock=600):
self.myMail.send_timed(lock, subject + "@" + self.sub_name, text)
def genConfig(self):
content=self.rclient.get("crawler/web-cookie")
dire = os.path.dirname(os.path.realpath(__file__))
lines = map(lambda x: x.strip(" ;"), content.split(';'))
config = open(dire + "/config.private", 'w')
config.write("[cookies]\n")
for line in lines:
config.write(line + "\n")
config.close()
cf = ConfigParser.ConfigParser()
f = os.path.dirname(os.path.realpath(__file__))
cf.read(f + '/config.private')
cookies = cf.items('cookies')
self.cookies = dict(cookies)
self.headers['X-Xsrftoken'] = self.cookies["_xsrf"]
def prepare(self):
if self.master:
seen = self.rclient.spop(self.seen_S)
print self.pending_S, self.pending_Q, seen
while (seen is not None) and len(seen) < 64:
if self.rclient.sadd(self.pending_S, seen):
self.rclient.rpush(self.pending_Q, seen)
seen = self.rclient.spop(self.seen_S)
self.genConfig()
self.fromFile()
self.logger.info("master prepare done!")
else:
self.logger.info("this is s slave,wait a sec...")
time.sleep(5)
def consume(self, response):
b = "https://www.zhihu.com/people/"
count = 50
if self.sub_type == redis_const.TYPE_HAS_ACCOUNT:
count = 1
while count > 0:
name = self.rclient.lpop(self.pending_Q)
if name is not None:
self.rclient.sadd(self.seen_S, name)
self.rclient.srem(self.pending_S, name)
yield scrapy.Request(b + name,
callback=self.parse_people_info,
headers=self.headers)
if self.sub_type == redis_const.TYPE_HAS_ACCOUNT:
time.sleep(1)
count -= 1
else:
time.sleep(20)
break
yield scrapy.Request("https://www.zhihu.com",
callback=self.backToConsume,
headers=self.headers)
def backToConsume(self, response):
yield scrapy.Request("https://www.zhihu.com",
callback=self.consume,
headers=self.headers)
def start_requests(self):
_monkey_patching_HTTPClientParser_statusReceived()
self.prepare()
if self.sub_type == redis_const.TYPE_HAS_ACCOUNT:
yield scrapy.Request(
"https://www.zhihu.com",
callback=self.consume,
headers=self.headers,
cookies=self.cookies
)
elif self.sub_type == redis_const.TYPE_NO_ACCOUNT:
yield scrapy.Request(
"https://www.zhihu.com",
callback=self.consume,
headers=self.headers
)
def fromFile(self):
if self.start_from_file is not None:
f = open(self.start_from_file, 'r')
user = f.readlines()
user = map(lambda x: x.strip("\n"), user)
for u in user:
if self.rclient.sadd(self.pending_S, u):
self.rclient.rpush(self.pending_Q, u)
def parse_people_list_more(self, response):
try:
j = json.loads(response.body)
sel = Selector(text=" ".join(j['msg']))
self.logger.info(response.request.headers['tracking_user_hash'])
follower_hash = response.request.headers['tracking_user_hash']
people = sel.xpath("//h2[@class='zm-list-content-title']//a")
for person in people:
# name = person.xpath("@title").extract_first(default='not-found')
href = person.xpath("@href").extract_first(default='not-found')
url_name = href.split('/')[-1]
ret = items.RelationItem()
ret["follower"] = follower_hash
ret["followee"] = url_name
yield ret
self.logger.info("add to noaccount:"+href)
except Exception, e:
self.send_mail("parse_people_list_exception", str(e))
def judge_person_as_valid_source(self, person):
if person['school'] not in self.spider_filter.school_target:
return False
if int(person['followee_num']) < self.spider_filter.followee_min:
return False
if int(person['followee_num']) > self.spider_filter.followee_max:
return False
return True
def judge_person_as_target(self, person):
if (person['school'] not in self.spider_filter.school_target) and person[
'city'] != self.spider_filter.location_target:
return False
if not person['if_female']:
return False
if int(person['follower_num']) > self.spider_filter.follower_max:
return False
if int(person['follower_num']) < self.spider_filter.follower_min:
return False
return True
def parse_people_info(self, response):
try:
person = items.PersonItem()
profileCard = response.selector.xpath("//div[@class='zu-main-content-inner']/div[1]")
person["image_href"] = profileCard.xpath(".//img[contains(@class,'Avatar--l')]/@src").extract_first(
default='not-found')
person["url_name"] = response.url.split('/')[-1]
info = response.xpath("//script[@data-name='current_people']/text()").extract_first(
default="[not-found]")
basic_info = info[1:-1].split(',')
person['hash_id'] = basic_info[3].strip('"')
top = profileCard.xpath(".//div[@class='top']")
person["name"] = top.xpath(".//span[@class='name']/text()").extract_first(default='not-found')
person['bio'] = top.xpath(".//div[contains(@class,'bio')]/text()").extract_first(default='not-found')
infoItem = profileCard.xpath("//div[@class='items']")
person['city'] = infoItem.xpath(".//span[contains(@class,'location')]/@title").extract_first(
default='not-found')
gender = infoItem.xpath(".//span[contains(@class,'gender')]/i/@class").extract_first(
default='not-found')
person['if_female'] = 'female' in gender
person['school'] = infoItem.xpath(".//span[contains(@class,'education')]/@title").extract_first(
default='not-found')
person['major'] = infoItem.xpath(".//span[contains(@class,'education-extra')]/@title").extract_first(
default='not-found')
person['introduction'] = profileCard.xpath(".//span[@class='content']/text()").extract_first(
default='not-found').strip("\n")
person['agree_num'] = profileCard.xpath(
".//div[@class='zm-profile-header-info-list']//span[@class='zm-profile-header-user-agree']/strong/text()").extract_first(
default='not-found')
sideBar = response.selector.xpath(
".//div[@class='zu-main-sidebar']/div[@class='zm-profile-side-following zg-clear']")
person['followee_num'] = sideBar.xpath("a[1]/strong/text()").extract_first(default='not-found')
person['follower_num'] = sideBar.xpath("a[2]/strong/text()").extract_first(default='not-found')
yield person
if self.sub_type == redis_const.TYPE_NO_ACCOUNT:
if self.judge_person_as_target(person) and (not self.judge_person_as_valid_source(person)):
target = items.TargetPersonItem()
target['hash_id'] = person['hash_id']
yield target
elif self.judge_person_as_valid_source(person):
tosend = items.SendToAccountItem()
tosend["url_name"] = person['url_name']
yield tosend
else:
if self.judge_person_as_target(person):
target = items.TargetPersonItem()
target['hash_id'] = person['hash_id']
yield target
if self.judge_person_as_valid_source(person):
target = items.PropogationPersonItem()
target['hash_id'] = person['hash_id']
yield target
form = dict()
form["method"] = "next"
params = dict()
params['offset'] = 0
params['order_by'] = "created"
params['hash_id'] = person['hash_id']
for offset in xrange(int(person['followee_num']) / 20 + 1):
params['offset'] = offset * 20
form['params'] = json.dumps(params)
self.logger.info(str(form))
header_this = self.headers.copy()
header_this['Referer'] = response.url + "/followees"
header_this['tracking_user_hash'] = person['hash_id']
yield scrapy.FormRequest(
"https://www.zhihu.com/node/ProfileFolloweesListV2",
formdata=form,
headers=header_this,
callback=self.parse_people_list_more
)
time.sleep(0.5)
except Exception, e:
self.send_mail("parse_people_info", str(
e) + "response:" + response.url + "\nresponse_body:" + response.body + "request:" + response.request.url)
'''
def parse_people_info_mob(self, response):
try:
j = json.loads(response.body)
if "error" in j:
self.send_mail("parse_people_error", response.body)
person = items.PersonItem()
person["image_href"] = j["avatar_url"].replace("_s.", "_l.")
person["url_name"] = ""
person["hash_id"] = j['id']
person["name"] = j['name']
person["bio"] = j['headline']
try:
person['city'] = j['locations'][0]['name']
except:
person['city'] = 'not-found'
person["if_female"] = (j['gender'] == 0)
try:
person['school'] = j['educations'][0]['school']['name']
except:
person['school'] = 'not-found'
try:
person['major'] = j['educations'][0]['major']['name']
except:
person['major'] = 'not-found'
person['introduction'] = j['description']
person['agree_num'] = j['voteup_count']
person['follower_num'] = j['follower_count']
person['followee_num'] = j['following_count']
'''
``` |
{
"source": "jiaevo/datastructurealgo",
"score": 4
} |
#### File: datastructurealgo/scripts/0_longest_substring_between_2_strings.py
```python
def longest(X,Y):
    # dp[i][j] = length of the longest common substring of X and Y
    # that ends at Y[i] and X[j]
    dp = [[0 for k in range(len(X))] for l in range(len(Y))]
    result = 0
    for i in range(len(Y)):
        for j in range(len(X)):
            if Y[i] == X[j]:
                # extend the common suffix ending at the previous pair of characters
                dp[i][j] = dp[i-1][j-1] + 1 if i > 0 and j > 0 else 1
                result = max(result, dp[i][j])
            else:
                dp[i][j] = 0
    return result
X = 'OldSite:GeeksforGeeks.org'
Y = 'NewSite:GeeksQuiz.com'
longest(X,Y)
```
#### File: datastructurealgo/scripts/10_regex_matching.py
```python
pos = 0
p = 'abc.*'
s = 'abc'
def regex(p,s):
dp = [[False for i in range(len(p)+1)] for j in range(len(s)+1)]
    dp[0][0] = True  # empty pattern matches empty string
    for i in range(1,len(p)+1):
        # a pattern prefix ending in '*' can match the empty string if the prefix before it does
        if i > 1 and p[i-1] == '*' and dp[0][i-2]:
dp[0][i] = True
for i in range(1,len(s)+1):
for j in range(1,len(p)+1):
if s[i-1] == p[j-1] or p[j-1] == '.':
dp[i][j] = dp[i-1][j-1]
elif p[j-1] == '*':
if dp[i][j-2]:
dp[i][j] = True
elif dp[i-1][j] and (s[i-1] == p[j-2] or p[j-2] == '.'):
dp[i][j] = True
    return dp[len(s)][len(p)]  # True iff the whole string matches the whole pattern
```
#### File: datastructurealgo/scripts/110_balanced_binary_tree.py
```python
class node:
def __init__(self,data):
self.data = data
class binary_tree:
def __init__(self,node):
self.root = node
self.left = None
self.right = None
def push(self,node):
if node.data <= self.root.data:
if self.left is None:
self.left = binary_tree(node)
else:
self.left.push(node)
else:
if self.right is None:
self.right = binary_tree(node)
else:
self.right.push(node)
def getdepth(tree):
if tree is None:
return 0
else:
leftdepth = getdepth(tree.left)
rightdepth = getdepth(tree.right)
if leftdepth > rightdepth:
return leftdepth + 1
else:
return rightdepth + 1
def balanced_tree(tree):
    if tree is None:
        return True
    else:
        lefttree = getdepth(tree.left)
        righttree = getdepth(tree.right)
        if abs(lefttree-righttree) > 1:
            return False
        else:
            # both subtrees must themselves be balanced, not just the root
            return balanced_tree(tree.left) and balanced_tree(tree.right)
```
#### File: datastructurealgo/scripts/24_swap_nodes_in_pairs.py
```python
class node:
def __init__(self,data):
self.value = data
self.next = None
class link_list:
def __init__(self,node):
self.head = node
def push(self,node):
current = self.head
while(current.next):
current = current.next
current.next = node
def printlist(self):
current = self.head
while(current.next):
print(current.value)
current=current.next
print(current.value)
#edged case odd number of nodes: current.next is None, finish
def swapnodes(linklist):
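    # Swap every pair of adjacent nodes by rewiring pointers; prev links the
    # previous pair to the newly swapped pair, and the head is reset on the first swap.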
current = linklist.head
prev = None
    while current and current.next:  # check current first so the final odd node does not raise AttributeError
temp = current.next
current.next = temp.next
temp.next = current
if prev is None:
linklist.head = temp
print(linklist)
else:
prev.next = temp
prev = current
current = current.next
return linklist.head
l4 = link_list(node(20))
l4.push(node(99))
l4.push(node(202))
l4.push(node(204))
l4.push(node(205))
```
#### File: datastructurealgo/scripts/31_next_perm.py
```python
def permute(nums):
    # rearrange nums into its next lexicographic permutation, in place
    if len(nums) <= 1:
        return
    # find the rightmost index i with nums[i] < nums[i + 1] (the pivot)
    i = len(nums) - 2
    while i >= 0 and nums[i] >= nums[i + 1]:
        i -= 1
    if i >= 0:
        # swap the pivot with the smallest element to its right that is larger
        j = len(nums) - 1
        while nums[j] <= nums[i]:
            j -= 1
        nums[i], nums[j] = nums[j], nums[i]
    # reverse the descending suffix; with no pivot this wraps around to sorted order
    nums[i + 1:] = reversed(nums[i + 1:])
```
#### File: datastructurealgo/scripts/6_zigzag_conversion.py
```python
def zigzag(strs,n):
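    # Walk the rows of an n-row zigzag top-down then bottom-up, appending one
    # character per step; flip_insert is the current direction (+1 down, -1 up).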
zigzag_list = ['']*n
    row_number = 0
flip_insert = 1
for i in range(0,len(strs)):
zigzag_list[row_number] = zigzag_list[row_number] + strs[i]
if row_number < n - 1 and flip_insert == 1:
row_number = row_number + 1
elif row_number > 0 and flip_insert == -1:
row_number = row_number - 1
else:
flip_insert = flip_insert * -1
row_number = row_number + flip_insert
# row_to_insert = i % n - 1
# if flip_insert == -1:
# zigzag_list[row_to_insert].append(str[i])
# elif flip_insert == 1:
# zigzag_list[n - row_to_insert + 1].append(str[i])
# if row_to_insert - 1 == 0:
# flip_insert = flip_insert * -1
# if flip_insert == 1 and i+1 % n == 1:
# flip_insert = flip_insert * -1
return zigzag_list
```
#### File: datastructurealgo/scripts/7_reverse_integer.py
```python
def reverse(x: int) -> int:
negative_ind = False
if x < 0:
negative_ind = True
x = x*-1
rev = 0
while x:
mod = x % 10
        x = x // 10  # floor division; round(x / 10) would round e.g. 1.9 up to 2 and corrupt the digits
rev = rev*10 + mod
if negative_ind == True:
rev = rev * -1
return rev
```
#### File: datastructurealgo/scripts/prime_number.py
```python
def isprime(num):
    # 0, 1 and negative numbers are not prime by definition
    if num < 2:
        return 'is not prime'
    prime_ind = 0
for i in range(1,num+1):
temp = num%i
if i != 1 and i != num and temp == 0:
prime_ind = 1
if prime_ind == 1:
return 'is not prime'
return 'is prime'
``` |
{
"source": "jiaeyan/Chinese-Rhyme",
"score": 3
} |
#### File: Chinese-Rhyme/chrhyme/ryhthm_generator.py
```python
import re
from pkg_resources import resource_filename as rf
from collections import defaultdict
from typing import Set, List, Dict
from chrhyme.parser import word_parser
phrase_dict = {}
with open(rf('chrhyme', 'data/phrase_dict.txt'), 'r', encoding="utf-8") as f:
for line in f:
items = line.strip().split('\t')
phrase_dict[tuple(items[0].split())] = items[1].split()
def generate_rhythms():
while 1:
user_input = input('◼︎ 请输入一个你想押韵的词(按回车退出): ')
if user_input:
word = prune_word(user_input)
if word:
num_char = len(word)
cs = input('\t-是否押声母?例如对于"欢喜",押"还席"不押"惯技"。\n\t 请输入要押声母的字的位置\n\t (0-不押;1-押"欢";2-押"喜";12-押"欢喜"): ')
vs = input('\t-是否押全韵母?例如对于"欢喜",押"端倪"不押"叹息"。\n\t 请输入要押全韵母的字的位置\n\t (0-不押;1-押"欢";2-押"喜";12-押"欢喜"): ')
c_ids = check_positions(cs, num_char)
v_ids = check_positions(vs, num_char)
pinyins = word_parser(word)
candidates = get_candidates(word, pinyins, num_char, c_ids, v_ids)
if candidates:
display_results(candidates)
else:
print('>>> 太可惜了,没有适合押韵的词!请尝试分解押韵,例如将"光明磊落"分为"光明"和"磊落"分别进行查询。')
else:
break
def prune_word(word):
valid_word = "".join(re.findall(r'[\u4E00-\u9FA5]+', word))
if 0 < len(valid_word) < 5:
return valid_word
else:
print(' 词长不合法,请输入一个词长为1至4的汉字词语/短语。')
return ''
def get_candidates(word, pinyins, num_char, c_ids, v_ids) -> Dict[int, List[str]]:
candidates = defaultdict(list)
if num_char == 1:
return single_rhyme(word, pinyins, candidates, c_ids, v_ids)
return multi_rhyme(word, pinyins, num_char, candidates, c_ids, v_ids)
def single_rhyme(target_word, pinyins, candidates, c_ids, v_ids):
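    # Single-character rhyme: scan every key in the phrase dict whose last vowel
    # matches, then filter each candidate word by the consonant/vowel constraints.
    # (This full scan is why the Chinese prompt below warns that it is slow.)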
print('单押检索较慢,请稍等...')
target_vowel = pinyins[0][1][-1]
for k, v in phrase_dict.items():
if k[-1] == target_vowel:
for word in v:
word_pys = word_parser(word)
if word[-1] != target_word \
and match_cv(word_pys[-1:], pinyins, c_ids, v_ids) \
and word not in candidates[len(word)]:
candidates[len(word)].append(word)
return candidates
def multi_rhyme(target_word, pinyins, num_char, candidates, c_ids, v_ids):
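    # Multi-character rhyme: look up candidates directly by the tuple of final
    # vowels, then keep those that satisfy the consonant/full-vowel constraints.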
hash_vowels = tuple([pinyin[1][-1] for pinyin in pinyins])
try:
basic_candidates = phrase_dict[hash_vowels]
except KeyError:
return candidates
parsed_candidates = [(word, word_parser(word)) for word in basic_candidates]
for word, word_pinyins in parsed_candidates:
if word[-num_char:] != target_word \
and match_cv(word_pinyins[-num_char:], pinyins, c_ids, v_ids):
candidates[len(word)].append(word)
return candidates
def check_positions(ps: str, num_char: int) -> Set[str]:
if '0' in ps:
return set()
else:
p_ids_str = "".join(re.findall(r'\d+', ps))
if p_ids_str:
return {num_char - 1 if int(p) >= num_char else int(p) - 1 for p in p_ids_str}
return set()
def match_cv(word_pinyins, target_pinyins, c_ids, v_ids) -> bool:
for c in c_ids:
# if consonant of target pinyin is empty, it matches any consonant in the word pinyin
if target_pinyins[c][0] and word_pinyins[c][0] != target_pinyins[c][0]:
return False
for v in v_ids:
word_v = word_pinyins[v][1]
target_v = target_pinyins[v][1]
# if vowel of target pinyin is single, only need to check it against with the last vowel of word_v
if len(target_v) == 1 and word_v[-1] != target_v[-1]:
return False
elif len(target_v) == 2 and "".join(word_v) != "".join(target_v):
return False
return True
def display_results(candidates):
lens = sorted(candidates.keys())
num_ryhthms = {2: '双押词', 3: '三押词', 4: '四押词'}
for i in lens:
try:
head = num_ryhthms[i]
except KeyError:
head = '俗语词 ({}字)'.format(i)
print('>>> {}:\n{}\n'.format(head, candidates[i]))
``` |
{
"source": "jiaeyan/Convolutional_Neural_Network",
"score": 3
} |
#### File: jiaeyan/Convolutional_Neural_Network/scorer.py
```python
import argparse
import json
from confusion_matrix import ConfusionMatrix, Alphabet
import validator
def evaluate(gold_list, predicted_list):
sense_cm = evaluate_sense(gold_list, predicted_list)
print('Sense classification--------------')
sense_cm.print_summary()
print('Overall parser performance --------------')
precision, recall, f1 = sense_cm.compute_micro_average_f1()
print('Precision %1.4f Recall %1.4f F1 %1.4f' % (precision, recall, f1))
#return connective_cm, arg1_cm, arg2_cm, rel_arg_cm, sense_cm, precision, recall, f1
return sense_cm, precision, recall, f1
def spans_exact_matching(gold_doc_id_spans, predicted_doc_id_spans):
"""Matching two lists of spans
Input:
gold_doc_id_spans : (DocID , a list of lists of tuples of token addresses)
predicted_doc_id_spans : (DocID , a list of lists of token indices)
Returns:
True if the spans match exactly
"""
exact_match = True
gold_docID = gold_doc_id_spans[0]
gold_spans = gold_doc_id_spans[1]
predicted_docID = predicted_doc_id_spans[0]
predicted_spans = predicted_doc_id_spans[1]
for gold_span, predicted_span in zip(gold_spans, predicted_spans):
exact_match = span_exact_matching((gold_docID,gold_span), (predicted_docID, predicted_span)) \
and exact_match
return exact_match
def span_exact_matching(gold_span, predicted_span):
"""Matching two spans
Input:
gold_span : a list of tuples :(DocID, list of tuples of token addresses)
predicted_span : a list of tuples :(DocID, list of token indices)
Returns:
True if the spans match exactly
"""
gold_docID = gold_span[0]
predicted_docID = predicted_span[0]
if gold_docID != predicted_docID:
return False
gold_token_indices = [x[2] for x in gold_span[1]]
predicted_token_indices = predicted_span[1]
return gold_docID == predicted_docID and gold_token_indices == predicted_token_indices
def evaluate_sense(gold_list, predicted_list):
"""Evaluate sense classifier
The label ConfusionMatrix.NEGATIVE_CLASS is for the relations
that are missed by the system
because the arguments don't match any of the gold relations.
"""
sense_alphabet = Alphabet()
valid_senses = validator.identify_valid_senses(gold_list)
for relation in gold_list:
sense = relation['Sense'][0]
if sense in valid_senses:
sense_alphabet.add(sense)
sense_alphabet.add(ConfusionMatrix.NEGATIVE_CLASS)
sense_cm = ConfusionMatrix(sense_alphabet)
gold_to_predicted_map, predicted_to_gold_map = \
_link_gold_predicted(gold_list, predicted_list, spans_exact_matching)
for i, gold_relation in enumerate(gold_list):
gold_sense = gold_relation['Sense'][0]
if gold_sense in valid_senses:
if i in gold_to_predicted_map:
predicted_sense = gold_to_predicted_map[i]['Sense'][0]
if predicted_sense in gold_relation['Sense']:
sense_cm.add(predicted_sense, predicted_sense)
else:
if not sense_cm.alphabet.has_label(predicted_sense):
predicted_sense = ConfusionMatrix.NEGATIVE_CLASS
sense_cm.add(predicted_sense, gold_sense)
else:
sense_cm.add(ConfusionMatrix.NEGATIVE_CLASS, gold_sense)
for i, predicted_relation in enumerate(predicted_list):
if i not in predicted_to_gold_map:
predicted_sense = predicted_relation['Sense'][0]
if not sense_cm.alphabet.has_label(predicted_sense):
predicted_sense = ConfusionMatrix.NEGATIVE_CLASS
sense_cm.add(predicted_sense, ConfusionMatrix.NEGATIVE_CLASS)
return sense_cm
def _link_gold_predicted(gold_list, predicted_list, matching_fn):
"""Link gold standard relations to the predicted relations
A pair of relations are linked when the arg1 and the arg2 match exactly.
We do this because we want to evaluate sense classification later.
Returns:
A tuple of two dictionaries:
1) mapping from gold relation index to predicted relation index
2) mapping from predicted relation index to gold relation index
"""
gold_to_predicted_map = {}
predicted_to_gold_map = {}
gold_arg12_list = [(x['DocID'], (x['Arg1']['TokenList'], x['Arg2']['TokenList']))
for x in gold_list]
predicted_arg12_list = [(x['DocID'], (x['Arg1']['TokenList'], x['Arg2']['TokenList']))
for x in predicted_list]
for gi, gold_span in enumerate(gold_arg12_list):
for pi, predicted_span in enumerate(predicted_arg12_list):
if matching_fn(gold_span, predicted_span):
gold_to_predicted_map[gi] = predicted_list[pi]
predicted_to_gold_map[pi] = gold_list[gi]
return gold_to_predicted_map, predicted_to_gold_map
def main():
parser = argparse.ArgumentParser(
description="Evaluate system's output against the gold standard")
parser.add_argument('gold', help='Gold standard file')
parser.add_argument('predicted', help='System output file')
args = parser.parse_args()
gold_list = [json.loads(x) for x in open(args.gold)]
predicted_list = [json.loads(x) for x in open(args.predicted)]
print('\n================================================')
print('Evaluation for all discourse relations')
print(args.gold)
print(args.predicted)
evaluate(gold_list, predicted_list)
print('\n================================================')
print('Evaluation for explicit discourse relations only')
explicit_gold_list = [x for x in gold_list if x['Type'] == 'Explicit']
explicit_predicted_list = [x for x in predicted_list if x['Type'] == 'Explicit']
evaluate(explicit_gold_list, explicit_predicted_list)
print('\n================================================')
print('Evaluation for non-explicit discourse relations only (Implicit, EntRel, AltLex)')
non_explicit_gold_list = [x for x in gold_list if x['Type'] != 'Explicit']
non_explicit_predicted_list = [x for x in predicted_list if x['Type'] != 'Explicit']
evaluate(non_explicit_gold_list, non_explicit_predicted_list)
if __name__ == '__main__':
main()
```
#### File: jiaeyan/Convolutional_Neural_Network/train.py
```python
import tensorflow as tf
import random
from CNN import CNN
from data_util import *
from prep_data import *
root = '/Users/svenyan/Desktop/CS134-Machine Learning/Projects/Final/cs134-final-project/'
path_dict = {'train':'train/relations.json', 'dev':'dev/relations.json', 'test':'test/relations.json'}
output_f = '/Users/svenyan/Desktop/output8.json'
wdict, cdict = convert2id(root)
# A single training step
def train_step(x_batch, y_batch, cnn, sess):
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: 0.5
}
sess.run([train_op, global_step, cnn.loss, cnn.accuracy], feed_dict)
# A single dev testing step
def dev_step(x_batch, y_batch, cnn, sess):
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: 1.0
}
step, loss, accuracy = sess.run(
[global_step, cnn.loss, cnn.accuracy], feed_dict)
print("step {}, loss {:g}, acc {:g}".format(step, loss, accuracy))
# A single testing step, returns an argmax list of predicted sense labels
def test_step(x_batch, y_batch, cnn, sess):
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: 1.0
}
step, predictions, loss, accuracy = sess.run(
[global_step, cnn.predictions, cnn.loss, cnn.accuracy], feed_dict)
print("step {}, loss {:g}, acc {:g}".format(step, loss, accuracy))
return predictions
def train(train_data, cnn, iter_time):
# Training loop, eval on dev after every loop of the entire training data
for epoch in range(iter_time):
random.shuffle(train_data)
train_xs, train_ys = generateBatches(train_data, cnn.sen_len, cnn.num_class, wdict, cdict, 100)
print('Iteration: ' + str(epoch + 1))
for i in range(len(train_xs)):
train_step(train_xs[i], train_ys[i], cnn, sess)
for i in range(len(dev_xs)):
dev_step(dev_xs[i], dev_ys[i], cnn, sess)
train_data[:] = []
def test(cnn):
test_data = load_data(root, path_dict['test'])
test_xs, test_ys = generateBatches(test_data, cnn.sen_len, cnn.num_class, wdict, cdict, 100)
count = 0
rel_list = []
for i in range(len(test_xs)):
sense_ids = test_step(test_xs[i], test_ys[i], cnn, sess)
for j in range(len(test_xs[i])):
predict = make_json({}, test_data[count], sense_ids, j)
rel_list.append(predict)
count += 1
with open(output_f, 'w') as f:
for rel in rel_list:
f.write(json.dumps(rel) + '\n')
def make_json(predict, gold, sense_ids, j):
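    # Assemble one predicted relation in the JSON format expected by scorer.py,
    # copying spans/type from the gold relation and filling in the predicted sense.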
predict['Arg1'] = {'TokenList':[pos_list[2] for pos_list in gold['Arg1']['TokenList']]}
predict['Arg2'] = {'TokenList':[pos_list[2] for pos_list in gold['Arg2']['TokenList']]}
predict['DocID'] = gold['DocID']
predict['Sense'] = [cdict[sense_ids[j]]]
predict['Type'] = gold['Type']
predict['Connective'] = {'TokenList':gold['Connective']['TokenList']}
return predict
with tf.Graph().as_default():
sess = tf.Session()
with sess.as_default():
# Initialize a CNN instance, feeding hyper-parameters
cnn = CNN(
sen_len = 50,
emb_len = 300,
num_class = len(cdict),
vocab_len = len(wdict),
filter_sizes = [3, 4, 5],
num_filter = 3)
# Define the training procedure
global_step = tf.Variable(0, name = "global_step", trainable = False)
optimizer = tf.train.AdamOptimizer(1e-3)
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Initialize all variables
sess.run(tf.global_variables_initializer())
# Prepare data
train_data = load_data(root, path_dict['train'])
dev_data = load_data(root, path_dict['dev'])
dev_xs, dev_ys = generateBatches(dev_data, cnn.sen_len, cnn.num_class, wdict, cdict, 100)
dev_data[:] = []
# Begin training
train(train_data, cnn, iter_time = 1)
# Begin testing
test(cnn)
``` |
{
"source": "jiaeyan/MachineLearningPlate",
"score": 3
} |
#### File: MachineLearningPlate/hidden_markov_model/hmm.py
```python
import random
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
np.random.seed(42)
random.seed(42)
class HiddenMarkovModel:
def __init__(self):
self.Prior = None
self.Trans = None
self.Emit = None
self.S_dict = None
self._S_dict = None
self.O_dict = None
self.N = None
self.M = None
def load_data(self, X, Y):
ob_set = {ob for x in X for ob in x} | {'<unk>'}
state_set = {state for y in Y for state in y}
self.S_dict = {state: i for i, state in enumerate(state_set)}
self._S_dict = {i: state for state, i in self.S_dict.items()}
self.O_dict = {ob: i for i, ob in enumerate(ob_set)}
self.N = len(self.S_dict)
self.M = len(self.O_dict)
# add-1 laplace to all metrices
self.Prior = np.zeros(self.N) + 1
self.Trans = np.zeros((self.N, self.N)) + 1
self.Emit = np.zeros((self.N, self.M)) + 1
def train(self, X, Y):
self.load_data(X, Y)
for x, y in zip(X, Y):
o_end, s_start, s_end = self.O_dict[x[-1]], self.S_dict[y[0]], self.S_dict[y[-1]]
self.Prior[s_start] += 1
self.Trans[s_end, s_end] += 1
self.Emit[s_end, o_end] += 1
for i in range(len(x) - 1):
o1, s1, s2 = self.O_dict[x[i]], self.S_dict[y[i]], self.S_dict[y[i + 1]]
self.Trans[s1, s2] += 1
self.Emit[s1, o1] += 1
S_count = np.sum(self.Trans, axis=1).reshape(-1, 1)
self.Trans = self.Trans / S_count
self.Emit = self.Emit / (S_count - self.N + self.M)
self.Prior = self.Prior / sum(self.Prior)
def forward(self, x):
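        # Forward trellis: F[s, t] = P(o_1..o_t, state_t = s), built left to right
        # from the priors and accumulated over all predecessor states.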
F = np.zeros((self.N, len(x)))
F[:, 0] = self.Prior * self.Emit[:, x[0]]
for t in range(1, len(x)):
for s in range(self.N):
paths = F[:, t-1] * self.Trans[:, s] * self.Emit[s, x[t]]
F[s, t] = np.sum(paths)
return F
def backward(self, x):
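        # Backward trellis: B[s, t] = P(o_{t+1}..o_T | state_t = s), built right to left.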
B = np.zeros((self.N, len(x)))
B[:, -1] = np.ones(self.N)
for t in range(len(x) - 2, -1, -1):
for s in range(self.N):
paths = B[:, t+1] * self.Trans[s] * self.Emit[:, x[t+1]]
B[s, t] = np.sum(paths)
return B
def viterbi(self, x):
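        # Viterbi: V[s, t] is the best path score ending in state s at time t,
        # and B stores the backpointers used by backtrace() to recover the path.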
V = np.zeros((self.N, len(x)))
B = np.zeros((self.N, len(x)), dtype=int)
V[:, 0] = self.Prior * self.Emit[:, x[0]]
for t in range(1, len(x)):
for s in range(self.N):
paths = V[:, t-1] * self.Trans[:, s] * self.Emit[s, x[t]]
V[s, t] = np.max(paths)
B[s, t] = np.argmax(paths)
return V, B
def backtrace(self, V, B):
best = np.argmax(V[:, -1])
path = []
for t in range(B.shape[1] - 1, -1, -1):
path.append(self._S_dict[best])
best = B[best, t]
return path[::-1]
def featurize(self, x):
'''Handle unk, and convert features to ids.'''
return [self.O_dict[ob] if ob in self.O_dict else self.O_dict['<unk>'] for ob in x]
def likelihood(self, x):
f = self.featurize(x)
F = self.forward(f)
return np.sum(F[:, -1]) # sum(self.backward(f)[:, 0] * self.Prior * self.Emit[:, f[0]])
def decode(self, x):
f = self.featurize(x)
V, B = self.viterbi(f)
return self.backtrace(V, B)
def score(self, X, Y):
Y_true = [s for y in Y for s in y]
Y_pred = [s for x in X for s in self.decode(x)]
print(classification_report(Y_true, Y_pred))
def learn(self, X, Y, iterations=100):
self.load_data(X, Y)
self.init_parameters()
X = [self.featurize(x) for x in X]
for _ in range(iterations):
Gammas, Xis = self.E_step(X)
self.M_step(X, Gammas, Xis)
def init_parameters(self):
self.Prior = np.zeros(self.N) + 1/self.N
self.Trans = np.zeros((self.N, self.N)) + 1/self.N
# To make some init parameters unequal.
# BW algorithm works poor with all equal init parameters.
Sum = self.Trans[0][0] + self.Trans[0][-1]
self.Trans[0][0], self.Trans[0][-1] = Sum / 3, 2 * Sum / 3
self.Emit = np.zeros((self.N, self.M)) + 1/self.M
def E_step(self, X):
Gammas = []
Xis = []
for x in X:
F = self.forward(x)
B = self.backward(x)
Gamma = self.gamma(F, B)
Gammas.append(Gamma)
Xi = self.xi(x, F, B)
Xis.append(Xi)
return Gammas, Xis
def gamma(self, F, B):
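        # gamma[i, t] = P(state_t = i | x): posterior state occupancy computed from the
        # forward and backward trellises, normalized per time step.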
Gamma = F * B
Gamma = Gamma / np.sum(Gamma, 0)
return Gamma
def xi(self, x, F, B):
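        # xi[i, j, t] = P(state_t = i, state_{t+1} = j | x): posterior transition
        # counts used to re-estimate the transition matrix in the M-step.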
Xi = np.zeros((self.N, self.N, len(x) - 1))
for t in range(len(x) - 1):
for i in range(self.N):
for j in range(self.N):
Xi[i, j, t] = F[i, t] * self.Trans[i, j] * self.Emit[j, x[t+1]] * B[j, t+1]
Xi[:, :, t] /= np.sum(np.sum(Xi[:, :, t], 1), 0)
return Xi
def M_step(self, X, Gammas, Xis):
self.learn_prior(X, Gammas)
self.learn_trans(X, Gammas, Xis)
self.learn_emit(X, Gammas)
def learn_prior(self, X, Gammas):
for i in range(self.N):
            gammas = sum(Gammas[xid][i, 0] for xid in range(len(X)))  # builtin sum: np.sum over a generator is deprecated
self.Prior[i] = gammas / len(X)
def learn_trans(self, X, Gammas, Xis):
for i in range(self.N):
            denominator = sum(np.sum(Gammas[xid][i, :len(x) - 1]) for xid, x in enumerate(X))
for j in range(self.N):
                numerator = sum(np.sum(Xis[xid][i, j, :len(x) - 1]) for xid, x in enumerate(X))
self.Trans[i, j] = numerator / denominator
def learn_emit(self, X, Gammas):
for j in range(self.N):
            denominator = sum(np.sum(Gammas[xid][j]) for xid in range(len(X)))
for k in range(self.M):
numerator = 0.0
for xid, x in enumerate(X):
for t in range(len(x)):
if x[t] == k:
numerator += Gammas[xid][j, t]
self.Emit[j, k] = numerator / denominator
def generate_data():
from nltk.corpus import brown
X = []
Y = []
for sent in brown.tagged_sents():
x = []
y = []
for w, pos in sent:
x.append(w)
y.append(pos)
X.append(x)
Y.append(y)
X_train, X_val, Y_train, Y_val = train_test_split(X, Y)
return X_train, X_val, Y_train, Y_val
def test_learn():
hmm = HiddenMarkovModel()
X = [
['he', 'want', 'to', 'eat', 'food'],
['John', 'eat', 'food'],
['he', 'want', 'food'],
['John', 'want', 'food']
]
Y = [
['PRON', 'VB', 'TO', 'VB', 'NN', 'NN', 'VB', 'PRON', 'VB', 'NN', 'TO', 'VB'],
['NNP', 'VB', 'NN'],
['PRON', 'VB', 'NN'],
['NNP', 'VB', 'NN']
]
hmm.learn(X, Y, iterations=50)
print(hmm.decode(['John', 'want', 'to', 'eat']))
def test_train():
X_train, X_val, Y_train, Y_val = generate_data()
hmm = HiddenMarkovModel()
hmm.train(X_train, Y_train)
print(np.sum(hmm.Trans, axis=1))
print(np.sum(hmm.Emit, axis=1))
print(np.sum(hmm.Prior))
x = X_val[0]
y = Y_val[0]
print('Instance:', x)
print('True labels:', y)
print('Predicted labels:', hmm.decode(x))
ch_x = hmm.featurize(x)
B = hmm.backward(ch_x)
print('Forward prob:', hmm.likelihood(x))
print('Backward prob:', sum(B[:, 0] * hmm.Prior * hmm.Emit[:, ch_x[0]]))
print()
hmm.score(X_val[:10], Y_val[:10])
print(hmm.decode(['John', 'want', 'to', 'eat']))
if __name__ == '__main__':
# test_learn()
test_train()
```
#### File: MachineLearningPlate/k_nearest_neighbor/knn.py
```python
import numpy as np
from math import sqrt
from collections import Counter
from sklearn.metrics import classification_report
np.random.seed(42)
class KNN:
def __init__(self, k=5):
self.k = k
self.data = None
self.labels = None
def load_data(self, X, Y):
self.data = X
self.labels = Y
def distance(self, p1, p2):
return sqrt(np.sum(np.power(p1 - p2, 2)))
def vote(self, Y):
Y_dict = Counter(Y)
return Y_dict.most_common()[0][0]
def predict(self, x):
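        # Classify x by majority vote among the k training points with the
        # smallest Euclidean distance to x.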
dists = [self.distance(x, point) for point in self.data]
# get the index list which is sorted by values on according positions
ids = np.argsort(dists)
top_k_ids = ids[: self.k]
top_k_labels = self.labels[top_k_ids]
return self.vote(top_k_labels)
def score(self, X, Y):
Y_pred = [self.predict(x) for x in X]
print(classification_report(Y, Y_pred))
def generate_digit_data():
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
digits = load_digits()
X = digits.data
Y = digits.target
X_train, X_val, Y_train, Y_val = train_test_split(X, Y)
print('Train data shape', X_train.shape)
print('Validation data shape', X_val.shape)
return X_train, X_val, Y_train, Y_val
if __name__ == '__main__':
X_train, X_val, Y_train, Y_val = generate_digit_data()
knn = KNN()
knn.load_data(X_train, Y_train)
knn.score(X_val, Y_val)
```
#### File: MachineLearningPlate/linear_regression/linear_regression.py
```python
import numpy as np
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
np.random.seed(42)
"""
BGD needs much more iterations than SGD/MBGD.
"""
class LinearRegression:
def __init__(self, learning_rate=0.001, epoch=300, batch_size=150):
self.lr = learning_rate
self.epoch = epoch
self.batch_size = batch_size
self.w = None
self.b = None
def initialize_weights(self, num_features):
self.w = np.random.randn(num_features)
self.b = np.random.randint(5)
def train(self, X_train, X_val, Y_train, Y_val, mode='SGD'):
self.initialize_weights(X_train.shape[1])
if mode == 'BGD':
self.train_BGD(X_train, X_val, Y_train, Y_val)
elif mode == 'SGD':
self.train_SGD(X_train, X_val, Y_train, Y_val)
elif mode == 'MBGD':
self.train_MBGD(X_train, X_val, Y_train, Y_val)
elif mode == 'NE':
self.train_NE(X_train, X_val, Y_train, Y_val)
def predict(self, X):
# broadcast multiplication of weight vector to sample features,
# sum each row to get predictions.
# pred_y here is a row vector.
return np.sum(self.w * X, axis=1) + self.b
def compute_loss(self, X, Y):
Y_pred = self.predict(X)
return (1 / (2 * len(X))) * np.sum((Y_pred - Y) ** 2)
def show_loss(self, X_train, X_val, Y_train, Y_val, epoch_num):
train_loss = self.compute_loss(X_train, Y_train)
val_loss = self.compute_loss(X_val, Y_val)
print("Training loss at epoch {}: {}".format(epoch_num, train_loss))
print("Validation loss at eopch {}: {}\n".format(epoch_num, val_loss))
def update_weights(self, X, Y):
# broadcast multiplication of error diff of each sample to all its features
# then accumulate all errors of each feature weight, update
Y_pred = self.predict(X)
error_diffs = (Y_pred - Y).reshape(-1, 1)
d_w = (1 / len(X)) * np.sum(error_diffs * X, axis=0)
d_b = (1 / len(X)) * np.sum(error_diffs)
self.w -= self.lr * d_w
self.b -= self.lr * d_b
def shuffle_data(self, X, Y):
shuffled_index = np.random.permutation(len(X))
X = X[shuffled_index]
Y = Y[shuffled_index]
return X, Y
def train_BGD(self, X_train, X_val, Y_train, Y_val):
for i in range(1, self.epoch + 1):
# Step 1: compute train and validation loss
if i % 100 == 0 or i == 1:
self.show_loss(X_train, X_val, Y_train, Y_val, i)
# Step 2: compute gradient and update weights
self.update_weights(X_train, Y_train)
def train_MBGD(self, X_train, X_val, Y_train, Y_val):
# construct a batch generator
def batch_generator(X, Y):
num_samples = len(X)
for i in range(0, num_samples, self.batch_size):
yield X[i: min(i + self.batch_size, num_samples)], \
Y[i: min(i + self.batch_size, num_samples)]
for i in range(1, self.epoch + 1):
# Step 1: compute train and validation loss
if i % 100 == 0 or i == 1:
self.show_loss(X_train, X_val, Y_train, Y_val, i)
# Step 2: shuffle data
X_train, Y_train = self.shuffle_data(X_train, Y_train)
# Step 3: compute gradients and update weights
for X_batch, Y_batch in batch_generator(X_train, Y_train):
self.update_weights(X_batch, Y_batch)
def train_SGD(self, X_train, X_val, Y_train, Y_val):
for i in range(1, self.epoch + 1):
# Step 1: compute train and validation loss
if i % 100 == 0 or i == 1:
self.show_loss(X_train, X_val, Y_train, Y_val, i)
# Step 2: shuffle data
X_train, Y_train = self.shuffle_data(X_train, Y_train)
# Step 3: compute gradients and update weights
for x, y in zip(X_train, Y_train):
self.update_weights([x], [y])
def train_NE(self, X_train, X_val, Y_train, Y_val):
self.show_loss(X_train, X_val, Y_train, Y_val, 0)
# add bias terms to all samples, default as 1
X_train_b = np.c_[np.ones((len(X_train), 1)), X_train]
# conduct normal equations
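        # theta = (X_b^T X_b)^{-1} X_b^T y, where X_b is X with a leading column of ones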
a = np.dot(X_train_b.T, X_train_b)
b = np.linalg.inv(a)
c = np.dot(b, X_train_b.T)
theta = np.dot(c, Y_train)
# the 1st item is the bias
self.b, self.w = theta[0], theta[1:]
self.show_loss(X_train, X_val, Y_train, Y_val, 1)
def score(self, X, Y):
Y_pred = self.predict(X)
self.regression_report(Y, Y_pred)
def regression_report(self, Y_true, Y_pred):
explained_variance=metrics.explained_variance_score(Y_true, Y_pred)
mean_absolute_error=metrics.mean_absolute_error(Y_true, Y_pred)
mse=metrics.mean_squared_error(Y_true, Y_pred)
mean_squared_log_error=metrics.mean_squared_log_error(Y_true, Y_pred)
median_absolute_error=metrics.median_absolute_error(Y_true, Y_pred)
r2=metrics.r2_score(Y_true, Y_pred)
print('Explained_variance: ', round(explained_variance,4))
print('Mean_squared_log_error: ', round(mean_squared_log_error,4))
print('Median_absolute_error: ', round(median_absolute_error))
print('R2: ', round(r2,4))
print('MAE: ', round(mean_absolute_error,4))
print('MSE: ', round(mse,4))
print('RMSE: ', round(np.sqrt(mse),4))
def generate_parameters(num_features):
w = np.random.randn(num_features)
b = np.random.randint(5)
return w, b
def generate_data(num_samples, num_features, w, b):
# each feature satisfies normal distribution, with different means
# and standard deviations
X = np.array([np.random.normal(
loc=np.random.randint(10),
scale=np.random.random() * 5,
size=num_samples)
for _ in range(num_features)]).T
Y = np.sum(w * X, axis=1) + b
X_train, X_val, Y_train, Y_val = train_test_split(X, Y)
print('Train data shape', X_train.shape)
print('Validation data shape', X_val.shape)
return X_train, X_val, Y_train, Y_val
def generate_house_data():
    from sklearn.datasets import fetch_california_housing  # the private california_housing module path was removed in newer scikit-learn
from sklearn.preprocessing import StandardScaler
houses = fetch_california_housing()
scaler = StandardScaler()
scaled_data = scaler.fit_transform(houses.data)
X_train, X_val, Y_train, Y_val = train_test_split(scaled_data, houses.target)
print('Train data shape', X_train.shape)
print('Validation data shape', X_val.shape)
return X_train, X_val, Y_train, Y_val
if __name__ == '__main__':
# W, B = generate_parameters(5)
X_train, X_val, Y_train, Y_val = generate_house_data()
# X_train, X_val, Y_train, Y_val = generate_data(1000, 5, W, B)
lr = LinearRegression()
lr.train(X_train, X_val, Y_train, Y_val, mode='MBGD')
# lr.train(X_train, X_val, Y_train, Y_val, mode='SGD')
# lr.train(X_train, X_val, Y_train, Y_val, mode='BGD')
# lr.train(X_train, X_val, Y_train, Y_val, mode='NE')
# print('Pred weights:', lr.w)
# print('True weights:', W)
# print('Pred bias:', lr.b)
# print('True bias:', B)
lr.score(X_val, Y_val)
```
#### File: MachineLearningPlate/maximum_entropy/maximum_entropy.py
```python
import numpy as np
from math import exp
from math import log
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
np.random.seed(42)
class MaximumEntropy:
def __init__(self, learning_rate=0.001, epoch=500, batch_size=150):
self.lr = learning_rate
self.epoch = epoch
self.batch_size = batch_size
self.W = None
self.X_dict = None
self.Y_dict = None
def load_data(self, X, Y):
feat_set = {feat for x in X for feat in x}
self.X_dict = {feat: i for i, feat in enumerate(feat_set)}
self.Y_dict = {y: i for i, y in enumerate(set(Y))}
self.W = np.zeros((len(self.Y_dict), len(self.X_dict)))
def remake_data(self, X, Y):
X = np.array([self.featurize(x) for x in X])
Y = np.array(Y)
return X, Y
def featurize(self, x):
feat_vec = np.zeros(len(self.X_dict))
for feat in x:
if feat in self.X_dict:
feat_vec[self.X_dict[feat]] = 1
return feat_vec
def shuffle_data(self, X, Y):
shuffled_index = np.random.permutation(len(X))
X = X[shuffled_index]
Y = Y[shuffled_index]
return X, Y
def compute_loss(self, X, Y):
"""loss = negative log likelihood"""
loss = -sum([log(self.posterior(x, y)) for x, y in zip(X, Y)])
return loss
def show_loss(self, X_train, X_val, Y_train, Y_val, epoch_num):
train_loss = self.compute_loss(X_train, Y_train)
val_loss = self.compute_loss(X_val, Y_val)
print("Training loss at epoch {}: {}".format(epoch_num, train_loss))
print("Validation loss at eopch {}: {}\n".format(epoch_num, val_loss))
def predict(self, x):
feat_vec = self.featurize(x)
ret = [(np.dot(self.W[i], feat_vec), y) for y, i in self.Y_dict.items()]
return max(ret)[1]
def train(self, X_train, X_val, Y_train, Y_val):
# construct a batch generator
def batch_generator(X, Y):
num_samples = len(X)
for i in range(0, num_samples, self.batch_size):
yield X[i: min(i + self.batch_size, num_samples)], \
Y[i: min(i + self.batch_size, num_samples)]
self.load_data(X_train, Y_train)
X_train, Y_train = self.remake_data(X_train, Y_train)
X_val, Y_val = self.remake_data(X_val, Y_val)
for i in range(1, self.epoch + 1):
# Step 1: compute train and validation loss
if i % 100 == 0 or i == 1:
self.show_loss(X_train, X_val, Y_train, Y_val, i)
# Step 2: shuffle data
X_train, Y_train = self.shuffle_data(X_train, Y_train)
# Step 3: compute gradient and update weights
for X_batch, Y_batch in batch_generator(X_train, Y_train):
self.update_weights(X_batch, Y_batch)
def posterior(self, x, y):
"""Compute p(y|x) by softmax."""
y_prob = exp(np.dot(self.W[self.Y_dict[y]], x))
        z = sum([exp(np.dot(self.W[i], x)) for i in self.Y_dict.values()])
return y_prob / z
def update_weights(self, X, Y):
# observed expectations from data
ob_exp = np.zeros(self.W.shape)
# model expectations from model parameters
model_exp = np.zeros(self.W.shape)
for i in range(len(X)):
ob_exp[self.Y_dict[Y[i]]] += X[i]
for y, y_id in self.Y_dict.items():
model_exp[y_id] += X[i] * self.posterior(X[i], y)
self.W -= self.lr * (model_exp - ob_exp)
def score(self, X, Y):
Y_pred = [self.predict(x) for x in X]
print(classification_report(Y, Y_pred))
def generate_data():
dataset = [['no', 'sunny', 'hot', 'high', 'FALSE'],
['no', 'sunny', 'hot', 'high', 'TRUE'],
['yes', 'overcast', 'hot', 'high', 'FALSE'],
['yes', 'rainy', 'mild', 'high', 'FALSE'],
['yes', 'rainy', 'cool', 'normal', 'FALSE'],
['no', 'rainy', 'cool', 'normal', 'TRUE'],
['yes', 'overcast', 'cool', 'normal', 'TRUE'],
['no', 'sunny', 'mild', 'high', 'FALSE'],
['yes', 'sunny', 'cool', 'normal', 'FALSE'],
['yes', 'rainy', 'mild', 'normal', 'FALSE'],
['yes', 'sunny', 'mild', 'normal', 'TRUE'],
['yes', 'overcast', 'mild', 'high', 'TRUE'],
['yes', 'overcast', 'hot', 'normal', 'FALSE'],
['no', 'rainy', 'mild', 'high', 'TRUE']]
X = []
Y = []
for data in dataset:
X.append(data[1:])
Y.append(data[0])
return X, Y
def generate_news_data():
def split_data(dataset):
dataset = [data.lower().split() for data in dataset]
return dataset
from sklearn.datasets import fetch_20newsgroups
news = fetch_20newsgroups()
X = news.data
Y = news.target
X_train, X_val, Y_train, Y_val = train_test_split(X, Y)
    print('Train data size', len(X_train))
    print('Validation data size', len(X_val))
return split_data(X_train), split_data(X_val), Y_train, Y_val
if __name__ == '__main__':
X, Y = generate_data()
maxent = MaximumEntropy(batch_size=5)
maxent.train(X, X, Y, Y)
maxent.score(X, Y)
print(maxent.predict(['sunny', 'hot', 'high', 'FALSE']))
# X_train, X_val, Y_train, Y_val = generate_news_data()
# maxent = MaximumEntropy(batch_size=1024)
# maxent.train(X_train, X_val, Y_train, Y_val)
# maxent.score(X_val, Y_val)
``` |
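The `posterior` and `update_weights` methods above evaluate the softmax p(y|x) and the (model expectation − observed expectation) gradient one sample at a time. A minimal vectorized sketch of the same softmax step, independent of the class and with illustrative shapes only:

```python
import numpy as np

def softmax_posterior(W, X):
    """W: (num_labels, num_feats), X: (batch, num_feats) of 0/1 feature indicators."""
    scores = X @ W.T                             # (batch, num_labels)
    scores -= scores.max(axis=1, keepdims=True)  # numerical stability
    expd = np.exp(scores)
    return expd / expd.sum(axis=1, keepdims=True)

W = np.zeros((3, 5))
X = np.array([[1, 0, 1, 0, 0], [0, 1, 0, 0, 1]], dtype=float)
print(softmax_posterior(W, X))  # uniform 1/3 rows while all weights are zero
```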
{
"source": "jiafangjun/DD_KaoRou",
"score": 2
} |
#### File: DD_KaoRou/utils/asyncTable.py
```python
import time
from PySide2.QtWidgets import QWidget, QMainWindow, QGridLayout, QFileDialog, QToolBar,\
QAction, QDialog, QStyle, QSlider, QLabel, QPushButton, QStackedWidget, QHBoxLayout,\
QLineEdit, QTableWidget, QAbstractItemView, QTableWidgetItem, QGraphicsTextItem, QMenu,\
QGraphicsScene, QGraphicsView, QGraphicsDropShadowEffect, QComboBox, QMessageBox, QColorDialog
from PySide2.QtMultimedia import QMediaPlayer
from PySide2.QtMultimediaWidgets import QGraphicsVideoItem
from PySide2.QtGui import QIcon, QKeySequence, QFont, QBrush, QColor
from PySide2.QtCore import Qt, QTimer, QEvent, QPoint, Signal, QSizeF, QUrl, QThread
def cnt2Time(cnt, interval, value=0):
'''
    Receive an int count, an interval in milliseconds and an optional start offset;
    return a list of str time labels, one per step,
    formatted as m:ss.cc (the last millisecond digit is dropped).
'''
labels = []
for i in range(value, cnt + value):
m, s = divmod(i * interval, 60000)
s, ms = divmod(s, 1000)
labels.append(('%s:%02d.%03d' % (m, s, ms))[:-1])
return labels
class refillVerticalLabel(QThread):
def __init__(self, value, globalInterval, subtitle, parent=None):
super(refillVerticalLabel, self).__init__(parent)
self.value = value - 1
self.globalInterval = globalInterval
self.oldInterval = self.globalInterval
self.subtitle = subtitle
def setGlobalInterval(self, globalInterval):
self.globalInterval = globalInterval
def run(self):
while 1:
scrollValue = self.subtitle.verticalScrollBar().value()
if scrollValue != self.oldInterval:
print(scrollValue)
self.oldInterval = scrollValue
refillToken = False
for y in range(scrollValue - 1, scrollValue + 60):
if not self.subtitle.verticalHeaderItem(y):
refillToken = True
break
if refillToken:
for cnt, label in enumerate(cnt2Time(60, self.globalInterval, self.value)):
self.subtitle.setVerticalHeaderItem(self.value + cnt, QTableWidgetItem(label))
time.sleep(0.000001)
time.sleep(20)
class asyncTable(QThread):
reconnect = Signal()
def __init__(self, subtitleDict, oldInterval, globalInterval, duration, subtitle, autoSub, tablePreset, position, parent=None):
super(asyncTable, self).__init__(parent)
self.subtitleDict = subtitleDict
self.oldInterval = oldInterval
self.globalInterval = globalInterval
self.duration = duration
self.subtitle = subtitle
self.autoSub = autoSub
self.tablePreset = tablePreset
self.position = position
def initSubtitle(self):
# for index, subData in self.subtitleDict.items():
# for start, rowData in subData.items():
# if start >= 0:
# startRow = start // self.oldInterval
# deltaRow = rowData[0] // self.oldInterval
# for y in range(startRow, startRow + deltaRow + 1):
# self.subtitle.setItem(y, index, QTableWidgetItem(''))
        #                 self.subtitle.item(y, index).setBackground(QBrush(QColor('#232629'))) # fill everything black
# if self.subtitle.rowspan(y, index) > 1:
# self.subtitle.setSpan(y, index, 1, 1)
self.subtitle.clear()
        self.subtitle.setRowCount(self.duration // self.globalInterval + 1) # reset the table row count
        for t in self.autoSub: # re-mark the positions recognized by the AI
start, end = t
startRow = start // self.globalInterval
endRow = end // self.globalInterval
if self.tablePreset[1]:
self.subtitle.setItem(startRow, 0, QTableWidgetItem(self.tablePreset[0]))
try:
self.subtitle.item(startRow, 0).setBackground(QBrush(QColor('#35545d')))
except:
pass
self.subtitle.setSpan(startRow, 0, endRow - startRow, 1)
if self.tablePreset[0]:
self.subtitleDict[0][start] = [end - start, self.tablePreset[0]]
else:
for y in range(startRow, endRow):
self.subtitle.setItem(y, 0, QTableWidgetItem(self.tablePreset[0]))
try:
self.subtitle.item(y, 0).setBackground(QBrush(QColor('#35545d')))
except:
pass
if self.tablePreset[0]:
self.subtitleDict[0][y * self.globalInterval] = [self.globalInterval, self.tablePreset[0]]
scrollValue = self.subtitle.verticalScrollBar().value() - 1
for cnt, label in enumerate(cnt2Time(60, self.globalInterval, scrollValue)):
self.subtitle.setVerticalHeaderItem(scrollValue + cnt, QTableWidgetItem(label))
time.sleep(0.000001)
        # for cnt, label in enumerate(cnt2Time(200, self.globalInterval)): # only draw the first 200; remaining row labels are generated dynamically as the user drags the scrollbar
# self.subtitle.setVerticalHeaderItem(cnt, QTableWidgetItem(label))
# time.sleep(0.000000001)
def run(self):
self.initSubtitle()
for index, subData in self.subtitleDict.items():
for start, rowData in subData.items():
startRow = start // self.globalInterval
deltaRow = rowData[0] // self.globalInterval
if deltaRow:
endRow = startRow + deltaRow
for row in range(startRow, endRow):
self.subtitle.setItem(row, index, QTableWidgetItem(rowData[1]))
if row >= 0:
self.subtitle.item(row, index).setBackground(QBrush(QColor('#35545d')))
if endRow - startRow > 1:
self.subtitle.setSpan(startRow, index, endRow - startRow, 1)
row = self.position // self.globalInterval
self.subtitle.selectRow(row)
self.subtitle.verticalScrollBar().setValue(row - 10)
self.reconnect.emit()
``` |
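For reference, a self-contained worked check of the `cnt2Time` arithmetic used above, inlined so it does not require PySide2; the trailing millisecond digit is dropped, so labels carry centisecond precision:

```python
# Reproduces cnt2Time(3, 500): steps i = 0, 1, 2 at 500 ms each.
expected = ['0:00.00', '0:00.50', '0:01.00']
labels = []
for i in range(3):
    m, s = divmod(i * 500, 60000)
    s, ms = divmod(s, 1000)
    labels.append(('%s:%02d.%03d' % (m, s, ms))[:-1])
assert labels == expected
```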
{
"source": "jiafatom/onnxmltools",
"score": 2
} |
#### File: onnxmltools/convert/main.py
```python
import onnx
from .common import utils
import warnings
def convert_coreml(model, name=None, initial_types=None, doc_string='', target_opset=None,
targeted_onnx=onnx.__version__, custom_conversion_functions=None, custom_shape_calculators=None):
if not utils.coreml_installed():
raise RuntimeError('coremltools is not installed. Please install coremltools to use this feature.')
from .coreml.convert import convert
return convert(model, name, initial_types, doc_string, target_opset, targeted_onnx,
custom_conversion_functions, custom_shape_calculators)
def convert_keras(model, name=None, initial_types=None, doc_string='',
target_opset=None, targeted_onnx=onnx.__version__,
channel_first_inputs=None, custom_conversion_functions=None, custom_shape_calculators=None,
default_batch_size=1):
if not utils.keras2onnx_installed():
raise RuntimeError('keras2onnx is not installed. Please install it to use this feature.')
if custom_conversion_functions:
warnings.warn('custom_conversion_functions is not supported any more. Please set it to None.')
from keras2onnx import convert_keras as convert
return convert(model, name, doc_string, target_opset, channel_first_inputs)
def convert_libsvm(model, name=None, initial_types=None, doc_string='', target_opset=None,
targeted_onnx=onnx.__version__, custom_conversion_functions=None, custom_shape_calculators=None):
if not utils.libsvm_installed():
raise RuntimeError('libsvm is not installed. Please install libsvm to use this feature.')
from .libsvm.convert import convert
return convert(model, name, initial_types, doc_string, target_opset, targeted_onnx,
custom_conversion_functions, custom_shape_calculators)
def convert_catboost(model, name=None, initial_types=None, doc_string='', target_opset=None):
try:
from catboost.utils import convert_to_onnx_object
except ImportError:
raise RuntimeError('CatBoost is not installed or needs to be updated. '
'Please install/upgrade CatBoost to use this feature.')
return convert_to_onnx_object(model, export_parameters={'onnx_doc_string': doc_string, 'onnx_graph_name': name},
initial_types=initial_types, target_opset=target_opset)
def convert_lightgbm(model, name=None, initial_types=None, doc_string='', target_opset=None,
targeted_onnx=onnx.__version__, custom_conversion_functions=None,
custom_shape_calculators=None, without_onnx_ml=False):
if not utils.lightgbm_installed():
raise RuntimeError('lightgbm is not installed. Please install lightgbm to use this feature.')
from .lightgbm.convert import convert
return convert(model, name, initial_types, doc_string, target_opset, targeted_onnx,
custom_conversion_functions, custom_shape_calculators, without_onnx_ml)
def convert_sklearn(model, name=None, initial_types=None, doc_string='', target_opset=None,
targeted_onnx=onnx.__version__, custom_conversion_functions=None, custom_shape_calculators=None):
if not utils.sklearn_installed():
raise RuntimeError('scikit-learn is not installed. Please install scikit-learn to use this feature.')
if not utils.skl2onnx_installed():
raise RuntimeError('skl2onnx is not installed. Please install skl2onnx to use this feature.')
from skl2onnx.convert import convert_sklearn as convert_skl2onnx
return convert_skl2onnx(model, name, initial_types, doc_string, target_opset,
custom_conversion_functions, custom_shape_calculators)
def convert_sparkml(model, name=None, initial_types=None, doc_string='', target_opset=None,
targeted_onnx=onnx.__version__, custom_conversion_functions=None,
custom_shape_calculators=None, spark_session=None):
if not utils.sparkml_installed():
raise RuntimeError('Spark is not installed. Please install Spark to use this feature.')
from .sparkml.convert import convert
return convert(model, name, initial_types, doc_string, target_opset, targeted_onnx,
custom_conversion_functions, custom_shape_calculators, spark_session)
def convert_xgboost(*args, **kwargs):
if not utils.xgboost_installed():
raise RuntimeError('xgboost is not installed. Please install xgboost to use this feature.')
from .xgboost.convert import convert
return convert(*args, **kwargs)
def convert_h2o(*args, **kwargs):
if not utils.h2o_installed():
raise RuntimeError('h2o is not installed. Please install h2o to use this feature.')
from .h2o.convert import convert
return convert(*args, **kwargs)
def _collect_input_nodes(graph, outputs):
nodes_to_keep = set()
input_nodes = set()
node_inputs = [graph.get_tensor_by_name(ts_).op for ts_ in outputs]
while node_inputs:
nd_ = node_inputs[0]
del node_inputs[0]
if nd_.type in ['Placeholder', "PlaceholderV2", 'PlaceholderWithDefault']:
input_nodes.add(nd_)
if nd_ in nodes_to_keep:
continue
nodes_to_keep.add(nd_)
node_inputs.extend(in_.op for in_ in nd_.inputs)
return input_nodes, nodes_to_keep
def _convert_tf_wrapper(frozen_graph_def,
name=None, input_names=None, output_names=None,
doc_string='',
target_opset=None,
channel_first_inputs=None,
debug_mode=False, custom_op_conversions=None):
"""
    Convert a frozen tensorflow graph def into an ONNX model proto, analogous to the keras converter.
    :param frozen_graph_def: the frozen tensorflow graph
:param name: the converted onnx model internal name
:param input_names: the inputs name list of the model
:param output_names: the output name list of the model
:param doc_string: doc string
:param target_opset: the targeted onnx model opset
:param channel_first_inputs: A list of channel first input (not supported yet)
:param debug_mode: will enable the log and try to convert as much as possible on conversion
    :return: an ONNX ModelProto
"""
import tensorflow as tf
import tf2onnx
if target_opset is None:
target_opset = onnx.defs.onnx_opset_version()
if not doc_string:
doc_string = "converted from {}".format(name)
tf_graph_def = tf2onnx.tfonnx.tf_optimize(input_names, output_names, frozen_graph_def, True)
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(tf_graph_def, name='')
if not input_names:
input_nodes = list(_collect_input_nodes(tf_graph, output_names)[0])
input_names = [nd_.outputs[0].name for nd_ in input_nodes]
g = tf2onnx.tfonnx.process_tf_graph(tf_graph,
continue_on_error=debug_mode,
opset=target_opset,
custom_op_handlers=custom_op_conversions,
inputs_as_nchw=channel_first_inputs,
output_names=output_names,
input_names=input_names)
onnx_graph = tf2onnx.optimizer.optimize_graph(g)
model_proto = onnx_graph.make_model(doc_string)
return model_proto
def convert_tensorflow(frozen_graph_def,
name=None, input_names=None, output_names=None,
doc_string='',
target_opset=None,
channel_first_inputs=None,
debug_mode=False, custom_op_conversions=None):
import pkgutil
if not pkgutil.find_loader('tf2onnx'):
raise RuntimeError('tf2onnx is not installed, please install it before calling this function.')
return _convert_tf_wrapper(frozen_graph_def, name, input_names, output_names, doc_string,
target_opset, channel_first_inputs, debug_mode, custom_op_conversions)
```
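A minimal usage sketch for the wrappers above, exercising `convert_sklearn` on a small scikit-learn model; it assumes scikit-learn, skl2onnx and onnxruntime are installed, and the tensor name 'input' is an arbitrary choice:

```python
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from onnxruntime import InferenceSession

from onnxmltools.convert import convert_sklearn
from onnxmltools.convert.common.data_types import FloatTensorType

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=200).fit(X, y)

onnx_model = convert_sklearn(
    clf, name="iris-logreg",
    initial_types=[("input", FloatTensorType([None, X.shape[1]]))])

# Round-trip check: run the exported graph and print the predicted labels.
sess = InferenceSession(onnx_model.SerializeToString())
print(sess.run(None, {"input": X[:3].astype(np.float32)})[0])
```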
#### File: tests/xgboost/test_xgboost_converters.py
```python
import os
import unittest
import numpy as np
import pandas
from sklearn.datasets import (
load_diabetes, load_iris, make_classification, load_digits)
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor, XGBClassifier, train, DMatrix
from sklearn.preprocessing import StandardScaler
from onnxmltools.convert import convert_xgboost
from onnxmltools.convert.common.data_types import FloatTensorType
from onnxmltools.utils import dump_data_and_model
from onnxruntime import InferenceSession
def _fit_classification_model(model, n_classes, is_str=False, dtype=None):
x, y = make_classification(n_classes=n_classes, n_features=100,
n_samples=1000,
random_state=42, n_informative=7)
    y = y.astype(str) if is_str else y.astype(np.int64)
x_train, x_test, y_train, _ = train_test_split(x, y, test_size=0.5,
random_state=42)
if dtype is not None:
y_train = y_train.astype(dtype)
model.fit(x_train, y_train)
return model, x_test.astype(np.float32)
class TestXGBoostModels(unittest.TestCase):
def test_xgb_regressor(self):
iris = load_diabetes()
x = iris.data
y = iris.target
x_train, x_test, y_train, _ = train_test_split(x, y, test_size=0.5,
random_state=42)
xgb = XGBRegressor()
xgb.fit(x_train, y_train)
conv_model = convert_xgboost(
xgb, initial_types=[('input', FloatTensorType(shape=[None, None]))])
self.assertTrue(conv_model is not None)
dump_data_and_model(
x_test.astype("float32"),
xgb,
conv_model,
basename="SklearnXGBRegressor-Dec3",
allow_failure="StrictVersion("
"onnx.__version__)"
"< StrictVersion('1.3.0')",
)
def test_xgb_classifier(self):
xgb, x_test = _fit_classification_model(XGBClassifier(), 2)
conv_model = convert_xgboost(
xgb, initial_types=[('input', FloatTensorType(shape=[None, None]))])
self.assertTrue(conv_model is not None)
dump_data_and_model(
x_test,
xgb,
conv_model,
basename="SklearnXGBClassifier",
allow_failure="StrictVersion("
"onnx.__version__)"
"< StrictVersion('1.3.0')",
)
def test_xgb_classifier_uint8(self):
xgb, x_test = _fit_classification_model(
XGBClassifier(), 2, dtype=np.uint8)
conv_model = convert_xgboost(
xgb, initial_types=[('input', FloatTensorType(shape=['None', 'None']))])
self.assertTrue(conv_model is not None)
dump_data_and_model(
x_test,
xgb,
conv_model,
basename="SklearnXGBClassifier",
allow_failure="StrictVersion("
"onnx.__version__)"
"< StrictVersion('1.3.0')",
)
def test_xgb_classifier_multi(self):
xgb, x_test = _fit_classification_model(XGBClassifier(), 3)
conv_model = convert_xgboost(
xgb, initial_types=[('input', FloatTensorType(shape=[None, None]))])
self.assertTrue(conv_model is not None)
dump_data_and_model(
x_test,
xgb,
conv_model,
basename="SklearnXGBClassifierMulti",
allow_failure="StrictVersion("
"onnx.__version__)"
"< StrictVersion('1.3.0')",
)
def test_xgb_classifier_multi_reglog(self):
xgb, x_test = _fit_classification_model(
XGBClassifier(objective='reg:logistic'), 4)
conv_model = convert_xgboost(
xgb, initial_types=[('input', FloatTensorType(shape=[None, None]))])
self.assertTrue(conv_model is not None)
dump_data_and_model(
x_test,
xgb,
conv_model,
basename="SklearnXGBClassifierMultiRegLog",
allow_failure="StrictVersion("
"onnx.__version__)"
"< StrictVersion('1.3.0')",
)
def test_xgb_classifier_reglog(self):
xgb, x_test = _fit_classification_model(
XGBClassifier(objective='reg:logistic'), 2)
conv_model = convert_xgboost(
xgb, initial_types=[('input', FloatTensorType(shape=[None, None]))])
self.assertTrue(conv_model is not None)
dump_data_and_model(
x_test,
xgb,
conv_model,
basename="SklearnXGBClassifierRegLog",
allow_failure="StrictVersion("
"onnx.__version__)"
"< StrictVersion('1.3.0')",
)
def test_xgb_classifier_multi_str_labels(self):
xgb, x_test = _fit_classification_model(
XGBClassifier(n_estimators=4), 5, is_str=True)
conv_model = convert_xgboost(
xgb, initial_types=[('input', FloatTensorType(shape=[None, None]))])
self.assertTrue(conv_model is not None)
dump_data_and_model(
x_test,
xgb,
conv_model,
basename="SklearnXGBClassifierMultiStrLabels",
allow_failure="StrictVersion("
"onnx.__version__)"
"< StrictVersion('1.3.0')",
)
def test_xgb_classifier_multi_discrete_int_labels(self):
iris = load_iris()
x = iris.data[:, :2]
y = iris.target
y[y == 0] = 10
y[y == 1] = 20
y[y == 2] = -30
x_train, x_test, y_train, _ = train_test_split(x,
y,
test_size=0.5,
random_state=42)
xgb = XGBClassifier(n_estimators=3)
xgb.fit(x_train, y_train)
conv_model = convert_xgboost(
xgb, initial_types=[('input', FloatTensorType(shape=[None, None]))])
self.assertTrue(conv_model is not None)
dump_data_and_model(
x_test.astype("float32"),
xgb,
conv_model,
basename="SklearnXGBClassifierMultiDiscreteIntLabels",
allow_failure="StrictVersion("
"onnx.__version__)"
"< StrictVersion('1.3.0')",
)
def test_xgboost_booster_classifier_bin(self):
x, y = make_classification(n_classes=2, n_features=5,
n_samples=100,
random_state=42, n_informative=3)
x_train, x_test, y_train, _ = train_test_split(x, y, test_size=0.5,
random_state=42)
data = DMatrix(x_train, label=y_train)
model = train({'objective': 'binary:logistic',
'n_estimators': 3, 'min_child_samples': 1}, data)
model_onnx = convert_xgboost(model, 'tree-based classifier',
[('input', FloatTensorType([None, x.shape[1]]))])
dump_data_and_model(x_test.astype(np.float32),
model, model_onnx,
allow_failure="StrictVersion(onnx.__version__) < StrictVersion('1.3.0')",
basename="XGBBoosterMCl")
def test_xgboost_booster_classifier_multiclass_softprob(self):
x, y = make_classification(n_classes=3, n_features=5,
n_samples=100,
random_state=42, n_informative=3)
x_train, x_test, y_train, _ = train_test_split(x, y, test_size=0.5,
random_state=42)
data = DMatrix(x_train, label=y_train)
model = train({'objective': 'multi:softprob',
'n_estimators': 3, 'min_child_samples': 1,
'num_class': 3}, data)
model_onnx = convert_xgboost(model, 'tree-based classifier',
[('input', FloatTensorType([None, x.shape[1]]))])
dump_data_and_model(x_test.astype(np.float32),
model, model_onnx,
allow_failure="StrictVersion(onnx.__version__) < StrictVersion('1.3.0')",
basename="XGBBoosterMClSoftProb")
def test_xgboost_booster_classifier_multiclass_softmax(self):
x, y = make_classification(n_classes=3, n_features=5,
n_samples=100,
random_state=42, n_informative=3)
x_train, x_test, y_train, _ = train_test_split(x, y, test_size=0.5,
random_state=42)
data = DMatrix(x_train, label=y_train)
model = train({'objective': 'multi:softmax',
'n_estimators': 3, 'min_child_samples': 1,
'num_class': 3}, data)
model_onnx = convert_xgboost(model, 'tree-based classifier',
[('input', FloatTensorType([None, x.shape[1]]))])
dump_data_and_model(x_test.astype(np.float32),
model, model_onnx,
allow_failure="StrictVersion(onnx.__version__) < StrictVersion('1.3.0')",
basename="XGBBoosterMClSoftMax")
def test_xgboost_booster_classifier_reg(self):
x, y = make_classification(n_classes=2, n_features=5,
n_samples=100,
random_state=42, n_informative=3)
y = y.astype(np.float32) + 0.567
x_train, x_test, y_train, _ = train_test_split(x, y, test_size=0.5,
random_state=42)
data = DMatrix(x_train, label=y_train)
model = train({'objective': 'reg:squarederror',
'n_estimators': 3, 'min_child_samples': 1}, data)
model_onnx = convert_xgboost(model, 'tree-based classifier',
[('input', FloatTensorType([None, x.shape[1]]))])
dump_data_and_model(x_test.astype(np.float32),
model, model_onnx,
allow_failure="StrictVersion(onnx.__version__) < StrictVersion('1.3.0')",
basename="XGBBoosterReg")
def test_xgboost_10(self):
this = os.path.abspath(os.path.dirname(__file__))
train = os.path.join(this, "input_fail_train.csv")
test = os.path.join(this, "input_fail_test.csv")
param_distributions = {
"colsample_bytree": 0.5,
"gamma": 0.2,
'learning_rate': 0.3,
'max_depth': 2,
'min_child_weight': 1.,
'n_estimators': 1,
'missing': np.nan,
}
train_df = pandas.read_csv(train)
X_train, y_train = train_df.drop('label', axis=1).values, train_df['label'].values
test_df = pandas.read_csv(test)
X_test, y_test = test_df.drop('label', axis=1).values, test_df['label'].values
regressor = XGBRegressor(verbose=0, objective='reg:squarederror', **param_distributions)
regressor.fit(X_train, y_train)
model_onnx = convert_xgboost(
regressor, 'bug',
[('input', FloatTensorType([None, X_train.shape[1]]))])
dump_data_and_model(
X_test.astype(np.float32),
regressor, model_onnx,
allow_failure="StrictVersion(onnx.__version__) < StrictVersion('1.3.0')",
basename="XGBBoosterRegBug")
def test_xgboost_classifier_i5450(self):
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
clr = XGBClassifier(objective="multi:softmax", max_depth=1, n_estimators=2)
clr.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=40)
initial_type = [('float_input', FloatTensorType([None, 4]))]
onx = convert_xgboost(clr, initial_types=initial_type)
sess = InferenceSession(onx.SerializeToString())
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[1].name
predict_list = [1., 20., 466., 0.]
predict_array = np.array(predict_list).reshape((1,-1)).astype(np.float32)
pred_onx = sess.run([label_name], {input_name: predict_array})[0]
        pred_xgboost = clr.predict_proba(predict_array)
bst = clr.get_booster()
bst.dump_model('dump.raw.txt')
dump_data_and_model(
X_test.astype(np.float32) + 1e-5,
clr, onx,
allow_failure="StrictVersion(onnx.__version__) < StrictVersion('1.3.0')",
basename="XGBClassifierIris")
def test_xgboost_example_mnist(self):
"""
Train a simple xgboost model and store associated artefacts.
"""
X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
X_train = X_train.reshape((X_train.shape[0], -1))
X_test = X_test.reshape((X_test.shape[0], -1))
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
clf = XGBClassifier(objective="multi:softprob", n_jobs=-1)
clf.fit(X_train, y_train)
sh = [None, X_train.shape[1]]
onnx_model = convert_xgboost(
clf, initial_types=[('input', FloatTensorType(sh))])
dump_data_and_model(
X_test.astype(np.float32), clf, onnx_model,
allow_failure="StrictVersion(onnx.__version__) < StrictVersion('1.3.0')",
basename="XGBoostExample")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "Jiafauser/News_blog",
"score": 2
} |
#### File: apps/doc/views.py
```python
from django.shortcuts import render
# Create your views here.
def doc_list(request):
return render(request, 'doc/docDownload.html')
``` |
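A hedged sketch of how `doc_list` above might be wired into a URLconf; the module path and route name are assumptions, not taken from the project:

```python
# doc/urls.py (illustrative)
from django.urls import path

from . import views

app_name = 'doc'

urlpatterns = [
    # GET /doc/ renders doc/docDownload.html through the doc_list view above
    path('', views.doc_list, name='doc_list'),
]
```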
{
"source": "jiafeiyan/xops",
"score": 2
} |
#### File: xops/account/activity_handler.py
```python
import re
open_id_pattern = re.compile(r"^\d{11}$")
def join_activity(mysql_conn, parameters):
open_id = parameters.get("id")
activity = parameters.get("activity")
code = "0"
response = {"id": open_id, "activity": activity}
result = {"kind": "joinActivity", "code": code, "response": response}
if open_id is None:
code = "-1"
error = "请输入手机号"
elif not open_id_pattern.match(open_id):
code = "-1"
error = "手机号格式错误"
if activity is None:
code = "-1"
error = "请输入赛事代码"
elif len(activity) != 4:
code = "-1"
error = "赛事代码应为4位"
if mysql_conn is None or not mysql_conn.is_connected():
code = "-1"
error = "系统内部错误"
if code == "-1":
response.update({"error": error})
result.update({"code": code, "response": response})
return result
mysql_conn.set_charset_collation('utf8')
mysql_conn.start_transaction()
cursor = mysql_conn.cursor()
sql = '''SELECT investorid FROM siminfo.t_investor WHERE openid = %s'''
cursor.execute(sql, (open_id,))
row = cursor.fetchone()
if row is None:
code = "-1"
error = "投资者尚未开户"
response.update({"error": error})
result.update({"code": code, "response": response})
else:
investor_id = str(row[0])
sql = '''SELECT activityid FROM siminfo.t_activity WHERE activityid = %s'''
cursor.execute(sql, (activity,))
row = cursor.fetchone()
if row is None:
code = "-1"
error = "赛事活动不存在"
response.update({"error": error})
result.update({"code": code, "response": response})
else:
sql = '''SELECT activityid, investorid, joindate FROM siminfo.t_activityinvestor WHERE activityid = %s AND investorid = %s'''
cursor.execute(sql, (activity, investor_id))
row = cursor.fetchone()
if row is None:
sql = """SELECT settlementgroupid FROM siminfo.t_activitysettlementgroup
WHERE activityid = %s AND settlementgroupid IN(
SELECT DISTINCT settlementgroupid FROM siminfo.t_activitysettlementgroup t
WHERE t.activityid IN
(SELECT t1.activityid FROM siminfo.t_activityinvestor t1, siminfo.t_activity t2
WHERE t1.investorid = %s AND t1.activityid = t2.activityid AND (t2.activitystatus = '0' OR t2.activitystatus = '1')))"""
cursor.execute(sql, (activity, investor_id))
cursor.fetchall()
if cursor.rowcount > 0:
code = "-1"
error = "投资者已参加其他相似类型赛事活动"
response.update({"error": error})
result.update({"code": code, "response": response})
else:
                    # get the current trading day
sql = """SELECT DISTINCT t1.tradingday FROM siminfo.t_tradesystemtradingday t1, siminfo.t_tradesystemsettlementgroup t2, siminfo.t_activitysettlementgroup t3
WHERE t1.tradesystemid = t2.tradesystemid AND t2.settlementgroupid = t3.settlementgroupid AND t3.activityid = %s"""
cursor.execute(sql, (activity,))
row = cursor.fetchone()
current_trading_day = str(row[0])
                    # check the activity status
sql = """SELECT activitystatus, initialbalance FROM siminfo.t_activity WHERE activityid = %s"""
cursor.execute(sql, (activity,))
row = cursor.fetchone()
activity_status = str(row[0])
initial_balance = str(row[1])
join_status = '0'
                    # check the investor's funds and positions
if activity_status == '1':
sql = """SELECT t1.investorid FROM siminfo.t_investorfund t1
WHERE t1.brokersystemid = (SELECT DISTINCT t2.brokersystemid
FROM siminfo.t_activitysettlementgroup t1, siminfo.t_brokersystemsettlementgroup t2 WHERE t1.settlementgroupid = t2.settlementgroupid AND t1.activityid = %s)
AND t1.investorid = %s AND (t1.balance <> %s OR t1.available <> %s OR t1.currmargin <> 0 OR t1.profit <> 0 OR t1.stockvalue <> 0)
UNION
SELECT DISTINCT t2.investorid FROM siminfo.t_clientposition t1, siminfo.t_investorclient t2, (SELECT settlementgroupid FROM siminfo.t_activitysettlementgroup WHERE activityid = %s) t3
WHERE t2.investorid = %s AND t1.clientid = t2.clientid AND t1.settlementgroupid = t2.settlementgroupid AND t2.settlementgroupid = t3.settlementgroupid AND t1.position > 0"""
cursor.execute(sql, (activity,investor_id,initial_balance,initial_balance,activity,investor_id))
cursor.fetchall()
if cursor.rowcount == 0:
sql = """INSERT INTO siminfo.t_activityinvestorevaluation(ActivityID,InvestorID,InitialAsset,PreAsset,CurrentAsset,TotalReturnRate,ReturnRateOf1Day)
SELECT t2.activityid, t1.investorid, SUM(t1.balance) AS initialasset, SUM(t1.balance) AS preasset, SUM(t1.balance) AS currasset, 0, 0 FROM siminfo.t_investorfund t1,
(SELECT DISTINCT t1.activityid, t2.brokersystemid FROM siminfo.t_activitysettlementgroup t1, siminfo.t_brokersystemsettlementgroup t2 WHERE t1.activityid = %s AND t1.settlementgroupid = t2.settlementgroupid) t2
WHERE t1.investorid = %s AND t1.brokersystemid = t2.brokersystemid
GROUP BY t2.activityid, t1.investorid"""
cursor.execute(sql, (activity, investor_id))
join_status = '1'
sql = """INSERT INTO siminfo.t_activityinvestor(activityid, investorid, joindate, joinstatus) VALUES(%s, %s, DATE_FORMAT(NOW(), '%Y%m%d'), %s)"""
cursor.execute(sql, (activity, investor_id, join_status))
if cursor.rowcount == 0:
code = "-1"
error = "参加赛事活动失败"
response.update({"error": error})
result.update({"code": code, "response": response})
mysql_conn.commit()
return result
def query_activity_ranking(mysql_conn, parameters):
activity_id = parameters.get("activity")
investor_id = parameters.get("investor")
query_type = parameters.get("type")
query_count = parameters.get("count")
code = "0"
response = {"activity": activity_id, "investor": investor_id, "type": query_type, "count": query_count}
result = {"kind": "queryActivityRanking", "code": code, "response": response}
if activity_id is None:
code = "-1"
error = "请输入赛事编号"
elif len(activity_id) != 4:
code = "-1"
error = "赛事代码应为4位"
if query_type not in ['00', '01', '99']:
code = "-1"
error = "查询类型仅支持00、01、99"
if query_type == '99' and activity_id is None:
code = "-1"
error = "请输入投资者代码"
if query_count is None:
query_count = 30
if mysql_conn is None or not mysql_conn.is_connected():
code = "-1"
error = "系统内部错误"
if code == "-1":
response.update({"error": error})
result.update({"code": code, "response": response})
return result
mysql_conn.set_charset_collation('utf8')
cursor = mysql_conn.cursor()
if investor_id is not None and investor_id != "":
sql = '''SELECT investorid FROM siminfo.t_investor WHERE investorid = %s'''
cursor.execute(sql, (investor_id,))
row = cursor.fetchone()
if row is None:
code = "-1"
error = "投资者尚未开户"
response.update({"error": error})
result.update({"code": code, "response": response})
return result
rows = None
if query_type == '99' and investor_id is not None and investor_id != "":
sql = """SELECT t.investorid, t1.investorname, t.initialasset, t.preasset, t.currentasset, ROUND(t.totalreturnrate, 4), ROUND(t.returnrateof1day, 4), t.rankingstatus, t.preranking, t.ranking
FROM siminfo.t_activityinvestorevaluation t, siminfo.t_investor t1
WHERE t.activityid = %s AND t.investorid = %s AND t.investorid = t1.investorid"""
cursor.execute(sql, (activity_id, investor_id,))
rows = cursor.fetchall()
if query_type == '00':
if investor_id is not None and investor_id != "":
sql = """SELECT t.investorid, t1.investorname, t.initialasset, t.preasset, t.currentasset, ROUND(t.totalreturnrate, 4), ROUND(t.returnrateof1day, 4), t.rankingstatus, t.preranking, t.ranking
FROM siminfo.t_activityinvestorevaluation t, siminfo.t_investor t1
WHERE t.activityid = %s AND ((t.rankingstatus = '1' AND (t.ranking <= %s OR %s = '0')) OR t.investorid = %s) AND t.investorid = t1.investorid
ORDER BY t.rankingstatus DESC, t.ranking"""
cursor.execute(sql, (activity_id, query_count, query_count, investor_id))
rows = cursor.fetchall()
else:
sql = """SELECT t.investorid, t1.investorname, t.initialasset, t.preasset, t.currentasset, ROUND(t.totalreturnrate, 4), ROUND(t.returnrateof1day, 4), t.rankingstatus, t.preranking, t.ranking
FROM siminfo.t_activityinvestorevaluation t, siminfo.t_investor t1
WHERE t.activityid = %s AND t.rankingstatus = '1' AND (t.ranking <= %s OR %s = '0') AND t.investorid = t1.investorid
ORDER BY t.rankingstatus DESC, t.ranking"""
cursor.execute(sql, (activity_id, query_count, query_count))
rows = cursor.fetchall()
if query_type == '01':
if investor_id is not None and investor_id != "":
sql = """SELECT t.investorid, t1.investorname, t.initialasset, t.preasset, t.currentasset, ROUND(t.totalreturnrate, 4), ROUND(t.returnrateof1day, 4), t.rankingstatus, 0 as preranking, t.newranking AS ranking
FROM (SELECT t.* FROM
(SELECT t.*, (@i:=@i+1) AS newranking FROM siminfo.t_activityinvestorevaluation t,(SELECT @i:=0) AS it
WHERE t.activityid = %s AND t.rankingstatus = '1'
ORDER BY t.returnrateof1day DESC, t.totalreturnrate DESC, t.currentasset DESC, t.investorid) t WHERE t.newranking <= %s OR %s = '0'
UNION ALL
SELECT t.*, 0 AS newranking FROM siminfo.t_activityinvestorevaluation t
WHERE t.activityid = %s AND t.rankingstatus = '0' AND t.investorid = %s
) t, siminfo.t_investor t1 WHERE t.investorid = t1.investorid"""
cursor.execute(sql, (activity_id, query_count, query_count, activity_id, investor_id))
rows = cursor.fetchall()
else:
sql = """SELECT t.investorid, t1.investorname, t.initialasset, t.preasset, t.currentasset, ROUND(t.totalreturnrate, 4), ROUND(t.returnrateof1day, 4), t.rankingstatus, 0 as preranking, t.newranking AS ranking
FROM (SELECT t.*, (@i:=@i+1) AS newranking FROM siminfo.t_activityinvestorevaluation t,(SELECT @i:=0) AS it
WHERE t.activityid = %s AND t.rankingstatus = '1'
ORDER BY t.returnrateof1day DESC, t.totalreturnrate DESC, t.currentasset DESC, t.investorid) t, siminfo.t_investor t1 WHERE (t.newranking <= %s OR %s = '0') AND t.investorid = t1.investorid"""
cursor.execute(sql, (activity_id, query_count, query_count))
rows = cursor.fetchall()
data = []
if rows is not None:
for row in rows:
data.append({"investorId": str(row[0]),"investorName": str(row[1]),"initialAsset": str(row[2]),"preAsset": str(row[3]),
"currentAsset": str(row[4]),"totalReturnRate": str(row[5]),"returnRateOf1Day": str(row[6]),"rankingStatus": str(int(row[7])),
"preRanking": str(int(row[8])),"ranking": str(int(row[9]))})
response.update({"data": data})
result.update({"code": code, "response": response})
return result
def query_activity_joinstatus(mysql_conn, parameters):
activity_id = parameters.get("activity")
open_id = parameters.get("id")
code = "0"
response = {"activity": activity_id, "id": open_id, "status" : "-1"}
result = {"kind": "queryActivityJoinStatus", "code": code, "response": response}
if open_id is None or open_id == "":
code = "-1"
error = "请输入手机号"
elif not open_id_pattern.match(open_id):
code = "-1"
error = "手机号格式错误"
if activity_id is None or activity_id == "":
code = "-1"
error = "请输入赛事代码"
elif len(activity_id) != 4:
code = "-1"
error = "赛事代码应为4位"
if mysql_conn is None or not mysql_conn.is_connected():
code = "-1"
error = "系统内部错误"
if code == "-1":
response.update({"error": error})
result.update({"code": code, "response": response})
return result
mysql_conn.set_charset_collation('utf8')
cursor = mysql_conn.cursor()
sql = '''SELECT investorid FROM siminfo.t_investor WHERE openid = %s'''
cursor.execute(sql, (open_id,))
row = cursor.fetchone()
if row is None:
code = "-1"
error = "投资者尚未开户"
response.update({"error": error})
result.update({"code": code, "response": response})
else:
investor_id = str(row[0])
sql = '''SELECT activityid FROM siminfo.t_activity WHERE activityid = %s'''
cursor.execute(sql, (activity_id,))
row = cursor.fetchone()
if row is None:
code = "-1"
error = "赛事活动不存在"
response.update({"error": error})
result.update({"code": code, "response": response})
else:
sql = '''SELECT activityid, investorid, joindate FROM siminfo.t_activityinvestor WHERE activityid = %s AND investorid = %s'''
cursor.execute(sql, (activity_id, investor_id))
row = cursor.fetchone()
if row is not None:
response.update({"status": "1"})
result.update({"code": code, "response": response})
return result
```
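The handlers above validate their inputs before touching the database, so the guard clauses can be exercised without a live MySQL connection. A small sketch of that path; the import line is an assumption about how the module sits on the path:

```python
from activity_handler import join_activity  # adjust the import to your package layout

# With a valid-looking phone number and activity code but no connection,
# join_activity short-circuits on the connection check and reports code '-1'.
result = join_activity(None, {"id": "13800000000", "activity": "0001"})
print(result["code"])               # '-1'
print(result["response"]["error"])  # the internal-error message
```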
#### File: xops/account/gen_account_ex.py
```python
from utils import Configuration, mysql, log, parse_conf_args
def gen_investors(context, conf):
mysql_pool = mysql(configs=context.get("mysql").get(conf.get("mysqlId")))
logger = log.get_logger(category="GenAccount")
balance_conf = conf["balance"]
settlementgroupid = conf["settlementgroupid"]
logger.info("[gen investors %s] begin", balance_conf)
mysql_conn = mysql_pool.get_cnx()
mysql_conn.set_charset_collation('utf8')
try:
mysql_conn.start_transaction()
cursor = mysql_conn.cursor()
config_brokers = ""
for broker_system_id in balance_conf.keys():
if broker_system_id != "default":
broker_system_balance = balance_conf[broker_system_id]
sql = '''INSERT INTO siminfo.t_investorfund(BrokerSystemID,InvestorID,PreBalance,CurrMargin,CloseProfit,Premium,Deposit,Withdraw,Balance,Available,PreMargin,FuturesMargin,OptionsMargin,PositionProfit,Profit,Interest,Fee,
TotalCollateral,CollateralForMargin,PreAccmulateInterest,AccumulateInterest,AccumulateFee,ForzenDeposit,AccountStatus,InitialAsset,PreMonthAsset,PreWeekAsset,PreAsset,CurrentAsset,PreStockValue,StockValue)
SELECT %s,t2.investorid,%s,0,0,0,0,0,%s,%s,0,0,0,0,0,0,0,0,0,0,0,0,0,'0',%s,%s,%s,%s,%s,0,0
FROM siminfo.t_investor t2 '''
cursor.execute(sql, (
broker_system_id, broker_system_balance, broker_system_balance, broker_system_balance,
broker_system_balance, broker_system_balance, broker_system_balance, broker_system_balance,
broker_system_balance))
config_brokers = "'%s'" % broker_system_id if len(config_brokers) == 0 else "%s,'%s'" % (
config_brokers, broker_system_id)
for sgid in settlementgroupid:
# siminfo.t_partclient;
sql = """insert into siminfo.t_partclient(SettlementGroupID, ClientID, ParticipantID)
select %s, ClientID, ParticipantID from siminfo.t_partclient where SettlementGroupID = %s"""
cursor.execute(sql, (settlementgroupid.get(sgid), sgid))
# siminfo.t_client;
sql = """insert into siminfo.t_client(SettlementGroupID,ClientID,ClientName,IdentifiedCardType,IdentifiedCardNo,TradingRole,ClientType,IsActive,HedgeFlag)
select %s,ClientID,ClientName,IdentifiedCardType,IdentifiedCardNo,TradingRole,ClientType,IsActive,HedgeFlag from siminfo.t_client where SettlementGroupID = %s"""
cursor.execute(sql, (settlementgroupid.get(sgid), sgid))
# siminfo.t_investorclient;
sql = """insert into siminfo.t_investorclient(SettlementGroupID, InvestorID, ClientID)
select %s, InvestorID, ClientID from siminfo.t_investorclient where SettlementGroupID = %s"""
cursor.execute(sql, (settlementgroupid.get(sgid), sgid))
mysql_conn.commit()
except Exception as e:
logger.error(e)
finally:
mysql_conn.close()
logger.info("[gen investors %s] end", balance_conf)
def main():
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["mysql"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files)
gen_investors(context, conf)
if __name__ == "__main__":
main()
```
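For reference, a hedged sketch of the `conf` shape that `gen_investors` above reads: a `mysqlId`, a `balance` map keyed by broker system id, and a `settlementgroupid` map from source to target group. Every id and amount below is a placeholder:

```python
conf = {
    "mysqlId": "simdb",
    "balance": {
        "default": 1000000,  # skipped when inserting investor funds
        "S1": 1000000,       # broker system id -> initial balance
    },
    "settlementgroupid": {
        "SG01": "SG91",      # copy client records from SG01 into SG91
    },
}
```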
#### File: xops/account/gen_investor.py
```python
import csv
import os
from utils import Configuration, mysql, log, parse_conf_args, csv_tool, path
def gen_investors(context, conf):
logger = log.get_logger(category="Investors")
_mysql = mysql(configs=context.get("mysql")[conf.get("mysqlId")])
    User = dict(columns=("UserID", "Passwd"),
                sql="""select InvestorID, Password from siminfo.t_investor""")
    csv_data = _mysql.select(User['sql'])
output = path.convert(context.get("csv")[conf.get("csv")]['quant'])
if not os.path.exists(str(output)):
os.makedirs(str(output))
csv_path = os.path.join(output, "user.csv")
    produce_csv(User["columns"], csv_data, csv_path)
# generate the csv file
def produce_csv(columns, csv_data, _path):
with open(_path, 'wb') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(csv_tool.covert_to_gbk(columns))
writer.writerows(csv_tool.covert_to_gbk(csv_data))
def main():
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["mysql", "csv"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files)
gen_investors(context, conf)
if __name__ == "__main__":
main()
```
#### File: xops/activity/mdf_activity.py
```python
import sys
import json
from utils import Configuration, mysql, log, parse_conf_args
def mdf_activity(context, conf):
mysql_pool = mysql(configs=context.get("mysql").get(conf.get("mysqlId")))
logger = log.get_logger(category="MdfActivity")
logger.info("[mdf activity with %s] begin" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))
mysql_conn = mysql_pool.get_cnx()
mysql_conn.set_charset_collation('utf8')
try:
mysql_conn.start_transaction()
cursor = mysql_conn.cursor()
for activity in conf["activities"]:
code = activity["code"]
name = activity["name"]
atype = activity["type"]
initial_balance = activity["balance"]
join_mode = activity["joinMode"]
ranking_rule = activity["rankingRule"]
begin = activity["begin"]
end = activity["end"]
status = activity["status"]
settlement_groups = activity["settlement_groups"]
logger.info("[mdf activity with {code=%s, name=%s, type=%s, joinMode=%s, rankingRule=%s, begin=%s, end=%s, status=%s, settlementgroups=%s}]......" % (code, name, atype, join_mode, ranking_rule, begin, end, status, settlement_groups))
sql = '''SELECT activityid FROM siminfo.t_activity WHERE activityid = %s for update'''
cursor.execute(sql, (code,))
row = cursor.fetchone()
if row is None:
sys.stderr.write("Error: Activity %s is not existed.\n" % (code,))
logger.error("[gen activity with {code=%s, name=%s, type=%s, joinMode=%s, rankingRule=%s, begin=%s, end=%s, status=%s, settlementgroups=%s}] Error: Activity %s is not existed." % (code, name, atype, join_mode, ranking_rule, begin, end, status, settlement_groups, code))
else:
sql = '''UPDATE siminfo.t_activity set activityname = %s, activitytype = %s, initialbalance = %s, joinmode=%s, rankingrule=%s, begindate = %s, enddate = %s, activitystatus = %s, updatedate = DATE_FORMAT(NOW(), '%Y%m%d'), updatetime = DATE_FORMAT(NOW(), '%H:%i:%S')
WHERE activityid = %s'''
cursor.execute(sql, (name, atype, initial_balance, join_mode, ranking_rule, begin, end, status, code,))
sql = '''DELETE FROM siminfo.t_activitysettlementgroup WHERE activityid = %s'''
cursor.execute(sql, (code,))
relations = []
for settlement_group_id in settlement_groups:
relations.append((code, settlement_group_id,))
sql = '''INSERT INTO siminfo.t_activitysettlementgroup(activityid, settlementgroupid) values (%s, %s)'''
cursor.executemany(sql, relations)
mysql_conn.commit()
except Exception as e:
logger.error("[mdf activity with %s] Error: %s" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False), e))
finally:
mysql_conn.close()
logger.info("[mdf activity with %s] end" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))
def main():
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["mysql"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files)
mdf_activity(context, conf)
if __name__ == "__main__":
main()
```
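Similarly, a hedged sketch of the `activities` entries that `mdf_activity` above expects in its conf; every value below is a placeholder:

```python
conf = {
    "mysqlId": "simdb",
    "activities": [
        {
            "code": "0001",          # existing activityid to modify
            "name": "demo activity",
            "type": "1",
            "balance": 1000000,
            "joinMode": "0",
            "rankingRule": "0",
            "begin": "20200101",
            "end": "20201231",
            "status": "1",
            "settlement_groups": ["SG01", "SG02"],
        },
    ],
}
```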
#### File: xops/datatrans/trans_goldinfo.py
```python
import os
import json
import csv
from utils import parse_conf_args, Configuration, path, mysql, log
class trans_goldinfo:
def __init__(self, context, configs):
log_conf = None if context.get("log") is None else context.get("log").get(configs.get("logId"))
        # initialize the logger
self.logger = log.get_logger(category="trans_gold", configs=log_conf)
if log_conf is None:
self.logger.warning("trans_goldinfo未配置Log日志")
        # initialize the database connection
self.mysqlDB = mysql(configs=context.get("mysql")[configs.get("mysqlId")])
        # initialize the template path
self.initTemplate = context.get("init")[configs.get("initId")]
self.tradesystemid = configs.get("tradesystemid")
self.SettlementGroupID = configs.get("settlementGroupID")
self.file_instrument = "gold_instrument.csv"
self.file_marketdata = "gold_depthmarketdata.csv"
        # mapping between the exchange and its settlement group
self.__transform()
def __transform(self):
mysqlDB = self.mysqlDB
        # query the current trading day
sql = """SELECT tradingday FROM siminfo.t_tradesystemtradingday WHERE tradesystemid = %s"""
fc = mysqlDB.select(sql, (self.tradesystemid,))
current_trading_day = fc[0][0]
self.TradingDay = current_trading_day
self.logger.info("[trans_goldinfo] current_trading_day = %s" % current_trading_day)
        # read the csv files
csvs = self.__check_file()
if csvs is None:
return
if csvs[0] is not None:
            # =========== process instrument.csv and write t_Instrument ==============
self.__t_Instrument(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== process instrument.csv and write t_TradingSegmentAttr ==============
self.__t_TradingSegmentAttr(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== process instrument.csv and write t_MarginRate ==============
self.__t_MarginRate(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== process instrument.csv and write t_MarginRateDetail ==============
self.__t_MarginRateDetail(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== check and write t_InstrumentProperty ==============
self.__t_InstrumentProperty(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== check and write t_TransFeeRateDetail ==============
self.__t_TransFeeRateDetail(mysqlDB=mysqlDB, csv_file=csvs[0])
            # =========== check and write __t_PriceBanding ==============
self.__t_PriceBanding(mysqlDB=mysqlDB, csv_file=csvs[0])
if csvs[1] is not None:
            # =========== write t_MarketData ==============
self.__t_MarketData(mysqlDB=mysqlDB, csv_file=csvs[1])
def __t_Instrument(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
cursor = mysql_conn.cursor()
            # delete all existing data for the gold exchange settlement group
cursor.execute("delete from siminfo.t_Instrument where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_golds = """INSERT INTO siminfo.t_Instrument(
SettlementGroupID,ProductID,
ProductGroupID,UnderlyingInstrID,
ProductClass,PositionType,PositionDateType,
StrikePrice,OptionsType,
VolumeMultiple,UnderlyingMultiple,
InstrumentID,InstrumentName,
DeliveryYear,DeliveryMonth,AdvanceMonth
)VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, %s)"""
sql_insert_params = []
for gold in csv_file:
sql_insert_params.append((self.SettlementGroupID, gold["ProductID"],
gold["ProductID"], gold["UnderlyingInstrID"],
gold["ProductClass"], gold["PositionType"], 2,
"0", gold["OptionsType"],
gold["VolumeMultiple"],
"0",
gold["InstrumentID"],
gold["InstrumentName"].decode(encoding='gbk', errors='ignore').encode(
encoding='utf8'),
gold["DeliveryYear"], gold["DeliveryMonth"], "012"))
cursor.executemany(sql_insert_golds, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_Instrument完成")
        # after the import completes, populate the product tables
self.__init_product()
def __init_product(self):
mysql_conn = self.mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
cursor = mysql_conn.cursor()
cursor.execute("delete from siminfo.t_ClientProductRight where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_MarketProduct where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_MdPubStatus where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_PartProductRight where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_PartProductRole where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_Product where SettlementGroupID = %s", (self.SettlementGroupID,))
cursor.execute("delete from siminfo.t_ProductGroup where SettlementGroupID = %s", (self.SettlementGroupID,))
# t_ClientProductRight
self.logger.info("产品类型导入t_ClientProductRight")
sql = """INSERT into siminfo.t_ClientProductRight(
SELECT SettlementGroupID,ProductID,'00000000' AS ClientID,'0' AS TradingRight
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_MarketProduct
self.logger.info("产品类型导入t_MarketProduct")
sql = """INSERT into siminfo.t_MarketProduct(
SELECT t.SettlementGroupID, t1.MarketID, t.ProductID
FROM siminfo.t_instrument t,siminfo.t_market t1
WHERE t.SettlementGroupID = t1.SettlementGroupID
AND t.SettlementGroupID = %s
GROUP BY t.SettlementGroupID,t.ProductID,t1.MarketID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_MdPubStatus
self.logger.info("产品类型导入t_MdPubStatus")
sql = """INSERT into siminfo.t_MdPubStatus(
SELECT SettlementGroupID,ProductID,'3' AS InstrumentStatus,'0' AS MdPubStatus
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_PartProductRight
self.logger.info("产品类型导入t_PartProductRight")
sql = """INSERT INTO siminfo.t_PartProductRight(
SELECT SettlementGroupID,ProductID,'00000000' AS ParticipantID,'0' AS TradingRight
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_PartProductRole
self.logger.info("产品类型导入t_PartProductRole")
sql = """INSERT INTO siminfo.t_PartProductRole(
SELECT SettlementGroupID,'00000000' AS ParticipantID,ProductID,'1' AS TradingRole
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_Product
self.logger.info("产品类型导入t_Product")
sql = """INSERT INTO siminfo.t_Product(
SELECT SettlementGroupID, ProductID, ProductGroupID, '' AS ProductName,'' AS ProductClass
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductID,ProductGroupID)"""
cursor.execute(sql, (self.SettlementGroupID,))
# t_ProductGroup
self.logger.info("产品类型导入t_ProductGroup")
sql = """INSERT INTO siminfo.t_ProductGroup(
SELECT SettlementGroupID,ProductGroupID,'' AS ProductGroupName,ProductGroupID as CommodityID
FROM siminfo.t_instrument
WHERE SettlementGroupID = %s
GROUP BY SettlementGroupID,ProductGroupID,ProductGroupID)"""
cursor.execute(sql, (self.SettlementGroupID,))
mysql_conn.commit()
finally:
mysql_conn.close()
def __t_TradingSegmentAttr(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
cursor = mysql_conn.cursor()
            # delete all existing data for the gold exchange settlement group
cursor.execute("delete from siminfo.t_TradingSegmentAttr where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_segment = """INSERT INTO siminfo.t_TradingSegmentAttr (
SettlementGroupID,TradingSegmentSN,
TradingSegmentName,StartTime,
InstrumentStatus,DayOffset,InstrumentID
) VALUES (%s,%s,%s,%s,%s,%s,%s)"""
sql_insert_params = []
            # load the trading time segment data
segment_attr = self.__loadJSON(tableName='t_TradingSegmentAttr')
if segment_attr is None:
self.logger.error("t_TradingSegmentAttr不存在")
return
SGID = self.SettlementGroupID
for gold in csv_file:
                # check whether the settlement group exists
if SGID in segment_attr:
params = self.__get_segment_attr(attr=segment_attr[SGID],
instrument=gold["InstrumentID"])
sql_insert_params += params
cursor.executemany(sql_insert_segment, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_TradingSegmentAttr完成")
    # generate the trading segments of the target instrument from its product code
def __get_segment_attr(self, attr, instrument):
gold = attr['gold']
all_trading_time = attr['tradingTime']
exist_trading_time = []
        # collect the template segments that contain this instrument's product code
for segment in gold:
if str(instrument) in str(gold[segment]):
exist_trading_time.append(segment)
        # if the product is not in the template, fall back to the settlement group's daytime trading segments
params = []
if len(exist_trading_time) == 0:
for segment in all_trading_time["day"]:
params.append((segment[0], segment[1], segment[2], segment[3], segment[4], segment[5], instrument))
else:
segment_list = []
for exist in exist_trading_time:
segment_list += all_trading_time[exist]
for segment in segment_list:
params.append((segment[0], segment[1], segment[2], segment[3], segment[4], segment[5], instrument))
return params
def __t_MarginRate(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
            # load the template file
template = self.__loadJSON(tableName='t_MarginRate')
if template is None:
self.logger.error("t_MarginRate template is None")
return
cursor = mysql_conn.cursor()
            # delete all existing data for the gold exchange settlement group
cursor.execute("delete from siminfo.t_MarginRate where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_rate = """INSERT INTO siminfo.t_MarginRate (
SettlementGroupID,
MarginCalcID,
InstrumentID,
ParticipantID
) VALUES (%s,%s,%s,%s)"""
sql_insert_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
if SGID in template:
sql_insert_params.append((SGID, template[SGID][1], gold["InstrumentID"], template[SGID][3]))
cursor.executemany(sql_insert_rate, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_MarginRate完成")
def __t_MarginRateDetail(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
            # load the template file
template = self.__loadJSON(tableName='t_MarginRateDetail')
if template is None:
self.logger.error("t_MarginRateDetail template is None")
return
cursor = mysql_conn.cursor()
            # delete all existing data for the gold exchange settlement group
cursor.execute("delete from siminfo.t_MarginRateDetail where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_detail = """INSERT INTO siminfo.t_MarginRateDetail (
SettlementGroupID,TradingRole,HedgeFlag,
ValueMode,LongMarginRatio,ShortMarginRatio,
InstrumentID,ParticipantID,ClientID
) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
sql_insert_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
if SGID in template:
sql_insert_params.append(
self.__get_margin_rate_detail(attr=template[SGID],
instrument=gold["InstrumentID"]))
cursor.executemany(sql_insert_detail, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_MarginRateDetail完成")
    # generate the margin rate detail of the target instrument from its product code
def __get_margin_rate_detail(self, attr, instrument):
template = attr["template"]
margin_ratio = attr["marginRatio"]
        # check whether the product code exists in the template
if instrument in margin_ratio.keys():
params = (template[0], template[1], template[2], template[3], margin_ratio[instrument][0],
margin_ratio[instrument][1], instrument, template[9], template[10])
else:
params = (template[0], template[1], template[2], template[3], template[4], template[5], instrument,
template[9], template[10])
return params
def __t_InstrumentProperty(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
cursor = mysql_conn.cursor()
cursor.execute("delete from siminfo.t_InstrumentProperty where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_Property = """INSERT INTO siminfo.t_InstrumentProperty (
SettlementGroupID,CreateDate,OpenDate,ExpireDate,StartDelivDate,
EndDelivDate,BasisPrice,MaxMarketOrderVolume,MinMarketOrderVolume,
MaxLimitOrderVolume,MinLimitOrderVolume,PriceTick,
AllowDelivPersonOpen,InstrumentID,InstLifePhase
)VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
sql_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
sql_params.append((SGID, gold["CreateDate"], gold["OpenDate"], gold["ExpireDate"],
gold["StartDelivDate"], gold["EndDelivDate"], 0,
gold["MaxMarketOrderVolume"],
gold["MinMarketOrderVolume"],
gold["MaxLimitOrderVolume"],
gold["MinLimitOrderVolume"],
gold["PriceTick"],
0, gold["InstrumentID"], 1))
cursor.executemany(sql_Property, sql_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_InstrumentProperty完成")
def __t_TransFeeRateDetail(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
            # load the template file
template = self.__loadJSON(tableName='t_TransFeeRateDetail')
if template is None:
self.logger.error("t_TransFeeRateDetail template is None")
return
cursor = mysql_conn.cursor()
            # delete all existing data for the gold exchange settlement group
cursor.execute("delete from siminfo.t_transfeeratedetail where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_detail = """insert into siminfo.t_transfeeratedetail(
SettlementGroupID,TradingRole,HedgeFlag,ValueMode,OpenFeeRatio,
CloseYesterdayFeeRatio,CloseTodayFeeRatio,MinOpenFee,MinCloseFee,
MaxOpenFee,MaxCloseFee,InstrumentID,ParticipantID,
ClientID) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
sql_insert_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
if SGID in template:
sql_insert_params.append(
self.__get_trans_fee_rate_detail(attr=template[SGID],
instrument=gold["InstrumentID"]))
cursor.executemany(sql_insert_detail, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_TransFeeRateDetail完成")
# 通过产品代码生成目标合约的保证金率
def __get_trans_fee_rate_detail(self, attr, instrument):
template = attr["template"]
trans_fee = attr["transFee"]
# check whether the product code has an override in the template
if instrument in trans_fee.keys():
params = (template[0], template[1], template[2], trans_fee[instrument][1],
trans_fee[instrument][0], trans_fee[instrument][0], trans_fee[instrument][0],
template[7], template[8], template[9], template[10], instrument, template[12],
template[13])
else:
params = (template[0], template[1], template[2], template[3], template[4], template[5], template[6],
template[7], template[8], template[9], template[10], instrument, template[12],
template[13])
return params
def __t_PriceBanding(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
# load the template file
template = self.__loadJSON(tableName='t_PriceBanding')
if template is None:
self.logger.error("t_PriceBanding template is None")
return
cursor = mysql_conn.cursor()
# delete all existing rows for this settlement group (gold exchange)
cursor.execute("delete from siminfo.t_PriceBanding where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert_price = """INSERT INTO siminfo.t_PriceBanding (
SettlementGroupID,PriceLimitType,ValueMode,RoundingMode,
UpperValue,LowerValue,InstrumentID,TradingSegmentSN
) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)"""
sql_insert_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
if SGID in template:
sql_insert_params.append((SGID, template[SGID][1], template[SGID][2], template[SGID][3],
template[SGID][4], template[SGID][5], gold["InstrumentID"],
template[SGID][7]))
cursor.executemany(sql_insert_price, sql_insert_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_PriceBanding完成")
def __t_MarketData(self, mysqlDB, csv_file):
mysql_conn = mysqlDB.get_cnx()
mysql_conn.start_transaction()
try:
cursor = mysql_conn.cursor()
cursor.execute("delete from siminfo.t_MarketData where SettlementGroupID = %s", (self.SettlementGroupID,))
sql_insert = """INSERT INTO siminfo.t_MarketData (
TradingDay,SettlementGroupID,LastPrice,PreSettlementPrice,
PreClosePrice,PreOpenInterest,OpenPrice,
HighestPrice,LowestPrice,Volume,Turnover,
OpenInterest,ClosePrice,SettlementPrice,
UpperLimitPrice,LowerLimitPrice,PreDelta,
CurrDelta,UpdateTime,UpdateMillisec,InstrumentID
)VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
sql_params = []
for gold in csv_file:
SGID = self.SettlementGroupID
sql_params.append(
(self.TradingDay, SGID, None, gold["PreSettlementPrice"], gold["PreClosePrice"],
gold["PreOpenInterest"], None,
None, None, None, None,
None, None, None,
None, None, None,
None, "15:15:00", "100", gold["InstrumentID"]))
cursor.executemany(sql_insert, sql_params)
mysql_conn.commit()
finally:
mysql_conn.close()
self.logger.info("写入t_MarketData完成")
def __check_file(self, file_name=None):
env_dist = os.environ
# make sure the HOME environment variable is configured
if 'HOME' not in env_dist:
self.logger.error("HOME not in environment variable")
return None
# build the data file paths
catalog = env_dist['HOME']
catalog = '%s%s%s%s%s' % (catalog, os.path.sep, 'sim_data', os.path.sep, self.TradingDay)
# instrument information file
instrument = '%s%s%s' % (catalog, os.path.sep, self.file_instrument)
# market data file
depthmarketdata = '%s%s%s' % (catalog, os.path.sep, self.file_marketdata)
# if instrument.csv does not exist, set it to None
if not os.path.exists(instrument):
self.logger.error("%s%s" % (instrument, " is not exists"))
instrument = None
# if depthmarketdata.csv does not exist, set it to None
if not os.path.exists(depthmarketdata):
self.logger.error("%s%s" % (depthmarketdata, " is not exists"))
depthmarketdata = None
# read the CSV files
if file_name is None:
return self.__loadCSV(instrument), self.__loadCSV(depthmarketdata)
elif file_name == 'instrument':
return self.__loadCSV(instrument)
elif file_name == 'depthmarketdata':
return self.__loadCSV(depthmarketdata)
def __loadCSV(self, csv_file):
if csv_file is None:
return None
else:
return [row for row in csv.DictReader(open(csv_file))]
# load the table template data (JSON)
def __loadJSON(self, tableName):
_output = path.convert(self.initTemplate['initTemplate'])
_path = "%s%s%s%s" % (_output, os.path.sep, tableName, ".json")
if not os.path.exists(_path):
self.logger.error("文件" + tableName + ".json不存在")
return None
f = open(_path)
return json.load(f)
if __name__ == '__main__':
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["mysql", "log", "init"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files)
# run the gold initialisation script
trans_goldinfo(context=context, configs=conf)
```
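The margin and fee helpers above share one pattern: look the instrument up in a per-settlement-group template and fall back to the template defaults. A minimal, self-contained sketch of that lookup follows; the `build_margin_rate_row` name, the template layout and the sample values are illustrative assumptions, not the original configuration.
```python
# Sketch only: mirrors the lookup in __get_margin_rate_detail above.
def build_margin_rate_row(template, margin_ratio, instrument):
    # Per-instrument overrides win; otherwise the template defaults are used.
    if instrument in margin_ratio:
        long_ratio, short_ratio = margin_ratio[instrument][0], margin_ratio[instrument][1]
    else:
        long_ratio, short_ratio = template[4], template[5]
    return (template[0], template[1], template[2], template[3],
            long_ratio, short_ratio, instrument, template[9], template[10])


if __name__ == "__main__":
    # Assumed layout: defaults for the t_MarginRateDetail columns, ParticipantID/ClientID at the end.
    template = ["SG01", "1", "1", "1", 0.08, 0.08, "", "", "", "P001", "C001"]
    margin_ratio = {"au2012": [0.10, 0.12]}
    print(build_margin_rate_row(template, margin_ratio, "au2012"))  # override applied
    print(build_margin_rate_row(template, margin_ratio, "ag2012"))  # template defaults
```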
#### File: xops/extrader/md_handler.py
```python
import threading
import shfemdapi
import time
from utils import log
class MdHandler(shfemdapi.CShfeFtdcMduserSpi):
def __init__(self, md_api, user_id, password):
self.logger = log.get_logger(category="MdSpi")
shfemdapi.CShfeFtdcMduserSpi.__init__(self)
self.md_api = md_api
self.userId = user_id
self.password = password
self.is_connected = False
self.is_logined = False
self.request_id = 0
self.lock = threading.Lock()
def set_msg_puber(self, msg_pusher):
self.msg_pusher = msg_pusher
def OnFrontConnected(self):
self.logger.info("OnFrontConnected")
self.is_connected = True
req_login_field = shfemdapi.CShfeFtdcReqUserLoginField()
req_login_field.UserID = str(self.userId)
req_login_field.Password = str(self.password)
self.md_api.ReqUserLogin(req_login_field, self.get_request_id())
def OnFrontDisconnected(self, nReason):
self.logger.info("OnFrontDisconnected: %s" % str(nReason))
self.is_connected = False
def OnRspUserLogin(self, pRspUserLogin, pRspInfo, nRequestID, bIsLast):
self.logger.info("OnRspUserLogin")
if pRspInfo is not None and pRspInfo.ErrorID != 0:
self.logger.error("login failed : %s" % pRspInfo.ErrorMsg.decode("GBK").encode("UTF-8"))
time.sleep(3)
req_login_field = shfemdapi.CShfeFtdcReqUserLoginField()
req_login_field.UserID = str(self.userId)
req_login_field.Password = str(self.password)
self.md_api.ReqUserLogin(req_login_field, self.get_request_id())
else:
self.logger.info("login success")
self.is_logined = True
def OnRspSubscribeTopic(self, pDissemination, pRspInfo, nRequestID, bIsLast):
self.logger.info("OnRspSubscribeTopic")
if pRspInfo is not None and pRspInfo.ErrorID != 0:
self.logger.error("OnRspSubscribeTopic failed : %s" % pRspInfo.ErrorMsg.decode("GBK").encode("UTF-8"))
else:
print("SequenceSeries ==> " + str(pDissemination.SequenceSeries))
print("SequenceNo ==> " + str(pDissemination.SequenceNo))
def OnRtnDepthMarketData(self, pDepthMarketData):
self.logger.info("OnRtnDepthMarketData")
if pDepthMarketData is not None:
md_info = dict({"InstrumentID": pDepthMarketData.InstrumentID,
"LastPrice": float(pDepthMarketData.LastPrice),
"UpperLimitPrice": float(pDepthMarketData.UpperLimitPrice),
"LowerLimitPrice": float(pDepthMarketData.LowerLimitPrice),
"Volume": float(pDepthMarketData.Volume),
"BidPrice1": float(pDepthMarketData.BidPrice1),
"BidVolume1": float(pDepthMarketData.BidVolume1),
"AskPrice1": float(pDepthMarketData.AskPrice1),
"AskVolume1": float(pDepthMarketData.AskVolume1),
"BidPrice2": float(pDepthMarketData.BidPrice2),
"BidVolume2": float(pDepthMarketData.BidVolume2),
"AskPrice2": float(pDepthMarketData.AskPrice2),
"AskVolume2": float(pDepthMarketData.AskVolume2),
"BidPrice3": float(pDepthMarketData.BidPrice3),
"BidVolume3": float(pDepthMarketData.BidVolume3),
"AskPrice3": float(pDepthMarketData.AskPrice3),
"AskVolume3": float(pDepthMarketData.AskVolume3),
"BidPrice4": float(pDepthMarketData.BidPrice4),
"BidVolume4": float(pDepthMarketData.BidVolume4),
"AskPrice4": float(pDepthMarketData.AskPrice4),
"AskVolume4": float(pDepthMarketData.AskVolume4),
"BidPrice5": float(pDepthMarketData.BidPrice5),
"BidVolume5": float(pDepthMarketData.BidVolume5),
"AskPrice5": float(pDepthMarketData.AskPrice5),
"AskVolume5": float(pDepthMarketData.AskVolume5)})
msg = {"type": "marketdata",
"data": {pDepthMarketData.InstrumentID: md_info}}
self.msg_pusher.send(msg)
def get_request_id(self):
self.lock.acquire()
self.request_id += 1
req_id = self.request_id
self.lock.release()
return req_id
```
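Both SPI handlers in this project hand out request ids through a lock-guarded counter. A standalone sketch of that pattern, written with a `with` block instead of explicit acquire/release; the class and variable names are illustrative.
```python
import threading


class RequestIdGenerator(object):
    """Thread-safe, monotonically increasing request id (same idea as get_request_id)."""

    def __init__(self):
        self._lock = threading.Lock()
        self._next_id = 0

    def next_id(self):
        with self._lock:  # acquires and releases the lock even if an exception is raised
            self._next_id += 1
            return self._next_id


if __name__ == "__main__":
    gen = RequestIdGenerator()
    ids = []
    workers = [threading.Thread(target=lambda: ids.append(gen.next_id())) for _ in range(5)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(sorted(ids))  # [1, 2, 3, 4, 5] - each id handed out exactly once
```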
#### File: xops/extrader/md_real_time_quotes.py
```python
import json
import time
from msg_resolver_qry_insstatus import QryInstrumentStatusMsgResolver
from xmq import xmq_queue_puber, xmq_resolving_suber, xmq_pusher
from utils import Configuration, parse_conf_args, log, path
def start_get_md_quotes(context, conf):
# byte offset used to skip the header region of the csv file
HEAD_OFF_SET = 502
logger = log.get_logger(category="MdService")
logger.info("[start real time quotes with %s] begin" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))
# start time within the md file
start_time = conf.get("start_time")
# path to the recorded real-market quote file
md_source = path.convert(conf.get("mdSource"))
# refresh frequency
frequency = conf.get("frequency")
xmq_target_conf = context.get("xmq").get(conf.get("targetMQ"))
target_mq_addr = xmq_target_conf["address"]
target_mq_topic = xmq_target_conf["topic"]
msg_queue_puber = xmq_queue_puber(target_mq_addr, target_mq_topic)
# pusher used to request instrument status (get_status messages)
xmq_target_conf = context.get("xmq").get(conf.get("sTargetMQ"))
target_mq_addr = xmq_target_conf["address"]
target_mq_topic = xmq_target_conf["topic"]
msg_target_pusher = xmq_pusher(target_mq_addr, target_mq_topic)
# subscriber for instrument status messages
xmq_source_conf = context.get("xmq").get(conf.get("sourceMQ"))
source_mq_addr = xmq_source_conf["address"]
source_mq_topic = xmq_source_conf["topic"]
msg_source_suber_status = xmq_resolving_suber(source_mq_addr, source_mq_topic)
md_resolver_status = QryInstrumentStatusMsgResolver()
msg_source_suber_status.add_resolver(md_resolver_status)
# keep requesting instrument status until it arrives
while not md_resolver_status.status:
msg_target_pusher.send({"type": "get_status"})
time.sleep(5)
# process the file and publish real-market quote messages
# define a pointer to record the current line
# pointer = 1
step = conf.get("msg_step")
f = open(md_source, 'r')
# set the initial file offset
if conf.get("is_loop"):
# looping replay: skip the header region
f.seek(HEAD_OFF_SET)
else:
# non-looping replay: position at the end of the file
f.seek(-1, 2)
while f.read(1) != '\n':
f = open(md_source, 'r')
f.seek(-1, 2)
count = 0
find_one = False if start_time is not None else True
while True:
istatus = md_resolver_status.istatus.values()
if len(istatus) == 0:
continue
else:
istatus = istatus[0].get("InstrumentStatus")
if istatus in (("2", "3") if conf.get("is_loop") else ("1", "2", "3", "0", "5", "6")):
line = f.readline()
if len(line) == 0:
logger.info("real time quotes had send %s messages", str(count))
count = 0
pos = f.tell()
f.close()
f = open(md_source, 'r')
f.seek(pos)
# loop the file if configured
if conf.get("is_loop"):
logger.info("读到文件末尾,重新循环")
f.seek(HEAD_OFF_SET)
find_one = False if start_time is not None else True
time.sleep(frequency)
else:
if not find_one and start_time not in line:
continue
else:
find_one = True
handle_file(line, msg_queue_puber)
count += 1
if count == step:
logger.info("real time quotes had send %s messages", str(count))
# during call auction, wait longer between batches (frequency * 10)
if istatus == "3":
time.sleep(frequency * 10)
else:
time.sleep(frequency)
count = 0
def handle_file(read_line, pub):
row = read_line
# a trailing newline marks a complete quote line; strip it and split the fields
row = row.replace("\n", "").split(",")
md_info = dict({"InstrumentID": row[21],
"LastPrice": row[3],
"UpperLimitPrice": row[15],
"LowerLimitPrice": row[16],
"Volume": row[10],
"BidPrice1": row[23],
"BidVolume1": row[24],
"AskPrice1": row[25],
"AskVolume1": row[26],
"BidPrice2": row[27],
"BidVolume2": row[28],
"AskPrice2": row[29],
"AskVolume2": row[30],
"BidPrice3": row[31],
"BidVolume3": row[32],
"AskPrice3": row[33],
"AskVolume3": row[34],
"BidPrice4": row[35],
"BidVolume4": row[36],
"AskPrice4": row[37],
"AskVolume4": row[38],
"BidPrice5": row[39],
"BidVolume5": row[40],
"AskPrice5": row[41],
"AskVolume5": row[42]})
msg = {"type": "makemarket",
"data": {row[21]: md_info}}
pub.send(msg)
def main():
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["exchange", "xmq"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files, add_ons=add_ons)
start_get_md_quotes(context, conf)
if __name__ == "__main__":
main()
```
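The replay loop above effectively tails a CSV file: when `readline()` returns nothing it remembers the byte offset, reopens the file and seeks back before trying again. A minimal sketch of that technique in isolation; the file name and poll interval are placeholders.
```python
import time


def tail_lines(path, poll_interval=1.0, from_start=True):
    """Yield lines appended to `path`, polling in the same way as the replay loop above."""
    f = open(path, 'r')
    if not from_start:
        f.seek(0, 2)  # start at the current end of the file
    while True:
        line = f.readline()
        if not line:
            pos = f.tell()  # remember where we stopped
            f.close()
            time.sleep(poll_interval)
            f = open(path, 'r')  # reopen and continue from the same offset
            f.seek(pos)
            continue
        yield line.rstrip('\n')


# usage (assumes another process keeps appending rows to md.csv):
# for row in tail_lines('md.csv', poll_interval=0.5):
#     print(row)
```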
#### File: xops/extrader/md_worker.py
```python
import json
import time
import shfemdapi
from md_handler import MdHandler
from xmq import xmq_queue_pusher
from utils import Configuration, parse_conf_args, log
def start_md_service(context, conf):
logger = log.get_logger(category="MdService")
logger.info("[start stock md service with %s] begin" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))
exchange_conf = context.get("exchange").get(conf.get("targetExchangeId"))
exchange_front_addr = str(exchange_conf["mdAddress"])
user_id = conf["userId"]
password = conf["password"]
topic_id_list = conf["topicId"]
xmq_target_conf = context.get("xmq").get(conf.get("targetMQ"))
target_mq_addr = xmq_target_conf["address"]
target_mq_topic = xmq_target_conf["topic"]
msg_queue_pusher = xmq_queue_pusher(target_mq_addr, target_mq_topic)
md_api = shfemdapi.CShfeFtdcMduserApi_CreateFtdcMduserApi()
md_handler = MdHandler(md_api, user_id, password)
for topic_id in topic_id_list:
md_api.SubscribeMarketDataTopic(topic_id, shfemdapi.TERT_QUICK)
md_api.RegisterFront(exchange_front_addr)
md_api.RegisterSpi(md_handler)
md_handler.set_msg_puber(msg_queue_pusher)
md_api.Init()
while not md_handler.is_logined:
time.sleep(1)
md_api.Join()
def main():
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["exchange", "xmq"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files)
start_md_service(context, conf)
if __name__ == "__main__":
main()
```
#### File: xops/extrader/order_policy_makemarket.py
```python
import json
import Queue
import sys
import csv
import os
import time
import threading
from datetime import datetime
from msg_resolver_qry_insstatus import QryInstrumentStatusMsgResolver
from xmq import xmq_pusher, xmq_resolving_suber, xmq_msg_resolver, xmq_resolving_puller
from utils import Configuration, parse_conf_args, log, path
def makemarket_order(context, conf):
pid = os.getpid()
logger = log.get_logger(category="OrderMakeMarket")
logger.info(
"[start makemarket order order with %s] begin" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))
# 发送报单
xmq_target_conf = context.get("xmq").get(conf.get("targetMQ"))
target_mq_addr = xmq_target_conf["address"]
target_mq_topic = xmq_target_conf["topic"]
msg_target_pusher = xmq_pusher(target_mq_addr, target_mq_topic)
md_resolver = MakeMarketMsgResolver()
# subscriber for real-market quotes
xmq_source_conf = context.get("xmq").get(conf.get("tmdSourceMQ"))
source_mq_addr = xmq_source_conf["address"]
source_mq_topic = xmq_source_conf["topic"]
t_msg_source_suber = xmq_resolving_suber(source_mq_addr, source_mq_topic)
t_msg_source_suber.add_resolver(md_resolver)
# puller for simulated-market quotes
xmq_source_conf = context.get("xmq").get(conf.get("smdSourceMQ"))
source_mq_addr = xmq_source_conf["address"]
source_mq_topic = xmq_source_conf["topic"]
s_msg_source_puller = xmq_resolving_puller(source_mq_addr, source_mq_topic)
s_msg_source_puller.add_resolver(md_resolver)
# subscriber for instrument status messages
xmq_source_conf = context.get("xmq").get(conf.get("sourceMQ"))
source_mq_addr = xmq_source_conf["address"]
source_mq_topic = xmq_source_conf["topic"]
msg_source_suber_status = xmq_resolving_suber(source_mq_addr, source_mq_topic)
md_resolver_status = QryInstrumentStatusMsgResolver()
msg_source_suber_status.add_resolver(md_resolver_status)
# load the order data source
file_source = path.convert(conf.get("fileSource"))
order_source_data = [row for row in csv.DictReader(open(file_source))]
load_marketdata(order_source_data, md_resolver)
# keep requesting instrument status until it arrives
while not md_resolver_status.status:
msg_target_pusher.send({"type": "get_status"})
time.sleep(5)
count = 0
while True:
while not md_resolver.result_queue.empty():
result = md_resolver.result_queue.get()
# check the instrument status
if result.get("SecurityID") not in md_resolver_status.istatus:
continue
if str(md_resolver_status.istatus.get(result.get("SecurityID")).get("InstrumentStatus")) in ('2', '3'):
input_params = {"InstrumentID": result.get("SecurityID"),
"LimitPrice": result.get("LimitPrice"),
"VolumeTotalOriginal": int(result.get("VolumeTotalOriginal")),
"Direction": ord(result.get("Direction")),
"ParticipantID": conf.get("ParticipantID"),
"ClientID": conf.get("clientId"),
"OrderPriceType": conf.get("OrderPriceType")}
# logger.info(input_params)
seq = str(pid) + "_" + str(count)
msg_target_pusher.send({"type": "order", "data": input_params, "seq": seq})
logger.info(seq)
count += 1
def load_marketdata(marketdata, MakeMarketMsgResolver):
for data in marketdata:
digit = get_decimal_digit(float(data.get("PriceTick")))
if data.get("ValueMode") == '1':
lower = round((1 - float(data.get("LowerValue"))) * float(data.get("PreClosePrice")), digit)
upper = round((1 + float(data.get("UpperValue"))) * float(data.get("PreClosePrice")), digit)
elif data.get("ValueMode") == '2':
lower = float(data.get("PreClosePrice")) - float(data.get("LowerValue"))
upper = float(data.get("UpperValue")) + float(data.get("PreClosePrice"))
else:
continue
InstrumentID = data.get("InstrumentID")
PreClosePrice = data.get("PreClosePrice")
MaxLimitOrderVolume = data.get("MaxLimitOrderVolume")
one_row = dict({
InstrumentID: {
'BidPrice5': 0.00000,
'BidPrice4': 0.00000,
'BidPrice1': 0.00000,
'BidPrice3': 0.00000,
'BidPrice2': 0.00000,
'LowerLimitPrice': lower,
'AskPrice5': 0.00000,
'AskPrice4': 0.00000,
'AskPrice3': 0.00000,
'AskPrice2': 0.00000,
'AskPrice1': 0.00000,
'BidVolume5': 0,
'BidVolume4': 0,
'BidVolume3': 0,
'BidVolume2': 0,
'BidVolume1': 0,
'Volume': '0',
'AskVolume1': 0,
'AskVolume2': 0,
'AskVolume3': 0,
'AskVolume4': 0,
'AskVolume5': 0,
'UpperLimitPrice': upper,
'InstrumentID': InstrumentID,
'LastPrice': PreClosePrice
}
})
# cache the maximum order volume per instrument
MakeMarketMsgResolver.max_volume.update({InstrumentID: int(MaxLimitOrderVolume)})
MakeMarketMsgResolver.make_target(one_row)
def get_decimal_digit(decimal):
digit = 0
while True:
if decimal == int(decimal):
break
else:
decimal = decimal * 10
digit = digit + 1
return digit
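# get_decimal_digit above counts the decimal places of the price tick, e.g. 0.5 -> 1, 0.25 -> 2;
# it relies on repeated *10 multiplications rounding cleanly, which holds for typical tick sizes.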
class MakeMarketMsgResolver(xmq_msg_resolver):
def __init__(self):
self.target_market_context = {}
self.source_market_context = {}
self.max_volume = dict()
self.source_time = dict()
self.instrument_id = None
self.result_queue = Queue.Queue()
self.lock = threading.Lock()
xmq_msg_resolver.__init__(self)
def recv_target(self, md_data):
self.target_market_context.update(md_data)
self.instrument_id = md_data.keys()[0]
def make_target(self, md_data):
if self.max_volume.has_key(md_data.keys()[0]):
self.source_market_context.update(md_data)
self.instrument_id = md_data.keys()[0]
def resolve_msg(self, msg):
self.lock.acquire()
try:
if msg is None or msg.get("type") is None:
return
# extract the market data payload from the message
md_data = msg.get("data")
if msg.get("type") == "marketdata":
self.make_target(md_data)
# place orders against the simulated book and refresh the timestamp
self.req_order()
self.source_time.update({md_data.keys()[0]: datetime.now()})
elif msg.get("type") == "makemarket":
self.recv_target(md_data)
ins = md_data.keys()[0]
# if more than a minute has passed since the last simulated quote, place an order and refresh the cached time
if not self.source_time.has_key(ins):
self.source_time.update({ins: datetime.now()})
self.req_order()
elif (datetime.now() - self.source_time.get(ins)).total_seconds() >= 60:
self.source_time.update({ins: datetime.now()})
self.market_order()
finally:
self.lock.release()
def req_order(self):
security_id = self.instrument_id
if self.source_market_context.has_key(security_id) and self.target_market_context.has_key(security_id):
source_market = self.source_market_context[security_id]
target_market = self.target_market_context[security_id]
orders = self.gen_order(source_market, target_market)
for order in orders:
self.result_queue.put(order)
def market_order(self):
order = {"SecurityID": self.instrument_id, "Direction": "0", "VolumeTotalOriginal": 100,
"LimitPrice": 0, "OrderPriceType": '3'}
self.result_queue.put(order)
def gen_order(self, source_market, target_market):
security_id = str(target_market["InstrumentID"])
max_volume = self.max_volume.get(security_id)
target_price = self.__to_float(target_market["LastPrice"])
source_price = self.__to_float(source_market["LastPrice"])
upper_price = self.__to_float(source_market["UpperLimitPrice"])
lower_price = self.__to_float(source_market["LowerLimitPrice"])
if target_price > upper_price:
target_price = upper_price
if target_price < lower_price:
target_price = lower_price
orders = []
if not self.__check_price_valid(source_price):
order0 = {"SecurityID": security_id, "Direction": "0", "VolumeTotalOriginal": 100,
"LimitPrice": target_price}
order1 = {"SecurityID": security_id, "Direction": "1", "VolumeTotalOriginal": 100,
"LimitPrice": target_price}
orders.append(order0)
orders.append(order1)
elif target_price > source_price:
s_a1_p = source_market["AskPrice1"]
s_a1_v = source_market["AskVolume1"]
s_a2_p = source_market["AskPrice2"]
s_a2_v = source_market["AskVolume2"]
s_a3_p = source_market["AskPrice3"]
s_a3_v = source_market["AskVolume3"]
s_a4_p = source_market["AskPrice4"]
s_a4_v = source_market["AskVolume4"]
s_a5_p = source_market["AskPrice5"]
s_a5_v = source_market["AskVolume5"]
# walk the five-level book to find the deepest valid ask price
temp_price = 0
if self.__check_price_valid(s_a1_p):
temp_price = s_a1_p
if self.__check_price_valid(s_a2_p):
temp_price = s_a2_p
if self.__check_price_valid(s_a3_p):
temp_price = s_a3_p
if self.__check_price_valid(s_a4_p):
temp_price = s_a4_p
if self.__check_price_valid(s_a5_p):
temp_price = s_a5_p
if target_price > temp_price > 0:
target_price = temp_price
order1 = {"SecurityID": security_id, "Direction": "0", "VolumeTotalOriginal": 0, "LimitPrice": target_price}
if self.__check_price_valid(s_a5_p) and target_price >= s_a5_p:
order1["VolumeTotalOriginal"] = s_a1_v + s_a2_v + s_a3_v + s_a4_v + s_a5_v
elif self.__check_price_valid(s_a4_p) and target_price >= s_a4_p:
order1["VolumeTotalOriginal"] = s_a1_v + s_a2_v + s_a3_v + s_a4_v
elif self.__check_price_valid(s_a3_p) and target_price >= s_a3_p:
order1["VolumeTotalOriginal"] = s_a1_v + s_a2_v + s_a3_v
elif self.__check_price_valid(s_a2_p) and target_price >= s_a2_p:
order1["VolumeTotalOriginal"] = s_a1_v + s_a2_v
elif self.__check_price_valid(s_a1_p) and target_price >= s_a1_p:
order1["VolumeTotalOriginal"] = s_a1_v
# if the computed volume is 0, fall back to a default (100, or 1 when max_volume is small)
if order1["VolumeTotalOriginal"] == 0:
v = 100 if max_volume > 100 else 1
order1["VolumeTotalOriginal"] = v
order2 = {"SecurityID": security_id, "Direction": "1", "VolumeTotalOriginal": v,
"LimitPrice": target_price}
# orders.append(order2)
orders.append(order1)
else:
volume_list = []
quotient = divmod(int(order1["VolumeTotalOriginal"]), int(max_volume))[0] - 1
remainder = divmod(int(order1["VolumeTotalOriginal"]), int(max_volume))[1]
while quotient >= 0:
quotient -= 1
volume_list.append(int(max_volume))
volume_list.append(remainder)
# submit the order in chunks
for vol in volume_list:
orders.append({"SecurityID": security_id,
"Direction": "0",
"VolumeTotalOriginal": vol,
"LimitPrice": target_price})
break
elif target_price < source_price:
s_b1_p = source_market["BidPrice1"]
s_b1_v = source_market["BidVolume1"]
s_b2_p = source_market["BidPrice2"]
s_b2_v = source_market["BidVolume2"]
s_b3_p = source_market["BidPrice3"]
s_b3_v = source_market["BidVolume3"]
s_b4_p = source_market["BidPrice4"]
s_b4_v = source_market["BidVolume4"]
s_b5_p = source_market["BidPrice5"]
s_b5_v = source_market["BidVolume5"]
# walk the five-level book to find the deepest valid bid price
temp_price = 0
if self.__check_price_valid(s_b1_p):
temp_price = s_b1_p
if self.__check_price_valid(s_b2_p):
temp_price = s_b2_p
if self.__check_price_valid(s_b3_p):
temp_price = s_b3_p
if self.__check_price_valid(s_b4_p):
temp_price = s_b4_p
if self.__check_price_valid(s_b5_p):
temp_price = s_b5_p
if target_price < temp_price:
target_price = temp_price
order1 = {"SecurityID": security_id, "Direction": "1", "VolumeTotalOriginal": 0, "LimitPrice": target_price}
if self.__check_price_valid(s_b5_p) and target_price <= s_b5_p:
order1["VolumeTotalOriginal"] = s_b1_v + s_b2_v + s_b3_v + s_b4_v + s_b5_v
elif self.__check_price_valid(s_b4_p) and target_price <= s_b4_p:
order1["VolumeTotalOriginal"] = s_b1_v + s_b2_v + s_b3_v + s_b4_v
elif self.__check_price_valid(s_b3_p) and target_price <= s_b3_p:
order1["VolumeTotalOriginal"] = s_b1_v + s_b2_v + s_b3_v
elif self.__check_price_valid(s_b2_p) and target_price <= s_b2_p:
order1["VolumeTotalOriginal"] = s_b1_v + s_b2_v
elif self.__check_price_valid(s_b1_p) and target_price <= s_b1_p:
order1["VolumeTotalOriginal"] = s_b1_v
if order1["VolumeTotalOriginal"] == 0:
v = 100 if max_volume > 100 else 1
order1["VolumeTotalOriginal"] = v
order2 = {"SecurityID": security_id, "Direction": "0", "VolumeTotalOriginal": v,
"LimitPrice": target_price}
# orders.append(order2)
orders.append(order1)
else:
volume_list = []
quotient = divmod(int(order1["VolumeTotalOriginal"]), int(max_volume))[0] - 1
remainder = divmod(int(order1["VolumeTotalOriginal"]), int(max_volume))[1]
while quotient >= 0:
quotient -= 1
volume_list.append(int(max_volume))
volume_list.append(remainder)
# submit the order in chunks
for vol in volume_list:
orders.append({"SecurityID": security_id,
"Direction": "1",
"VolumeTotalOriginal": vol,
"LimitPrice": target_price})
break
return orders
def __to_float(self, float_str):
return float(float_str) if float_str else 0
def __to_int(self, int_str):
return int(int_str) if int_str else 0
def __check_price_valid(self, price):
if float(sys.float_info.max) == price or price == 0:
return False
return True
def main():
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["exchange", "xmq"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files)
makemarket_order(context, conf)
if __name__ == "__main__":
main()
```
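When a synthetic order exceeds the exchange's MaxLimitOrderVolume, `gen_order` splits it with divmod into full-size chunks plus a remainder. A tidied standalone sketch of that split (unlike the loop above it skips a zero remainder); the function name and sample numbers are illustrative.
```python
def split_volume(total_volume, max_per_order):
    """Split an order volume into chunks no larger than max_per_order."""
    full_chunks, remainder = divmod(int(total_volume), int(max_per_order))
    chunks = [int(max_per_order)] * full_chunks
    if remainder:
        chunks.append(remainder)
    return chunks


if __name__ == "__main__":
    print(split_volume(950, 300))  # [300, 300, 300, 50]
    print(split_volume(600, 300))  # [300, 300]
    print(split_volume(80, 300))   # [80]
```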
#### File: xops/extrader/order_policy_union.py
```python
import json
import Queue
import sys
import csv
import os
import threading
import time
import random
from datetime import datetime
from msg_resolver_qry_insstatus import QryInstrumentStatusMsgResolver
from xmq import xmq_pusher, xmq_resolving_suber, xmq_msg_resolver, xmq_resolving_puller
from utils import Configuration, parse_conf_args, log, path
def order_union(context, conf):
pid = os.getpid()
logger = log.get_logger(category="OrderUnion")
logger.info(
"[start union order order with %s] begin" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))
order_conf = conf.get("orderConf")
# stock or futures market
market_type = conf.get("type")
# subscriber for instrument status messages
xmq_source_conf = context.get("xmq").get(conf.get("sourceMQ"))
source_mq_addr = xmq_source_conf["address"]
source_mq_topic = xmq_source_conf["topic"]
msg_source_suber_status = xmq_resolving_suber(source_mq_addr, source_mq_topic)
md_resolver_status = QryInstrumentStatusMsgResolver()
msg_source_suber_status.add_resolver(md_resolver_status)
md_resolver = UnionMsgResolver(md_resolver_status, market_type)
# load the order data source
file_source = path.convert(conf.get("fileSource"))
order_source_data = [row for row in csv.DictReader(open(file_source))]
load_marketdata(order_source_data, md_resolver, order_conf)
# pusher for order submissions
xmq_target_conf = context.get("xmq").get(conf.get("targetMQ"))
target_mq_addr = xmq_target_conf["address"]
target_mq_topic = xmq_target_conf["topic"]
msg_target_pusher = xmq_pusher(target_mq_addr, target_mq_topic)
# keep requesting instrument status until it arrives
while not md_resolver_status.status:
msg_target_pusher.send({"type": "get_status"})
logger.info("order_policy_union check marketdata status waiting !!! ")
time.sleep(5)
# subscriber for real-market quotes
xmq_source_conf = context.get("xmq").get(conf.get("tmdSourceMQ"))
source_mq_addr = xmq_source_conf["address"]
source_mq_topic = xmq_source_conf["topic"]
t_msg_source_suber = xmq_resolving_suber(source_mq_addr, source_mq_topic)
t_msg_source_suber.add_resolver(md_resolver)
# puller for simulated-market quotes
xmq_source_conf = context.get("xmq").get(conf.get("smdSourceMQ"))
source_mq_addr = xmq_source_conf["address"]
source_mq_topic = xmq_source_conf["topic"]
s_msg_source_puller = xmq_resolving_puller(source_mq_addr, source_mq_topic)
s_msg_source_puller.add_resolver(md_resolver)
count = 0
while True:
while not md_resolver.result_queue.empty():
result = md_resolver.result_queue.get()
# check the instrument status
if result.get("SecurityID") not in md_resolver_status.istatus:
continue
if str(md_resolver_status.istatus.get(result.get("SecurityID")).get("InstrumentStatus")) in ('2', '3'):
input_params = {"InstrumentID": result.get("SecurityID"),
"LimitPrice": result.get("LimitPrice"),
"VolumeTotalOriginal": int(result.get("VolumeTotalOriginal")),
"Direction": ord(result.get("Direction")),
"ParticipantID": conf.get("ParticipantID"),
"ClientID": conf.get("clientId"),
"OrderPriceType": result.get("OrderPriceType"),
"order_type": result.get("order_type")}
# logger.info(input_params)
seq = str(pid) + "_" + str(count) + "_" + result.get("order_type")
msg_target_pusher.send({"type": "order", "data": input_params, "seq": seq})
# logger.info(seq)
print seq
count += 1
def load_marketdata(marketdata, UnionMsgResolver, conf):
for data in marketdata:
digit = get_decimal_digit(float(data.get("PriceTick")))
if data.get("ValueMode") == '1':
lower = round((1 - float(data.get("LowerValue"))) * float(data.get("PreClosePrice")), digit)
upper = round((1 + float(data.get("UpperValue"))) * float(data.get("PreClosePrice")), digit)
elif data.get("ValueMode") == '2':
lower = float(data.get("PreClosePrice")) - float(data.get("LowerValue"))
upper = float(data.get("UpperValue")) + float(data.get("PreClosePrice"))
else:
continue
instrument_id = data.get("InstrumentID")
pre_close_price = data.get("PreClosePrice")
max_limit_order_volume = data.get("MaxLimitOrderVolume")
product_class = str(data.get("ProductClass"))
price_tick = data.get("PriceTick")
one_row = dict({
instrument_id: {
'BidPrice5': 0.00000,
'BidPrice4': 0.00000,
'BidPrice1': 0.00000,
'BidPrice3': 0.00000,
'BidPrice2': 0.00000,
'LowerLimitPrice': lower,
'AskPrice5': 0.00000,
'AskPrice4': 0.00000,
'AskPrice3': 0.00000,
'AskPrice2': 0.00000,
'AskPrice1': 0.00000,
'BidVolume5': 0,
'BidVolume4': 0,
'BidVolume3': 0,
'BidVolume2': 0,
'BidVolume1': 0,
'Volume': '0',
'AskVolume1': 0,
'AskVolume2': 0,
'AskVolume3': 0,
'AskVolume4': 0,
'AskVolume5': 0,
'UpperLimitPrice': upper,
'InstrumentID': instrument_id,
'LastPrice': pre_close_price
}
})
if conf.has_key('stock'):
conf['4'] = conf.pop('stock')
if conf.has_key('etf'):
conf['2'] = conf.pop('etf')
if conf.has_key('future'):
conf['1'] = conf.pop('future')
if conf.has_key('options'):
conf['2'] = conf.pop('options')
# cache per-instrument order settings
UnionMsgResolver.cache.update({instrument_id: {
"max_limit_order_volume": int(max_limit_order_volume),
"fifth_level_call_volume": conf.get(product_class).get("fifth_level_call_volume"),
"normal_volume": conf.get(product_class).get("normal_volume"),
"price_tick": float(price_tick)
}})
UnionMsgResolver.source(one_row)
def get_decimal_digit(decimal):
digit = 0
while True:
if decimal == int(decimal):
break
else:
decimal = decimal * 10
digit = digit + 1
return digit
class UnionMsgResolver(xmq_msg_resolver):
def __init__(self, md_resolver_status, market_type):
xmq_msg_resolver.__init__(self)
# market type
self.market_type = market_type
# simulated-market quotes
self.source_market_context = {}
# real-market quotes
self.target_market_context = {}
# per-instrument cache initialised from order_info.csv
self.cache = dict()
# timestamps of the most recently received simulated-market quotes
self.source_update_time = dict()
# instrument status
self.md_resolver_status = md_resolver_status
self.instrument_id = None
self.result_queue = Queue.Queue()
self.lock = threading.Lock()
# simulated-market quote update
def source(self, md_data):
instrument_id = md_data.keys()[0]
# only track instruments that have cached settings
if self.cache.has_key(instrument_id):
self.source_market_context.update(md_data)
self.instrument_id = instrument_id
# real-market quote update
def target(self, md_data):
self.target_market_context.update(md_data)
self.instrument_id = md_data.keys()[0]
def resolve_msg(self, msg):
self.lock.acquire()
try:
if msg is None or msg.get("type") is None:
return
# extract the market data payload from the message
md_data = msg.get("data")
ins = md_data.keys()[0]
# simulated-market quote
if msg.get("type") == "marketdata":
self.source(md_data)
self.req_order()
# cache the current time
self.source_update_time.update({ins: datetime.now()})
# real-market quote
elif msg.get("type") == "makemarket":
self.target(md_data)
if ins in self.md_resolver_status.istatus:
if not self.source_update_time.has_key(ins):
self.source_update_time.update({ins: datetime.now()})
self.req_order()
elif self.md_resolver_status.istatus.get(ins).get("InstrumentStatus") in ('3',) \
and (datetime.now() - self.source_update_time.get(ins)).total_seconds() >= 20:
# place five-level (depth) orders
self.source_update_time.update({ins: datetime.now()})
self.req_order(callmarket=True)
elif self.md_resolver_status.istatus.get(ins).get("InstrumentStatus") in ('2',) \
and (datetime.now() - self.source_update_time.get(ins)).total_seconds() >= 60:
# handle the case where more than 60 seconds have elapsed
self.source_update_time.update({ins: datetime.now()})
self.req_order(timeout=True)
finally:
self.lock.release()
def req_order(self, **kwargs):
instrument_id = self.instrument_id
if self.source_market_context.has_key(instrument_id) \
and self.target_market_context.has_key(instrument_id):
source_market = self.source_market_context[instrument_id]
target_market = self.target_market_context[instrument_id]
orders = self.gen_order(source_market, target_market, kwargs)
for order in orders:
self.result_queue.put(order)
def gen_order(self, source_market, target_market, kwargs):
# simulated-market price
source_price = self.__to_float(source_market["LastPrice"])
# real-market price
target_price = self.__to_float(target_market["LastPrice"])
upper_price = self.__to_float(source_market["UpperLimitPrice"])
lower_price = self.__to_float(source_market["LowerLimitPrice"])
if target_price > upper_price:
target_price = upper_price
if target_price < lower_price:
target_price = lower_price
# call auction?
if kwargs.get("callmarket"):
return self.policy_fifth_level(source_market, target_market, True)
# has the timeout elapsed?
if kwargs.get("timeout"):
if source_price != target_price:
# prices differ: match them first
return self.policy_make_market(source_market, target_market)
else:
return self.policy_order_direct()
else:
if source_price != target_price:
# prices differ: match them first
return self.policy_make_market(source_market, target_market)
elif source_price == target_price:
# prices equal: top up the five-level book
return self.policy_fifth_level(source_market, target_market)
# matching (make-market) orders
def policy_make_market(self, source_market, target_market):
security_id = self.instrument_id
normal_volume = self.cache.get(security_id).get("normal_volume") * random.randint(1, 10)
max_volume = self.cache.get(security_id).get("max_limit_order_volume")
target_price = self.__to_float(target_market["LastPrice"])
source_price = self.__to_float(source_market["LastPrice"])
upper_price = self.__to_float(source_market["UpperLimitPrice"])
lower_price = self.__to_float(source_market["LowerLimitPrice"])
if target_price > upper_price:
target_price = upper_price
if target_price < lower_price:
target_price = lower_price
s_a1_p = source_market["AskPrice1"]
s_a1_v = source_market["AskVolume1"]
s_a2_p = source_market["AskPrice2"]
s_a2_v = source_market["AskVolume2"]
s_a3_p = source_market["AskPrice3"]
s_a3_v = source_market["AskVolume3"]
s_a4_p = source_market["AskPrice4"]
s_a4_v = source_market["AskVolume4"]
s_a5_p = source_market["AskPrice5"]
s_a5_v = source_market["AskVolume5"]
s_b1_p = source_market["BidPrice1"]
s_b1_v = source_market["BidVolume1"]
s_b2_p = source_market["BidPrice2"]
s_b2_v = source_market["BidVolume2"]
s_b3_p = source_market["BidPrice3"]
s_b3_v = source_market["BidVolume3"]
s_b4_p = source_market["BidPrice4"]
s_b4_v = source_market["BidVolume4"]
s_b5_p = source_market["BidPrice5"]
s_b5_v = source_market["BidVolume5"]
orders = []
if not self.__check_price_valid(source_price):
order0 = {"SecurityID": security_id, "Direction": "0", "VolumeTotalOriginal": normal_volume,
"LimitPrice": target_price, "order_type": "make_market_type_1"}
order1 = {"SecurityID": security_id, "Direction": "1", "VolumeTotalOriginal": normal_volume,
"LimitPrice": target_price, "order_type": "make_market_type_2"}
orders.append(order0)
orders.append(order1)
elif self.__check_price_valid(s_a1_p) and target_price >= s_a1_p:
# walk the five-level book to find the deepest valid ask price
temp_price = 0
if self.__check_price_valid(s_a1_p):
temp_price = s_a1_p
if self.__check_price_valid(s_a2_p):
temp_price = s_a2_p
if self.__check_price_valid(s_a3_p):
temp_price = s_a3_p
if self.__check_price_valid(s_a4_p):
temp_price = s_a4_p
if self.__check_price_valid(s_a5_p):
temp_price = s_a5_p
if target_price > temp_price > 0:
target_price = temp_price
order1 = {"SecurityID": security_id, "Direction": "0", "VolumeTotalOriginal": 0, "LimitPrice": target_price,
"order_type": "make_market_type_3"}
if self.__check_price_valid(s_a5_p) and target_price >= s_a5_p:
order1["VolumeTotalOriginal"] = s_a1_v + s_a2_v + s_a3_v + s_a4_v + s_a5_v
elif self.__check_price_valid(s_a4_p) and target_price >= s_a4_p:
order1["VolumeTotalOriginal"] = s_a1_v + s_a2_v + s_a3_v + s_a4_v
elif self.__check_price_valid(s_a3_p) and target_price >= s_a3_p:
order1["VolumeTotalOriginal"] = s_a1_v + s_a2_v + s_a3_v
elif self.__check_price_valid(s_a2_p) and target_price >= s_a2_p:
order1["VolumeTotalOriginal"] = s_a1_v + s_a2_v
elif self.__check_price_valid(s_a1_p) and target_price >= s_a1_p:
order1["VolumeTotalOriginal"] = s_a1_v
# if the computed volume is 0, fall back to the normal volume
if order1["VolumeTotalOriginal"] == 0:
order1["VolumeTotalOriginal"] = normal_volume
# order2 = {"SecurityID": security_id, "Direction": "1", "VolumeTotalOriginal": v,
# "LimitPrice": target_price}
# orders.append(order2)
orders.append(order1)
else:
volume_list = []
quotient = divmod(int(order1["VolumeTotalOriginal"]), int(max_volume))[0] - 1
remainder = divmod(int(order1["VolumeTotalOriginal"]), int(max_volume))[1]
while quotient >= 0:
quotient -= 1
volume_list.append(int(max_volume))
volume_list.append(remainder)
# submit the order in chunks
for vol in volume_list:
orders.append({"SecurityID": security_id,
"Direction": "0",
"VolumeTotalOriginal": vol,
"LimitPrice": target_price,
"order_type": "make_market_type_4"})
break
elif self.__check_price_valid(s_b1_p) and target_price <= s_b1_p:
# walk the five-level book to find the deepest valid bid price
temp_price = 0
if self.__check_price_valid(s_b1_p):
temp_price = s_b1_p
if self.__check_price_valid(s_b2_p):
temp_price = s_b2_p
if self.__check_price_valid(s_b3_p):
temp_price = s_b3_p
if self.__check_price_valid(s_b4_p):
temp_price = s_b4_p
if self.__check_price_valid(s_b5_p):
temp_price = s_b5_p
if target_price < temp_price:
target_price = temp_price
order1 = {"SecurityID": security_id, "Direction": "1", "VolumeTotalOriginal": 0, "LimitPrice": target_price,
"order_type": "make_market_type_5"}
if self.__check_price_valid(s_b5_p) and target_price <= s_b5_p:
order1["VolumeTotalOriginal"] = s_b1_v + s_b2_v + s_b3_v + s_b4_v + s_b5_v
elif self.__check_price_valid(s_b4_p) and target_price <= s_b4_p:
order1["VolumeTotalOriginal"] = s_b1_v + s_b2_v + s_b3_v + s_b4_v
elif self.__check_price_valid(s_b3_p) and target_price <= s_b3_p:
order1["VolumeTotalOriginal"] = s_b1_v + s_b2_v + s_b3_v
elif self.__check_price_valid(s_b2_p) and target_price <= s_b2_p:
order1["VolumeTotalOriginal"] = s_b1_v + s_b2_v
elif self.__check_price_valid(s_b1_p) and target_price <= s_b1_p:
order1["VolumeTotalOriginal"] = s_b1_v
if order1["VolumeTotalOriginal"] == 0:
order1["VolumeTotalOriginal"] = normal_volume
# order2 = {"SecurityID": security_id, "Direction": "0", "VolumeTotalOriginal": v,
# "LimitPrice": target_price}
# orders.append(order2)
orders.append(order1)
else:
volume_list = []
quotient = divmod(int(order1["VolumeTotalOriginal"]), int(max_volume))[0] - 1
remainder = divmod(int(order1["VolumeTotalOriginal"]), int(max_volume))[1]
while quotient >= 0:
quotient -= 1
volume_list.append(int(max_volume))
volume_list.append(remainder)
# submit the order in chunks
for vol in volume_list:
orders.append({"SecurityID": security_id,
"Direction": "1",
"VolumeTotalOriginal": vol,
"LimitPrice": target_price,
"order_type": "make_market_type_6"})
break
elif not self.__check_price_valid(s_a1_p):
order = {"SecurityID": self.instrument_id, "Direction": "1", "VolumeTotalOriginal": normal_volume,
"LimitPrice": target_price, "order_type": "order_direct"}
orders.append(order)
elif not self.__check_price_valid(s_b1_p):
order = {"SecurityID": self.instrument_id, "Direction": "0", "VolumeTotalOriginal": normal_volume,
"LimitPrice": target_price, "order_type": "order_direct"}
orders.append(order)
else:
order = {"SecurityID": self.instrument_id, "Direction": "1", "VolumeTotalOriginal": normal_volume,
"LimitPrice": target_price, "order_type": "order_direct"}
orders.append(order)
return orders
# five-level (depth) orders
def policy_fifth_level(self, source_market, target_market, callmarket=False):
security_id = self.instrument_id
# normal_volume = self.cache.get(security_id).get("normal_volume") * random.randint(1, 10)
# volume = fifth_level_call_volume if callmarket else normal_volume
last_price = self.__to_float(source_market["LastPrice"])
price_tick = self.cache.get(security_id).get("price_tick")
fifth_level_call_volume = self.cache.get(security_id).get("fifth_level_call_volume")
upper_price = self.__to_float(source_market["UpperLimitPrice"])
lower_price = self.__to_float(source_market["LowerLimitPrice"])
s_a1_p = self.__to_float(source_market["AskPrice1"])
s_a2_p = self.__to_float(source_market["AskPrice2"])
s_a3_p = self.__to_float(source_market["AskPrice3"])
s_a4_p = self.__to_float(source_market["AskPrice4"])
s_a5_p = self.__to_float(source_market["AskPrice5"])
t_a1_p = self.__to_float(target_market["AskPrice1"])
t_a2_p = self.__to_float(target_market["AskPrice2"])
t_a3_p = self.__to_float(target_market["AskPrice3"])
t_a4_p = self.__to_float(target_market["AskPrice4"])
t_a5_p = self.__to_float(target_market["AskPrice5"])
s_b1_p = self.__to_float(source_market["BidPrice1"])
s_b2_p = self.__to_float(source_market["BidPrice2"])
s_b3_p = self.__to_float(source_market["BidPrice3"])
s_b4_p = self.__to_float(source_market["BidPrice4"])
s_b5_p = self.__to_float(source_market["BidPrice5"])
t_b1_p = self.__to_float(target_market["BidPrice1"])
t_b2_p = self.__to_float(target_market["BidPrice2"])
t_b3_p = self.__to_float(target_market["BidPrice3"])
t_b4_p = self.__to_float(target_market["BidPrice4"])
t_b5_p = self.__to_float(target_market["BidPrice5"])
t_a1_v = self.__to_float(target_market["AskVolume1"])
t_a2_v = self.__to_float(target_market["AskVolume2"])
t_a3_v = self.__to_float(target_market["AskVolume3"])
t_a4_v = self.__to_float(target_market["AskVolume4"])
t_a5_v = self.__to_float(target_market["AskVolume5"])
t_b1_v = self.__to_float(target_market["BidVolume1"])
t_b2_v = self.__to_float(target_market["BidVolume2"])
t_b3_v = self.__to_float(target_market["BidVolume3"])
t_b4_v = self.__to_float(target_market["BidVolume4"])
t_b5_v = self.__to_float(target_market["BidVolume5"])
orders = []
# simulated and real five-level ask prices/volumes
s_a_p = [s_a1_p, s_a2_p, s_a3_p, s_a4_p, s_a5_p]
t_a_p = [t_a1_p, t_a2_p, t_a3_p, t_a4_p, t_a5_p]
t_a_v = [t_a1_v, t_a2_v, t_a3_v, t_a4_v, t_a5_v]
# simulated and real five-level bid prices/volumes
s_b_p = [s_b1_p, s_b2_p, s_b3_p, s_b4_p, s_b5_p]
t_b_p = [t_b1_p, t_b2_p, t_b3_p, t_b4_p, t_b5_p]
t_b_v = [t_b1_v, t_b2_v, t_b3_v, t_b4_v, t_b5_v]
# candidate ask/bid prices one tick step away from the last price
s_t_a_p = []
s_t_b_p = []
for i in range(1, 6):
s_t_a_p.append(last_price + i * price_tick)
s_t_b_p.append(last_price - i * price_tick)
# call auction: fill any level missing from the simulated book
if callmarket:
for (index, ap) in enumerate(s_a_p):
if ap == 0 and t_a_p[index] != 0:
orders.append({
"SecurityID": security_id,
"Direction": "1",
"VolumeTotalOriginal": min(t_a_v[index], fifth_level_call_volume),
"LimitPrice": t_a_p[index],
"level": "a" + str(index + 1),
"order_type": "fifth_level_call_market_sell"
})
for (index, bp) in enumerate(s_b_p):
if bp == 0 and t_b_p[index] != 0:
orders.append({
"SecurityID": security_id,
"Direction": "0",
"VolumeTotalOriginal": min(t_b_v[index], fifth_level_call_volume),
"LimitPrice": t_b_p[index], "level": "b" + str(index + 1),
"order_type": "fifth_level_call_market_buy"
})
return orders
for (index, ap) in enumerate(s_a_p):
# limit-down
if index == 0 and 0 < t_a1_p <= lower_price:
orders.append({
"SecurityID": security_id,
"Direction": "1",
"VolumeTotalOriginal": t_a_v[index],
"LimitPrice": t_a1_p, "level": "a1", "order_type": "fifth_level_type"
})
return orders
# real ask1 is below the simulated ask1
if index == 0 and 0 < t_a1_p < ap:
orders.append({
"SecurityID": security_id,
"Direction": "1",
"VolumeTotalOriginal": min(t_a_v[index], fifth_level_call_volume),
"LimitPrice": t_a1_p, "level": "a1", "order_type": "fifth_level_type_1"
})
return orders
# futures: top up the missing depth levels
if self.market_type == 'future':
fifth_add = None
if s_a_p.count(0) != 0:
for value in s_t_a_p:
if value not in s_a_p:
fifth_add = value
break
if fifth_add is not None:
orders.append({
"SecurityID": security_id,
"Direction": "1",
"VolumeTotalOriginal": random.randint(8, 10),
"LimitPrice": fifth_add, "level": "a1", "order_type": "fifth_level_type_add_sell"
})
return orders
# stocks
if self.market_type == 'stock':
if ap == 0 and t_a_p[index] != 0:
orders.append({
"SecurityID": security_id,
"Direction": "1",
"VolumeTotalOriginal": min(t_a_v[index], fifth_level_call_volume),
"LimitPrice": t_a_p[index], "level": "a" + str(index + 1), "order_type": "fifth_level_type_2"
})
return orders
for (index, bp) in enumerate(s_b_p):
# limit-up
if index == 0 and t_b1_p >= upper_price:
orders.append({
"SecurityID": security_id,
"Direction": "0",
"VolumeTotalOriginal": t_b_v[index],
"LimitPrice": t_b1_p, "level": "b1", "order_type": "fifth_level_type"
})
return orders
# real bid1 is above the simulated bid1
if index == 0 and t_b1_p > bp:
orders.append({
"SecurityID": security_id,
"Direction": "0",
"VolumeTotalOriginal": min(t_b_v[index], fifth_level_call_volume),
"LimitPrice": t_b1_p, "level": "b1", "order_type": "fifth_level_type_3"
})
return orders
# futures: top up the missing depth levels
if self.market_type == 'future':
fifth_add = None
if s_b_p.count(0) != 0:
for value in s_t_b_p:
if value not in s_b_p:
fifth_add = value
break
if fifth_add is not None:
orders.append({
"SecurityID": security_id,
"Direction": "0",
"VolumeTotalOriginal": random.randint(8, 10),
"LimitPrice": fifth_add, "level": "b1", "order_type": "fifth_level_type_add_buy"
})
return orders
# stocks
if self.market_type == 'stock':
if bp == 0 and t_b_p[index] != 0:
orders.append({
"SecurityID": security_id,
"Direction": "0",
"VolumeTotalOriginal": min(t_b_v[index], fifth_level_call_volume),
"LimitPrice": t_b_p[index], "level": "b" + str(index + 1), "order_type": "fifth_level_type_4"
})
return orders
return orders
# market order
def policy_order_direct(self):
normal_volume = self.cache.get(self.instrument_id).get("normal_volume")
order = {"SecurityID": self.instrument_id, "Direction": "0", "VolumeTotalOriginal": normal_volume,
"LimitPrice": 0, "OrderPriceType": '3', "order_type": "order_direct"}
return [order]
def __to_float(self, float_str):
return float(float_str) if float_str and sys.float_info.max != float(float_str) else 0
def __to_int(self, int_str):
return int(int_str) if int_str else 0
def __check_price_valid(self, price):
if float(sys.float_info.max) == price or price == 0:
return False
return True
def main():
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["exchange", "xmq"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files,
add_ons=add_ons)
order_union(context, conf)
if __name__ == "__main__":
main()
```
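The futures branch of `policy_fifth_level` tops up missing depth levels with synthetic prices spaced one tick away from the last price. A standalone sketch of that ladder construction; the function names and sample numbers are illustrative.
```python
def build_tick_ladder(last_price, price_tick, levels=5):
    """Candidate ask and bid prices one tick apart from the last traded price."""
    asks = [last_price + i * price_tick for i in range(1, levels + 1)]
    bids = [last_price - i * price_tick for i in range(1, levels + 1)]
    return asks, bids


def first_missing_level(quoted_prices, candidates):
    """First candidate price not already quoted, or None when the book is full."""
    for price in candidates:
        if price not in quoted_prices:
            return price
    return None


if __name__ == "__main__":
    asks, bids = build_tick_ladder(350.0, 0.5)
    print(asks)                                        # [350.5, 351.0, 351.5, 352.0, 352.5]
    print(first_missing_level([350.5, 351.0], asks))   # 351.5
```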
#### File: xops/extrader/trader_handler.py
```python
import threading
import shfetraderapi
import time
import os
from utils import log
class TraderHandler(shfetraderapi.CShfeFtdcTraderSpi):
def __init__(self, trader_api, user_id, password, tradingday):
self.logger = log.get_logger(category="TraderSpi")
shfetraderapi.CShfeFtdcTraderSpi.__init__(self)
self.trader_api = trader_api
self.userId = user_id
self.password = password
self.tradingday = tradingday
self.is_connected = False
self.is_logined = False
self.cache_md_status = dict()
self.request_id = 0
self.lock = threading.Lock()
self.private_worker = False
# create a lock file (prevents multiple processes from sending duplicate instrument status messages)
try:
os.mknod("private_worker.con", 0600)
self.private_worker = True
except OSError:
pass
def set_msg_puber(self, msg_puber):
self.msg_puber = msg_puber
def send_status(self):
for md in self.cache_md_status:
self.msg_puber.send({"type": "istatus", "data": {md: self.cache_md_status.get(md)}})
def get_request_id(self):
self.lock.acquire()
self.request_id += 1
req_id = self.request_id
self.lock.release()
return req_id
def ReqQryInstrumentStatus(self):
req_status_field = shfetraderapi.CShfeFtdcQryInstrumentStatusField()
self.trader_api.ReqQryInstrumentStatus(req_status_field, self.get_request_id())
def OnFrontConnected(self):
self.logger.info("OnFrontConnected")
self.is_connected = True
req_login_field = shfetraderapi.CShfeFtdcReqUserLoginField()
req_login_field.UserID = str(self.userId)
req_login_field.Password = str(self.password)
req_login_field.TradingDay = str(self.tradingday)
self.trader_api.ReqUserLogin(req_login_field, self.get_request_id())
def OnFrontDisconnected(self, nReason):
self.logger.info("OnFrontDisconnected: %s" % str(nReason))
self.is_connected = False
def OnRspUserLogin(self, pRspUserLogin, pRspInfo, nRequestID, bIsLast):
self.logger.info("OnRspUserLogin")
if pRspInfo is not None and pRspInfo.ErrorID != 0:
self.logger.error("login failed : %s" % pRspInfo.ErrorMsg.decode("GBK").encode("UTF-8"))
time.sleep(3)
req_login_field = shfetraderapi.CShfeFtdcReqUserLoginField()
req_login_field.UserID = str(self.userId)
req_login_field.Password = str(self.password)
req_login_field.TradingDay = str(self.tradingday)
self.trader_api.ReqUserLogin(req_login_field, self.get_request_id())
else:
self.logger.info("login success")
self.is_logined = True
def OnRspQryInstrumentStatus(self, pInstrumentStatus, pRspInfo, nRequestID, bIsLast):
self.logger.info("OnRspQryInstrumentStatus")
if pRspInfo is not None and pRspInfo.ErrorID != 0:
self.logger.error("login failed : %s" % pRspInfo.ErrorMsg.decode("GBK").encode("UTF-8"))
else:
if pInstrumentStatus is not None:
self.logger.info("instrument[%s] current status is [%s]" % (
pInstrumentStatus.InstrumentID, pInstrumentStatus.InstrumentStatus))
msg = {"type": "istatus", "data": {
pInstrumentStatus.InstrumentID: {"InstrumentID": pInstrumentStatus.InstrumentID,
"InstrumentStatus": pInstrumentStatus.InstrumentStatus}}}
self.cache_md_status.update(msg.get("data"))
self.msg_puber.send(msg)
def OnRtnInstrumentStatus(self, pInstrumentStatus):
self.logger.info("OnRtnInstrumentStatus")
if pInstrumentStatus is not None:
self.logger.info("instrument[%s] current status is [%s]" % (
pInstrumentStatus.InstrumentID, pInstrumentStatus.InstrumentStatus))
msg = {"type": "istatus", "data": {
pInstrumentStatus.InstrumentID: {"InstrumentID": pInstrumentStatus.InstrumentID,
"InstrumentStatus": pInstrumentStatus.InstrumentStatus}}}
self.cache_md_status.update(msg.get("data"))
if self.private_worker:
self.msg_puber.send(msg)
def control_md_status(self):
self.lock.acquire()
try:
pass
finally:
self.lock.release()
def OnRspOrderInsert(self, pInputOrder, pRspInfo, nRequestID, bIsLast):
if pRspInfo is not None and pRspInfo.ErrorID != 0:
self.logger.error("OnRspOrderInsert failed : %s" % pRspInfo.ErrorMsg.decode("GBK").encode("UTF-8"))
else:
pass
# def OnRspOrderAction(self, pOrderAction, pRspInfo, nRequestID, bIsLast):
# self.logger.info("OnRspOrderAction")
# if pRspInfo is not None and pRspInfo.ErrorID != 0:
# self.logger.error("OnRspOrderAction failed : %s" % pRspInfo.ErrorMsg.decode("GBK").encode("UTF-8"))
# else:
# if pOrderAction is not None:
# msg = {
# "OrderSysID": pOrderAction.OrderSysID,
# "OrderLocalID": pOrderAction.OrderLocalID,
# "ActionFlag": pOrderAction.ActionFlag,
# "ParticipantID": pOrderAction.ParticipantID,
# "ClientID": pOrderAction.ClientID,
# "UserID": pOrderAction.UserID,
# "LimitPrice": pOrderAction.LimitPrice,
# "VolumeChange": pOrderAction.VolumeChange,
# "ActionLocalID": pOrderAction.ActionLocalID,
# "BusinessUnit": pOrderAction.BusinessUnit,
# "BusinessLocalID": pOrderAction.BusinessLocalID
# }
# print msg
def OnRtnOrder(self, pOrder):
self.logger.info("OnRtnOrder")
if pOrder is not None:
data = {
"TradingDay": pOrder.TradingDay,
# "SettlementGroupID": pOrder.SettlementGroupID,
# "SettlementID": pOrder.SettlementID,
"OrderSysID": pOrder.OrderSysID,
"ParticipantID": pOrder.ParticipantID,
"ClientID": pOrder.ClientID,
# "UserID": pOrder.UserID,
# "InstrumentID": pOrder.InstrumentID,
# "OrderPriceType": pOrder.OrderPriceType,
# "Direction": pOrder.Direction,
# "CombOffsetFlag": pOrder.CombOffsetFlag,
# "CombHedgeFlag": pOrder.CombHedgeFlag,
# "LimitPrice": pOrder.LimitPrice,
# "VolumeTotalOriginal": pOrder.VolumeTotalOriginal,
# "TimeCondition": pOrder.TimeCondition,
# "GTDDate": pOrder.GTDDate,
# "VolumeCondition": pOrder.VolumeCondition,
# "MinVolume": pOrder.MinVolume,
# "ContingentCondition": pOrder.ContingentCondition,
# "StopPrice": pOrder.StopPrice,
# "ForceCloseReason": pOrder.ForceCloseReason,
# "OrderLocalID": pOrder.OrderLocalID,
# "IsAutoSuspend": pOrder.IsAutoSuspend,
# "OrderSource": pOrder.OrderSource,
"OrderStatus": pOrder.OrderStatus,
# "OrderType": pOrder.OrderType,
# "VolumeTraded": pOrder.VolumeTraded,
# "VolumeTotal": pOrder.VolumeTotal,
# "InsertDate": pOrder.InsertDate,
# "InsertTime": pOrder.InsertTime,
# "ActiveTime": pOrder.ActiveTime,
# "SuspendTime": pOrder.SuspendTime,
# "UpdateTime": pOrder.UpdateTime,
# "CancelTime": pOrder.CancelTime,
# "ActiveUserID": pOrder.ActiveUserID,
# "Priority": pOrder.Priority,
# "TimeSortID": pOrder.TimeSortID,
# "ClearingPartID": pOrder.ClearingPartID,
# "BusinessUnit": pOrder.BusinessUnit,
# "BusinessLocalID": pOrder.BusinessLocalID,
# "ActionDay": pOrder.ActionDay,
}
msg = {"type": "rtnOrder",
"data": [pOrder.TradingDay,
pOrder.OrderSysID,
pOrder.ParticipantID,
pOrder.ClientID,
pOrder.OrderStatus]}
self.msg_puber.send(msg)
def OnRtnTrade(self, pTrade):
self.logger.info("OnRtnTrade")
if pTrade is not None:
data = {
"TradingDay": pTrade.TradingDay,
# "SettlementGroupID": pTrade.SettlementGroupID,
# "SettlementID": pTrade.SettlementID,
# "TradeID": pTrade.TradeID,
# "Direction": pTrade.Direction,
"OrderSysID": pTrade.OrderSysID,
"ParticipantID": pTrade.ParticipantID,
"ClientID": pTrade.ClientID,
# "TradingRole": pTrade.TradingRole,
# "AccountID": pTrade.AccountID,
# "InstrumentID": pTrade.InstrumentID,
# "OffsetFlag": pTrade.OffsetFlag,
# "HedgeFlag": pTrade.HedgeFlag,
# "Price": pTrade.Price,
# "Volume": pTrade.Volume,
# "TradeTime": pTrade.TradeTime,
# "TradeType": pTrade.TradeType,
# "PriceSource": pTrade.PriceSource,
# "UserID": pTrade.UserID,
# "OrderLocalID": pTrade.OrderLocalID,
# "ClearingPartID": pTrade.ClearingPartID,
# "BusinessUnit": pTrade.BusinessUnit,
# "BusinessLocalID": pTrade.BusinessLocalID,
# "ActionDay": pTrade.ActionDay,
}
msg = {"type": "rtnTrade",
"data": [pTrade.TradingDay,
pTrade.OrderSysID,
pTrade.ParticipantID,
pTrade.ClientID]}
self.msg_puber.send(msg)
```
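TraderHandler elects one "private worker" per host by atomically creating a lock file with `os.mknod`, so only a single process republishes instrument status. A portable sketch of the same idea using `os.open` with `O_CREAT | O_EXCL`; the lock file name is the same placeholder used above, and, as in the original, nothing removes the file afterwards, so stale locks need external cleanup.
```python
import os


def try_become_private_worker(lock_path="private_worker.con"):
    """Atomically create the lock file; only the first process on the host succeeds."""
    try:
        fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600)
        os.write(fd, str(os.getpid()).encode())
        os.close(fd)
        return True   # this process will publish instrument status messages
    except OSError:
        return False  # another process already holds the lock


if __name__ == "__main__":
    print("private worker" if try_become_private_worker() else "standby worker")
```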
#### File: xops/settlement/publish_stock_broker.py
```python
import json
from utils import Configuration, mysql, log, parse_conf_args, process_assert
def publish_stock(context, conf):
result_code = 0
logger = log.get_logger(category="PublishStockBroker")
mysql_pool = mysql(configs=context.get("mysql").get(conf.get("mysqlId")))
mysql_conn = mysql_pool.get_cnx()
mysql_conn.set_charset_collation('utf8')
broker_system_id = conf.get("brokerSystemId")
logger.info("[publish stock broker %s] begin" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))
try:
mysql_conn.start_transaction()
cursor = mysql_conn.cursor()
logger.info("[get current trading day]......")
sql = """SELECT
DISTINCT t1.tradingday, t1.lasttradingday
FROM
siminfo.t_tradesystemtradingday t1,
siminfo.t_tradesystemsettlementgroup t2,
siminfo.t_brokersystemsettlementgroup t3
WHERE t1.tradesystemid = t2.tradesystemid
AND t2.settlementgroupid = t3.settlementgroupid
AND t3.brokersystemid = %s"""
cursor.execute(sql, (broker_system_id,))
row = cursor.fetchone()
current_trading_day = str(row[0])
last_trading_day = str(row[1])
logger.info("[get current trading day] current_trading_day = %s, last_trading_day = %s" % (current_trading_day, last_trading_day))
logger.info("[get next trading day]......")
# decide whether holidays are skipped
holiday = conf.get("holiday")
if holiday is True or holiday is None:
sql = """SELECT DAY FROM siminfo.t_TradingCalendar t WHERE t.day > %s AND t.tra = '1' ORDER BY DAY LIMIT 1"""
else:
sql = """SELECT DAY FROM siminfo.t_TradingCalendar t WHERE t.day > %s ORDER BY DAY LIMIT 1"""
cursor.execute(sql, (current_trading_day,))
row = cursor.fetchone()
next_trading_day = str(row[0])
logger.info("[get next trading day] next_trading_day = %s" % (next_trading_day))
# pre-process investor funds
logger.info("[reset investor fund]......")
sql = """UPDATE siminfo.t_investorfund t1
SET t1.prebalance = t1.balance, t1.prestockvalue = t1.stockvalue, t1.stockvalue = 0, t1.fee = 0,
t1.premonthasset = IF(MONTH(%s) - MONTH(%s) = 0, t1.premonthasset, t1.currentasset),
t1.preweekasset = IF(WEEK(%s, 1) - WEEK(%s, 1) = 0, t1.preweekasset, t1.currentasset),
t1.preasset = t1.currentasset,
t1.currentasset = t1.balance
WHERE t1.brokersystemid = %s"""
cursor.execute(sql, (current_trading_day, last_trading_day, current_trading_day, last_trading_day, broker_system_id, ))
for settlement_groups in conf.get("settlementGroups"):
settlement_group_id = settlement_groups.get("settlementGroupId")
settlement_id = settlement_groups.get("settlementId")
# check the settlement status
logger.info("[check settlement status]......")
sql = """SELECT
t1.tradingday, t1.settlementgroupid, t1.settlementid, t1.settlementstatus
FROM
dbclear.t_settlement t1
WHERE t1.tradingday = %s
AND t1.settlementgroupid = %s
AND t1.settlementid = %s for update"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
row = cursor.fetchone()
if row is None:
logger.error("[publish stock broker] Error: There is no data for %s-%s." % (settlement_group_id, settlement_id))
result_code = -1
elif row[3] == '0':
logger.error("[publish stock broker] Error: Settlement for %s-%s has not done." % (settlement_group_id, settlement_id))
result_code = -1
elif row[3] == '2':
logger.error("[publish stock broker] Error: Settlement for %s-%s has been published." % (settlement_group_id, settlement_id))
result_code = -1
else:
# Update client positions
logger.info("[update client position]......")
sql = """DELETE FROM siminfo.t_clientposition WHERE settlementgroupid = %s"""
cursor.execute(sql, (settlement_group_id,))
sql = """INSERT INTO siminfo.t_clientposition(TradingDay,SettlementGroupID,SettlementID,HedgeFlag,PosiDirection,YdPosition,Position,LongFrozen,ShortFrozen,YdLongFrozen,YdShortFrozen,BuyTradeVolume,SellTradeVolume,PositionCost,YdPositionCost,UseMargin,FrozenMargin,LongFrozenMargin,ShortFrozenMargin,FrozenPremium,InstrumentID,ParticipantID,ClientID)
SELECT %s,SettlementGroupID,SettlementID,HedgeFlag,'3',Position,0,0,0,LongFrozen,ShortFrozen,0,0,0,PositionCost,UseMargin,0,0,0,FrozenPremium,InstrumentID,ParticipantID,ClientID
FROM dbclear.t_clientposition t WHERE t .tradingday = %s AND t.settlementgroupid = %s AND t.settlementid = %s"""
cursor.execute(sql, (next_trading_day, current_trading_day, settlement_group_id, settlement_id))
# Update participant positions
logger.info("[update part position]......")
sql = """DELETE FROM siminfo.t_partposition WHERE settlementgroupid = %s"""
cursor.execute(sql, (settlement_group_id,))
sql = """INSERT INTO siminfo.t_partposition(TradingDay,SettlementGroupID,SettlementID,HedgeFlag,PosiDirection,YdPosition,Position,LongFrozen,ShortFrozen,YdLongFrozen,YdShortFrozen,InstrumentID,ParticipantID,TradingRole)
SELECT %s,SettlementGroupID,SettlementID,HedgeFlag,PosiDirection,Position,0,0,0,LongFrozen,ShortFrozen,InstrumentID,ParticipantID,TradingRole
FROM dbclear.t_partposition t WHERE t .tradingday = %s AND t.settlementgroupid = %s AND t.settlementid = %s"""
cursor.execute(sql, (next_trading_day, current_trading_day, settlement_group_id, settlement_id))
# Update the client dividend-position table
logger.info("[update ClientPositionForSecurityProfit]......")
sql = """DELETE FROM siminfo.t_ClientPositionForSecurityProfit WHERE settlementgroupid = %s"""
cursor.execute(sql, (settlement_group_id,))
sql = """INSERT INTO siminfo.t_ClientPositionForSecurityProfit(DJDate,SettlementGroupID,HedgeFlag,PosiDirection,YdPosition,Position,LongFrozen,ShortFrozen,YdLongFrozen,YdShortFrozen,BuyTradeVolume,SellTradeVolume,PositionCost,YdPositionCost,UseMargin,FrozenMargin,LongFrozenMargin,ShortFrozenMargin,FrozenPremium,InstrumentID,ParticipantID,ClientID)
SELECT DJDate,SettlementGroupID,HedgeFlag,PosiDirection,YdPosition,Position,LongFrozen,ShortFrozen,YdLongFrozen,YdShortFrozen,BuyTradeVolume,SellTradeVolume,PositionCost,YdPositionCost,UseMargin,FrozenMargin,LongFrozenMargin,ShortFrozenMargin,FrozenPremium,InstrumentID,ParticipantID,ClientID
FROM dbclear.t_ClientPositionForSecurityProfit t WHERE t.djdate = %s AND t.settlementgroupid = %s AND t.settlementid = %s"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
# Update market data
logger.info("[update marketdata]......")
sql = """DELETE FROM siminfo.t_marketdata WHERE settlementgroupid = %s"""
cursor.execute(sql, (settlement_group_id,))
sql = """INSERT INTO siminfo.t_marketdata(TradingDay,SettlementGroupID,LastPrice,PreSettlementPrice,PreClosePrice,PreOpenInterest,OpenPrice,HighestPrice,LowestPrice,Volume,Turnover,OpenInterest,ClosePrice,SettlementPrice,UpperLimitPrice,LowerLimitPrice,PreDelta,CurrDelta,UpdateTime,UpdateMillisec,InstrumentID)
SELECT %s,SettlementGroupID,NULL,SettlementPrice,ClosePrice,OpenInterest,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,UpdateTime,UpdateMillisec,InstrumentID
FROM dbclear.t_marketdata t WHERE t .tradingday = %s AND t.settlementgroupid = %s AND t.settlementid = %s"""
cursor.execute(sql, (next_trading_day, current_trading_day, settlement_group_id, settlement_id))
# Update client funds
logger.info("[update client fund]......")
sql = """DELETE FROM siminfo.t_clientfund WHERE settlementgroupid = %s"""
cursor.execute(sql, (settlement_group_id,))
sql = """INSERT INTO siminfo.t_clientfund(TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,Available,TransFee,DelivFee,PositionMargin,Profit,StockValue)
SELECT %s,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,Available,TransFee,DelivFee,PositionMargin,Profit,StockValue
FROM dbclear.t_clientfund t WHERE t .tradingday = %s AND t.settlementgroupid = %s AND t.settlementid = %s
AND (t.available != 0 OR t.transfee != 0 OR t.delivfee != 0 OR t.positionmargin != 0 OR t.profit != 0 OR t.stockvalue != 0)"""
cursor.execute(sql, (next_trading_day, current_trading_day, settlement_group_id, settlement_id))
# Update investor funds
logger.info("[update investor fund]......")
sql = """UPDATE siminfo.t_investorfund t1,(
SELECT t3.brokersystemid, t1.investorid, t2.available, t2.transfee, t2.positionmargin, t2.profit, t2.stockvalue FROM siminfo.t_investorclient t1, siminfo.t_clientfund t2, siminfo.t_brokersystemsettlementgroup t3
WHERE t1.settlementgroupid = t2.settlementgroupid AND t1.settlementgroupid = t3.settlementgroupid AND t1.clientid = t2.clientid AND t2.tradingday = %s AND t1.settlementgroupid = %s AND t2.settlementid = %s) t2
SET t1.balance = t1.available + t2.available - t2.transfee + t2.profit - t2.positionmargin,
t1.available = t1.available + t2.available - t2.transfee + t2.profit - t2.positionmargin,
t1.fee = t1.fee + t2.transfee, t1.stockvalue = t1.stockvalue + t2.stockvalue,
t1.currentasset = t1.currentasset + t2.available - t2.transfee + t2.profit - t2.positionmargin + t2.stockvalue
WHERE t1.brokersystemid = t2.brokersystemid and t1.investorid = t2.investorid"""
cursor.execute(sql, (next_trading_day, settlement_group_id, settlement_id))
# Update the settlement status
logger.info("[update settlement status]......")
sql = """UPDATE dbclear.t_settlement SET settlementstatus = '2' WHERE tradingday = %s AND settlementgroupid = %s AND settlementid = %s AND settlementstatus = '1'"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
mysql_conn.commit()
except Exception as e:
logger.error("[publish stock broker] Error: %s" % (e))
result_code = -1
finally:
mysql_conn.close()
logger.info("[publish stock broker] end")
return result_code
def main():
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["mysql"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files)
process_assert(publish_stock(context, conf))
if __name__ == "__main__":
main()
```
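The investor-fund reset in `publish_stock` rolls `premonthasset` and `preweekasset` forward only when the month or week changes between the last and the current trading day. A minimal Python sketch of that rollover rule, assuming `%Y%m%d` date strings and approximating MySQL's `WEEK(..., 1)` with the ISO week; the helper name is hypothetical:
```python
from datetime import datetime


def roll_assets(current_day, last_day, premonthasset, preweekasset, currentasset):
    """Mirror the SQL IF(MONTH(...)) / IF(WEEK(..., 1)) logic: keep the stored
    pre-month/pre-week asset within the same month/week, otherwise snapshot the
    current asset."""
    cur = datetime.strptime(current_day, "%Y%m%d")
    last = datetime.strptime(last_day, "%Y%m%d")
    new_premonth = premonthasset if cur.month == last.month else currentasset
    same_week = cur.isocalendar()[:2] == last.isocalendar()[:2]
    new_preweek = preweekasset if same_week else currentasset
    return new_premonth, new_preweek


# Crossing from Friday 20240329 to Monday 20240401 rolls both values forward.
print(roll_assets("20240401", "20240329", 100.0, 110.0, 120.0))  # (120.0, 120.0)
```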
#### File: xops/settlement/settle_future.py
```python
import json
from bs_module import *
from utils import Configuration, mysql, log, parse_conf_args, process_assert
def settle_future(context, conf):
result_code = 0
logger = log.get_logger(category="Settlefuture")
settlement_group_id = conf.get("settlementGroupId")
settlement_id = conf.get("settlementId")
exchange_id = conf.get("exchangeId")
marginSingleBigSide = conf.get("marginSingleBigSide")
logger.info("[settle future %s] begin" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))
mysql_pool = mysql(configs=context.get("mysql").get(conf.get("mysqlId")))
mysql_conn = mysql_pool.get_cnx()
mysql_conn.set_charset_collation('utf8')
try:
mysql_conn.start_transaction()
cursor = mysql_conn.cursor()
logger.info("[get current trading day]......")
sql = """SELECT t1.tradingday
FROM siminfo.t_tradesystemtradingday t1, siminfo.t_tradesystemsettlementgroup t2
WHERE t1.tradesystemid = t2.tradesystemid AND t2.settlementgroupid = %s"""
cursor.execute(sql, (settlement_group_id,))
row = cursor.fetchone()
current_trading_day = str(row[0])
logger.info("[get current trading day] current_trading_day = %s" % current_trading_day)
logger.info("[get next trading day]......")
# Decide whether holidays should be skipped
holiday = conf.get("holiday")
if holiday is True or holiday is None:
sql = """SELECT DAY FROM siminfo.t_TradingCalendar t WHERE t.day > %s AND t.tra = '1' ORDER BY DAY LIMIT 1"""
else:
sql = """SELECT DAY FROM siminfo.t_TradingCalendar t WHERE t.day > %s ORDER BY DAY LIMIT 1"""
cursor.execute(sql, (current_trading_day,))
row = cursor.fetchone()
next_trading_day = str(row[0])
logger.info("[get next trading day] next_trading_day = %s" % (next_trading_day))
# Check the settlement status
logger.info("[check settlement status]......")
sql = """SELECT t1.tradingday, t1.settlementgroupid, t1.settlementid, t1.settlementstatus
FROM dbclear.t_settlement t1
WHERE t1.tradingday = %s
AND t1.settlementgroupid = %s
AND t1.settlementid = %s for update"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
row = cursor.fetchone()
if row is None:
logger.error("[settle future] Error: There is no data for %s-%s." % (settlement_group_id, settlement_id))
result_code = -1
elif row[3] != '0':
logger.error("[settle future] Error: Settlement for %s-%s has done." % (settlement_group_id, settlement_id))
result_code = -1
else:
# If the close price is zero, fall back to the last price
sql = """UPDATE dbclear.t_marketdata t
SET t.ClosePrice = t.LastPrice
WHERE
t.TradingDay = %s
AND t.SettlementID = %s
AND t.SettlementGroupID = %s
AND t.ClosePrice = %s"""
cursor.execute(sql, (current_trading_day, settlement_id, settlement_group_id, 0))
# Clear existing data for this settlement
logger.info("[delete t_delivinstrument ... ]")
sql = "delete from dbclear.t_delivinstrument where settlementgroupid = %s and settlementid = %s and tradingday = %s "
cursor.execute(sql, (settlement_group_id, settlement_id, current_trading_day))
logger.info("[delete t_clientdelivposition ... ]")
sql = "delete from dbclear.t_clientdelivposition where settlementgroupid = %s and settlementid = %s and tradingday = %s "
cursor.execute(sql, (settlement_group_id, settlement_id, current_trading_day))
logger.info("[delete t_clientdelivfee ... ]")
sql = "delete from dbclear.t_clientdelivfee where settlementgroupid = %s and settlementid = %s and tradingday = %s "
cursor.execute(sql, (settlement_group_id, settlement_id, current_trading_day))
logger.info("[delete t_clienttransfee ... ]")
sql = "delete from dbclear.t_clienttransfee where settlementgroupid = %s and settlementid = %s and tradingday = %s "
cursor.execute(sql, (settlement_group_id, settlement_id, current_trading_day))
logger.info("[delete t_clientpositionmargin ... ]")
sql = "delete from dbclear.t_clientpositionmargin where settlementgroupid = %s and settlementid = %s and tradingday = %s "
cursor.execute(sql, (settlement_group_id, settlement_id, current_trading_day))
logger.info("[delete t_clienttradeprofit ... ]")
sql = "delete from dbclear.t_clienttradeprofit where settlementgroupid = %s and settlementid = %s and tradingday = %s "
cursor.execute(sql, (settlement_group_id, settlement_id, current_trading_day))
logger.info("[delete t_clientPositionProfit ... ]")
sql = "delete from dbclear.t_clientPositionProfit where settlementgroupid = %s and settlementid = %s and tradingday = %s "
cursor.execute(sql, (settlement_group_id, settlement_id, current_trading_day))
logger.info("[delete t_clientpositionpremium ... ]")
sql = "delete from dbclear.t_clientpositionpremium where settlementgroupid = %s and settlementid = %s and tradingday = %s "
cursor.execute(sql, (settlement_group_id, settlement_id, current_trading_day))
# Calculate position details
calc_future_posdtl(logger, cursor, current_trading_day, settlement_group_id, settlement_id, exchange_id)
# Futures settlement
sett_future(logger, cursor, current_trading_day, next_trading_day, settlement_group_id, settlement_id)
# Futures options settlement
sett_future_option(logger, cursor, current_trading_day, next_trading_day, settlement_group_id,
settlement_id)
# Calculate client funds
logger.info("[Calculate ClientFund] is processing......")
# 1) Update positionmargin
if marginSingleBigSide:
sql = """insert into dbclear.t_clientfund (TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,TransFee,DelivFee,PositionMargin,Profit,available,StockValue)
(select t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.accountid,
0,
0,
sum(t.positionmargin) as positionmargin,
0,
0,
0
from (select t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.accountid,
t.productid,
max(t.positionmargin) as positionmargin
from (select t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.accountid,
t.productid,
t.posidirection,
sum(t.positionmargin) as positionmargin
from dbclear.t_clientpositionmargin t
where t.tradingday = %s
and t.settlementgroupid = %s
and t.settlementid = %s
group by t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.accountid,
t.productid,
t.posidirection) t
group by t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.accountid,
t.productid) t
group by t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.accountid)
ON DUPLICATE KEY UPDATE dbclear.t_clientfund.positionmargin = values(positionmargin)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
else:
sql = """insert into dbclear.t_clientfund (TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,TransFee,DelivFee,PositionMargin,Profit,available,StockValue)
(select t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid,0,0,sum(t.positionmargin) as positionmargin,0,0,0
from dbclear.t_clientpositionmargin t where t.tradingday = %s and t.settlementgroupid = %s and t.settlementid = %s
group by t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid)
ON DUPLICATE KEY UPDATE dbclear.t_clientfund.positionmargin = values(positionmargin)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
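# Illustrative note (added, not in the original source): "single big side" margin
# charges max(long-side margin, short-side margin) per product instead of their
# sum. Example: long-side margin 8,000 and short-side margin 5,000 on one product
# give 8,000 under big-side margining versus 13,000 when both sides are charged.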
# 2) Update transfee
sql = """insert into dbclear.t_clientfund (TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,TransFee,DelivFee,PositionMargin,Profit,available,StockValue)
(select t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid,sum(t.transfee) as transfee,0,0,0,0,0
from dbclear.t_clienttransfee t where t.tradingday = %s and t.settlementgroupid = %s and t.settlementid = %s
group by t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid)
ON DUPLICATE KEY UPDATE dbclear.t_clientfund.transfee = values(transfee)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
# 3) Update delivfee
sql = """insert into dbclear.t_clientfund (TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,TransFee,DelivFee,PositionMargin,Profit,available,StockValue)
(select t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid,0,sum(t.delivfee) as delivfee,0,0,0,0
from dbclear.t_clientdelivfee t where t.tradingday = %s and t.settlementgroupid = %s and t.settlementid = %s
group by t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid)
ON DUPLICATE KEY UPDATE t_clientfund.delivfee = values(delivfee)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
# 4) Update profit
sql = """insert into dbclear.t_clientfund (TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,TransFee,DelivFee,PositionMargin,Profit,available,StockValue)
(select t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid,0,0,0,sum(t.profit) as profit,0,0
from (select t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid,sum(t.profit) as profit
from dbclear.t_clienttradeprofit t where t.tradingday = %s and t.settlementgroupid = %s and t.settlementid = %s
group by t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid
union all
select t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid,sum(t.profit) as profit
from dbclear.t_clientdelivprofit t where t.tradingday = %s and t.settlementgroupid = %s and t.settlementid = %s
group by t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid) t
group by t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid)
ON DUPLICATE KEY UPDATE dbclear.t_clientfund.profit = values(profit)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id, current_trading_day, settlement_group_id, settlement_id))
# 5) Update premium
sql = """insert into dbclear.t_clientfund (TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,TransFee,DelivFee,PositionMargin,Profit,available,StockValue)
(select t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid,0,0,0,0,sum( t.Premium ) AS available,0
from dbclear.t_clientpositionpremium t where t.tradingday = %s and t.settlementgroupid = %s and t.settlementid = %s
group by t.tradingday,t.settlementgroupid,t.settlementid,t.participantid,t.clientid,t.accountid)
ON DUPLICATE KEY UPDATE dbclear.t_clientfund.available = values(available)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
# 6) Update stockvalue
sql = """insert into dbclear.t_clientfund (TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,TransFee,DelivFee,PositionMargin,Profit,available,StockValue)
(SELECT t1.tradingday, t1.settlementgroupid, t1.settlementid, t1.participantid, t2.accountid, t1.clientid, 0 AS transfee, 0 AS delivfee, 0 AS positionmargin, 0 AS profit, 0 AS available,
ROUND(SUM(
CASE
WHEN t1.posidirection = '2'
THEN t1.position * t3.settlementprice * t4.underlyingmultiple
WHEN t1.posidirection = '3'
THEN - 1 * t1.position * t3.settlementprice * t4.underlyingmultiple
ELSE 0
END
), 2) AS stockvalue
FROM
(SELECT
t1.*,
t2.tradingrole
FROM
dbclear.t_clientposition t1,
siminfo.t_client t2
WHERE t1.clientid = t2.clientid) t1,
siminfo.t_PartRoleAccount t2,
dbclear.t_marketdata t3,
siminfo.t_instrument t4
WHERE t2.TradingRole = t1.TradingRole
AND t2.SettlementGroupID = t1.SettlementGroupID
AND t2.ParticipantID = t1.ParticipantID
AND t1.instrumentid = t3.instrumentid
AND t1.tradingday = t3.tradingday
AND t1.settlementgroupid = t3.settlementgroupid
AND t1.settlementid = t3.settlementid
AND t1.settlementgroupid = t4.settlementgroupid
AND t1.instrumentid = t4.instrumentid
AND (
t1.posidirection = '2'
OR t1.posidirection = '3'
)
AND t1.tradingday = %s
AND t1.settlementgroupid = %s
AND t1.settlementid = %s
GROUP BY t1.tradingday,
t1.settlementgroupid,
t1.settlementid,
t1.participantid,
t2.accountid,
t1.clientid)
ON DUPLICATE KEY UPDATE dbclear.t_clientfund.stockvalue = values(stockvalue)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
# Update the settlement status
logger.info("[update settlement status] is processing......")
sql = """UPDATE dbclear.t_settlement SET settlementstatus = '1'
WHERE tradingday = %s AND settlementgroupid = %s AND settlementid = %s AND settlementstatus = '0'"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
mysql_conn.commit()
except Exception as e:
logger.error("[settle future] Error: %s" % e)
result_code = -1
finally:
mysql_conn.close()
logger.info("[settle future] end")
return result_code
def sett_future(logger, cursor, current_trading_day, next_trading_day, settlement_group_id, settlement_id):
# Calculate settlement prices
logger.info("[calculate settlement price] is processing......")
sql = """UPDATE dbclear.t_marketdata tm,
(
select t1.tradingday,
t1.settlementgroupid,
t1.settlementid,
t1.instrumentid,
case
when abs(mod((t1.settlementprice - t3.presettlementprice), t2.pricetick)) <
(t2.pricetick / 2) then
t3.presettlementprice +
if(sign(t1.settlementprice - t3.presettlementprice)=1,
floor((t1.settlementprice - t3.presettlementprice) /
t2.pricetick),
ceil((t1.settlementprice - t3.presettlementprice) /
t2.pricetick)) * t2.pricetick
else
t3.presettlementprice +
if(sign(t1.settlementprice - t3.presettlementprice)=1,
ceil((t1.settlementprice - t3.presettlementprice) /
t2.pricetick),
floor((t1.settlementprice - t3.presettlementprice) /
t2.pricetick)) * t2.pricetick
end as settlementprice
from (SELECT
t.tradingday,
t.settlementgroupid,
t.settlementid,
t.instrumentid,
CASE
WHEN t.Volume = 0 THEN
0 ELSE round( t.Turnover / t.Volume / t1.VolumeMultiple, 2 )
END AS settlementprice
FROM
dbclear.t_marketdata t, siminfo.t_instrument t1
WHERE
t.tradingday = %s
AND t.settlementgroupid = %s
AND t.settlementid = %s
AND t.instrumentid = t1.instrumentid
AND t.settlementgroupid = t1.settlementgroupid) t1,
siminfo.t_instrumentproperty t2, dbclear.t_marketdata t3
where t1.settlementgroupid = t2.settlementgroupid
and t1.instrumentid = t2.instrumentid
and t1.tradingday = t3.tradingday
and t1.settlementgroupid = t3.settlementgroupid
and t1.settlementid = t3.settlementid
and t1.instrumentid = t3.instrumentid) tt
SET tm.settlementprice = tt.settlementprice
WHERE
tm.settlementgroupid = tt.settlementgroupid
AND tm.settlementid = tt.settlementid
AND tm.instrumentid = tt.instrumentid
AND tm.tradingday = tt.tradingday
AND tm.settlementprice = 0"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
# If the settlement price is zero, fall back to the previous settlement price
sql = """UPDATE dbclear.t_marketdata t
SET t.SettlementPrice = t.PreSettlementPrice
WHERE
t.TradingDay = %s
AND t.SettlementID = %s
AND t.SettlementGroupID = %s
AND t.SettlementPrice = %s"""
cursor.execute(sql, (current_trading_day, settlement_id, settlement_group_id, 0))
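# Illustrative note (added, not in the original source): the settlement price is
# the volume-weighted average price snapped to the tick grid around the previous
# settlement price. Example: presettlement 3000, pricetick 2, raw average 3003.4
# -> offset 3.4; the remainder 1.4 is >= half a tick, so it rounds away from the
# presettlement price to 2 ticks, giving 3004. A zero result then falls back to
# the previous settlement price.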
# Calculate profit and loss
logger.info("[Calculate ClientProfit] is processing......")
sql = """insert into dbclear.t_clienttradeprofit(TradingDay, SettlementGroupID, SettlementID, ParticipantID, ClientID, AccountID, InstrumentID, TradeID, Direction, OffsetFlag, Price, Volume, Profit)
select t1.tradingday,
t1.settlementgroupid,
t1.settlementid,
t1.participantid,
t1.clientid,
t3.accountid,
t1.instrumentid,
t1.tradeid,
t1.direction,
t1.offsetflag,
t1.price,
t1.volume,
case
when t1.offsetflag = '0' or t1.offsetflag = '1' or t1.offsetflag = '4' or
t1.offsetflag = '2' or t1.offsetflag = '3' then
round(t4.volumemultiple * if(t1.direction='0',
(t2.settlementprice - t1.price) * t1.volume,
(t1.price - t2.settlementprice) * t1.volume),
2)
end as profit
from dbclear.t_trade t1, dbclear.t_marketdata t2, (select t3.settlementgroupid,
t2.participantid,
t1.clientid,
t1.tradingrole,
t3.accountid
from siminfo.t_client t1, siminfo.t_partclient t2, siminfo.t_partroleaccount t3
where t1.SettlementGroupID = t2.SettlementGroupID
and t1.SettlementGroupID = t3.SettlementGroupID
and t1.clientid = t2.clientid
and t2.participantid = t3.participantid
and t1.tradingrole = t3.tradingrole
and t3.settlementgroupid = %s) t3, siminfo.t_instrument t4
where t1.tradingday = t2.tradingday
and t1.settlementgroupid = t2.settlementgroupid
and t1.settlementid = t2.settlementid
and t1.instrumentid = t2.instrumentid
and t1.settlementgroupid = t3.settlementgroupid
and t1.clientid = t3.clientid
and t1.settlementgroupid = t4.settlementgroupid
and t1.instrumentid = t4.instrumentid
and t4.ProductClass != '2'
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s
union all
select t1.tradingday,
t1.settlementgroupid,
t1.settlementid,
t1.participantid,
t1.clientid,
t3.accountid,
t1.instrumentid,
'--' as tradeid,
if(t1.posidirection='2', '0', '1') as direction,
'5' as offsetflag,
t2.settlementprice as price,
t1.ydposition as volume,
round(t4.volumemultiple * if(t1.posidirection='2',
(t2.settlementprice - t2.presettlementprice) * t1.ydposition,
(t2.presettlementprice - t2.settlementprice) * t1.ydposition), 2) as profit
from dbclear.t_clientposition t1, dbclear.t_marketdata t2, (select t3.settlementgroupid,
t2.participantid,
t1.clientid,
t1.tradingrole,
t3.accountid
from siminfo.t_client t1, siminfo.t_partclient t2, siminfo.t_partroleaccount t3
where t1.SettlementGroupID = t2.SettlementGroupID
and t1.SettlementGroupID = t3.SettlementGroupID
and t1.clientid = t2.clientid
and t2.participantid = t3.participantid
and t1.tradingrole = t3.tradingrole
and t3.settlementgroupid = %s) t3, siminfo.t_instrument t4
where t1.tradingday = t2.tradingday
and t1.settlementgroupid = t2.settlementgroupid
and t1.settlementid = t2.settlementid
and t1.instrumentid = t2.instrumentid
and t1.settlementgroupid = t3.settlementgroupid
and t1.clientid = t3.clientid
and t1.settlementgroupid = t4.settlementgroupid
and t1.instrumentid = t4.instrumentid
and t4.ProductClass != '2'
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s"""
cursor.execute(sql, (settlement_group_id, current_trading_day, settlement_group_id, settlement_id,
settlement_group_id, current_trading_day, settlement_group_id, settlement_id))
sql = """insert into dbclear.t_clienttradeprofit(TradingDay, SettlementGroupID, SettlementID, ParticipantID, ClientID, AccountID, InstrumentID, TradeID, Direction, OffsetFlag, Price, Volume, Profit)
(SELECT t1.TradingDay,
t1.SettlementGroupID,
t1.SettlementID,
t1.ParticipantID,
t1.ClientID,
t5.AccountID,
t1.InstrumentID,
'0' as TradeID,
if(t1.PosiDirection = '2', '0', '1') as Direction,
'1' as OffsetFlag,
'0' as Price,
t1.ydposition,
round(
t4.volumemultiple *
IF( if(t1.PosiDirection = '2', '0', '1') = '0',
( t3.SettlementPrice - t3.PreSettlementPrice ) * t1.ydposition,
( t3.PreSettlementPrice - t3.SettlementPrice ) * t1.ydposition
),2
) AS profit
FROM siminfo.t_clientposition t1
LEFT JOIN dbclear.t_marketdata t3 ON ( t1.InstrumentID = t3.InstrumentID AND t1.TradingDay = t3.TradingDay and t1.SettlementGroupID = t3.SettlementGroupID)
LEFT JOIN siminfo.t_instrument t4 on ( t1.InstrumentID = t4.InstrumentID and t1.SettlementGroupID = t4.SettlementGroupID)
LEFT JOIN siminfo.t_partroleaccount t5 on ( t5.SettlementGroupID = t1.SettlementGroupID and t5.ParticipantID = t1.ParticipantID)
WHERE
t1.tradingday = %s
and t1.SettlementGroupID = %s
and t1.SettlementID = %s
AND t1.ydposition > 0)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
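# Illustrative note (added, not in the original source): each trade is marked to
# the new settlement price, e.g. buying 2 lots at 3000 with settlement 3010 and
# volumemultiple 10 books 10 * (3010 - 3000) * 2 = 200; a carried long of 3 lots
# is marked from the presettlement price, e.g. 10 * (3010 - 2995) * 3 = 450.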
# Handle delivery positions
logger.info("[Move DelivPosition] is processing......")
# 1) Insert into t_delivinstrument
sql = """insert into dbclear.t_delivinstrument(TradingDay, SettlementGroupID, SettlementID, InstrumentID
)select %s, t.SettlementGroupID, %s, t.instrumentid
from siminfo.t_instrumentproperty t, siminfo.t_instrument t1
where t.SettlementGroupID = t1.SettlementGroupID and t.InstrumentID = t1.InstrumentID
and t1.ProductClass != '2' and t.settlementgroupid = %s and t.ExpireDate < %s"""
cursor.execute(sql, (current_trading_day, settlement_id, settlement_group_id, next_trading_day))
# 2) Insert into t_clientdelivposition
sql = """insert into dbclear.t_clientdelivposition(TradingDay,SettlementGroupID,SettlementID,HedgeFlag,
PosiDirection,YdPosition,Position,LongFrozen,ShortFrozen,YdLongFrozen,YdShortFrozen,
BuyTradeVolume,SellTradeVolume,PositionCost,YdPositionCost,UseMargin,FrozenMargin,
LongFrozenMargin,ShortFrozenMargin,FrozenPremium,InstrumentID,ParticipantID,ClientID
)select TradingDay,SettlementGroupID,SettlementID,HedgeFlag,PosiDirection,YdPosition,Position,
LongFrozen,ShortFrozen,YdLongFrozen,YdShortFrozen,BuyTradeVolume,SellTradeVolume,PositionCost,
YdPositionCost,UseMargin,FrozenMargin,LongFrozenMargin,ShortFrozenMargin,FrozenPremium,
InstrumentID,ParticipantID,ClientID from dbclear.t_clientposition
where tradingday = %s
and settlementgroupid = %s
and settlementid = %s
and Position != '0'
and instrumentid in
(select t.instrumentid
from dbclear.t_delivinstrument t
where t.tradingday = %s
and t.settlementgroupid = %s
and t.settlementid = %s)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id,
current_trading_day, settlement_group_id, settlement_id))
# 3) Delete from t_clientposition
sql = """delete from dbclear.t_clientposition
where (tradingday = %s
and settlementgroupid = %s
and settlementid = %s
and instrumentid in
(select t.instrumentid
from dbclear.t_delivinstrument t
where t.tradingday = %s
and t.settlementgroupid = %s
and t.settlementid = %s))
or Position = '0'"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id,
current_trading_day, settlement_group_id, settlement_id))
# Delete from t_FuturePositionDtl
sql = """delete from dbclear.t_FuturePositionDtl
where (tradingday = %s
and settlementgroupid = %s
and settlementid = %s
and instrumentid in
(select t.instrumentid
from dbclear.t_delivinstrument t
where t.tradingday = %s
and t.settlementgroupid = %s
and t.settlementid = %s))"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id,
current_trading_day, settlement_group_id, settlement_id))
# Delivery fees
logger.info("[Calculate DelivFee] is processing......")
# Insert into t_clientdelivfee
sql = """insert into dbclear.t_clientdelivfee(TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,ProductGroupID,ProductID,UnderlyingInstrID,InstrumentID,Position,Price,DelivFeeRatio,ValueMode,DelivFee
)select t1.tradingday,
t1.settlementgroupid,
t1.settlementid,
t1.participantid,
t1.clientid,
t2.accountid,
t3.productgroupid,
t3.productid,
t3.underlyinginstrid,
t1.instrumentid,
t1.position,
t4.settlementprice as Price,
t2.delivfeeratio,
t2.valuemode,
round(if(t2.valuemode='2',
round(t2.delivfeeratio * t1.position * t3.volumemultiple, 2),
round(t2.delivfeeratio * t4.settlementprice * t1.position *
t3.volumemultiple,
2)), 2) as delivfee
from (select t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.instrumentid,
abs(sum(if(t.posidirection='2',
(t.position + t.ydposition),
-1 * (t.position + t.ydposition)))) as position
from dbclear.t_clientdelivposition t
where t.tradingday = %s
and t.settlementgroupid = %s
and t.settlementid = %s
group by t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.instrumentid) t1,
(select t1.SettlementGroupID,
t1.ParticipantID,
t1.ClientID,
t2.AccountID,
t1.InstrumentID,
t1.DelivFeeRatio,
t1.ValueMode
from siminfo.t_delivfeeratedetail t1, siminfo.t_partroleaccount t2
where t1.SettlementGroupID = t2.SettlementGroupID) t2,
siminfo.t_instrument t3,
dbclear.t_marketdata t4
where t1.settlementgroupid = t2.settlementgroupid
and t2.participantid = '00000000'
and t2.clientid = '00000000'
and t1.instrumentid = t2.instrumentid
and t1.settlementgroupid = t3.settlementgroupid
and t1.instrumentid = t3.instrumentid
and t1.tradingday = t4.tradingday
and t1.settlementgroupid = t4.settlementgroupid
and t1.settlementid = t4.settlementid
and t1.instrumentid = t4.instrumentid
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id,
current_trading_day, settlement_group_id, settlement_id))
sql = """insert into dbclear.t_clientdelivfee(TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,ProductGroupID,ProductID,UnderlyingInstrID,InstrumentID,Position,Price,DelivFeeRatio,ValueMode,DelivFee
)select t1.tradingday,
t1.settlementgroupid,
t1.settlementid,
t1.participantid,
t1.clientid,
t2.accountid,
t3.productgroupid,
t3.productid,
t3.underlyinginstrid,
t1.instrumentid,
t1.position,
t4.settlementprice as Price,
t2.delivfeeratio,
t2.valuemode,
round(if(t2.valuemode='2',
round(t2.delivfeeratio * t1.position * t3.volumemultiple, 2),
round(t2.delivfeeratio * t4.settlementprice * t1.position *
t3.volumemultiple,
2)), 2) as delivfee
from (select t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.instrumentid,
abs(sum(if(t.posidirection='2',
(t.position + t.ydposition),
-1 * (t.position + t.ydposition)))) as position
from dbclear.t_clientdelivposition t
where t.tradingday = %s
and t.settlementgroupid = %s
and t.settlementid = %s
group by t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.instrumentid) t1,
(select t1.SettlementGroupID,
t1.ParticipantID,
t1.ClientID,
t2.AccountID,
t1.InstrumentID,
t1.DelivFeeRatio,
t1.ValueMode
from siminfo.t_delivfeeratedetail t1, siminfo.t_partroleaccount t2
where t1.SettlementGroupID = t2.SettlementGroupID) t2,
siminfo.t_instrument t3,
dbclear.t_marketdata t4
where t1.settlementgroupid = t2.settlementgroupid
and t2.participantid = t1.participantid
and t2.clientid = '00000000'
and t1.instrumentid = t2.instrumentid
and t1.settlementgroupid = t3.settlementgroupid
and t1.instrumentid = t3.instrumentid
and t1.tradingday = t4.tradingday
and t1.settlementgroupid = t4.settlementgroupid
and t1.settlementid = t4.settlementid
and t1.instrumentid = t4.instrumentid
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s
ON DUPLICATE KEY UPDATE delivfeeratio = VALUES(delivfeeratio), valuemode = VALUES(valuemode),
delivfee = VALUES(delivfee)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id,
current_trading_day, settlement_group_id, settlement_id))
sql = """insert into dbclear.t_clientdelivfee(TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,ProductGroupID,ProductID,UnderlyingInstrID,InstrumentID,Position,Price,DelivFeeRatio,ValueMode,DelivFee
)select t1.tradingday,
t1.settlementgroupid,
t1.settlementid,
t1.participantid,
t1.clientid,
t2.accountid,
t3.productgroupid,
t3.productid,
t3.underlyinginstrid,
t1.instrumentid,
t1.position,
t4.settlementprice as Price,
t2.delivfeeratio,
t2.valuemode,
round(if(t2.valuemode='2',
round(t2.delivfeeratio * t1.position * t3.volumemultiple, 2),
round(t2.delivfeeratio * t4.settlementprice * t1.position *
t3.volumemultiple,
2)), 2) as delivfee
from (select t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.instrumentid,
abs(sum(if(t.posidirection='2',
(t.position + t.ydposition),
-1 * (t.position + t.ydposition)))) as position
from dbclear.t_clientdelivposition t
where t.tradingday = %s
and t.settlementgroupid = %s
and t.settlementid = %s
group by t.tradingday,
t.settlementgroupid,
t.settlementid,
t.participantid,
t.clientid,
t.instrumentid) t1,
(select t1.SettlementGroupID,
t1.ParticipantID,
t1.ClientID,
t2.AccountID,
t1.InstrumentID,
t1.DelivFeeRatio,
t1.ValueMode
from siminfo.t_delivfeeratedetail t1, siminfo.t_partroleaccount t2
where t1.SettlementGroupID = t2.SettlementGroupID) t2,
siminfo.t_instrument t3,
dbclear.t_marketdata t4
where t1.settlementgroupid = t2.settlementgroupid
and t2.participantid = t1.participantid
and t2.clientid = t1.clientid
and t1.instrumentid = t2.instrumentid
and t1.settlementgroupid = t3.settlementgroupid
and t1.instrumentid = t3.instrumentid
and t1.tradingday = t4.tradingday
and t1.settlementgroupid = t4.settlementgroupid
and t1.settlementid = t4.settlementid
and t1.instrumentid = t4.instrumentid
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s
ON DUPLICATE KEY UPDATE delivfeeratio = VALUES(delivfeeratio), valuemode = VALUES(valuemode),
delivfee = VALUES(delivfee)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id,
current_trading_day, settlement_group_id, settlement_id))
# Trading fees
logger.info("[Calculate TransFee] is processing......")
# Insert into t_clienttransfee
sql = """insert into dbclear.t_clienttransfee(TradingDay, SettlementGroupID, SettlementID, ParticipantID, ClientID, AccountID, ProductGroupID, ProductID, UnderlyingInstrID, InstrumentID, TradeID, Direction, TradingRole, HedgeFlag, OffsetFlag, Volume, Price, TransFeeRatio, ValueMode, TransFee, OrderSysID, MinFee, MaxFee
)select t1.tradingday,t1.settlementgroupid,t1.settlementid,t1.participantid,t1.clientid,
t1.accountid,t3.productgroupid,t3.productid,t3.underlyinginstrid,t1.instrumentid,
t1.tradeid,t1.direction,t1.tradingrole,t1.hedgeflag,t1.offsetflag,t1.volume,t1.price,
case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' then
t2.closetodayfeeratio
when t1.offsetflag = '4' then
t2.closeyesterdayfeeratio
end as transfeeratio,
t2.valuemode,
if(t2.valuemode='2',
round((case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' then
t2.closetodayfeeratio
when t1.offsetflag = '4' then
t2.closeyesterdayfeeratio
end) * t1.volume,
2),
round((case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' then
t2.closetodayfeeratio
when t1.offsetflag = '4' then
t2.closeyesterdayfeeratio
end) * t1.price * t1.volume * t3.volumemultiple,
2)) as transfee,
t1.OrderSysID,
'0' as Minfee,
'0' as MaxFee
from dbclear.t_trade t1,dbclear.t_clienttransfeeratio t2, siminfo.t_instrument t3
where t1.TradingDay = t2.TradingDay
and t1.SettlementID = t2.SettlementID
and t1.settlementgroupid = t2.settlementgroupid
and t2.participantid = '00000000'
and t2.clientid = '00000000'
and t1.instrumentid = t2.instrumentid
and t1.tradingrole = t2.tradingrole
and t1.hedgeflag = t2.hedgeflag
and t1.settlementgroupid = t3.settlementgroupid
and t1.instrumentid = t3.instrumentid
and t3.ProductClass != '2'
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
sql = """insert into dbclear.t_clienttransfee(TradingDay, SettlementGroupID, SettlementID, ParticipantID, ClientID, AccountID, ProductGroupID, ProductID, UnderlyingInstrID, InstrumentID, TradeID, Direction, TradingRole, HedgeFlag, OffsetFlag, Volume, Price, TransFeeRatio, ValueMode, TransFee, OrderSysID, MinFee, MaxFee
)select t1.tradingday,t1.settlementgroupid,t1.settlementid,t1.participantid,t1.clientid,
t1.accountid,t3.productgroupid,t3.productid,t3.underlyinginstrid,t1.instrumentid,
t1.tradeid,t1.direction,t1.tradingrole,t1.hedgeflag,t1.offsetflag,t1.volume,t1.price,
case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' then
t2.closetodayfeeratio
when t1.offsetflag = '4' then
t2.closeyesterdayfeeratio
end as transfeeratio,
t2.valuemode,
if(t2.valuemode='2',
round((case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' then
t2.closetodayfeeratio
when t1.offsetflag = '4' then
t2.closeyesterdayfeeratio
end) * t1.volume,
2),
round((case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' then
t2.closetodayfeeratio
when t1.offsetflag = '4' then
t2.closeyesterdayfeeratio
end) * t1.price * t1.volume * t3.volumemultiple,
2)) as transfee,
t1.OrderSysID,
'0' as Minfee,
'0' as MaxFee
from dbclear.t_trade t1,dbclear.t_clienttransfeeratio t2, siminfo.t_instrument t3
where t1.TradingDay = t2.TradingDay
and t1.SettlementID = t2.SettlementID
and t1.settlementgroupid = t2.settlementgroupid
and t2.participantid = t1.participantid
and t2.clientid = '00000000'
and t1.instrumentid = t2.instrumentid
and t1.tradingrole = t2.tradingrole
and t1.hedgeflag = t2.hedgeflag
and t1.settlementgroupid = t3.settlementgroupid
and t1.instrumentid = t3.instrumentid
and t3.ProductClass != '2'
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s
ON DUPLICATE KEY UPDATE transfeeratio = VALUES(transfeeratio), valuemode = VALUES(valuemode),
transfee = VALUES(transfee)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
sql = """insert into dbclear.t_clienttransfee(TradingDay, SettlementGroupID, SettlementID, ParticipantID, ClientID, AccountID, ProductGroupID, ProductID, UnderlyingInstrID, InstrumentID, TradeID, Direction, TradingRole, HedgeFlag, OffsetFlag, Volume, Price, TransFeeRatio, ValueMode, TransFee, OrderSysID, MinFee, MaxFee
)select t1.tradingday,t1.settlementgroupid,t1.settlementid,t1.participantid,t1.clientid,
t1.accountid,t3.productgroupid,t3.productid,t3.underlyinginstrid,t1.instrumentid,
t1.tradeid,t1.direction,t1.tradingrole,t1.hedgeflag,t1.offsetflag,t1.volume,t1.price,
case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' then
t2.closetodayfeeratio
when t1.offsetflag = '4' then
t2.closeyesterdayfeeratio
end as transfeeratio,
t2.valuemode,
if(t2.valuemode='2',
round((case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' then
t2.closetodayfeeratio
when t1.offsetflag = '4' then
t2.closeyesterdayfeeratio
end) * t1.volume,
2),
round((case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' then
t2.closetodayfeeratio
when t1.offsetflag = '4' then
t2.closeyesterdayfeeratio
end) * t1.price * t1.volume * t3.volumemultiple,
2)) as transfee,
t1.OrderSysID,
'0' as Minfee,
'0' as MaxFee
from dbclear.t_trade t1,dbclear.t_clienttransfeeratio t2, siminfo.t_instrument t3
where t1.TradingDay = t2.TradingDay
and t1.SettlementID = t2.SettlementID
and t1.settlementgroupid = t2.settlementgroupid
and t2.participantid = t1.participantid
and t2.clientid = t1.clientid
and t1.instrumentid = t2.instrumentid
and t1.tradingrole = t2.tradingrole
and t1.hedgeflag = t2.hedgeflag
and t1.settlementgroupid = t3.settlementgroupid
and t1.instrumentid = t3.instrumentid
and t3.ProductClass != '2'
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s
ON DUPLICATE KEY UPDATE transfeeratio = VALUES(transfeeratio), valuemode = VALUES(valuemode),
transfee = VALUES(transfee)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
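# Illustrative note (added, not in the original source): valuemode '2' charges a
# flat per-lot fee (ratio * volume); any other valuemode charges ad valorem, e.g.
# ratio 0.0001, price 3000, 2 lots, volumemultiple 10 -> 0.0001 * 3000 * 2 * 10 = 6.00.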
# Position margin
logger.info("[Calculate PositionMargin] is processing......")
# Insert into t_clientpositionmargin
sql = """insert into dbclear.t_clientpositionmargin(TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,ProductGroupID,ProductID,UnderlyingInstrID,InstrumentID,TradingRole,HedgeFlag,PosiDirection,Position,MarginRatio,ValueMode,SettlementPrice,PositionMargin
)select t1.tradingday,t1.settlementgroupid,t1.settlementid,t1.participantid,t1.clientid,
t2.accountid,t4.productgroupid,t4.productid,t4.underlyinginstrid,t1.instrumentid,t1.tradingrole,
t1.hedgeflag,t1.posidirection,t1.position + t1.ydposition,
if(t1.posidirection='2',
t2.longmarginratio,
t2.shortmarginratio) as MarginRatio,
t2.valuemode,t3.settlementprice,
round(if(t2.valuemode='1',
if(t1.posidirection='2',
t2.longmarginratio,
t2.shortmarginratio) * (t1.position + t1.ydposition) *
t4.volumemultiple * t3.settlementprice,
if(t1.posidirection='2',
t2.longmarginratio,
t2.shortmarginratio) * (t1.position + t1.ydposition) * t4.volumemultiple),
2) as positionmargin
from (select t1.*, t2.tradingrole
from dbclear.t_clientposition t1, siminfo.t_client t2
where t1.clientid = t2.clientid and t1.settlementgroupid = t2.settlementgroupid) t1,
(select t1.settlementgroupid,
t1.participantid,
t2.accountid,
t1.clientid,
t1.instrumentid,
t2.tradingrole,
t1.hedgeflag,
t1.longmarginratio,
t1.shortmarginratio,
t1.valuemode
from siminfo.t_marginratedetail t1, siminfo.t_partroleaccount t2
where t1.SettlementGroupID = t2.SettlementGroupID) t2,
dbclear.t_marketdata t3,
siminfo.t_instrument t4
where t2.participantid = '00000000'
and t2.clientid = '00000000'
and t1.instrumentid = t2.instrumentid
and t1.tradingrole = t2.tradingrole
and t1.hedgeflag = t2.hedgeflag
and t1.instrumentid = t3.instrumentid
and t1.tradingday = t3.tradingday
and t1.settlementgroupid = t2.settlementgroupid
and t1.settlementgroupid = t3.settlementgroupid
and t1.settlementid = t3.settlementid
and t1.settlementgroupid = t4.settlementgroupid
and t1.instrumentid = t4.instrumentid
and t4.ProductClass != '2'
and (t1.posidirection = '2' or t1.posidirection = '3')
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s
"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
sql = """insert into dbclear.t_clientpositionmargin(TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,ProductGroupID,ProductID,UnderlyingInstrID,InstrumentID,TradingRole,HedgeFlag,PosiDirection,Position,MarginRatio,ValueMode,SettlementPrice,PositionMargin
)select t1.tradingday,t1.settlementgroupid,t1.settlementid,t1.participantid,t1.clientid,
t2.accountid,t4.productgroupid,t4.productid,t4.underlyinginstrid,t1.instrumentid,t1.tradingrole,
t1.hedgeflag,t1.posidirection,t1.position + t1.ydposition,
if(t1.posidirection='2',
t2.longmarginratio,
t2.shortmarginratio) as MarginRatio,
t2.valuemode,t3.settlementprice,
round(if(t2.valuemode='1',
if(t1.posidirection='2',
t2.longmarginratio,
t2.shortmarginratio) * (t1.position + t1.ydposition) *
t4.volumemultiple * t3.settlementprice,
if(t1.posidirection='2',
t2.longmarginratio,
t2.shortmarginratio) * (t1.position + t1.ydposition) * t4.volumemultiple),
2) as positionmargin
from (select t1.*, t2.tradingrole
from dbclear.t_clientposition t1, siminfo.t_client t2
where t1.clientid = t2.clientid and t1.settlementgroupid = t2.settlementgroupid) t1,
(select t1.settlementgroupid,
t1.participantid,
t2.accountid,
t1.clientid,
t1.instrumentid,
t2.tradingrole,
t1.hedgeflag,
t1.longmarginratio,
t1.shortmarginratio,
t1.valuemode
from siminfo.t_marginratedetail t1, siminfo.t_partroleaccount t2
where t1.SettlementGroupID = t2.SettlementGroupID) t2,
dbclear.t_marketdata t3,
siminfo.t_instrument t4
where t2.participantid = t1.participantid
and t2.clientid = '00000000'
and t1.instrumentid = t2.instrumentid
and t1.tradingrole = t2.tradingrole
and t1.hedgeflag = t2.hedgeflag
and t1.instrumentid = t3.instrumentid
and t1.tradingday = t3.tradingday
and t1.settlementgroupid = t2.settlementgroupid
and t1.settlementgroupid = t3.settlementgroupid
and t1.settlementid = t3.settlementid
and t1.settlementgroupid = t4.settlementgroupid
and t1.instrumentid = t4.instrumentid
and t4.ProductClass != '2'
and (t1.posidirection = '2' or t1.posidirection = '3')
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s
ON DUPLICATE KEY UPDATE positionmargin = VALUES(positionmargin), MarginRatio = VALUES(MarginRatio),
valuemode = VALUES(valuemode)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
sql = """insert into dbclear.t_clientpositionmargin(TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,ProductGroupID,ProductID,UnderlyingInstrID,InstrumentID,TradingRole,HedgeFlag,PosiDirection,Position,MarginRatio,ValueMode,SettlementPrice,PositionMargin
)select t1.tradingday,t1.settlementgroupid,t1.settlementid,t1.participantid,t1.clientid,
t2.accountid,t4.productgroupid,t4.productid,t4.underlyinginstrid,t1.instrumentid,t1.tradingrole,
t1.hedgeflag,t1.posidirection,t1.position + t1.ydposition,
if(t1.posidirection='2',
t2.longmarginratio,
t2.shortmarginratio) as MarginRatio,
t2.valuemode,t3.settlementprice,
round(if(t2.valuemode='1',
if(t1.posidirection='2',
t2.longmarginratio,
t2.shortmarginratio) * (t1.position + t1.ydposition) *
t4.volumemultiple * t3.settlementprice,
if(t1.posidirection='2',
t2.longmarginratio,
t2.shortmarginratio) * (t1.position + t1.ydposition) * t4.volumemultiple),
2) as positionmargin
from (select t1.*, t2.tradingrole
from dbclear.t_clientposition t1, siminfo.t_client t2
where t1.clientid = t2.clientid and t1.settlementgroupid = t2.settlementgroupid) t1,
(select t1.settlementgroupid,
t1.participantid,
t2.accountid,
t1.clientid,
t1.instrumentid,
t2.tradingrole,
t1.hedgeflag,
t1.longmarginratio,
t1.shortmarginratio,
t1.valuemode
from siminfo.t_marginratedetail t1, siminfo.t_partroleaccount t2
where t1.SettlementGroupID = t2.SettlementGroupID) t2,
dbclear.t_marketdata t3,
siminfo.t_instrument t4
where t2.participantid = t1.participantid
and t2.clientid = t1.clientid
and t1.instrumentid = t2.instrumentid
and t1.tradingrole = t2.tradingrole
and t1.hedgeflag = t2.hedgeflag
and t1.instrumentid = t3.instrumentid
and t1.tradingday = t3.tradingday
and t1.settlementgroupid = t2.settlementgroupid
and t1.settlementgroupid = t3.settlementgroupid
and t1.settlementid = t3.settlementid
and t1.settlementgroupid = t4.settlementgroupid
and t1.instrumentid = t4.instrumentid
and t4.ProductClass != '2'
and (t1.posidirection = '2' or t1.posidirection = '3')
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s
ON DUPLICATE KEY UPDATE positionmargin = VALUES(positionmargin), MarginRatio = VALUES(MarginRatio),
valuemode = VALUES(valuemode)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
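# Illustrative note (added, not in the original source): with valuemode '1' the
# margin scales with contract value, e.g. 5 lots (today + yesterday), ratio 0.10,
# volumemultiple 10, settlement price 3010 -> 0.10 * 5 * 10 * 3010 = 15,050;
# otherwise it is a fixed amount per lot (ratio * lots * volumemultiple).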
# Position profit and loss
logger.info("[Calculate PositionProfit] is processing......")
# Write into t_clientPositionProfit
sql = """insert into dbclear.t_ClientPositionProfit(TradingDay, SettlementGroupID, SettlementID, ParticipantID, ClientID, InstrumentID, HedgeFlag, PosiDirection, PositionCost, SettlementPositionCost, PositionProfit
)select t1.tradingday,
t1.settlementgroupid,
t1.settlementid,
t1.participantid,
t1.clientid,
t1.instrumentid,
t1.hedgeflag,
t1.posidirection,
t1.positioncost + t1.ydpositioncost,
round((t1.position + t1.ydposition) * t2.volumemultiple * t3.settlementprice, 2) as settlepositioncost,
if(t1.posidirection='2', 1, -1) * round((round((t1.position + t1.ydposition) * t2.volumemultiple * t3.settlementprice, 2) - (t1.positioncost + t1.ydpositioncost)), 2) as positionprofit
from dbclear.t_clientposition t1, siminfo.t_instrument t2, dbclear.t_marketdata t3
where t1.tradingday = t3.tradingday
and t1.settlementgroupid = t2.settlementgroupid
and t1.settlementgroupid = t3.settlementgroupid
and t1.settlementid = t3.settlementid
and t1.instrumentid = t2.instrumentid
and t1.instrumentid = t3.instrumentid
and t2.ProductClass != '2'
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
def sett_future_option(logger, cursor, current_trading_day, next_trading_day, settlement_group_id, settlement_id):
# Calculate option settlement prices
riskfree_interest = 0.02
sql = """SELECT t1.instrumentid, t1.underlyinginstrid, t1.optionstype, t1.strikeprice, t2.lastprice AS optionprice, t3.settlementprice AS underlyingprice, t2.volume,
DATEDIFF(t4.expiredate, %s) + 1 AS duration, t4.pricetick
FROM siminfo.t_instrument t1, dbclear.t_marketdata t2, dbclear.t_marketdata t3, siminfo.t_instrumentproperty t4
WHERE t1.settlementgroupid = %s AND t1.OptionsType != '0'
AND t1.settlementgroupid = t2.settlementgroupid AND t1.instrumentid = t2.instrumentid
AND t1.settlementgroupid = t3.settlementgroupid AND t1.underlyinginstrid = t3.instrumentid
AND t1.settlementgroupid = t4.settlementgroupid AND t1.instrumentid = t4.instrumentid
AND t2.tradingday = %s AND t2.settlementid = %s
AND t3.tradingday = %s AND t3.settlementid = %s
ORDER BY t1.underlyinginstrid, t1.instrumentid, t1.optionstype"""
cursor.execute(sql, (
current_trading_day, settlement_group_id, current_trading_day, settlement_id, current_trading_day,
settlement_id))
rows = cursor.fetchall()
dce_bs = DCE_BLACKSCHOLES()
op = Option(UNDERLYING_FUTURE, OPTIONTYPE_EUROPEAN, CALLPUT_CALL, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00)
ins_list = []
md_dict = {}
for row in rows:
underlying_id = str(row[1])
instrument_id = str(row[0])
options_type = str(row[2])
strike_price = float(str(row[3]))
option_price = float(str(row[4]))
underlying_price = float(str(row[5]))
trade_volume = int(str(row[6]))
duration = int(str(row[7]))
price_tick = float(str(row[8]))
ins = {"instrumentID": instrument_id, "underlyingInsID": underlying_id, "optionsType": options_type,
"strikePrice": strike_price,
"optionPrice": option_price, "underlyingPrice": underlying_price, "duration": duration,
"priceTick": price_tick}
ins_list.append(ins)
op.call_put = CALLPUT_CALL if options_type == "1" else CALLPUT_PUT
op.strike_price = strike_price
op.underlying_price = underlying_price
op.t = duration
op.r = riskfree_interest
sigma = dce_bs.calc_sigma(op, option_price)
total_sigma = trade_volume * sigma
sum_sigma = sigma
total_volume = trade_volume
sum_count = 1
if underlying_id in md_dict.keys():
total_sigma += md_dict[underlying_id]["totalSigma"]
total_volume += md_dict[underlying_id]["totalVolume"]
sum_sigma += md_dict[underlying_id]["sumSigma"]
sum_count += md_dict[underlying_id]["sumCount"]
md_dict.update({underlying_id: {"totalSigma": total_sigma, "totalVolume": total_volume, "sumSigma": sum_sigma,
"sumCount": sum_count}})
for underlying_id in md_dict.keys():
total_sigma = md_dict[underlying_id]["totalSigma"]
total_volume = md_dict[underlying_id]["totalVolume"]
sum_sigma = md_dict[underlying_id]["sumSigma"]
sum_count = md_dict[underlying_id]["sumCount"]
settle_sigma = 0
if total_volume == 0:
settle_sigma = round(sum_sigma / sum_count, 4)
else:
settle_sigma = round(total_sigma / total_volume, 4)
md_dict[underlying_id].update({"sigma": settle_sigma})
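# Illustrative note (added, not in the original source): the per-underlying
# settlement sigma is the trade-volume-weighted implied volatility, e.g. vols
# 0.20 (40 lots) and 0.30 (10 lots) give (0.20*40 + 0.30*10) / 50 = 0.22; with
# no traded volume it falls back to the simple average across instruments.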
# TODO: change this so the price is only updated when the settlement price is zero
sql = """update dbclear.t_marketdata t
SET t.SettlementPrice = %s
WHERE t.TradingDay = %s
AND t.SettlementID = %s
AND t.SettlementGroupID = %s
AND t.InstrumentID = %s
AND t.settlementprice = 0"""
sql_params = []
for ins in ins_list:
instrument_id = ins["instrumentID"]
underlying_id = ins["underlyingInsID"]
price_tick = ins["priceTick"]
sigma = md_dict[underlying_id]["sigma"]
op.call_put = CALLPUT_CALL if ins["optionsType"] == "1" else CALLPUT_PUT
op.strike_price = ins["strikePrice"]
op.underlying_price = ins["underlyingPrice"]
op.t = ins["duration"]
op.r = riskfree_interest
op.sigma = sigma
settle_price = dce_bs.calc_price(op)
a, b = divmod(settle_price, price_tick)
settle_price = price_tick * a + (0 if b < price_tick / 2 else price_tick)
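# Illustrative note (added, not in the original source): divmod snaps the model
# price to the tick grid, e.g. price_tick 0.5 and settle_price 103.3 give
# a = 206, b ~ 0.3 >= 0.25, so the price rounds up to 103.5 (floating-point
# remainders make the comparison approximate).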
sql_params.append((settle_price, current_trading_day, settlement_id, settlement_group_id, instrument_id))
cursor.executemany(sql, sql_params)
# If the settlement price is zero, fall back to the previous settlement price
sql = """UPDATE dbclear.t_marketdata t
SET t.SettlementPrice = t.PreSettlementPrice
WHERE
t.TradingDay = %s
AND t.SettlementID = %s
AND t.SettlementGroupID = %s
AND t.SettlementPrice = %s"""
cursor.execute(sql, (current_trading_day, settlement_id, settlement_group_id, 0))
# Handle options delivery positions
logger.info("[Move Options DelivPosition] is processing......")
# 1) Insert into t_delivinstrument
sql = """insert into dbclear.t_delivinstrument(TradingDay, SettlementGroupID, SettlementID, InstrumentID
)select %s, t.SettlementGroupID, %s, t.instrumentid
from siminfo.t_instrumentproperty t, siminfo.t_instrument t1
where t.SettlementGroupID = t1.SettlementGroupID and t.InstrumentID = t1.InstrumentID
and t1.ProductClass = '2' and t.settlementgroupid = %s and t.ExpireDate < %s"""
cursor.execute(sql, (current_trading_day, settlement_id, settlement_group_id, next_trading_day))
# 2) Insert into t_clientdelivposition
sql = """insert into dbclear.t_clientdelivposition(TradingDay,SettlementGroupID,SettlementID,HedgeFlag,
PosiDirection,YdPosition,Position,LongFrozen,ShortFrozen,YdLongFrozen,YdShortFrozen,
BuyTradeVolume,SellTradeVolume,PositionCost,YdPositionCost,UseMargin,FrozenMargin,
LongFrozenMargin,ShortFrozenMargin,FrozenPremium,InstrumentID,ParticipantID,ClientID
)select TradingDay,SettlementGroupID,SettlementID,HedgeFlag,PosiDirection,YdPosition,Position,
LongFrozen,ShortFrozen,YdLongFrozen,YdShortFrozen,BuyTradeVolume,SellTradeVolume,PositionCost,
YdPositionCost,UseMargin,FrozenMargin,LongFrozenMargin,ShortFrozenMargin,FrozenPremium,
InstrumentID,ParticipantID,ClientID from dbclear.t_clientposition
where tradingday = %s
and settlementgroupid = %s
and settlementid = %s
and Position != '0'
and instrumentid in
(select t.instrumentid
from dbclear.t_delivinstrument t
where t.tradingday = %s
and t.settlementgroupid = %s
and t.settlementid = %s)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id,
current_trading_day, settlement_group_id, settlement_id))
# 3) Delete from t_clientposition
sql = """delete from dbclear.t_clientposition
where (tradingday = %s
and settlementgroupid = %s
and settlementid = %s
and instrumentid in
(select t.instrumentid
from dbclear.t_delivinstrument t
where t.tradingday = %s
and t.settlementgroupid = %s
and t.settlementid = %s))
or Position = '0'"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id,
current_trading_day, settlement_group_id, settlement_id))
# Delete from t_FuturePositionDtl
sql = """delete from dbclear.t_FuturePositionDtl
where (tradingday = %s
and settlementgroupid = %s
and settlementid = %s
and instrumentid in
(select t.instrumentid
from dbclear.t_delivinstrument t
where t.tradingday = %s
and t.settlementgroupid = %s
and t.settlementid = %s))"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id,
current_trading_day, settlement_group_id, settlement_id))
# 4) Calculate exercise (delivery) profit and loss
sql = """INSERT INTO dbclear.t_clientdelivprofit(TradingDay, SettlementGroupID, SettlementID, ParticipantID, AccountID, ClientID, HedgeFlag, InstrumentID, PosiDirection, POSITION, OptionsType, VolumeMultiple, UnderlyingMultiple, StrikePrice, SettlementPrice, Profit)
SELECT
t1.tradingday,
t1.settlementgroupid,
t1.settlementid,
t1.participantid,
t2.accountid,
t1.clientid,
t1.hedgeflag,
t1.instrumentid,
t1.posidirection,
t1.position,
t3.optionstype,
t3.volumemultiple,
t3.underlyingmultiple,
t3.strikeprice,
t3.settlementprice,
CASE
WHEN t3.optionstype = '1'
THEN IF(t1.posidirection = '2', 1, - 1) * (
t3.settlementprice - t3.strikeprice
) * t1.position * t3.volumemultiple * t3.underlyingmultiple
WHEN t3.optionstype = '2'
THEN IF(t1.posidirection = '2', - 1, 1) * (
t3.settlementprice - t3.strikeprice
) * t1.position * t3.volumemultiple * t3.underlyingmultiple
ELSE 0
END AS delivprofit
FROM
(SELECT
t1.*,
t2.tradingrole
FROM
dbclear.t_clientdelivposition t1,
siminfo.t_client t2
WHERE t1.clientid = t2.clientid) t1,
siminfo.t_PartRoleAccount t2,
(SELECT
t2.tradingday,
t1.settlementgroupid,
t2.settlementid,
t1.instrumentid,
t1.strikeprice,
t1.optionstype,
t1.volumemultiple,
t1.underlyingmultiple,
t2.UnderlyingClosePx as settlementprice
FROM
siminfo.t_instrument t1,
dbclear.t_marketdata t2
WHERE t1.settlementgroupid = %s
AND t2.tradingday = %s
AND t2.settlementid = %s
AND (
(
t1.optionstype = '1'
AND t1.strikeprice < t2.settlementprice
)
OR (
t1.optionstype = '2'
AND t1.strikeprice > t2.settlementprice
)
)
AND t1.SettlementGroupID = t2.SettlementGroupID
AND t1.instrumentid = t2.instrumentid) t3
WHERE t2.TradingRole = t1.TradingRole
AND t2.SettlementGroupID = t1.SettlementGroupID
AND t2.ParticipantID = t1.ParticipantID
AND t1.instrumentid = t3.instrumentid
AND t1.tradingday = t3.tradingday
AND t1.settlementgroupid = t3.settlementgroupid
AND t1.settlementid = t3.settlementid
AND (
t1.posidirection = '2'
OR t1.posidirection = '3'
)
AND t1.tradingday = %s
AND t1.settlementgroupid = %s
AND t1.settlementid = %s
"""
cursor.execute(sql, (settlement_group_id, current_trading_day, settlement_id, current_trading_day, settlement_group_id, settlement_id,))
# Delivery fee (placeholder; no SQL defined for this step)
sql = """"""
# Trading fee
logger.info("[Calculate Options TransFee] is processing......")
# Insert into the t_clienttransfee table
sql = """insert into dbclear.t_clienttransfee(TradingDay, SettlementGroupID, SettlementID, ParticipantID, ClientID, AccountID, ProductGroupID, ProductID, UnderlyingInstrID, InstrumentID, TradeID, Direction, TradingRole, HedgeFlag, OffsetFlag, Volume, Price, TransFeeRatio, ValueMode, TransFee, OrderSysID, MinFee, MaxFee
)select t1.tradingday,t1.settlementgroupid,t1.settlementid,t1.participantid,t1.clientid,
t1.accountid,t3.productgroupid,t3.productid,t3.underlyinginstrid,t1.instrumentid,
t1.tradeid,t1.direction,t1.tradingrole,t1.hedgeflag,t1.offsetflag,t1.volume,t1.price,
case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' or t1.offsetflag = '4' then
t2.closetodayfeeratio
end as transfeeratio,
t2.valuemode,
if(t2.valuemode='2',
round((case when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' or t1.offsetflag = '4' then
t2.closetodayfeeratio
end) * t1.volume, 2),
round((case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' or t1.offsetflag = '4' then
t2.closetodayfeeratio
end) * t1.price * t3.volumemultiple * t1.volume,2)) as transfee,
t1.OrderSysID,
'0' as Minfee,
'0' as MaxFee
from dbclear.t_trade t1,dbclear.t_clienttransfeeratio t2, siminfo.t_instrument t3
where t1.TradingDay = t2.TradingDay
and t1.SettlementID = t2.SettlementID
and t1.settlementgroupid = t2.settlementgroupid
and t2.participantid = '00000000'
and t2.clientid = '00000000'
and t1.instrumentid = t2.instrumentid
and t1.tradingrole = t2.tradingrole
and t1.hedgeflag = t2.hedgeflag
and t1.settlementgroupid = t3.settlementgroupid
and t1.instrumentid = t3.instrumentid
and t3.ProductClass = '2'
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
# Insert into the t_clienttransfee table
sql = """insert into dbclear.t_clienttransfee(TradingDay, SettlementGroupID, SettlementID, ParticipantID, ClientID, AccountID, ProductGroupID, ProductID, UnderlyingInstrID, InstrumentID, TradeID, Direction, TradingRole, HedgeFlag, OffsetFlag, Volume, Price, TransFeeRatio, ValueMode, TransFee, OrderSysID, MinFee, MaxFee
)select t1.tradingday,t1.settlementgroupid,t1.settlementid,t1.participantid,t1.clientid,
t1.accountid,t3.productgroupid,t3.productid,t3.underlyinginstrid,t1.instrumentid,
t1.tradeid,t1.direction,t1.tradingrole,t1.hedgeflag,t1.offsetflag,t1.volume,t1.price,
case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' or t1.offsetflag = '4' then
t2.closetodayfeeratio
end as transfeeratio,
t2.valuemode,
if(t2.valuemode='2',
round((case when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' or t1.offsetflag = '4' then
t2.closetodayfeeratio
end) * t1.volume, 2) ,
round((case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' or t1.offsetflag = '4' then
t2.closetodayfeeratio
end) * t1.price * t3.volumemultiple * t1.volume, 2)) as transfee,
t1.OrderSysID,
'0' as Minfee,
'0' as MaxFee
from dbclear.t_trade t1,dbclear.t_clienttransfeeratio t2, siminfo.t_instrument t3
where t1.TradingDay = t2.TradingDay
and t1.SettlementID = t2.SettlementID
and t1.settlementgroupid = t2.settlementgroupid
and t2.participantid = t1.participantid
and t2.clientid = '00000000'
and t1.instrumentid = t2.instrumentid
and t1.tradingrole = t2.tradingrole
and t1.hedgeflag = t2.hedgeflag
and t1.settlementgroupid = t3.settlementgroupid
and t1.instrumentid = t3.instrumentid
and t3.ProductClass = '2'
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s
ON DUPLICATE KEY UPDATE transfeeratio = VALUES(transfeeratio), valuemode = VALUES(valuemode),
transfee = VALUES(transfee)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
# Insert into the t_clienttransfee table
sql = """insert into dbclear.t_clienttransfee(TradingDay, SettlementGroupID, SettlementID, ParticipantID, ClientID, AccountID, ProductGroupID, ProductID, UnderlyingInstrID, InstrumentID, TradeID, Direction, TradingRole, HedgeFlag, OffsetFlag, Volume, Price, TransFeeRatio, ValueMode, TransFee, OrderSysID, MinFee, MaxFee
)select t1.tradingday,t1.settlementgroupid,t1.settlementid,t1.participantid,t1.clientid,
t1.accountid,t3.productgroupid,t3.productid,t3.underlyinginstrid,t1.instrumentid,
t1.tradeid,t1.direction,t1.tradingrole,t1.hedgeflag,t1.offsetflag,t1.volume,t1.price,
case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' or t1.offsetflag = '4' then
t2.closetodayfeeratio
end as transfeeratio,
t2.valuemode,
if(t2.valuemode='2',
round((case when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' or t1.offsetflag = '4' then
t2.closetodayfeeratio
end) * t1.volume, 2),
round((case
when t1.offsetflag = '0' or
t1.offsetflag = '2' then
t2.openfeeratio
when t1.offsetflag = '3' or t1.offsetflag = '1' or t1.offsetflag = '4' then
t2.closetodayfeeratio
end) * t1.price * t3.volumemultiple * t1.volume, 2)) as transfee,
t1.OrderSysID,
'0' as Minfee,
'0' as MaxFee
from dbclear.t_trade t1,dbclear.t_clienttransfeeratio t2, siminfo.t_instrument t3
where t1.TradingDay = t2.TradingDay
and t1.SettlementID = t2.SettlementID
and t1.settlementgroupid = t2.settlementgroupid
and t2.participantid = t1.participantid
and t2.clientid = t1.clientid
and t1.instrumentid = t2.instrumentid
and t1.tradingrole = t2.tradingrole
and t1.hedgeflag = t2.hedgeflag
and t1.settlementgroupid = t3.settlementgroupid
and t1.instrumentid = t3.instrumentid
and t3.ProductClass = '2'
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s
ON DUPLICATE KEY UPDATE transfeeratio = VALUES(transfeeratio), valuemode = VALUES(valuemode),
transfee = VALUES(transfee)"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
# Position premium
logger.info("[Calculate Options PositionPremium] is processing......")
# Insert into the t_clientpositionpremium table
sql = """INSERT INTO dbclear.t_clientpositionpremium
(TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,InstrumentID,Volume,UserID,Premium)
SELECT
TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,InstrumentID,sum(Volume),UserID,sum( Premium )
FROM
(
SELECT
t1.TradingDay,t1.SettlementGroupID,t1.SettlementID,t1.Direction,t1.ParticipantID,t1.ClientID,t1.AccountID,
t1.InstrumentID,if (t1.OffsetFlag = '0',t1.Volume, -1 * t1.Volume ) as Volume,t1.UserID,
ROUND( IF ( t1.Direction = '0', - 1 * Price * t2.VolumeMultiple * t2.UnderlyingMultiple, Price * t2.VolumeMultiple * t2.UnderlyingMultiple) * t1.Volume ,2) AS Premium
FROM
dbclear.t_trade t1,siminfo.t_instrument t2
WHERE
t1.settlementgroupid = t2.settlementgroupid
AND t1.instrumentid = t2.instrumentid
and t2.ProductClass = '2'
AND t1.tradingday = %s
AND t1.settlementgroupid = %s
AND t1.settlementid = %s
) t
GROUP BY TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,
InstrumentID,UserID"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
# Position margin
logger.info("[Calculate Options PositionMargin] is processing......")
# Insert into the t_clientpositionmargin table
# The short option position margin is the greater of:
# (1) option settlement price x trading unit of the corresponding futures contract
#     + trading margin of the underlying futures contract - (1/2) x the option's out-of-the-money amount;
# (2) option settlement price x trading unit of the corresponding futures contract
#     + (1/2) x trading margin of the underlying futures contract.
# Out-of-the-money amount of a call = Max(strike price - underlying futures settlement price, 0) x trading unit;
# out-of-the-money amount of a put = Max(underlying futures settlement price - strike price, 0) x trading unit.
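# Illustrative example of the rule above (hypothetical numbers, not taken from
# the dataset): option settlement price 10.5, trading unit 10, underlying
# futures settlement price 3000, short futures margin ratio 0.08 (by-money
# mode), strike 3050 for a call:
#   out-of-the-money amount = max(3050 - 3000, 0) * 10 = 500
#   (1) 10.5 * 10 + 0.08 * 10 * 3000 - 0.5 * 500 = 2255.0
#   (2) 10.5 * 10 + 0.5 * 0.08 * 10 * 3000 = 1305.0
# so the per-lot margin for the short position is max((1), (2)) = 2255.0,
# which is what the GREATEST(...) expression in the SQL below computes before
# it is multiplied by (Position + YdPosition).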
sql = """insert into dbclear.t_clientpositionmargin(TradingDay,SettlementGroupID,SettlementID,ParticipantID,ClientID,AccountID,ProductGroupID,ProductID,UnderlyingInstrID,InstrumentID,TradingRole,HedgeFlag,PosiDirection,Position,MarginRatio,ValueMode,SettlementPrice,PositionMargin
)SELECT t1.tradingday,
t1.settlementgroupid,
t1.settlementid,
t1.participantid,
t1.clientid,
t2.AccountID,
t4.productgroupid,
t4.productid,
t4.underlyinginstrid,
t1.instrumentid,
t1.tradingrole,
t1.hedgeflag,
t1.posidirection,
t1.position,
if (t1.posidirection = '3',t5.ShortMarginRatio,0) as MarginRatio,
t5.ValueMode,
t3.SettlementPrice,
if (t1.posidirection = '3', GREATEST(
round(t3.SettlementPrice * t4.VolumeMultiple * t4.UnderlyingMultiple +
if(t5.ValueMode = '1',
t5.ShortMarginRatio * t4.underlyvolumemultiple * t4.SettlementPrice,
t5.ShortMarginRatio * t4.underlyvolumemultiple
) -
(1/2) * if(t4.OptionsType = '1',
GREATEST(t4.StrikePrice - t4.SettlementPrice,0) * t4.VolumeMultiple * t4.UnderlyingMultiple,
GREATEST(t4.SettlementPrice - t4.StrikePrice,0) * t4.VolumeMultiple * t4.UnderlyingMultiple), 2),
round(t3.SettlementPrice * t4.VolumeMultiple * t4.UnderlyingMultiple +
(1/2) * if(t5.ValueMode = '1',
t5.ShortMarginRatio * t4.underlyvolumemultiple * t4.SettlementPrice,
t5.ShortMarginRatio * t4.underlyvolumemultiple) , 2)
), 0) * (t1.Position + t1.YdPosition) as positionmargin
FROM
( SELECT t1.*, t2.tradingrole FROM dbclear.t_clientposition t1, siminfo.t_client t2 WHERE t1.clientid = t2.clientid and t2.SettlementGroupID = %s) t1,
siminfo.t_PartRoleAccount t2,
dbclear.t_marketdata t3,
(select t.*, t1.volumemultiple as underlyvolumemultiple,t2.SettlementPrice
from siminfo.t_instrument t left join siminfo.t_instrument t1 on t.UnderlyingInstrID = t1.InstrumentID and t.SettlementGroupID = t1.SettlementGroupID
left join dbclear.t_marketdata t2 on t2.TradingDay = %s and t2.SettlementID = %s and t.SettlementGroupID = t2.SettlementGroupID and t.UnderlyingInstrID = t2.InstrumentID
where t.ProductClass = '2' and t.SettlementGroupID = %s) t4,
siminfo.t_marginratedetail t5
WHERE t2.TradingRole = t1.TradingRole
and t2.SettlementGroupID = t1.SettlementGroupID
and t2.ParticipantID = t1.ParticipantID
AND t1.tradingday = t3.tradingday
and t1.hedgeflag = t5.HedgeFlag
and t4.underlyinginstrid = t5.InstrumentID
AND t1.instrumentid = t4.instrumentid
and t1.InstrumentID = t3.InstrumentID
AND t1.settlementid = t3.settlementid
AND t1.settlementgroupid = t3.settlementgroupid
AND t1.settlementgroupid = t4.settlementgroupid
and t1.SettlementGroupID = t5.SettlementGroupID
and t4.ProductClass = '2'
AND ( t1.posidirection = '2' OR t1.posidirection = '3' )
and t1.tradingday = %s
and t1.settlementgroupid = %s
and t1.settlementid = %s"""
cursor.execute(sql, (settlement_group_id, current_trading_day, settlement_id, settlement_group_id,
current_trading_day, settlement_group_id, settlement_id))
def calc_future_posdtl(logger, cursor, current_trading_day, settlement_group_id, settlement_id, exchange_id):
logger.info("[calc_future_posdtl ] begin")
sql = """DELETE FROM dbclear.t_FuturePositionDtl WHERE tradingday = %s AND settlementgroupid= %s AND settlementid = %s"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id))
# Settle the data of the competition currently in progress
position_dtls = []
direction_pairs = [["0", "1"], ["1", "0"]]
for direction_pair in direction_pairs:
sql = """SELECT t.ClientID,t.InstrumentID,t.HedgeFlag,t.Direction,t.OpenDate,t.TradeID,t.Volume,t.OpenPrice,t.TradeType,t.ParticipantID, t.CloseProfitByDate,t.CloseProfitByTrade,t.PositionProfitByDate,t.PositionProfitByTrade,t.Margin,t.ExchMargin,t.MarginRateByMoney,t.MarginRateByVolume,t.LastSettlementPrice,t.SettlementPrice,t.CloseVolume,t.CloseAmount
FROM (SELECT ClientID,
InstrumentID,
HedgeFlag,
Direction,
OpenDate,
TradeID,
Volume,
OpenPrice,
TradeType,
ParticipantID,
CloseProfitByDate,
CloseProfitByTrade,
PositionProfitByDate,
PositionProfitByTrade,
Margin,
ExchMargin,
MarginRateByMoney,
MarginRateByVolume,
LastSettlementPrice,
SettlementPrice,
CloseVolume,
CloseAmount
FROM siminfo.t_FuturePositionDtl
WHERE tradingday = %s AND settlementgroupid = %s AND direction = %s
UNION ALL
SELECT ClientID,
InstrumentID,
HedgeFlag,
Direction,
TradingDay AS OpenDate,
TradeID,
Volume,
Price AS OpenPrice,
TradeType,
ParticipantID,
0 AS CloseProfitByDate,
0 AS CloseProfitByTrade,
0 AS PositionProfitByDate,
0 AS PositionProfitByTrade,
0 AS Margin,
0 AS ExchMargin,
0 AS MarginRateByMoney,
0 AS MarginRateByVolume,
0 AS LastSettlementPrice,
0 AS SettlementPrice,
0 AS CloseVolume,
0 AS CloseAmount
FROM
dbclear.t_trade
WHERE tradingday = %s AND settlementgroupid = %s AND settlementid = %s AND direction = %s AND offsetflag = '0' ) t
ORDER BY t.ClientID, t.InstrumentID, t.HedgeFlag, t.OpenDate, t.TradeID"""
cursor.execute(sql, (
current_trading_day, settlement_group_id, direction_pair[0], current_trading_day, settlement_group_id,
settlement_id, direction_pair[0],))
open_positions = cursor.fetchall()
sql = """SELECT t.ClientID,t.InstrumentID,
t.HedgeFlag,
t.Direction,
t.TradeID,
t.Volume AS CloseVolume,
t.Price AS ClosePrice,
t1.VolumeMultiple AS VolumeMultiple
FROM dbclear.t_trade t, siminfo.t_instrument t1
WHERE t.instrumentid = t1.instrumentid AND t.settlementgroupid = t1.settlementgroupid
AND t.tradingday = %s AND t.settlementgroupid = %s AND t.settlementid = %s AND t.direction = %s AND t.offsetflag = '1'
ORDER BY t.ClientID, t.InstrumentID, t.HedgeFlag, t.TradeID"""
cursor.execute(sql, (current_trading_day, settlement_group_id, settlement_id, direction_pair[1],))
close_positions = cursor.fetchall()
open_positions_array = []
for open_position in open_positions:
open_positions_array.append(list(open_position))
open_index = 0
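# The loop below matches every closing trade against the open positions of the
# same client and instrument in FIFO order (open positions were sorted by
# OpenDate and TradeID above): the closing volume is consumed from the oldest
# open position first, close profit / volume / amount are accumulated on that
# open position, and open_index advances once an open position is fully closed
# so later closing trades do not revisit it.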
for close_position in close_positions:
client_id = str(close_position[0])
instrument_id = str(close_position[1])
close_volume = int(str(close_position[5]))
close_price = float(str(close_position[6]))
volume_multiple = int(str(close_position[7]))
ranges = range(open_index, len(open_positions_array))
for idx in ranges:
open_index = idx
open_position = open_positions_array[idx]
if client_id != str(open_position[0]) or instrument_id != str(open_position[1]):
continue
open_volume = int(str(open_position[6]))
open_price = float(str(open_position[7]))
total_close_profit = float(str(open_position[10]))
total_close_volume = int(str(open_position[20]))
total_close_amount = float(str(open_position[21]))
curr_close_volume = 0
if open_volume > 0:
if open_volume >= close_volume:
curr_close_volume = close_volume
open_volume -= close_volume
close_volume = 0
else:
curr_close_volume = open_volume
close_volume -= open_volume
open_volume = 0
total_close_volume += curr_close_volume
total_close_amount += round(close_price * curr_close_volume * volume_multiple, 2)
if direction_pair[0] == '0':
total_close_profit += round((close_price - open_price) * curr_close_volume * volume_multiple, 2)
else:
total_close_profit += round((open_price - close_price) * curr_close_volume * volume_multiple, 2)
open_position[6] = str(open_volume)
open_position[10] = str(total_close_profit)
open_position[20] = str(total_close_volume)
open_position[21] = str(total_close_amount)
if open_volume == 0:
open_index = idx + 1
if close_volume == 0:
break
position_dtls.append(open_positions_array)
sql = """INSERT INTO dbclear.t_FuturePositionDtl(TradingDay,SettlementGroupID,SettlementID,InstrumentID,ParticipantID,ClientID,HedgeFlag,Direction,OpenDate,TradeID,
Volume,OpenPrice,TradeType,CombInstrumentID,ExchangeID,CloseProfitByDate,CloseProfitByTrade,PositionProfitByDate,PositionProfitByTrade,Margin,ExchMargin,MarginRateByMoney,MarginRateByVolume,LastSettlementPrice,SettlementPrice,CloseVolume,CloseAmount)
VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
for position_array in position_dtls:
sql_params = []
for position in position_array:
sql_params.append((current_trading_day, settlement_group_id, settlement_id, str(position[1]),
str(position[9]), str(position[0]),
str(position[2]), str(position[3]), str(position[4]), str(position[5]),
str(position[6]), str(position[7]), str(position[8]), "", exchange_id,
str(position[10]), str(position[11]), str(position[12]), str(position[13]),
str(position[14]), str(position[15]),
str(position[16]), str(position[17]), str(position[18]), str(position[19]),
str(position[20]), str(position[21])))
if len(sql_params) > 0:
cursor.executemany(sql, sql_params)
logger.info("[calc_future_posdtl] end")
def main():
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["mysql"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files)
process_assert(settle_future(context, conf))
if __name__ == "__main__":
main()
```
#### File: xops/settlement/snap_settle_data.py
```python
import json
from utils import log, mysql, Configuration, parse_conf_args, process_assert
def snap_data(context, conf):
result_code = 0
logger = log.get_logger(category="SnapSettleData")
broker_system_id = conf.get("brokerSystemId")
logger.info("[snap settle data %s] begin" % (json.dumps(conf, encoding="UTF-8", ensure_ascii=False)))
mysql_pool = mysql(configs=context.get("mysql").get(conf.get("mysqlId")))
mysql_conn = mysql_pool.get_cnx()
mysql_conn.set_charset_collation('utf8')
try:
mysql_conn.start_transaction()
cursor = mysql_conn.cursor()
logger.info("[get current trading day]......")
sql = """SELECT
DISTINCT t1.tradingday
FROM
siminfo.t_tradesystemtradingday t1,
siminfo.t_tradesystemsettlementgroup t2,
siminfo.t_brokersystemsettlementgroup t3
WHERE t1.tradesystemid = t2.tradesystemid
AND t2.settlementgroupid = t3.settlementgroupid
AND t3.brokersystemid = %s"""
cursor.execute(sql, (broker_system_id,))
row = cursor.fetchone()
current_trading_day = str(row[0])
logger.info("[get current trading day] current_trading_day = %s" % (current_trading_day))
logger.info("[snap order]......")
sql = """INSERT INTO snap.t_s_order(TradingDay,SettlementGroupID,SettlementID,OrderSysID,ParticipantID,ClientID,UserID,InstrumentID,OrderPriceType,Direction,CombOffsetFlag,CombHedgeFlag,LimitPrice,VolumeTotalOriginal,TimeCondition,GTDDate,VolumeCondition,MinVolume,ContingentCondition,StopPrice,ForceCloseReason,OrderLocalID,IsAutoSuspend,OrderSource,OrderStatus,OrderType,VolumeTraded,VolumeTotal,InsertDate,InsertTime,ActiveTime,SuspendTime,UpdateTime,CancelTime,ActiveUserID,Priority,TimeSortID,ClearingPartID,BusinessUnit)
SELECT TradingDay,SettlementGroupID,SettlementID,OrderSysID,ParticipantID,ClientID,UserID,InstrumentID,OrderPriceType,Direction,CombOffsetFlag,CombHedgeFlag,LimitPrice,VolumeTotalOriginal,TimeCondition,GTDDate,VolumeCondition,MinVolume,ContingentCondition,StopPrice,ForceCloseReason,OrderLocalID,IsAutoSuspend,OrderSource,OrderStatus,OrderType,VolumeTraded,VolumeTotal,InsertDate,InsertTime,ActiveTime,SuspendTime,UpdateTime,CancelTime,ActiveUserID,Priority,TimeSortID,ClearingPartID,BusinessUnit
FROM dbclear.t_order WHERE tradingday = %s AND settlementgroupid in (SELECT settlementgroupid FROM siminfo.t_brokersystemsettlementgroup where brokersystemid = %s)"""
cursor.execute(sql, (current_trading_day, broker_system_id,))
logger.info("[snap trade]......")
sql = """INSERT INTO snap.t_s_trade(TradingDay,SettlementGroupID,SettlementID,TradeID,Direction,OrderSysID,ParticipantID,ClientID,TradingRole,AccountID,InstrumentID,OffsetFlag,HedgeFlag,Price,Volume,TradeTime,TradeType,PriceSource,UserID,OrderLocalID,ClearingPartID,BusinessUnit)
SELECT TradingDay,SettlementGroupID,SettlementID,TradeID,Direction,OrderSysID,ParticipantID,ClientID,TradingRole,AccountID,InstrumentID,OffsetFlag,HedgeFlag,Price,Volume,TradeTime,TradeType,PriceSource,UserID,OrderLocalID,ClearingPartID,BusinessUnit
FROM dbclear.t_trade WHERE tradingday = %s AND settlementgroupid in (SELECT settlementgroupid FROM siminfo.t_brokersystemsettlementgroup where brokersystemid = %s)"""
cursor.execute(sql, (current_trading_day, broker_system_id,))
mysql_conn.commit()
except Exception as e:
logger.error("[snap settle data] Error: %s" % (e))
result_code = -1
finally:
mysql_conn.close()
logger.info("[snap settle data] end")
return result_code
def main():
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["mysql"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files)
process_assert(snap_data(context, conf))
if __name__ == "__main__":
main()
```
#### File: xops/tinit/broker_future_csv.py
```python
import csv
import os
from utils import log
from utils import parse_conf_args
from utils import path
from utils import Configuration
from utils import mysql
from utils import csv_tool
class exchange_future_csv:
def __init__(self, context, configs):
settlementGroupID = configs.get("settlementGroupID")
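# Assumed shape of `configs`, for illustration only (the keys are the ones
# read below; the values are made up):
#   {"settlementGroupID": ["SG01", "SG02"], "logId": "log", "mysqlId": "mysql",
#    "csv": "csv", "csvRoute": "broker_future"}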
log_conf = None if context.get("log") is None else context.get("log").get(configs.get("logId"))
# Initialize the logger
self.logger = log.get_logger(category="exchange_future_csv", configs=log_conf)
if log_conf is None:
self.logger.warning("exchange_future_csv未配置Log日志")
# Initialize the database connection
self.mysqlDB = mysql(configs=context.get("mysql")[configs.get("mysqlId")])
# Initialize settlementGroupID
self.settlementGroupID = settlementGroupID
# Initialize the output path for the generated CSV files
output = path.convert(context.get("csv")[configs.get("csv")]['broker'])
self.csv_path = os.path.join(output, str(configs.get("csvRoute")))
self.__to_csv()
def __to_csv(self):
mysqlDB = self.mysqlDB
# PositionDateType=1 and MaxMarginSideAlgorithm=1
self.__data_to_csv("t_Instrument", mysqlDB)
self.__data_to_csv("t_DepthMarketData", mysqlDB)
self.__data_to_csv("t_ExchangeMarginRate", mysqlDB)
# PositionDateType=1 and MortgageFundUseRange=0 and MortgageFundUseRange=0
self.__data_to_csv("t_Product", mysqlDB)
self.__data_to_csv("t_TradingAccount", mysqlDB)
self.__data_to_csv("t_Investor", mysqlDB)
self.__data_to_csv("t_InvestorAccount", mysqlDB)
self.__data_to_csv("t_InvestorDepartmentFlat", mysqlDB)
self.__data_to_csv("t_InvestorPassword", mysqlDB)
self.__data_to_csv("t_LinkMan", mysqlDB)
# self.__data_to_csv("t_MDInstrument", mysqlDB)
self.__data_to_csv("t_ProductGroup", mysqlDB)
self.__data_to_csv("t_IndexPrice", mysqlDB)
# self.__data_to_csv("t_InstrumentMarginRate", mysqlDB)
self.__data_to_csv("t_InstrumentCommissionRate", mysqlDB)
self.__data_to_csv("t_OptionInstrCommRate", mysqlDB)
# ======== Added 0301 ========
self.__data_to_csv("t_Trader", mysqlDB)
self.__data_to_csv("t_TraderAssign", mysqlDB)
self.__data_to_csv("t_BrokerUser", mysqlDB)
self.__data_to_csv("t_BrokerUserPassword", mysqlDB)
self.__data_to_csv("t_DepartmentUser", mysqlDB)
self.__data_to_csv("t_PartBroker", mysqlDB)
self.__data_to_csv("t_TradingAccountPassword", mysqlDB)
self.__data_to_csv("t_TradingCode", mysqlDB)
self.__data_to_csv("t_UserRightsAssign", mysqlDB)
self.__data_to_csv("t_InvestorPositionDtl", mysqlDB)
def __data_to_csv(self, table_name, mysqlDB):
table_sqls = dict(
t_Instrument=dict(columns=("InstrumentID", "ExchangeID", "InstrumentName", "ExchangeInstID", "ProductID",
"ProductClass", "DeliveryYear", "DeliveryMonth", "MaxMarketOrderVolume",
"MinMarketOrderVolume", "MaxLimitOrderVolume", "MinLimitOrderVolume",
"VolumeMultiple", "PriceTick", "CreateDate", "OpenDate", "ExpireDate",
"StartDelivDate", "EndDelivDate", "InstLifePhase", "IsTrading", "PositionType",
"PositionDateType", "LongMarginRatio", "ShortMarginRatio",
"MaxMarginSideAlgorithm", "UnderlyingInstrID", "StrikePrice",
"OptionsType", "UnderlyingMultiple", "CombinationType"),
sql="""SELECT t.InstrumentID,t3.ExchangeID AS ExchangeID,t.InstrumentName,
t.InstrumentID AS ExchangeInstID,t.ProductID,t.ProductClass,t.DeliveryYear,
t.DeliveryMonth,t1.MaxMarketOrderVolume,t1.MinMarketOrderVolume,
t1.MaxLimitOrderVolume,t1.MinLimitOrderVolume,t.VolumeMultiple,t1.PriceTick,
t1.CreateDate,t1.OpenDate,t1.ExpireDate,t1.StartDelivDate,t1.EndDelivDate,
t1.InstLifePhase, '1' AS IsTrading,t.PositionType,'2' as PositionDateType,
t2.LongMarginRatio,t2.ShortMarginRatio,'1' AS MaxMarginSideAlgorithm,
t.UnderlyingInstrID AS tUnderlyingInstrID,t.StrikePrice,t.OptionsType,
t.UnderlyingMultiple,'0' AS CombinationType
FROM siminfo.t_Instrument t INNER JOIN siminfo.t_instrumentproperty t1 on
t.SettlementGroupID = t1.SettlementGroupID and t.InstrumentID = t1.InstrumentID
AND t.SettlementGroupID in """ + str(tuple([str(i) for i in self.settlementGroupID])) + """
INNER JOIN siminfo.t_settlementgroup t3 on t.SettlementGroupID = t3.SettlementGroupID
AND t.SettlementGroupID in """ + str(tuple([str(i) for i in self.settlementGroupID])) + """
LEFT JOIN siminfo.t_marginratedetail t2 on t.SettlementGroupID = t2.SettlementGroupID
and t.InstrumentID = t2.InstrumentID AND t.SettlementGroupID in """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_DepthMarketData=dict(columns=("TradingDay", "InstrumentID", "ExchangeID", "ExchangeInstID", "LastPrice",
"PreSettlementPrice", "PreClosePrice", "PreOpenInterest", "OpenPrice",
"HighestPrice", "LowestPrice", "Volume", "Turnover", "OpenInterest",
"ClosePrice", "SettlementPrice", "UpperLimitPrice", "LowerLimitPrice",
"PreDelta", "CurrDelta", "UpdateTime", "UpdateMillisec", "BidPrice1",
"BidVolume1", "AskPrice1", "AskVolume1", "BidPrice2", "BidVolume2",
"AskPrice2", "AskVolume2", "BidPrice3", "BidVolume3", "AskPrice3",
"AskVolume3", "BidPrice4", "BidVolume4", "AskPrice4", "AskVolume4",
"BidPrice5", "BidVolume5", "AskPrice5", "AskVolume5", "AveragePrice",
"ActionDay"),
sql="""SELECT t1.TradingDay,t.InstrumentID,t2.ExchangeID,
t.InstrumentID AS ExchangeInstID,t1.LastPrice,t1.PreSettlementPrice,
t1.PreClosePrice,t1.PreOpenInterest,t1.OpenPrice,t1.HighestPrice,t1.LowestPrice,
'0' AS Volume,'0' AS Turnover,t1.OpenInterest,t1.ClosePrice,t1.SettlementPrice,
t1.UpperLimitPrice,t1.LowerLimitPrice,t1.PreDelta,t1.CurrDelta,'00:00:00' as UpdateTime,
'0' as UpdateMillisec,'0' AS BidPrice1,'0' AS BidVolume1,'0' AS AskPrice1,
'0' AS AskVolume1,'0' AS BidPrice2,'0' AS BidVolume2,'0' AS AskPrice2,
'0' AS AskVolume2,'0' AS BidPrice3,'0' AS BidVolume3,'0' AS AskPrice3,
'0' AS AskVolume3,'0' AS BidPrice4,'0' AS BidVolume4,'0' AS AskPrice4,
'0' AS AskVolume4,'0' AS BidPrice5,'0' AS BidVolume5,'0' AS AskPrice5,
'0' AS AskVolume5,'0' AS AveragePrice,'' AS ActionDay
FROM siminfo.t_Instrument t,siminfo.t_MarketData t1,siminfo.t_SettlementGroup t2
WHERE t.SettlementGroupID = t1.SettlementGroupID
AND t.InstrumentID = t1.InstrumentID
AND t.SettlementGroupID = t2.SettlementGroupID
AND t.SettlementGroupID in """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_ExchangeMarginRate=dict(columns=("BrokerID", "InstrumentID", "HedgeFlag", "LongMarginRatioByMoney",
"LongMarginRatioByVolume", "ShortMarginRatioByMoney",
"ShortMarginRatioByVolume"),
sql="""SELECT '10010' AS BrokerID,t.InstrumentID AS InstrumentID,
t.HedgeFlag AS HedgeFlag,t.LongMarginRatio LongMarginRatioByMoney,
'0' AS LongMarginRatioByVolume,t.ShortMarginRatio ShortMarginRatioByMoney,
'0' AS ShortMarginRatioByVolume
FROM siminfo.t_MarginRateDetail t,siminfo.t_Instrument t1
WHERE t.InstrumentID = t1.InstrumentID AND t1.SettlementGroupID in """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_Product=dict(columns=("ProductID", "ProductName", "ExchangeID", "ProductClass", "VolumeMultiple",
"PriceTick", "MaxMarketOrderVolume", "MinMarketOrderVolume", "MaxLimitOrderVolume",
"MinLimitOrderVolume", "PositionType", "PositionDateType", "CloseDealType",
"TradeCurrencyID", "MortgageFundUseRange", "ExchangeProductID",
"UnderlyingMultiple"),
sql="""SELECT t.ProductID,t3.ProductName,t2.ExchangeID,t.ProductClass,t.VolumeMultiple,
t1.PriceTick,t1.MaxMarketOrderVolume,t1.MinMarketOrderVolume,
t1.MaxLimitOrderVolume,t1.MinLimitOrderVolume,t.PositionType,
'2' AS PositionDateType,'0' AS CloseDealType,t2.Currency AS TradeCurrencyID,
'0' AS MortgageFundUseRange,'' AS ExchangeProductID,t.UnderlyingMultiple
FROM siminfo.t_Instrument t,siminfo.t_InstrumentProperty t1,
siminfo.t_SettlementGroup t2,siminfo.t_Product t3
WHERE t.InstrumentID = t1.InstrumentID
AND t.SettlementGroupID = t1.SettlementGroupID
AND t.SettlementGroupID = t2.SettlementGroupID
AND t.SettlementGroupID = t3.SettlementGroupID
AND t.ProductID = t3.ProductID
AND t.SettlementGroupID in """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_TradingAccount=dict(columns=("BrokerID", "AccountID", "PreMortgage", "PreCredit", "PreDeposit",
"PreBalance", "PreMargin", "InterestBase", "Interest", "Deposit", "Withdraw",
"FrozenMargin", "FrozenCash", "FrozenCommission", "CurrMargin", "CashIn",
"Commission", "CloseProfit", "PositionProfit", "Balance", "Available",
"WithdrawQuota", "Reserve", "TradingDay", "SettlementID", "Credit",
"Mortgage", "ExchangeMargin", "DeliveryMargin", "ExchangeDeliveryMargin",
"ReserveBalance", "CurrencyID", "PreFundMortgageIn", "PreFundMortgageOut",
"FundMortgageIn", "FundMortgageOut", "FundMortgageAvailable",
"MortgageableFund", "SpecProductMargin", "SpecProductFrozenMargin",
"SpecProductCommission", "SpecProductFrozenCommission",
"SpecProductPositionProfit", "SpecProductCloseProfit",
"SpecProductPositionProfitByAlg", "SpecProductExchangeMargin",
"FrozenSwap", "RemainSwap"),
sql="""SELECT DISTINCT
'10010' AS BrokerID,
t.InvestorID AS AccountID,
'0' AS PreMortgage,
'0' AS PreCredit,
t.Available AS PreDeposit,
t.Available AS PreBalance,
t.CurrMargin,
'0' AS InterestBase,
'0' AS Interest,
t.Deposit,
t.Withdraw,
'0' AS FrozenMargin,
'0' AS FrozenCash,
'0' AS FrozenCommission,
t.CurrMargin,
'0' AS CashIn,
'0' AS Commission,
t.CloseProfit,
'0' AS PositionProfit,
t.Balance,
t.Available,
'0' AS WithdrawQuota,
'0' AS Reserve,
t1.TradingDay,
'1' AS SettlementID,
'0' AS Credit,
'0' AS Mortgage,
'0' AS ExchangeMargin,
'0' AS DeliveryMargin,
'0' AS ExchangeDeliveryMargin,
'0' AS ReserveBalance,
'CNY' AS CurrencyID,
'0' AS PreFundMortgageIn,
'0' AS PreFundMortgageOut,
'0' AS FundMortgageIn,
'0' AS FundMortgageOut,
'0' AS FundMortgageAvailable,
'0' AS MortgageableFund,
'0' AS SpecProductMargin,
'0' AS SpecProductFrozenMargin,
'0' AS SpecProductCommission,
'0' AS SpecProductFrozenCommission,
'0' AS SpecProductPositionProfit,
'0' AS SpecProductCloseProfit,
'0' AS SpecProductPositionProfitByAlg,
'0' AS SpecProductExchangeMargin,
'0' AS FrozenSwap,
'0' AS RemainSwap
FROM
siminfo.t_InvestorFund t,
siminfo.t_TradeSystemTradingDay t1,
siminfo.t_brokersystemsettlementgroup t2,
siminfo.t_tradesystembrokersystem t3
WHERE
t.BrokerSystemID = t2.BrokerSystemID
AND t.BrokerSystemID = t3.BrokerSystemID
AND t3.TradeSystemID = t1.TradeSystemID
AND t2.SettlementGroupID IN """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_Investor=dict(columns=("InvestorID", "BrokerID", "InvestorGroupID", "InvestorName", "IdentifiedCardType",
"IdentifiedCardNo", "IsActive", "Telephone", "Address", "OpenDate", "Mobile",
"CommModelID", "MarginModelID"),
sql="""SELECT t.InvestorID AS InvestorID,'10010' AS BrokerID,'' AS InvestorGroupID,
t.InvestorName AS InvestorName,'' AS IdentifiedCardType,'' AS IdentifiedCardNo,
'1' AS IsActive,'' AS Telephone,'' AS Address,'' AS OpenDate,'' AS Mobile,
'' AS CommModelID,'' AS MarginModelID
FROM siminfo.t_Investor t""",
quoting=True),
t_InvestorAccount=dict(columns=("BrokerID", "InvestorID", "AccountID", "CurrencyID"),
sql="""SELECT '10010' AS BrokerID,t.InvestorID AS InvestorID,
t.InvestorID AS AccountID,'CNY' AS CurrencyID
FROM siminfo.t_Investor t""",
quoting=True),
t_InvestorDepartmentFlat=dict(columns=("BrokerID", "InvestorID", "DepartmentID"),
sql="""SELECT '10010' AS BrokerID,t.InvestorID,'01' AS DepartmentID
FROM siminfo.t_Investor t
UNION ALL
SELECT '10010' AS BrokerID,t.InvestorID,'0101' AS DepartmentID
FROM siminfo.t_Investor t""",
quoting=True),
t_InvestorPassword=dict(columns=("BrokerID", "InvestorID", "Password"),
sql="""SELECT '10010' AS BrokerID,t.InvestorID,t.Password
FROM siminfo.t_Investor t""",
quoting=True),
t_LinkMan=dict(columns=("BrokerID", "InvestorID", "PersonType", "IdentifiedCardType", "IdentifiedCardNo",
"PersonName", "Telephone", "Address", "ZipCode", "Priority", "UOAZipCode",
"PersonFullName"),
sql="""SELECT '10010' AS BrokerID,t.InvestorID,'1' AS PersonType,'1' AS IdentifiedCardType,
'' AS IdentifiedCardNo,t.InvestorName AS PersonName,'' AS Telephone,
'' AS Address,'' AS ZipCode,'0' AS Priority,'' AS UOAZipCode,
t.InvestorName AS PersonFullName
FROM siminfo.t_Investor t""",
quoting=True),
t_MDInstrument=dict(columns=("InstrumentID", "ExchangeID", "InstrumentName", "ExchangeInstID", "ProductID",
"ProductClass", "DeliveryYear", "DeliveryMonth", "MaxMarketOrderVolume",
"MinMarketOrderVolume", "MaxLimitOrderVolume", "MinLimitOrderVolume",
"VolumeMultiple", "PriceTick", "CreateDate", "OpenDate", "ExpireDate",
"StartDelivDate", "EndDelivDate", "InstLifePhase", "IsTrading", "PositionType",
"PositionDateType", "LongMarginRatio", "ShortMarginRatio",
"MaxMarginSideAlgorithm", "UnderlyingInstrID", "StrikePrice", "OptionsType",
"UnderlyingMultiple", "CombinationType"),
sql="""SELECT t.InstrumentID,t3.ExchangeID AS ExchangeID,t.InstrumentName,
t.InstrumentID AS ExchangeInstID,t.ProductID,t.ProductClass,t.DeliveryYear,
t.DeliveryMonth,t1.MaxMarketOrderVolume,t1.MinMarketOrderVolume,
t1.MaxLimitOrderVolume,t1.MinLimitOrderVolume,t.VolumeMultiple,t1.PriceTick,
t1.CreateDate,t1.OpenDate,t1.ExpireDate,t1.StartDelivDate,t1.EndDelivDate,
t1.InstLifePhase, '1' AS IsTrading,t.PositionType,'1' AS PositionDateType,
t2.LongMarginRatio,t2.ShortMarginRatio,'1' AS MaxMarginSideAlgorithm,
t.UnderlyingInstrID AS tUnderlyingInstrID,t.StrikePrice,t.OptionsType,
t.UnderlyingMultiple,'0' AS CombinationType
FROM siminfo.t_Instrument t,siminfo.t_InstrumentProperty t1,
siminfo.t_MarginRateDetail t2,siminfo.t_SettlementGroup t3
WHERE t.SettlementGroupID = t1.SettlementGroupID
AND t.InstrumentID = t1.InstrumentID
AND t.SettlementGroupID = t2.SettlementGroupID
AND t.InstrumentID = t2.InstrumentID
AND t.SettlementGroupID = t3.SettlementGroupID
AND t.SettlementGroupID in """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_ProductGroup=dict(columns=("ProductID", "ExchangeID", "ProductGroupID"),
sql="""SELECT t.ProductID,t1.ExchangeID,t.ProductGroupID
FROM siminfo.t_Product t,siminfo.t_SettlementGroup t1
WHERE t.SettlementGroupID = t1.SettlementGroupID
AND t.SettlementGroupID IN """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_IndexPrice=dict(columns=("BrokerID", "InstrumentID", "ClosePrice"),
sql="""SELECT '10010' AS BrokerID,t.InstrumentID,t.ClosePrice
FROM siminfo.t_MarketData t WHERE t.SettlementGroupID in """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_InstrumentMarginRate=dict(columns=("InstrumentID", "InvestorRange", "BrokerID", "InvestorID", "HedgeFlag",
"LongMarginRatioByMoney", "LongMarginRatioByVolume",
"ShortMarginRatioByMoney", "ShortMarginRatioByVolume", "IsRelative"),
sql="""SELECT t.InstrumentID,'1' AS InvestorRange,'10010' AS BrokerID,
'00000000' AS InvestorID,t1.HedgeFlag,
t1.LongMarginRatio AS LongMarginRatioByMoney,
'0' AS LongMarginRatioByVolume,
t1.ShortMarginRatio AS ShortMarginRatioByMoney,
'0' AS ShortMarginRatioByVolume,'1' AS IsRelative
FROM siminfo.t_Instrument t,siminfo.t_MarginRateDetail t1
WHERE t.SettlementGroupID = t1.SettlementGroupID
AND t.InstrumentID = t1.InstrumentID
AND t.SettlementGroupID IN """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_InstrumentCommissionRate=dict(columns=("InstrumentID", "InvestorRange", "BrokerID", "InvestorID",
"OpenRatioByMoney", "OpenRatioByVolume", "CloseRatioByMoney",
"CloseRatioByVolume", "CloseTodayRatioByMoney",
"CloseTodayRatioByVolume"),
sql="""SELECT t.InstrumentID,
'1' AS InvestorRange,
'10010' AS BrokerID,
'00000000' AS InvestorID,
if (t1.ValueMode = '1', t1.OpenFeeRatio , '0') as OpenRatioByMoney,
if (t1.ValueMode = '2', t1.OpenFeeRatio , '0') as OpenRatioByVolume,
if (t1.ValueMode = '1', t1.CloseTodayFeeRatio , '0') as CloseRatioByMoney,
if (t1.ValueMode = '2', t1.CloseTodayFeeRatio , '0') as CloseRatioByVolume,
if (t1.ValueMode = '1', t1.CloseTodayFeeRatio , '0') as CloseTodayRatioByMoney,
if (t1.ValueMode = '2', t1.CloseTodayFeeRatio , '0') as CloseTodayRatioByVolume
FROM
siminfo.t_Instrument t,
siminfo.t_transfeeratedetail t1
WHERE t.SettlementGroupID = t1.SettlementGroupID
and t.InstrumentID = t1.InstrumentID
and t.SettlementGroupID IN """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_OptionInstrCommRate=dict(columns=("InstrumentID", "InvestorRange", "BrokerID", "InvestorID",
"OpenRatioByMoney", "OpenRatioByVolume", "CloseRatioByMoney",
"CloseRatioByVolume", "CloseTodayRatioByMoney",
"CloseTodayRatioByVolume", "StrikeRatioByMoney", "StrikeRatioByVolume"),
sql="""SELECT t.InstrumentID,
'1' as InvestorRange,
'10010' as BrokerID,
'00000000' as InvestorID,
if(t1.ValueMode = '1',t1.OpenFeeRatio,'0') as OpenRatioByMoney,
if(t1.ValueMode = '2',t1.OpenFeeRatio,'0') as OpenRatioByVolume,
if(t1.ValueMode = '1',t1.CloseTodayFeeRatio,'0') as CloseRatioByMoney,
if(t1.ValueMode = '2',t1.CloseTodayFeeRatio,'0') as CloseRatioByVolume,
if(t1.ValueMode = '1',t1.CloseTodayFeeRatio,'0') as CloseTodayRatioByMoney,
if(t1.ValueMode = '2',t1.CloseTodayFeeRatio,'0') as CloseTodayRatioByVolume,
if(t1.ValueMode = '1',t1.CloseTodayFeeRatio,'0') as StrikeRatioByMoney,
if(t1.ValueMode = '2',t1.CloseTodayFeeRatio,'0') as StrikeRatioByVolume
from siminfo.t_instrument t, siminfo.t_transfeeratedetail t1
where t.SettlementGroupID = t1.SettlementGroupID
and t.InstrumentID = t1.InstrumentID
and t.productclass = '2'
AND t.SettlementGroupID IN """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_Trader=dict(columns=("ExchangeID", "TraderID", "ParticipantID", "Password", "InstallCount", "BrokerID"),
sql="""SELECT t2.ExchangeID,t1.UserID AS TraderID,t1.ParticipantID,
'111111' Password,'1' AS InstallCount,'10010' AS BrokerID
FROM siminfo.t_user t1,siminfo.t_settlementgroup t2
WHERE t1.SettlementGroupID = t2.SettlementGroupID
AND t1.UserID NOT IN ( 'TRADE01', 'TRADE02' )
AND t1.SettlementGroupID IN """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_TraderAssign=dict(columns=("BrokerID", "ExchangeID", "TraderID", "ParticipantID", "DRIdentityID"),
sql="""SELECT '10010' AS BrokerID,t2.ExchangeID,t1.UserID AS TraderID,
t1.ParticipantID,'1' AS DRIdentityID
FROM siminfo.t_user t1,siminfo.t_settlementgroup t2
WHERE t1.SettlementGroupID = t2.SettlementGroupID
AND t1.UserID NOT IN ( 'TRADE01', 'TRADE02' )
AND t1.SettlementGroupID IN """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_BrokerUser=dict(columns=("BrokerID", "UserID", "UserName", "UserType", "IsActive",
"IsUsingOTP", "IsAuthForce"),
sql="""SELECT '10010' AS BrokerID,t.InvestorID AS UserID,t.InvestorName AS UserName,
'0' AS UserType,'1' AS IsActive,'0' AS IsUsingOTP,
'0' AS IsAuthForce
FROM siminfo.t_investor t UNION ALL
SELECT '10010' AS BrokerID,'10010_admin' AS UserID,
'10010_admin' AS UserName,'1' AS UserType,'1' AS IsActive,
'0' AS IsUsingOTP,'0' AS IsAuthForce""",
quoting=True),
t_BrokerUserPassword=dict(columns=("BrokerID", "UserID", "Password", "LastUpdateTime", "LastLoginTime",
"ExpireDate", "WeakExpireDate"),
sql="""SELECT '10010' AS BrokerID,t.InvestorID AS UserID,t.`Password` AS PASSWORD,
'' AS LastUpdateTime,'20180101' AS LastLoginTime,'' AS ExpireDate,
'' AS WeakExpireDate
FROM siminfo.t_investor t UNION ALL
SELECT '10010' AS BrokerID,'10010_admin' AS UserID,'<PASSWORD>' AS PASSWORD,
'' AS LastUpdateTime,'20180101' AS LastLoginTime,'' AS ExpireDate,
'' AS WeakExpireDate""",
quoting=True),
t_DepartmentUser=dict(columns=("BrokerID", "UserID", "InvestorRange", "InvestorID"),
sql="""SELECT '10010' AS BrokerID,t.InvestorID AS UserID,'3' AS InvestorRange,
t.InvestorID FROM siminfo.t_investor t UNION ALL
SELECT '10010' AS BrokerID,'10010_admin' AS UserID,
'1' AS InvestorRange,'00000000' AS InvestorID""",
quoting=True),
t_PartBroker=dict(columns=("BrokerID", "ExchangeID", "ParticipantID", "IsActive"),
sql="""SELECT '10010' AS BrokerID,t.ExchangeID AS ExchangeID,t1.ParticipantID,t1.IsActive
FROM siminfo.t_settlementgroup t,siminfo.t_participant t1
WHERE t.SettlementGroupID = t1.SettlementGroupID
AND t.SettlementGroupID IN """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_TradingAccountPassword=dict(columns=("BrokerID", "AccountID", "Password", "CurrencyID"),
sql="""SELECT
'10010' AS BrokerID,
t.InvestorID AS AccountID,
t.`Password` AS PASSWORD,
'CNY' AS CurrencyID
FROM siminfo.t_investor t""",
quoting=True),
t_TradingCode=dict(columns=("InvestorID", "BrokerID", "ExchangeID", "ClientID", "IsActive", "ClientIDType"),
sql="""SELECT t.InvestorID,'10010' AS BrokerID,t1.ExchangeID,t.ClientID,
t2.IsActive,'1' AS ClientIDType
FROM siminfo.t_investorclient t,siminfo.t_settlementgroup t1,siminfo.t_client t2
WHERE t.SettlementGroupID = t1.SettlementGroupID
AND t.SettlementGroupID = t2.SettlementGroupID
AND t.ClientID = t2.ClientID AND t.SettlementGroupID IN """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
t_UserRightsAssign=dict(columns=("BrokerID", "UserID", "DRIdentityID"),
sql="""SELECT '10010' AS BrokerID,t.InvestorID AS UserID,'1' AS DRIdentityID
FROM siminfo.t_investor t
UNION ALL
SELECT '10010' AS BrokerID,'10010_admin' AS UserID,'1' AS DRIdentityID """,
quoting=True),
t_InvestorPositionDtl=dict(columns=("InstrumentID", "BrokerID", "InvestorID", "HedgeFlag", "Direction",
"OpenDate", "TradeID", "Volume", "OpenPrice", "TradingDay",
"SettlementID", "TradeType", "CombInstrumentID", "ExchangeID",
"CloseProfitByDate", "CloseProfitByTrade", "PositionProfitByDate",
"PositionProfitByTrade", "Margin", "ExchMargin", "MarginRateByMoney",
"MarginRateByVolume", "LastSettlementPrice", "SettlementPrice",
"CloseVolume", "CloseAmount"),
sql="""select InstrumentID,'10010' as BrokerID,InvestorID,HedgeFlag,Direction,
OpenDate,TradeID,Volume,OpenPrice,TradingDay,SettlementID,TradeType,
CombInstrumentID,ExchangeID,CloseProfitByDate,CloseProfitByTrade,
PositionProfitByDate,PositionProfitByTrade,Margin,ExchMargin,
MarginRateByMoney,MarginRateByVolume,LastSettlementPrice,
SettlementPrice,CloseVolume,CloseAmount
from sync.t_futurepositiondtl
where SettlementGroupID in """ +
str(tuple([str(i) for i in self.settlementGroupID])),
quoting=True),
)
# Query the data for this table from the database
csv_data = mysqlDB.select(table_sqls[table_name]["sql"], table_sqls[table_name].get("params"))
# Generate the CSV file
self.__produce_csv(table_name, table_sqls[table_name], csv_data)
# Generate the CSV file
def __produce_csv(self, table_name, columns, csv_data):
self.logger.info("%s%s%s" % ("开始生成 ", table_name, ".csv"))
_path = "%s%s%s%s" % (str(self.csv_path), os.path.sep, table_name, '.csv')
# Create the directory first if it does not exist
if not os.path.exists(str(self.csv_path)):
os.makedirs(str(self.csv_path))
with open(_path, 'wb') as csvfile:
if "quoting" in columns and columns['quoting']:
writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
else:
writer = csv.writer(csvfile)
writer.writerow(csv_tool.covert_to_gbk(columns['columns']))
writer.writerows(csv_tool.covert_to_gbk(csv_data))
self.logger.info("%s%s%s" % ("生成 ", table_name, ".csv 文件完成"))
if __name__ == '__main__':
base_dir, config_names, config_files, add_ons = parse_conf_args(__file__, config_names=["mysql", "log", "csv"])
context, conf = Configuration.load(base_dir=base_dir, config_names=config_names, config_files=config_files)
# Run the script
exchange_future_csv(context=context, configs=conf)
```
#### File: utils/config/config_tool.py
```python
import os
import json
import path_tool
class Configuration:
@staticmethod
def load(base_dir, config_names, config_files, add_ons={}):
context = {}
conf = {}
config_base_dir = base_dir
if config_base_dir is None:
default_base_dir = path_tool.path.parent(__file__, 3)
# Project base path (defaults to the current program path)
config_base_dir = os.environ.get("SIM_PLATFORM_HOME", default_base_dir)
config_base_dir = os.path.join(config_base_dir, "configuration")
if config_base_dir is not None and config_names is not None:
for config_name in config_names:
if config_name.find(":") == -1:
config_file = os.path.join(config_base_dir, os.environ.get("SIM_RELEASE"), config_name + ".json")
context.update({config_name: Configuration.load_json(config_file)})
else:
config_name_items = config_name.split(":", 1)
config_file = os.path.join(config_base_dir, os.environ.get("SIM_RELEASE"), config_name_items[1] + ".json")
context.update({config_name_items[0]: Configuration.load_json(config_file)})
if config_files is not None:
for config_file in config_files:
if config_file is not None:
# Check whether the config file path starts from the root directory
if not config_file.startswith(os.getcwd()[0:3]):
config_file = os.path.join(config_base_dir, os.environ.get("SIM_RELEASE"), config_file)
if os.path.exists(config_file):
conf.update(Configuration.load_json(config_file))
else:
print("can not find customize config file ==> [%s]" % config_file)
exit(-1)
if add_ons is not None:
conf.update(add_ons)
return context, conf
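# Illustrative call (hypothetical file names): Configuration.load(
#     base_dir=None, config_names=["mysql", "log"],
#     config_files=["settle_future.json"])
# returns (context, conf), where context maps each config name to its parsed
# JSON and conf is the merged customize config updated with add_ons.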
# 1. Look for a config file in the same directory as the script
# 2. Check the config file pointed to by the environment variable
@staticmethod
def find_selfconfig(file_path, has_config):
self_config_file = file_path[:-3] + ".json"
check_files = [self_config_file]
if os.path.exists(self_config_file):
return self_config_file
elif os.environ.get("SIM_RELEASE") is not None:
# Load the configuration branch for the system release
release = os.environ.get("SIM_RELEASE")
default_base_dir = path_tool.path.parent(__file__, 3).replace("\\", "/")
config_base_dir = os.path.join(default_base_dir, "configuration")
config_base_dir = os.path.join(config_base_dir, release).replace("\\", "/")
self_config_file = self_config_file.replace("\\", "/")
diff = self_config_file.replace(default_base_dir, "")
diff = diff[1:] if diff[0] == '/' else diff
config_base_dir = os.path.join(config_base_dir, diff)
check_files.append(config_base_dir)
if os.path.exists(config_base_dir):
return config_base_dir
print("can not find config file ==> %s" % check_files)
if has_config:
print("has_customize_config, countinue")
else:
exit()
@staticmethod
def load_json(file_name):
with open(file_name) as f:
config = json.load(f)
return config
```
#### File: utils/database/oracle_tool.py
```python
import cx_Oracle
from utils.logger.log import log
class oracle:
def __init__(self, configs):
self.logger = log.get_logger(category="oracle")
_user = configs["user"]
_password = configs["password"]
_host = configs["host"]
_port = configs["port"]
_sid = configs["sid"]
_min = configs["pool_min"]
_max = configs["pool_max"]
self.__connect(_user, _password, _host, _port, _sid, _min, _max)
def __connect(self, user, password, host, port, sid, _min, _max):
dsn = cx_Oracle.makedsn(host, port, sid)
self.logger.info("start connect oracle database [ user=%s, host=%s, port=%s ]", user, host, port)
self.pool = cx_Oracle.SessionPool(user=user, password=password, dsn=dsn, min=_min, max=_max, increment=1)
def get_cnx(self):
acq = self.pool.acquire()
return acq
def __release_cnx(self, cnx):
self.pool.release(cnx)
# Check whether a record exists
def is_exist(self, sql, params):
res = self.select(sql=sql, params=params)
if len(res) > 0:
return True
else:
return False
# Query
def select(self, sql, params=None):
cnx = self.get_cnx()
try:
self.logger.debug({"sql": sql, "params": params})
cursor = cnx.cursor()
if params is None:
cursor.execute(sql)
else:
cursor.execute(sql, params)
fc = cursor.fetchall()
return fc
except Exception as err:
self.logger.error(err)
finally:
cursor.close()
self.__release_cnx(cnx)
# Execute
def execute(self, sql, params=None):
cnx = self.get_cnx()
try:
self.logger.debug({"sql": sql, "params": params})
cursor = cnx.cursor()
if params is None:
cursor.execute(sql)
else:
cursor.execute(sql, params)
cnx.commit()
except Exception as err:
self.logger.error(err)
finally:
cursor.close()
self.__release_cnx(cnx)
# Batch execute
def executemany(self, sql, params):
cnx = self.get_cnx()
try:
self.logger.debug({"sql": sql, "params": params})
cursor = cnx.cursor()
cursor.prepare(sql)
cursor.executemany(None, params)
cnx.commit()
except Exception as err:
cnx.rollback()
self.logger.error(err)
finally:
cursor.close()
self.__release_cnx(cnx)
``` |
{
"source": "jiafeng5513/BinocularNet",
"score": 2
} |
#### File: comparisons/depth_from_video_in_the_wild/render_euroc_depth.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from absl import app
from absl import flags
import matplotlib.image
import numpy as np
flags.DEFINE_string('room_path', '', 'Path to the EuRoC DataFlow for one of the '
'rooms ')
flags.DEFINE_string('output_path', '', 'Path where to store the outputs.')
FLAGS = flags.FLAGS
# A 4D transform that connects the Cam0 to the body of the MAV. This is taken
# from the sensor.yaml file. To project the point cloud on Cam1, please replace
# with the respective extrinsic matrix. This is constant across all the rooms in
# the dataset.
CAM0_TO_BODY = np.array(
[[0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975],
[0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768],
[-0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949],
[0.0, 0.0, 0.0, 1.0]])
# Intrinsics of Cam0. This is taken from cam0/sensor.yaml and is the same for
# all rooms.
FX = 458.654
FY = 457.296
X0 = 367.215
Y0 = 248.375
K1 = -0.28340811
K2 = 0.07395907
H = 480
W = 752
def get_camera_view_pointcloud(transform, xyz, greyscale_color):
"""Transform point cloud to camera view, prune points outside of the view.
Args:
transform: 4x4 transform matrix representing position and orientation of
the body of the MAV.
xyz: A 4xN matrix, point cloud in homogeneous coordinates. The k-th column
is (x, y, z, 1), where x, y, z are the coordinates of the k-th point.
greyscale_color: N-vector, vertex grayscale value. The k-th entry is the
greyscale color of the k-th point.
Returns:
3xM (M < N) matrix representing the point cloud in the camera view.
M vector, vertex grayscale value.
Only points that fall within the camera viewing angle and are in front of
the camera are kept.
"""
overall_transform = np.linalg.inv(CAM0_TO_BODY).dot(np.linalg.inv(transform))
transformed_xyz = xyz.dot(overall_transform.transpose())
x, y, z, _ = _split(transformed_xyz)
u, v = _project_and_distort(x, y, z)
# Remove points that are out of frame. Keep some margin (1.05), to make sure
# occlusions are addressed correctly at the edges of the field of view. For
# example a point that is just slightly out of frame can occlude a neighboring
# point inside the frame.
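# (With W = 752 and H = 480 this keeps points whose projection falls roughly
# in u in (-37.6, 789.6) and v in (-24, 504).)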
valid_mask = np.logical_and.reduce(
(z > 0.0, u > -0.05 * W, u < W * 1.05, v > -0.05 * H, v < H * 1.05),
axis=0)
valid_points = valid_mask.nonzero()[0]
return transformed_xyz[valid_points, :3], greyscale_color[valid_points]
def get_occluded_points(xyz, neighborhood_radius, z_threshold):
"""Remove points that are occluded by others from a camera-view point cloud.
Args:
xyz: A 3xN matrix representing the point cloud in the camera view.
neighborhood_radius: The radius around each point in which it occludes
others.
z_threshold: Minimum z distance between two points for them to be considered
occluding each other. If two points are very close in z, they likely
belong to the same surface and thus do not occlude each other.
Returns:
A list of indices in xyz corresponding to points that are occluded.
"""
def get_bin(xz, yz):
xbin = int(round(xz / neighborhood_radius))
ybin = int(round(yz / neighborhood_radius))
return xbin, ybin
xs, ys, zs = _split(xyz)
xzs = xs / zs
yzs = ys / zs
grid = collections.defaultdict(lambda: np.inf)
for ind in range(xyz.shape[0]):
# Place each point in the bin where it belongs, and in the neighboring bins.
# Keep only the closest point to the camera in each bin.
xbin, ybin = get_bin(xzs[ind], yzs[ind])
for i in range(-1, 2):
for j in range(-1, 2):
grid[(xbin + i, ybin + j)] = min(grid[(xbin + i, ybin + j)], zs[ind])
occluded_indices = []
for ind in range(xyz.shape[0]):
# Loop over all points and see if they are occluded, by finding the closest
# point to the camera within the same bin and testing for the occlusion
# condition. A point is occluded if there is another point in the same bin
# that is far enough in z, so that it cannot belong to the same surface.
zmin = grid[get_bin(xzs[ind], yzs[ind])]
if zmin < (1 - z_threshold) * zs[ind]:
occluded_indices.append(ind)
return occluded_indices
def render_rgb(xyz, c):
"""Given a colored cloud in camera coordinates, render an image.
This function is useful for visualization / debugging.
Args:
xyz: A 3xN matrix representing the point cloud in the camera view.
c: A N-long vector containing (greyscale) colors of the points.
Returns:
A rendered image.
"""
x, y, z = _split(xyz)
u, v = _project_and_distort(x, y, z)
u = np.floor(0.5 * u).astype(int)
v = np.floor(0.5 * v).astype(int)
rendered_c = np.full((int(H / 2), int(W / 2)), 0.0)
rendered_c[v, u] = c
rendered_c = np.stack([rendered_c] * 3, axis=2)
return rendered_c
def render_z(xyz):
"""Given a colored cloud in camera coordinates, render a depth map.
This function is useful for visualization / debugging.
Args:
xyz: A 3xN matrix representing the point cloud in the camera view.
Returns:
A rendered depth map.
"""
x, y, z = _split(xyz)
u, v = _project_and_distort(x, y, z)
u = np.floor(0.5 * u).astype(int)
v = np.floor(0.5 * v).astype(int)
rendered_z = np.full((int(H / 2), int(W / 2)), -np.inf)
rendered_z[v, u] = z
maxz = np.max(rendered_z)
rendered_z = np.where(rendered_z == -np.inf, np.nan, rendered_z)
rendered_z /= maxz
return rendered_z
class GroundTruthInterpolator(object):
"""Interpolates MAV position and orientation groundtruth to a timestamp."""
def __init__(self, filename):
"""Creates an instance.
Args:
filename: A string, filepath of the state_groundtruth_estimate0.csv file.
"""
with open(filename) as f:
lines = f.readlines()
lines = lines[1:] # skip the first line
gt = []
for l in lines:
tokens = l.split(',')
gt.append([float(t) for t in tokens[:8]])
self._gt = np.array(gt)
self._mint = np.min(self._gt[:, 0])
self._maxt = np.max(self._gt[:, 0])
def get_transform(self, timestamp):
"""Interpolates the MAV's transform matrix at a timestamp."""
if timestamp < self._mint or timestamp > self._maxt:
return None
# self._gt[:, 0], the 0th column, is the timestamp. Columns 1-3 are x, y, z,
# and columns 4-7 are quaternion components describing the rotation.
timestamps = self._gt[:, 0]
x = np.interp(timestamp, timestamps, self._gt[:, 1])
y = np.interp(timestamp, timestamps, self._gt[:, 2])
z = np.interp(timestamp, timestamps, self._gt[:, 3])
qw = np.interp(timestamp, timestamps, self._gt[:, 4])
qx = np.interp(timestamp, timestamps, self._gt[:, 5])
qy = np.interp(timestamp, timestamps, self._gt[:, 6])
qz = np.interp(timestamp, timestamps, self._gt[:, 7])
# Build the 4x4 homogeneous transform from the interpolated translation
# (x, y, z) and quaternion (qw, qx, qy, qz) using the standard
# quaternion-to-rotation-matrix formula.
transform = np.array([[
1 - 2 * qy * qy - 2 * qz * qz, 2 * qx * qy - 2 * qz * qw,
2 * qx * qz + 2 * qy * qw, x
], # pylint: disable=bad-continuation
[
2 * qx * qy + 2 * qz * qw,
1 - 2 * qx * qx - 2 * qz * qz,
2 * qy * qz - 2 * qx * qw, y
],
[
2 * qx * qz - 2 * qy * qw,
2 * qy * qz + 2 * qx * qw,
1 - 2 * qx * qx - 2 * qy * qy, z
], [0.0, 0.0, 0.0, 1.0]])
return transform
def read_ply(filename):
"""Reads a PLY file representing EuRoc's point cloud."""
with open(filename) as f:
lines = f.readlines()
lines = lines[11:]
xyz = []
c = [] # The color channel (just one, it's greyscale)
for l in lines:
tokens = l.split(' ')
xyz.append([float(t) for t in tokens[:3]])
c.append(float(tokens[3]))
return np.array(xyz), np.array(c)
def filter_out_ot_frame_points(xyz, c):
"""Remove all points in a camera-view pointcloud that are out of frame.
Args:
xyz: A 3xN matrix representing the point cloud in the camera view.
c: A N-long vector containing (greyscale) colors of the points.
Returns:
A 3xM matrix and a M-long vector representing the filtered colored point
cloud.
"""
x, y, z = _split(xyz)
u, v = _project_and_distort(x, y, z)
u = np.floor(u).astype(int)
v = np.floor(v).astype(int)
valid_mask = np.logical_and.reduce((u >= 0, u < W, v >= 0, v < H), axis=0)
valid_points = valid_mask.nonzero()[0]
return xyz[valid_points, :], c[valid_points]
def sample_uniform(xyz, bin_size):
"""subsamples a point cloud to be more uniform in perspective coordinates.
Args:
xyz: A 3xN matrix representing the point cloud in the camera view.
bin_size: Size of a square in which we allow only a single point.
Returns:
A list of indices, corresponding to a subset of the original `xyz`, to keep.
"""
x, y, z = _split(xyz)
xbins = (x / z / bin_size)
ybins = (y / z / bin_size)
xbins_rounded = np.round(xbins)
ybins_rounded = np.round(ybins)
xbins_diff = xbins_rounded - xbins
ybins_diff = ybins_rounded - ybins
diff_sq = xbins_diff**2 + ybins_diff**2
bin_to_ind = {}
for ind in range(len(diff_sq)):
bin_ = (xbins_rounded[ind], ybins_rounded[ind])
if bin_ not in bin_to_ind or diff_sq[ind] < bin_to_ind[bin_][1]:
bin_to_ind[bin_] = (ind, diff_sq[ind])
inds_to_keep = sorted([i[0] for i in bin_to_ind.values()])
return inds_to_keep
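# Usage sketch: keep roughly one point per 0.01-wide square in perspective
# coordinates, preferring the point closest to each bin centre.
# inds = sample_uniform(xyz_view, bin_size=1e-2)
# xyz_uniform = xyz_view[inds]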
def main(argv):
del argv # unused
gti = GroundTruthInterpolator(
os.path.join(FLAGS.room_path, 'state_groundtruth_estimate0/data.csv'))
print('Groundtruth loaded.')
xyz, c = read_ply(os.path.join(FLAGS.room_path, 'pointcloud0/data.ply'))
print('PLY loaded.')
xyz_homogeneous = np.concatenate([xyz, np.ones((xyz.shape[0], 1))], axis=1)
imagesto_render = sorted(
os.listdir(os.path.join(FLAGS.room_path, 'cam0/data')))
imagesto_render = imagesto_render[0::5] # render every fifth image
for imfile in imagesto_render:
timestamp = float(imfile.split('.')[0])
transform = gti.get_transform(timestamp)
if transform is None:
print ('Timestamp %d has no groundtruth.' % int(timestamp))
continue
else:
print ('Rendering timestamp %d...' % int(timestamp))
xyz_view, c_view = get_camera_view_pointcloud(transform, xyz_homogeneous, c)
print ('View pointcloud generated, %d points.' % xyz_view.shape[0])
occluded_inds = get_occluded_points(xyz_view, 0.02, 0.08)
occluded_inds = set(occluded_inds)
visible_indices = [
i for i in range(xyz_view.shape[0]) if i not in occluded_inds
]
print ('%d visible points found.' % len(visible_indices))
visible_xyz = xyz_view[visible_indices, :]
visible_c = c_view[visible_indices]
visible_xyz, visible_c = filter_out_ot_frame_points(visible_xyz, visible_c)
inds_to_keep = sample_uniform(visible_xyz, 1e-2)
visible_xyz = visible_xyz[inds_to_keep]
visible_c = visible_c[inds_to_keep]
rgb_image = render_rgb(visible_xyz, visible_c)
z_image = render_z(visible_xyz)
matplotlib.image.imsave(
os.path.join(FLAGS.output_path, '%dgrayscale.png' % int(timestamp)),
rgb_image)
matplotlib.image.imsave(
os.path.join(FLAGS.output_path, '%ddepth.png' % int(timestamp)),
z_image)
np.save(
os.path.join(FLAGS.output_path, '%d.npy' % int(timestamp)), visible_xyz)
def _split(matrix):
return [
np.squeeze(v, axis=1) for v in np.split(matrix, matrix.shape[1], axis=1)
]
def _project_and_distort(x, y, z):
"""Apply perspective projection and distortion on a point cloud.
Args:
x: A vector containing the x coordinates of the points.
y: A vector containing the y coordinates of the points, same length as x.
z: A vector containing the z coordinates of the points, same length as x.
Returns:
A tuple of two vectors of the same length as x, containing the image-plane
coordinates (u, v) of the point cloud.
"""
xz = (x / z)
yz = (y / z)
# 2. Apply radial camera distortion:
rr = xz**2 + yz**2
distortion = (1 + K1 * rr + K2 * rr * rr)
xz *= distortion
yz *= distortion
# 3. Apply intrinsic matrix to get image coordinates:
u = FX * xz + X0
v = FY * yz + Y0
return u, v
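# Worked example (FX, FY, X0, Y0, K1, K2 are module-level intrinsics defined
# earlier in this file; the numbers below are made up): with FX = FY = 460,
# X0 = 367, Y0 = 248, K1 = -0.28, K2 = 0.07, a point at (x, y, z) = (0.1, 0.2, 1.0)
# gives xz = 0.1, yz = 0.2, rr = 0.05,
# distortion = 1 + (-0.28)(0.05) + 0.07(0.05)^2 ≈ 0.986, so
# u ≈ 460 * 0.1 * 0.986 + 367 ≈ 412.4 and v ≈ 460 * 0.2 * 0.986 + 248 ≈ 338.7.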
if __name__ == '__main__':
app.run(main)
```
#### File: SfmLeaner_pytorch/data/prepare_train_data.py
```python
import argparse
import scipy.misc
import numpy as np
from pebble import ProcessPool
import sys
from tqdm import tqdm
from path import Path
parser = argparse.ArgumentParser()
parser.add_argument("dataset_dir", metavar='DIR',
help='path to original dataset')
parser.add_argument("--dataset-format", type=str, default='kitti', choices=["kitti", "cityscapes"])
parser.add_argument("--static-frames", default=None,
help="list of imgs to discard for being static, if not set will discard them based on speed \
(careful, on KITTI some frames have incorrect speed)")
parser.add_argument("--with-depth", action='store_true',
help="If available (e.g. with KITTI), will store depth ground truth along with images, for validation")
parser.add_argument("--with-pose", action='store_true',
help="If available (e.g. with KITTI), will store pose ground truth along with images, for validation")
parser.add_argument("--no-train-gt", action='store_true',
help="If selected, will delete ground truth depth to save space")
parser.add_argument("--dump-root", type=str, default='dump', help="Where to dump the DataFlow")
parser.add_argument("--height", type=int, default=128, help="image height")
parser.add_argument("--width", type=int, default=416, help="image width")
parser.add_argument("--depth-size-ratio", type=int, default=1, help="will divide depth size by that ratio")
parser.add_argument("--num-threads", type=int, default=4, help="number of threads to use")
args = parser.parse_args()
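# Example invocation (paths are placeholders, not taken from the project docs):
# python prepare_train_data.py /path/to/kitti_raw --dataset-format kitti \
#     --dump-root /path/to/dump --width 416 --height 128 --num-threads 4 --with-depth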
def dump_example(args, scene):
scene_list = data_loader.collect_scenes(scene)
for scene_data in scene_list:
dump_dir = args.dump_root/scene_data['rel_path']
dump_dir.makedirs_p()
intrinsics = scene_data['intrinsics']
dump_cam_file = dump_dir/'cam.txt'
np.savetxt(dump_cam_file, intrinsics)
poses_file = dump_dir/'poses.txt'
poses = []
for sample in data_loader.get_scene_imgs(scene_data):
img, frame_nb = sample["img"], sample["id"]
dump_img_file = dump_dir/'{}.jpg'.format(frame_nb)
scipy.misc.imsave(dump_img_file, img)
if "pose" in sample.keys():
poses.append(sample["pose"].tolist())
if "depth" in sample.keys():
dump_depth_file = dump_dir/'{}.npy'.format(frame_nb)
np.save(dump_depth_file, sample["depth"])
if len(poses) != 0:
np.savetxt(poses_file, np.array(poses).reshape(-1, 12), fmt='%.6e')
if len(dump_dir.files('*.jpg')) < 3:
dump_dir.rmtree()
def main():
args.dump_root = Path(args.dump_root)
args.dump_root.mkdir_p()
global data_loader
if args.dataset_format == 'kitti':
from kitti_raw_loader import KittiRawLoader
data_loader = KittiRawLoader(args.dataset_dir,
static_frames_file=args.static_frames,
img_height=args.height,
img_width=args.width,
get_depth=args.with_depth,
get_pose=args.with_pose,
depth_size_ratio=args.depth_size_ratio)
if args.dataset_format == 'cityscapes':
from cityscapes_loader import cityscapes_loader
data_loader = cityscapes_loader(args.dataset_dir,
img_height=args.height,
img_width=args.width)
n_scenes = len(data_loader.scenes)
print('Found {} potential scenes'.format(n_scenes))
print('Retrieving frames')
if args.num_threads == 1:
for scene in tqdm(data_loader.scenes):
dump_example(args, scene)
else:
with ProcessPool(max_workers=args.num_threads) as pool:
tasks = pool.map(dump_example, [args]*n_scenes, data_loader.scenes)
try:
for _ in tqdm(tasks.result(), total=n_scenes):
pass
except KeyboardInterrupt as e:
tasks.cancel()
raise e
print('Generating train val lists')
np.random.seed(8964)
# to avoid data snooping, we make the two cameras of the same scene fall in the same set, train or val
subdirs = args.dump_root.dirs()
canonic_prefixes = set([subdir.basename()[:-2] for subdir in subdirs])
with open(args.dump_root / 'train.txt', 'w') as tf:
with open(args.dump_root / 'val.txt', 'w') as vf:
for pr in tqdm(canonic_prefixes):
corresponding_dirs = args.dump_root.dirs('{}*'.format(pr))
if np.random.random() < 0.1:
for s in corresponding_dirs:
vf.write('{}\n'.format(s.name))
else:
for s in corresponding_dirs:
tf.write('{}\n'.format(s.name))
if args.with_depth and args.no_train_gt:
for gt_file in s.files('*.npy'):
gt_file.remove_p()
if __name__ == '__main__':
main()
``` |
{
"source": "JiafengZhou/foreverlms.github.io",
"score": 3
} |
#### File: JiafengZhou/foreverlms.github.io/auto_post.py
```python
import os
import sys
import argparse
import time
def get_parser():
parser = argparse.ArgumentParser("Automatically create Jekyll posts.")
parser.add_argument("title",metavar="TITLE",type=str,help="Post's title.")
return parser;
if __name__ == '__main__':
parser = get_parser()
args = vars(parser.parse_args())
title = args["title"]
#if it's in a root folder
if not os.path.exists("./_posts/"):
print("You are not under a Jekyll blog root folder.")
sys.exit(1)
if title:
time_str = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
file_name = "./_posts/"+time_str[:10]+"-"+title+".md"
#if there has already existed a blog with same name
if os.path.exists(file_name):
print("This post's title has existed in _post/")
sys.exit(1)
with open(file_name,'w') as f:
f.write("---");
f.write("\n")
f.write("layout: post")
f.write("\n");
f.write("title: "+title)
f.write("\n")
f.write("date: "+time_str)
#修正时区问题导致博客不显示
f.write(" +0800")
f.write("\n")
f.write("categories: ")
f.write("\n")
f.write("tags: ")
f.write("\n")
f.write("---")
f.write("\n")
print("Post generation done!")
#replace this with your own editor command
command = "subl "+file_name;
os.system(command)
sys.exit(0)
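# Example usage from the root of a Jekyll site (the title is illustrative):
#   python auto_post.py my-first-post
# creates ./_posts/<YYYY-MM-DD>-my-first-post.md and opens it with the configured editor.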
``` |
{
"source": "Jiafi/dagster",
"score": 2
} |
#### File: core/asset_defs/assets_job.py
```python
from typing import AbstractSet, Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union, cast
from dagster import check
from dagster.core.definitions.config import ConfigMapping
from dagster.core.definitions.decorators.op import op
from dagster.core.definitions.dependency import (
DependencyDefinition,
IDependencyDefinition,
SolidInvocation,
)
from dagster.core.definitions.events import AssetKey
from dagster.core.definitions.graph import GraphDefinition
from dagster.core.definitions.input import InputDefinition
from dagster.core.definitions.job import JobDefinition
from dagster.core.definitions.node import NodeDefinition
from dagster.core.definitions.op import OpDefinition
from dagster.core.definitions.output import Out, OutputDefinition
from dagster.core.definitions.partition import PartitionedConfig
from dagster.core.definitions.resource import ResourceDefinition
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.core.execution.context.input import InputContext, build_input_context
from dagster.core.execution.context.output import build_output_context
from dagster.core.storage.root_input_manager import RootInputManagerDefinition, root_input_manager
from dagster.utils.backcompat import experimental
from dagster.utils.merger import merge_dicts
from .foreign_asset import ForeignAsset
@experimental
def build_assets_job(
name: str,
assets: List[OpDefinition],
source_assets: Optional[Sequence[Union[ForeignAsset, OpDefinition]]] = None,
resource_defs: Optional[Dict[str, ResourceDefinition]] = None,
description: Optional[str] = None,
config: Union[ConfigMapping, Dict[str, Any], PartitionedConfig] = None,
tags: Optional[Dict[str, Any]] = None,
) -> JobDefinition:
"""Builds a job that materializes the given assets.
The dependencies between the ops in the job are determined by the asset dependencies defined
in the metadata on the provided asset nodes.
Args:
name (str): The name of the job.
assets (List[OpDefinition]): A list of assets or multi-assets - usually constructed using
the :py:func:`@asset` or :py:func:`@multi_asset` decorator.
source_assets (Optional[Sequence[Union[ForeignAsset, OpDefinition]]]): A list of assets
that are not materialized by this job, but that assets in this job depend on.
resource_defs (Optional[Dict[str, ResourceDefinition]]): Resource defs to be included in
this job.
description (Optional[str]): A description of the job.
Examples:
.. code-block:: python
@asset
def asset1():
return 5
@asset
def asset2(asset1):
return asset1 + 1
my_assets_job = build_assets_job("my_assets_job", assets=[asset1, asset2])
Returns:
JobDefinition: A job that materializes the given assets.
"""
check.str_param(name, "name")
check.list_param(assets, "assets", of_type=OpDefinition)
check.opt_list_param(source_assets, "source_assets", of_type=(ForeignAsset, OpDefinition))
check.opt_str_param(description, "description")
source_assets_by_key = build_source_assets_by_key(source_assets)
op_defs = build_op_deps(assets, source_assets_by_key.keys())
root_manager = build_root_manager(source_assets_by_key)
return GraphDefinition(
name=name,
node_defs=cast(List[NodeDefinition], assets),
dependencies=op_defs,
description=description,
input_mappings=None,
output_mappings=None,
config=None,
).to_job(
resource_defs=merge_dicts(resource_defs or {}, {"root_manager": root_manager}),
config=config,
tags=tags,
)
def build_source_assets_by_key(
source_assets: Optional[Sequence[Union[ForeignAsset, OpDefinition]]]
) -> Mapping[AssetKey, Union[ForeignAsset, OutputDefinition]]:
source_assets_by_key: Dict[AssetKey, Union[ForeignAsset, OutputDefinition]] = {}
for asset_source in source_assets or []:
if isinstance(asset_source, ForeignAsset):
source_assets_by_key[asset_source.key] = asset_source
elif isinstance(asset_source, OpDefinition):
for output_def in asset_source.output_defs:
if output_def.get_asset_key(None):
source_assets_by_key[output_def.get_asset_key(None)] = output_def
return source_assets_by_key
def build_op_deps(
assets: List[OpDefinition], source_paths: AbstractSet[AssetKey]
) -> Dict[Union[str, SolidInvocation], Dict[str, IDependencyDefinition]]:
op_outputs_by_asset: Dict[AssetKey, Tuple[OpDefinition, str]] = {}
for asset_op in assets:
for output_def in asset_op.output_defs:
logical_asset = get_asset_key(output_def, f"Output of asset '{asset_op.name}'")
if logical_asset in op_outputs_by_asset:
prev_op_name = op_outputs_by_asset[logical_asset][0].name
raise DagsterInvalidDefinitionError(
f"Two ops produce the same logical asset: '{asset_op.name}' and '{prev_op_name}'"
)
op_outputs_by_asset[logical_asset] = (asset_op, output_def.name)
op_defs: Dict[Union[str, SolidInvocation], Dict[str, IDependencyDefinition]] = {}
for asset_op in assets:
op_defs[asset_op.name] = {}
for input_def in asset_op.input_defs:
logical_asset = get_asset_key(
input_def, f"Input '{input_def.name}' of asset '{asset_op.name}'"
)
if logical_asset in op_outputs_by_asset:
op_def, output_name = op_outputs_by_asset[logical_asset]
op_defs[asset_op.name][input_def.name] = DependencyDefinition(
op_def.name, output_name
)
elif logical_asset not in source_paths and not input_def.dagster_type.is_nothing:
raise DagsterInvalidDefinitionError(
f"Input asset '{logical_asset.to_string()}' for asset '{asset_op.name}' is not "
"produced by any of the provided asset ops and is not one of the provided "
"sources"
)
return op_defs
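# Sketch of what this returns (names are illustrative): for assets `asset1` and
# `asset2(asset1)`, the mapping is roughly
#   {"asset1": {}, "asset2": {"asset1": DependencyDefinition("asset1", "result")}}
# assuming the upstream op exposes its value on the default "result" output.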
def build_root_manager(
source_assets_by_key: Mapping[AssetKey, Union[ForeignAsset, OutputDefinition]]
) -> RootInputManagerDefinition:
source_asset_io_manager_keys = {
source_asset.io_manager_key for source_asset in source_assets_by_key.values()
}
@root_input_manager(required_resource_keys=source_asset_io_manager_keys)
def _root_manager(input_context: InputContext) -> Any:
source_asset_key = cast(AssetKey, input_context.asset_key)
source_asset = source_assets_by_key[source_asset_key]
@op(out={source_asset_key.path[-1]: Out(asset_key=source_asset_key)})
def _op():
pass
output_context = build_output_context(
name=source_asset_key.path[-1],
step_key="none",
solid_def=_op,
metadata=merge_dicts(
source_asset.metadata or {}, {"logical_asset_key": source_asset_key}
),
)
input_context_with_upstream = build_input_context(
name=input_context.name,
metadata=input_context.metadata,
config=input_context.config,
dagster_type=input_context.dagster_type,
upstream_output=output_context,
op_def=input_context.op_def,
)
io_manager = getattr(cast(Any, input_context.resources), source_asset.io_manager_key)
return io_manager.load_input(input_context_with_upstream)
return _root_manager
def get_asset_key(
input_or_output: Union[InputDefinition, OutputDefinition], error_prefix: str
) -> AssetKey:
asset_key = input_or_output.get_asset_key(None)
if asset_key is None:
raise DagsterInvalidDefinitionError(f"{error_prefix}' is missing asset_key")
else:
return asset_key
```
#### File: definitions/decorators/pipeline.py
```python
from functools import update_wrapper
from typing import Any, Callable, Dict, List, Optional, Set, Union
from dagster import check
from dagster.core.definitions.policy import RetryPolicy
from dagster.utils.backcompat import experimental_arg_warning
from ..graph import GraphDefinition
from ..hook import HookDefinition
from ..input import InputDefinition
from ..mode import ModeDefinition
from ..output import OutputDefinition
from ..pipeline import PipelineDefinition
from ..preset import PresetDefinition
from ..version_strategy import VersionStrategy
class _Pipeline:
def __init__(
self,
name: Optional[str] = None,
mode_defs: Optional[List[ModeDefinition]] = None,
preset_defs: Optional[List[PresetDefinition]] = None,
description: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
hook_defs: Optional[Set[HookDefinition]] = None,
input_defs: Optional[List[InputDefinition]] = None,
output_defs: Optional[List[OutputDefinition]] = None,
config_schema: Optional[Dict[str, Any]] = None,
config_fn: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None,
solid_retry_policy: Optional[RetryPolicy] = None,
version_strategy: Optional[VersionStrategy] = None,
):
self.name = check.opt_str_param(name, "name")
self.mode_definitions = check.opt_list_param(mode_defs, "mode_defs", ModeDefinition)
self.preset_definitions = check.opt_list_param(preset_defs, "preset_defs", PresetDefinition)
self.description = check.opt_str_param(description, "description")
self.tags = check.opt_dict_param(tags, "tags")
self.hook_defs = check.opt_set_param(hook_defs, "hook_defs", of_type=HookDefinition)
self.input_defs = check.opt_list_param(input_defs, "input_defs", of_type=InputDefinition)
self.did_pass_outputs = output_defs is not None
self.output_defs = check.opt_nullable_list_param(
output_defs, "output_defs", of_type=OutputDefinition
)
self.config_schema = config_schema
self.config_fn = check.opt_callable_param(config_fn, "config_fn")
self.solid_retry_policy = check.opt_inst_param(
solid_retry_policy, "solid_retry_policy", RetryPolicy
)
self.version_strategy = check.opt_inst_param(
version_strategy, "version_strategy", VersionStrategy
)
def __call__(self, fn: Callable[..., Any]) -> PipelineDefinition:
check.callable_param(fn, "fn")
if not self.name:
self.name = fn.__name__
from dagster.core.definitions.decorators.composite_solid import (
do_composition,
get_validated_config_mapping,
)
config_mapping = get_validated_config_mapping(
self.name, self.config_schema, self.config_fn, decorator_name="pipeline"
)
(
input_mappings,
output_mappings,
dependencies,
solid_defs,
config_mapping,
positional_inputs,
) = do_composition(
"@pipeline",
self.name,
fn,
self.input_defs,
self.output_defs,
config_mapping,
ignore_output_from_composition_fn=not self.did_pass_outputs,
)
pipeline_def = PipelineDefinition(
mode_defs=self.mode_definitions,
preset_defs=self.preset_definitions,
graph_def=GraphDefinition(
name=self.name,
description=None, # put desc on the pipeline
dependencies=dependencies,
node_defs=solid_defs,
input_mappings=input_mappings,
output_mappings=output_mappings,
config=config_mapping,
positional_inputs=positional_inputs,
),
tags=self.tags,
description=self.description or fn.__doc__,
hook_defs=self.hook_defs,
solid_retry_policy=self.solid_retry_policy,
version_strategy=self.version_strategy,
)
update_wrapper(pipeline_def, fn)
return pipeline_def
def pipeline(
name: Union[Callable[..., Any], Optional[str]] = None,
description: Optional[str] = None,
mode_defs: Optional[List[ModeDefinition]] = None,
preset_defs: Optional[List[PresetDefinition]] = None,
tags: Optional[Dict[str, Any]] = None,
hook_defs: Optional[Set[HookDefinition]] = None,
input_defs: Optional[List[InputDefinition]] = None,
output_defs: Optional[List[OutputDefinition]] = None,
config_schema: Optional[Dict[str, Any]] = None,
config_fn: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None,
solid_retry_policy: Optional[RetryPolicy] = None,
version_strategy: Optional[VersionStrategy] = None,
) -> Union[PipelineDefinition, _Pipeline]:
"""Create a pipeline with the specified parameters from the decorated composition function.
Using this decorator allows you to build up the dependency graph of the pipeline by writing a
function that invokes solids and passes the output to other solids.
Args:
name (Optional[str]): The name of the pipeline. Must be unique within any
:py:class:`RepositoryDefinition` containing the pipeline.
description (Optional[str]): A human-readable description of the pipeline.
mode_defs (Optional[List[ModeDefinition]]): The set of modes in which this pipeline can
operate. Modes are used to attach resources, custom loggers, custom system storage
options, and custom executors to a pipeline. Modes can be used, e.g., to vary
available resource and logging implementations between local test and production runs.
preset_defs (Optional[List[PresetDefinition]]): A set of preset collections of configuration
options that may be used to execute a pipeline. A preset consists of an environment
dict, an optional subset of solids to execute, and a mode selection. Presets can be used
to ship common combinations of options to pipeline end users in Python code, and can
be selected by tools like Dagit.
tags (Optional[Dict[str, Any]]): Arbitrary metadata for any execution run of the pipeline.
Values that are not strings will be json encoded and must meet the criteria that
`json.loads(json.dumps(value)) == value`. These tag values may be overwritten by tag
values provided at invocation time.
hook_defs (Optional[Set[HookDefinition]]): A set of hook definitions applied to the
pipeline. When a hook is applied to a pipeline, it will be attached to all solid
instances within the pipeline.
solid_retry_policy (Optional[RetryPolicy]): The default retry policy for all solids in
this pipeline. Only used if retry policy is not defined on the solid definition or
solid invocation.
version_strategy (Optional[VersionStrategy]): The version strategy to use with this
pipeline. Providing a VersionStrategy will enable memoization on the pipeline.
Example:
.. code-block:: python
@solid(output_defs=[OutputDefinition(int, "two"), OutputDefinition(int, "four")])
def emit_two_four(_) -> int:
yield Output(2, "two")
yield Output(4, "four")
@lambda_solid
def add_one(num: int) -> int:
return num + 1
@lambda_solid
def mult_two(num: int) -> int:
return num * 2
@pipeline
def math_pipeline():
two, four = emit_two_four()
add_one(two)
mult_two(four)
"""
if input_defs is not None:
experimental_arg_warning("input_defs", "pipeline")
if output_defs is not None:
experimental_arg_warning("output_defs", "pipeline")
if config_schema is not None:
experimental_arg_warning("config_schema", "pipeline")
if config_fn is not None:
experimental_arg_warning("config_fn", "pipeline")
if callable(name):
check.invariant(description is None)
return _Pipeline()(name)
return _Pipeline(
name=name,
mode_defs=mode_defs,
preset_defs=preset_defs,
description=description,
tags=tags,
hook_defs=hook_defs,
input_defs=input_defs,
output_defs=output_defs,
config_schema=config_schema,
config_fn=config_fn,
solid_retry_policy=solid_retry_policy,
version_strategy=version_strategy,
)
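# Minimal usage sketch (names are illustrative); the decorator may be used bare
# or with keyword arguments:
#
# @pipeline
# def do_things():
#     ...
#
# @pipeline(name="do_things_nightly", tags={"owner": "data-team"})
# def do_things_with_options():
#     ...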
```
#### File: dagster-docker/dagster_docker_tests/test_launcher_and_executor.py
```python
import os
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.core.test_utils import poll_for_finished_run
from dagster.utils.merger import merge_dicts
from dagster.utils.yaml_utils import merge_yamls
from dagster_test.test_project import (
ReOriginatedExternalPipelineForTest,
find_local_test_image,
get_buildkite_registry_config,
get_test_project_docker_image,
get_test_project_environments_path,
get_test_project_recon_pipeline,
get_test_project_workspace_and_external_pipeline,
)
from . import IS_BUILDKITE, docker_postgres_instance
def test_image_on_pipeline():
docker_image = get_test_project_docker_image()
launcher_config = {
"env_vars": [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
],
"networks": ["container:test-postgres-db-docker"],
"container_kwargs": {
"auto_remove": True,
"volumes": ["/var/run/docker.sock:/var/run/docker.sock"],
},
}
if IS_BUILDKITE:
launcher_config["registry"] = get_buildkite_registry_config()
else:
find_local_test_image(docker_image)
executor_config = {
"execution": {"docker": {"config": {}}},
}
run_config = merge_dicts(
merge_yamls(
[
os.path.join(get_test_project_environments_path(), "env.yaml"),
os.path.join(get_test_project_environments_path(), "env_s3.yaml"),
]
),
executor_config,
)
with docker_postgres_instance(
overrides={
"run_launcher": {
"class": "DockerRunLauncher",
"module": "dagster_docker",
"config": launcher_config,
}
}
) as instance:
recon_pipeline = get_test_project_recon_pipeline("demo_pipeline_docker", docker_image)
with get_test_project_workspace_and_external_pipeline(
instance, "demo_pipeline_docker", container_image=docker_image
) as (
workspace,
orig_pipeline,
):
external_pipeline = ReOriginatedExternalPipelineForTest(
orig_pipeline, container_image=docker_image
)
run = instance.create_run_for_pipeline(
pipeline_def=recon_pipeline.get_definition(),
run_config=run_config,
external_pipeline_origin=external_pipeline.get_external_origin(),
pipeline_code_origin=external_pipeline.get_python_origin(),
)
instance.launch_run(run.run_id, workspace)
poll_for_finished_run(instance, run.run_id, timeout=60)
for log in instance.all_logs(run.run_id):
print(log) # pylint: disable=print-call
assert instance.get_run_by_id(run.run_id).status == PipelineRunStatus.SUCCESS
``` |
{
"source": "jiafulow/emtf-nnet",
"score": 3
} |
#### File: emtf_nnet/architecture/endless_common.py
```python
import types
import tensorflow as tf
class _BaseLayer(tf.keras.layers.Layer):
"""Layer used as the base."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def get_config(self):
config = super().get_config()
if hasattr(self, 'zone'):
config.update({'zone': self.zone})
if hasattr(self, 'timezone'):
config.update({'timezone': self.timezone})
return config
# Export as base_layer.Layer
base_layer = types.ModuleType('base_layer')
base_layer.Layer = _BaseLayer
__all__ = [
'base_layer',
]
```
#### File: emtf_nnet/keras/callbacks.py
```python
from keras import backend
from keras.optimizer_v2 import learning_rate_schedule
from keras.callbacks import Callback
class LearningRateLogger(Callback):
"""Learning rate logger."""
def __init__(self):
super().__init__()
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
lr_schedule = getattr(self.model.optimizer, 'lr', None)
if isinstance(lr_schedule, learning_rate_schedule.LearningRateSchedule):
logs['lr'] = backend.get_value(lr_schedule(self.model.optimizer.iterations))
else:
logs['lr'] = backend.get_value(self.model.optimizer.lr)
if hasattr(self.model.optimizer, 'gradient_maxnorm'):
gradient_maxnorm = backend.get_value(self.model.optimizer.gradient_maxnorm)
logs['gradient_maxnorm'] = gradient_maxnorm
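# Usage sketch (model and data names are illustrative):
# history = model.fit(x_train, y_train, epochs=10, callbacks=[LearningRateLogger()])
# Each epoch's logs then carry 'lr' (and 'gradient_maxnorm' if the optimizer
# defines it), so the values appear in `history.history` and in other callbacks.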
```
#### File: keras/layers/inverse_integer_lookup.py
```python
import tensorflow.compat.v2 as tf
import numpy as np
from keras.engine.base_layer import Layer
def listify_tensors(x):
"""Convert any tensors or numpy arrays to lists for config serialization."""
if tf.is_tensor(x):
x = x.numpy()
if isinstance(x, np.ndarray):
x = x.tolist()
return x
class InverseIntegerLookup(Layer):
"""Maps integer indices to integer vocabulary items."""
def __init__(self,
vocabulary,
max_tokens=None,
num_oov_indices=0,
mask_token=None,
oov_token=-1,
invert=True,
output_mode="int",
sparse=False,
pad_to_max_tokens=False,
**kwargs):
allowed_dtypes = [tf.int32]
if "dtype" in kwargs and kwargs["dtype"] not in allowed_dtypes:
raise ValueError("The value of the dtype argument for IntegerLookup may "
"only be one of %s." % (allowed_dtypes,))
if "dtype" not in kwargs:
kwargs["dtype"] = tf.int32
# If max_tokens is set, the token must be greater than 1 - otherwise we
# are creating a 0-element vocab, which doesn't make sense.
if max_tokens is not None and max_tokens <= 1:
raise ValueError("If set, max_tokens must be greater than 1. "
"You passed %s" % (max_tokens,))
if num_oov_indices < 0:
raise ValueError(
"num_oov_indices must be greater than or equal to 0. You passed %s" %
(num_oov_indices,))
if vocabulary is None:
raise ValueError("Vocabulary must be provided.")
# Make sure mask and oov are of the dtype we want.
mask_token = None if mask_token is None else np.int32(mask_token)
oov_token = None if oov_token is None else np.int32(oov_token)
super().__init__(**kwargs)
self.input_vocabulary = vocabulary
self._has_input_vocabulary = True
self.invert = invert # unused
self.max_tokens = max_tokens # unused
self.num_oov_indices = num_oov_indices # unused
self.mask_token = mask_token # unused
self.oov_token = oov_token
self.output_mode = output_mode # unused
self.sparse = sparse # unused
self.pad_to_max_tokens = pad_to_max_tokens # unused
self._key_dtype = tf.as_dtype(self.dtype)
self._value_dtype = tf.as_dtype(self.dtype)
self._default_value = self.oov_token
def build(self, input_shape):
tokens = np.array(self.input_vocabulary)
indices = np.arange(len(tokens))
keys, values = (indices, tokens)
initializer = tf.lookup.KeyValueTensorInitializer(keys, values,
self._key_dtype,
self._value_dtype)
self._table = tf.lookup.StaticHashTable(initializer, self._default_value)
self.built = True
def call(self, inputs):
inputs = tf.convert_to_tensor(inputs)
if inputs.dtype != self._key_dtype:
inputs = tf.cast(inputs, self._key_dtype)
outputs = self._table.lookup(inputs)
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = self._value_dtype
return tf.TensorSpec(shape=output_shape, dtype=output_dtype)
def get_config(self):
config = super().get_config()
config.update({
"invert": self.invert,
"max_tokens": self.max_tokens,
"num_oov_indices": self.num_oov_indices,
"oov_token": self.oov_token,
"mask_token": self.mask_token,
"output_mode": self.output_mode,
"sparse": self.sparse,
"pad_to_max_tokens": self.pad_to_max_tokens,
"vocabulary": listify_tensors(self.input_vocabulary),
})
return config
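# Usage sketch (vocabulary values are illustrative):
# layer = InverseIntegerLookup(vocabulary=[11, 22, 33])
# layer(tf.constant([0, 2, 1, 7]))  # -> [11, 33, 22, -1]; unknown indices map to oov_token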
```
#### File: keras/layers/mutated_batch_normalization.py
```python
import tensorflow.compat.v2 as tf
from keras.utils import control_flow_util
from keras.layers.normalization.batch_normalization import BatchNormalization
class MutatedBatchNormalization(BatchNormalization):
"""Batch normalization layer with simplified call()."""
def __init__(self,
axis=-1,
**kwargs):
super().__init__(axis=axis, **kwargs)
assert self._USE_V2_BEHAVIOR
assert not self.renorm
assert self.virtual_batch_size is None
assert self.adjustment is None
assert self.fused is None
def build(self, input_shape):
super().build(input_shape)
assert self.built
assert not self.fused
def call(self, inputs, training=None):
training = self._get_training_value(training)
# Compute the axes along which to reduce the mean / variance
input_shape = inputs.shape
ndims = len(input_shape)
reduction_axes = [i for i in range(ndims) if i not in self.axis]
# Determine a boolean value for `training`: could be True, False, or None.
training_value = control_flow_util.constant_value(training)
if training_value is not None and bool(training_value) is False:
mean, variance = self.moving_mean, self.moving_variance
else:
keep_dims = False
mean, variance = tf.nn.moments(
tf.cast(inputs, self._param_dtype),
reduction_axes,
keepdims=keep_dims)
mean = control_flow_util.smart_cond(
training,
lambda: mean,
lambda: tf.convert_to_tensor(self.moving_mean))
variance = control_flow_util.smart_cond(
training,
lambda: variance,
lambda: tf.convert_to_tensor(self.moving_variance))
def _do_update(var, value):
input_batch_size = None
return self._assign_moving_average(var, value, self.momentum,
input_batch_size)
def _fake_update(var):
return tf.identity(var)
def mean_update():
return control_flow_util.smart_cond(
training,
lambda: _do_update(self.moving_mean, mean),
lambda: _fake_update(self.moving_mean))
def variance_update():
return control_flow_util.smart_cond(
training,
lambda: _do_update(self.moving_variance, variance),
lambda: _fake_update(self.moving_variance))
self.add_update(mean_update)
self.add_update(variance_update)
# Get gamma and beta
scale, offset = self.gamma, self.beta
mean = tf.cast(mean, inputs.dtype)
variance = tf.cast(variance, inputs.dtype)
if offset is not None:
offset = tf.cast(offset, inputs.dtype)
if scale is not None:
scale = tf.cast(scale, inputs.dtype)
outputs = tf.nn.batch_normalization(inputs, mean, variance, offset, scale,
self.epsilon)
return outputs
```
#### File: keras/quantization/default_quantize_scheme.py
```python
import tensorflow as tf
from tensorflow_model_optimization.python.core.quantization.keras import quantize_aware_activation
from tensorflow_model_optimization.python.core.quantization.keras import quantize_layout_transform
from tensorflow_model_optimization.python.core.quantization.keras import quantize_registry
from tensorflow_model_optimization.python.core.quantization.keras import quantize_scheme
from tensorflow_model_optimization.python.core.quantization.keras import quantize_wrapper
from tensorflow_model_optimization.python.core.quantization.keras import quantizers
from tensorflow_model_optimization.python.core.quantization.keras.graph_transformations import model_transformer
from emtf_nnet.keras.layers import (
ActivityRegularization, FeatureNormalization, LinearActivation,
MutatedBatchNormalization, MutatedDense, MutatedDenseFold, ScaleActivation,
TanhActivation)
from .default_quantize_configs import (
DefaultDenseQuantizeConfig, DefaultDenseFoldQuantizeConfig,
DefaultInputQuantizeConfig, DefaultOutputQuantizeConfig, NoOpQuantizeConfig)
from .default_transforms import InputLayerQuantize, MutatedDenseFolding
from .quantizers import FixedRangeQuantizer
class DefaultQuantizeLayoutTransform(quantize_layout_transform.QuantizeLayoutTransform):
"""Default quantization layout transformations."""
_TRANSFORMS = [
#InputLayerQuantize(),
MutatedDenseFolding(),
]
def apply(self, model, layer_quantize_map):
"""Implement default 8-bit transforms.
Currently this means the following.
1. Pull activations into layers, and apply fuse activations. (TODO)
2. Modify range in incoming layers for Concat. (TODO)
3. Fuse Conv2D/DepthwiseConv2D + BN into single layer.
Args:
model: Keras model to be quantized.
layer_quantize_map: Map with keys as layer names, and values as dicts
containing custom `QuantizeConfig`s which may have been passed with
layers.
Returns:
(Transformed Keras model to better match TensorFlow Lite backend, updated
layer quantize map.)
"""
return model_transformer.ModelTransformer(
model,
self._TRANSFORMS,
candidate_layers=set(layer_quantize_map.keys()),
layer_metadata=layer_quantize_map).transform()
class DefaultQuantizeRegistry(quantize_registry.QuantizeRegistry):
"""Default quantization registry."""
def __init__(self, disable_per_axis=False):
self._layer_quantize_map = {}
#self._layer_quantize_map[tf.keras.layers.Activation] = DefaultOutputQuantizeConfig()
#self._layer_quantize_map[tf.keras.layers.BatchNormalization] = DefaultOutputQuantizeConfig()
#self._layer_quantize_map[tf.keras.layers.Dense] = DefaultDenseQuantizeConfig()
#self._layer_quantize_map[tf.keras.layers.Rescaling] = NoOpQuantizeConfig()
self._layer_quantize_map[ActivityRegularization] = NoOpQuantizeConfig()
self._layer_quantize_map[LinearActivation] = DefaultOutputQuantizeConfig()
#self._layer_quantize_map[MutatedBatchNormalization] = DefaultOutputQuantizeConfig()
self._layer_quantize_map[MutatedDense] = DefaultDenseQuantizeConfig()
self._layer_quantize_map[MutatedDenseFold] = DefaultDenseFoldQuantizeConfig()
self._layer_quantize_map[FeatureNormalization] = DefaultOutputQuantizeConfig()
self._layer_quantize_map[ScaleActivation] = NoOpQuantizeConfig()
self._layer_quantize_map[TanhActivation] = DefaultOutputQuantizeConfig()
self._disable_per_axis = disable_per_axis # unused
def _is_supported_layer(self, layer_class):
return layer_class in self._layer_quantize_map
def _get_quantize_config(self, layer_class):
return self._layer_quantize_map[layer_class]
def supports(self, layer):
"""Returns whether the registry supports this layer type.
# TODO(pulkitb): Consider pushing this function up to the registry.
Args:
layer: The layer to check for support.
Returns:
True/False whether the layer type is supported.
"""
if self._is_supported_layer(layer.__class__):
return True
return False
def get_quantize_config(self, layer):
"""Returns the quantization config for the given layer.
Args:
layer: input layer to return quantize config for.
Returns:
Returns the QuantizeConfig for the given layer.
"""
if not self.supports(layer):
raise ValueError(
'`get_quantize_config()` called on an unsupported layer {}. Check '
'if layer is supported by calling `supports()`. Alternatively, you '
'can use `QuantizeConfig` to specify a behavior for your layer.'
.format(layer.__class__))
return self._get_quantize_config(layer.__class__)
class DefaultQuantizeScheme(quantize_scheme.QuantizeScheme):
"""Quantization scheme which specifies how quantization should be applied."""
_QUANTIZATION_OBJECTS = {
'ActivityRegularization': ActivityRegularization,
'FeatureNormalization': FeatureNormalization,
'LinearActivation': LinearActivation,
'MutatedBatchNormalization': MutatedBatchNormalization,
'MutatedDense': MutatedDense,
'MutatedDenseFold': MutatedDenseFold,
'ScaleActivation': ScaleActivation,
'TanhActivation': TanhActivation,
'FixedRangeQuantizer': FixedRangeQuantizer,
'DefaultDenseQuantizeConfig': DefaultDenseQuantizeConfig,
'DefaultInputQuantizeConfig': DefaultInputQuantizeConfig,
'DefaultOutputQuantizeConfig': DefaultOutputQuantizeConfig,
'NoOpQuantizeConfig': NoOpQuantizeConfig,
'DefaultDenseFoldQuantizeConfig': DefaultDenseFoldQuantizeConfig,
# from tensorflow_model_optimization
'QuantizeAwareActivation': quantize_aware_activation.QuantizeAwareActivation,
'QuantizeWrapper': quantize_wrapper.QuantizeWrapper,
'QuantizeWrapperV2': quantize_wrapper.QuantizeWrapperV2,
'AllValuesQuantizer': quantizers.AllValuesQuantizer,
'LastValueQuantizer': quantizers.LastValueQuantizer,
'MovingAverageQuantizer': quantizers.MovingAverageQuantizer,
}
def __init__(self, disable_per_axis=False):
self._disable_per_axis = disable_per_axis
def get_layout_transformer(self):
return DefaultQuantizeLayoutTransform()
def get_quantize_registry(self):
return DefaultQuantizeRegistry(
disable_per_axis=self._disable_per_axis)
```
#### File: keras/utils/data_utils.py
```python
import itertools
import numpy as np
from keras.utils import data_utils
class DataGenerator(data_utils.Sequence):
"""Data generator that implements the base `keras.utils.Sequence`.
It implements the `__getitem__` and the `__len__` abstract methods.
"""
def __init__(self, x, batch_size=None, steps=None, shuffle=False):
super().__init__()
self.x = x
self.num_samples = int(x.shape[0])
if not batch_size:
batch_size = int(np.ceil(self.num_samples / float(steps))) if steps else 32
self.batch_size = batch_size
self.num_batches = int(np.ceil(self.num_samples / float(batch_size)))
self.shuffle = shuffle
self.index_array = np.arange(self.num_samples)
if self.shuffle:
np.random.shuffle(self.index_array)
def __len__(self):
"""Number of batch in the Sequence."""
return self.num_batches
def __getitem__(self, index):
"""Gets batch at position `index`."""
start, stop = (index * self.batch_size, min(self.num_samples,
(index + 1) * self.batch_size))
return self.x[self.index_array[start:stop]]
def on_epoch_end(self):
"""Method called at the end of every epoch."""
if self.shuffle:
np.random.shuffle(self.index_array)
class TransformedDataGenerator(DataGenerator):
"""Data generator that applies data transformation.
It applies a transformation to each batch of samples while being iterated.
"""
def __init__(self, x, transform_fn=None, **kwargs):
super().__init__(x, **kwargs)
self.transform_fn = transform_fn
def __getitem__(self, index):
"""Gets a batch of transformed samples."""
start, stop = (index * self.batch_size, min(self.num_samples,
(index + 1) * self.batch_size))
if self.transform_fn is None:
return self.x[self.index_array[start:stop]]
else:
return self.transform_fn(self.x[self.index_array[start:stop]])
def train_test_split(*arrays, test_size=0.25, batch_size=32, shuffle=True):
"""Split arrays into train and test subsets."""
if len(arrays) < 2:
raise ValueError('Expected at least 2 array-like objects.')
num_samples = arrays[0].shape[0]
num_train_samples = int(np.ceil(num_samples * (1. - test_size)))
num_train_samples = int(np.ceil(num_train_samples / float(batch_size)) * batch_size)
index_array = np.arange(num_samples)
if shuffle:
np.random.shuffle(index_array)
index_array_train = index_array[:num_train_samples]
index_array_test = index_array[num_train_samples:]
train_test_pairs = (
(arr[index_array_train], arr[index_array_test])
for arr in arrays)
return tuple(itertools.chain.from_iterable(train_test_pairs))
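# Usage sketch (shapes are illustrative):
# x = np.random.rand(1000, 40).astype(np.float32)
# y = np.random.rand(1000, 1).astype(np.float32)
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, batch_size=32)
# gen = DataGenerator(x_train, batch_size=32, shuffle=True)
# first_batch = gen[0]  # shape (32, 40)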
```
#### File: emtf_nnet/sparse/sparse_tensor_value.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
class SparseTensorValue(object):
"""Represents the value of a `SparseTensor`.
See [Sparse tensor](https://www.tensorflow.org/guide/sparse_tensor) for descriptions.
Example:
>>> SparseTensorValue(indices=np.array([[0, 0], [1, 2], [2, 3]]),
... values=np.array([1, 2, 3]),
... dense_shape=np.array([3, 4]))
SparseTensorValue(indices=array([[0, 0],
[1, 2],
[2, 3]]), values=array([1, 2, 3]), dense_shape=array([3, 4]))
"""
def __init__(self, indices, values, dense_shape):
"""Creates a `SparseTensor`.
Args:
indices: A 2-D int64 tensor of shape `[N, ndims]`.
values: A 1-D tensor of any type and shape `[N]`.
dense_shape: A 1-D int64 tensor of shape `[ndims]`.
"""
if not (isinstance(indices, (np.ndarray, np.generic)) and
indices.dtype in (np.int64, np.int32) and indices.ndim == 2):
raise TypeError("indices must be a 2D int32 or int64 numpy array")
if not (isinstance(values, (np.ndarray, np.generic)) and values.ndim >= 1):
raise TypeError("values must be a n-D numpy array")
if not (isinstance(dense_shape, (np.ndarray, np.generic)) and
dense_shape.dtype in (np.int64, np.int32) and dense_shape.ndim == 1):
raise TypeError("dense_shape must be a 1D int32 or int64 numpy array")
if not (indices.shape[0] == values.shape[0]):
raise TypeError("indices and values must have the same first dim")
if not (indices.shape[1] + (values.ndim - 1) == dense_shape.shape[0]):
raise TypeError("indices, values, and dense_shape must have consistent shapes")
self._indices = indices
self._values = values
self._dense_shape = dense_shape
indices = property(
lambda self: self._indices,
doc="""The indices of non-zero values in the represented dense tensor.""")
values = property(
lambda self: self._values,
doc="""The non-zero values in the represented dense tensor.""")
dtype = property(
lambda self: self._values.dtype,
doc="""The numpy dtype of values in this tensor.""")
dense_shape = property(
lambda self: tuple(self._dense_shape),
doc="""A tuple representing the shape of the dense tensor.""")
shape = property(
lambda self: tuple(self._dense_shape),
doc="""A tuple representing the shape of the dense tensor.""")
def __str__(self):
return "SparseTensorValue(indices=%s, values=%s, dense_shape=%s)" % (
self._indices, self._values, self._dense_shape)
def __repr__(self):
return "SparseTensorValue(indices=%r, values=%r, dense_shape=%r)" % (
self._indices, self._values, self._dense_shape)
def with_values(self, new_values):
"""Returns a copy of `self` with `values` replaced by `new_values`."""
return SparseTensorValue(self._indices, new_values, self._dense_shape)
SparseTensorNamedTuple = collections.namedtuple(
'SparseTensorNamedTuple', ['indices', 'values', 'dense_shape'])
def dense_to_sparse(dense):
dense = np.asarray(dense)
indices = np.argwhere(dense)
values = dense[dense.nonzero()]
dense_shape = np.asarray(dense.shape)
return SparseTensorValue(indices=indices, values=values, dense_shape=dense_shape)
def sparse_to_dense(sparse):
dense = np.zeros(sparse.dense_shape, dtype=sparse.dtype)
ndims = sparse.indices.shape[1]
tup = tuple(sparse.indices[:, i] for i in range(ndims))
dense[tup] = sparse.values
return dense
def sparse_to_dense_n(sparse, n):
dense_shape = (n,) + sparse.dense_shape[1:]
dense = np.zeros(dense_shape, dtype=sparse.dtype)
for i in range(len(sparse.indices)):
if sparse.indices[i, 0] >= n:
break
tup = tuple(sparse.indices[i])
dense[tup] = sparse.values[i]
return dense
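# Round-trip sketch:
# dense = np.array([[1, 0, 0], [0, 0, 2]])
# sp = dense_to_sparse(dense)                  # indices [[0, 0], [1, 2]], values [1, 2]
# np.array_equal(sparse_to_dense(sp), dense)   # -> True
# sparse_to_dense_n(sp, 1)                     # keeps only entries whose first index is < 1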
```
#### File: emtf-nnet/tests/nest_test.py
```python
import numpy as np
from emtf_nnet.nest import flatten, pack_sequence_as
def test_me():
structure = ((3, 4), 5, (6, 7, (9, 10), 8))
flat = ["a", "b", "c", "d", "e", "f", "g", "h"]
assert flatten(structure) == [3, 4, 5, 6, 7, 9, 10, 8]
assert pack_sequence_as(structure, flat) == (("a", "b"), "c",
("d", "e", ("f", "g"), "h"))
``` |
{
"source": "jiafulow/emtf-tree",
"score": 2
} |
#### File: emtf-tree/emtf_tree/io.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from .defaults import ROOT, log
def _expand_path(s):
return os.path.expanduser(os.path.expandvars(s))
def root_open(filename, mode=''):
"""
Open a ROOT file via ROOT's static ROOT.TFile.Open [1] function and return
an asrootpy'd File.
Parameters
----------
filename : string
The absolute or relative path to the ROOT file.
mode : string, optional (default='')
Mode indicating how the file is to be opened. This can be either one
of the options supported by ROOT.TFile.Open [2], or one of `a`, `a+`,
`r`, `r+`, `w` or `w+`, with meanings as for the built-in `open()`
function [3].
Returns
-------
root_file : File
an instance of rootpy's File subclass of ROOT's TFile.
References
----------
.. [1] http://root.cern.ch/root/html/TFile.html#TFile:Open
.. [2] http://root.cern.ch/root/html/TFile.html#TFile:TFile@2
.. [3] https://docs.python.org/2/library/functions.html#open
"""
mode_map = {'a': 'UPDATE',
'a+': 'UPDATE',
'r': 'READ',
'r+': 'UPDATE',
'w': 'RECREATE',
'w+': 'RECREATE'}
if mode in mode_map:
mode = mode_map[mode]
filename = _expand_path(filename)
log.debug("Opening file '{0}'".format(filename))
root_file = ROOT.TFile.Open(filename, mode)
if not root_file:
raise IOError("could not open file: '{0}'".format(filename))
# give Python ownership of the TFile so we can delete it
ROOT.SetOwnership(root_file, True)
return root_file
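# Usage sketch (the file name is illustrative):
# f = root_open('ntuple.root', 'r')   # read-only; 'a'/'w' map to UPDATE/RECREATE
# tree = f.Get('events')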
class DoesNotExist(Exception):
"""
This exception is raised if an attempt is made to access an object
that does not exist in a directory.
"""
pass
```
#### File: emtf-tree/emtf_tree/tree.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
from six.moves import range
from collections import OrderedDict
from .defaults import log
from .treebuffer import TreeBuffer
class BaseTree(object):
"""
A base class for Tree.
"""
def __init__(self, tree,
read_branches_on_demand=False,
always_read=None):
if not hasattr(tree, '__iter__') or not hasattr(tree, '__contains__'):
raise RuntimeError("unable to initialize Tree")
self._tree = tree
# only set _buffer if it does not exist
if not hasattr(self, '_buffer'):
self._buffer = TreeBuffer()
self._read_branches_on_demand = read_branches_on_demand
if always_read is None:
self._always_read = []
else:
self._always_read = always_read
self._branch_cache = {}  # used by __iter__ when reading branches on demand
self._inited = True # affects __setattr__ and __getattr__ behaviors
@classmethod
def branch_type(cls, branch):
"""
Return the string representation for the type of a branch
"""
typename = branch.GetClassName()
if not typename:
leaf = branch.GetListOfLeaves()[0]
typename = leaf.GetTypeName()
# check if leaf has multiple elements
leaf_count = leaf.GetLeafCount()
if leaf_count:
length = leaf_count.GetMaximum()
else:
length = leaf.GetLen()
if length > 1:
typename = '{0}[{1:d}]'.format(typename, length)
return typename
@classmethod
def branch_is_supported(cls, branch):
"""
Currently the branch must only have one leaf but the leaf may have one
or multiple elements
"""
return branch.GetNleaves() == 1
def create_buffer(self, ignore_unsupported=False):
"""
Create this tree's TreeBuffer
"""
bufferdict = OrderedDict()
for branch in self.iterbranches():
# only include activated branches
if not self.GetBranchStatus(branch.GetName()):
continue
if BaseTree.branch_is_supported(branch):
bufferdict[branch.GetName()] = BaseTree.branch_type(branch)
elif not ignore_unsupported:
raise TypeError(
"branch `{0}` is unsupported".format(branch.GetName()))
else:
log.warning(
"ignore unsupported branch `{0}`".format(branch.GetName()))
self.set_buffer(TreeBuffer(
bufferdict,
ignore_unsupported=ignore_unsupported))
def update_buffer(self, treebuffer, transfer_objects=False):
"""
Merge items from a TreeBuffer into this Tree's TreeBuffer
Parameters
----------
treebuffer : rootpy.tree.buffer.TreeBuffer
The TreeBuffer to merge into this Tree's buffer
transfer_objects : bool, optional (default=False)
If True then all objects and collections on the input buffer will
be transferred to this Tree's buffer.
"""
self.set_buffer(treebuffer, transfer_objects=transfer_objects)
def set_buffer(self, treebuffer,
branches=None,
ignore_branches=None,
create_branches=False,
visible=True,
ignore_missing=False,
ignore_duplicates=False,
transfer_objects=False):
"""
Set the Tree buffer
Parameters
----------
treebuffer : rootpy.tree.buffer.TreeBuffer
a TreeBuffer
branches : list, optional (default=None)
only include these branches from the TreeBuffer
ignore_branches : list, optional (default=None)
ignore these branches from the TreeBuffer
create_branches : bool, optional (default=False)
If True then the branches in the TreeBuffer should be created.
Use this option if initializing the Tree. A ValueError is raised
if an attempt is made to create a branch with the same name as one
that already exists in the Tree. If False the addresses of existing
branches will be set to point at the addresses in this buffer.
visible : bool, optional (default=True)
If True then the branches will be added to the buffer and will be
accessible as attributes of the Tree.
ignore_missing : bool, optional (default=False)
If True then any branches in this buffer that do not exist in the
Tree will be ignored, otherwise a ValueError will be raised. This
option is only valid when ``create_branches`` is False.
ignore_duplicates : bool, optional (default=False)
If False then raise a ValueError if the tree already has a branch
with the same name as an entry in the buffer. If True then skip
branches that already exist. This option is only valid when
``create_branches`` is True.
transfer_objects : bool, optional (default=False)
If True, all tree objects and collections will be transferred from
the buffer into this Tree's buffer.
"""
# determine branches to keep while preserving branch order
if branches is None:
branches = treebuffer.keys()
if ignore_branches is not None:
branches = [b for b in branches if b not in ignore_branches]
for name in branches:
value = treebuffer[name]
if self.has_branch(name):
self.SetBranchAddress(name, value)
elif not ignore_missing:
raise ValueError(
"Attempting to set address for "
"branch `{0}` which does not exist".format(name))
else:
log.warning(
"Skipping entry in buffer for which no "
"corresponding branch in the "
"tree exists: `{0}`".format(name))
self._buffer.update(treebuffer)
if transfer_objects:
self._buffer.set_objects(treebuffer)
def activate(self, branches, exclusive=False):
"""
Activate branches
Parameters
----------
branches : str or list
branch or list of branches to activate
exclusive : bool, optional (default=False)
if True deactivate the remaining branches
"""
if exclusive:
self.SetBranchStatus('*', 0)
if isinstance(branches, str):
branches = [branches]
for branch in branches:
if '*' in branch:
matched_branches = self._glob(branch)
for b in matched_branches:
self.SetBranchStatus(b, 1)
elif self.has_branch(branch):
self.SetBranchStatus(branch, 1)
def deactivate(self, branches, exclusive=False):
"""
Deactivate branches
Parameters
----------
branches : str or list
branch or list of branches to deactivate
exclusive : bool, optional (default=False)
if True activate the remaining branches
"""
if exclusive:
self.SetBranchStatus('*', 1)
if isinstance(branches, str):
branches = [branches]
for branch in branches:
if '*' in branch:
matched_branches = self._glob(branch)
for b in matched_branches:
self.SetBranchStatus(b, 0)
elif self.has_branch(branch):
self.SetBranchStatus(branch, 0)
@property
def branches(self):
"""
List of the branches
"""
return [branch for branch in self.GetListOfBranches()]
def iterbranches(self):
"""
Iterator over the branches
"""
for branch in self.GetListOfBranches():
yield branch
@property
def branchnames(self):
"""
List of branch names
"""
return [branch.GetName() for branch in self.GetListOfBranches()]
def iterbranchnames(self):
"""
Iterator over the branch names
"""
for branch in self.iterbranches():
yield branch.GetName()
def _glob(self, patterns, exclude=None):
"""
Return a list of branch names that match ``pattern``.
Exclude all matched branch names which also match a pattern in
``exclude``. ``exclude`` may be a string or list of strings.
Parameters
----------
patterns: str or list
branches are matched against this pattern or list of patterns where
globbing is performed with '*'.
exclude : str or list, optional (default=None)
branches matching this pattern or list of patterns are excluded
even if they match a pattern in ``patterns``.
Returns
-------
matches : list
List of matching branch names
"""
if isinstance(patterns, str):
patterns = [patterns]
if isinstance(exclude, str):
exclude = [exclude]
matches = []
for pattern in patterns:
matches += fnmatch.filter(self.iterbranchnames(), pattern)
if exclude is not None:
for exclude_pattern in exclude:
matches = [match for match in matches
if not fnmatch.fnmatch(match, exclude_pattern)]
return matches
def __iter__(self):
"""
Iterator over the entries in the Tree.
"""
if not self._buffer:
log.warning("buffer does not exist or is empty")
self.create_buffer()
if self._read_branches_on_demand:
self._buffer.set_tree(self)
# drop all branches from the cache
self.DropBranchFromCache('*')
for attr in self._always_read:
try:
branch = self._branch_cache[attr]
except KeyError: # one-time hit
branch = self.GetBranch(attr)
if not branch:
raise AttributeError(
"branch `{0}` specified in "
"`always_read` does not exist".format(attr))
self._branch_cache[attr] = branch
# add branches that we should always read to cache
self.AddBranchToCache(branch)
for i in range(self.GetEntries()):
# Only increment current entry.
# getattr on a branch will then GetEntry on only that branch
# see ``TreeBuffer.get_with_read_if_cached``.
self.LoadTree(i)
for attr in self._always_read:
# Always read branched in ``self._always_read`` since
# these branches may never be getattr'd but the TreeBuffer
# should always be updated to reflect their current values.
# This is useful if you are iterating over an input tree
# and writing to an output tree that shares the same
# TreeBuffer but you don't getattr on all branches of the
# input tree in the logic that determines which entries
# to keep.
self._branch_cache[attr].GetEntry(i)
self._buffer._entry.set(i)
yield self._buffer
self._buffer.next_entry()
self._buffer.reset_collections()
else:
for i in range(self.GetEntries()):
# Read all activated branches (can be slow!).
self.GetEntry(i)
self._buffer._entry.set(i)
yield self._buffer
self._buffer.reset_collections()
def __setattr__(self, attr, value):
# this test allows attributes to be set in the __init__ method
# any normal attributes are handled normally
if '_inited' not in self.__dict__ or attr in self.__dict__:
super(BaseTree, self).__setattr__(attr, value)
return
try:
setattr(self._buffer, attr, value)
except AttributeError:
raise AttributeError(
"`{0}` instance has no attribute `{1}`".format(
self.__class__.__name__, attr))
def __getattr__(self, attr):
if '_inited' not in self.__dict__:
raise AttributeError(
"`{0}` instance has no attribute `{1}`".format(
self.__class__.__name__, attr))
try:
return getattr(self._buffer, attr)
except AttributeError:
try:
return getattr(self._tree, attr)
except AttributeError:
raise AttributeError(
"`{0}` instance has no attribute `{1}`".format(
self.__class__.__name__, attr))
def __len__(self):
"""
Same as GetEntries
"""
return self.GetEntries()
def __contains__(self, branch):
"""
Same as has_branch
"""
return self.has_branch(branch)
def has_branch(self, branch):
"""
Determine if this Tree contains a branch with the name ``branch``
Parameters
----------
branch : str
branch name
Returns
-------
has_branch : bool
True if this Tree contains a branch with the name ``branch`` or
False otherwise.
"""
return not not self.GetBranch(branch)
class Tree(BaseTree):
pass
``` |
{
"source": "Jiage-leia/morning_report",
"score": 3
} |
#### File: morning_report/src/SQL_operations_all.py
```python
import pymssql
# import MySQLdb
# from MySQLdb import _mysql
# ast provides helpers for working with Python abstract syntax trees
import ast
# json stands for JavaScript Object Notation; it is mainly used for storing and transporting data
# yaml helps to translate between data formats
import json, yaml
import re
# set time format
from datetime import datetime as dt, timedelta
# connect python with sql
def generate_connector_of_MS_SQL(ip, user, password, db_name):
return pymssql.connect(ip, user, password, db_name, charset="utf8")
# define "IOV" appointment
def find_daily_iov(conn, schema, date=None):
#define query date
if not date:
_tomorrow = dt.now().date() + timedelta(1)
else:
_tomorrow = date
# print(_tomorrow)
#define sql
_sql = "select reason, status, CONVERT(time, appt_time) as appt_time " \
"from {0}.appointment_view " \
"where appt_date = '{1}' and reason like 'IOV%' " \
"order by appt_time ;" \
";".format(schema, _tomorrow)
try:
cursor = conn.cursor()
cursor.execute(_sql)
return cursor.fetchall()
except:
return ()
# define "talk" appoinment
def find_daily_talk(conn, schema, date=None):
#define query date
if not date:
_tomorrow = dt.now().date() + timedelta(1)
else:
_tomorrow = date
#define sql
_sql_talk = "select reason, status, CONVERT(time, appt_time) as appt_time " \
"from {0}.appointment_view " \
"where appt_date = '{1}' and (reason like 'PHONE%' " \
"or reason like 'TELE%' or reason like 'CON%' " \
"or reason like 'COUR%' or reason like '%TALK') " \
"order by appt_time ;" \
";".format(schema, _tomorrow)
# cursor is an object which helps to execute the query and fetch the records from the database
try:
cursor = conn.cursor()
cursor.execute(_sql_talk)
return cursor.fetchall()
except:
return ()
# define "nppa" appoinment
def find_daily_nppa(conn, schema, date=None):
#define query date
if not date:
_tomorrow = dt.now().date() + timedelta(1)
else:
_tomorrow = date
#define sql
_sql_nppa = "select reason, status, CONVERT(time, appt_time) as appt_time " \
"from {0}.appointment_view " \
"where appt_date = '{1}' and ( reason like 'IUI%' " \
"or reason like '%HSG%' "\
"or reason like 'R1%' "\
"or reason like 'SIS%' "\
"order by appt_time;" \
";".format(schema, _tomorrow)
# cursor is an object which helps to execute the query and fetch the records from the database
try:
cursor = conn.cursor()
cursor.execute(_sql_nppa)
return cursor.fetchall()
except:
return ()
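# Usage sketch (host, credentials and schema below are placeholders, not real values):
#
#     conn = generate_connector_of_MS_SQL("127.0.0.1", "reporter", "secret", "emr")
#     iov_rows = find_daily_iov(conn, "dbo")                    # defaults to tomorrow
#     talk_rows = find_daily_talk(conn, "dbo", date=dt(2021, 6, 1).date())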
``` |
{
"source": "jiaguilera/a-walk-in-graphql",
"score": 3
} |
#### File: day_02/python/resolvers.py
```python
from ariadne import QueryType, ObjectType
from random import randint
from models import Skill, Person
from data import session
from datetime import datetime
query = QueryType()
# Type definition
skill = ObjectType("Skill")
person = ObjectType("Person")
# Top level resolvers
@query.field("randomSkill")
def resolve_random_skill(_, info):
records = session.query(Skill).count()
random_id = str(randint(1, records))
return session.query(Skill).get(random_id)
@query.field("randomPerson")
def resolve_random_person(_, info):
records = session.query(Person).count()
random_id = str(randint(1, records))
return session.query(Person).get(random_id)
@query.field("persons")
def resolve_persons(_, info, id=None):
return session.query(Person).filter_by(id=id) if id else session.query(Person).all()
# Field level resolvers
@skill.field("now")
def resolve_now(_, info):
return datetime.now()
@skill.field("parent")
def resolve_parent(obj, info):
return obj.parent_skill
@person.field("fullName")
def resolve_full_name(obj, info):
return f'{obj.name} {obj.surname}'
@person.field("friends")
def resolve_friends(obj, info):
return obj.friends
@person.field("skills")
def resolve_skills(obj, info):
return obj.skills
@person.field("favSkill")
def resolve_fav_skill(obj, info):
return obj.person_favSkill
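# Example query these resolvers can serve (shown as a comment; the exact field
# names inside Skill depend on the SDL schema, which is not part of this file):
#
#     query {
#       randomPerson {
#         fullName
#         friends { fullName }
#         favSkill { id }
#       }
#     }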
``` |
{
"source": "jiaguobing/FastCAE",
"score": 2
} |
#### File: vtk/util/vtkConstants.py
```python
VTK_FLOAT_MAX = 1.0e+38
VTK_INT_MAX = 2147483647 # 2^31 - 1
# These types are returned by GetDataType to indicate pixel type.
VTK_VOID = 0
VTK_BIT = 1
VTK_CHAR = 2
VTK_SIGNED_CHAR =15
VTK_UNSIGNED_CHAR = 3
VTK_SHORT = 4
VTK_UNSIGNED_SHORT = 5
VTK_INT = 6
VTK_UNSIGNED_INT = 7
VTK_LONG = 8
VTK_UNSIGNED_LONG = 9
VTK_FLOAT =10
VTK_DOUBLE =11
VTK_ID_TYPE =12
# These types are not currently supported by GetDataType, but are
# for completeness.
VTK_STRING =13
VTK_OPAQUE =14
VTK_LONG_LONG =16
VTK_UNSIGNED_LONG_LONG =17
# Legacy. This type is never enabled.
VTK___INT64 =18
# Legacy. This type is never enabled.
VTK_UNSIGNED___INT64 =19
# These types are required by vtkVariant and vtkVariantArray
VTK_VARIANT =20
VTK_OBJECT =21
# Storage for Unicode strings
VTK_UNICODE_STRING =22
# Some constant required for correct template performance
VTK_BIT_MIN = 0
VTK_BIT_MAX = 1
VTK_CHAR_MIN = -128
VTK_CHAR_MAX = 127
VTK_UNSIGNED_CHAR_MIN = 0
VTK_UNSIGNED_CHAR_MAX = 255
VTK_SHORT_MIN = -32768
VTK_SHORT_MAX = 32767
VTK_UNSIGNED_SHORT_MIN = 0
VTK_UNSIGNED_SHORT_MAX = 65535
VTK_INT_MIN = (-VTK_INT_MAX-1)
VTK_INT_MAX = VTK_INT_MAX
#VTK_UNSIGNED_INT_MIN = 0
#VTK_UNSIGNED_INT_MAX = 4294967295
VTK_LONG_MIN = (-VTK_INT_MAX-1)
VTK_LONG_MAX = VTK_INT_MAX
#VTK_UNSIGNED_LONG_MIN = 0
#VTK_UNSIGNED_LONG_MAX = 4294967295
VTK_FLOAT_MIN = -VTK_FLOAT_MAX
VTK_FLOAT_MAX = VTK_FLOAT_MAX
VTK_DOUBLE_MIN = -1.0e+99
VTK_DOUBLE_MAX = 1.0e+99
# These types are returned to distinguish dataset types
VTK_POLY_DATA = 0
VTK_STRUCTURED_POINTS = 1
VTK_STRUCTURED_GRID = 2
VTK_RECTILINEAR_GRID = 3
VTK_UNSTRUCTURED_GRID = 4
VTK_PIECEWISE_FUNCTION = 5
VTK_IMAGE_DATA = 6
VTK_DATA_OBJECT = 7
VTK_DATA_SET = 8
VTK_POINT_SET = 9
VTK_UNIFORM_GRID = 10
VTK_COMPOSITE_DATA_SET = 11
VTK_MULTIGROUP_DATA_SET = 12 # OBSOLETE
VTK_MULTIBLOCK_DATA_SET = 13
VTK_HIERARCHICAL_DATA_SET = 14 # OBSOLETE
VTK_HIERARCHICAL_BOX_DATA_SET = 15
VTK_GENERIC_DATA_SET = 16
VTK_HYPER_OCTREE = 17
VTK_TEMPORAL_DATA_SET = 18
VTK_TABLE = 19
VTK_GRAPH = 20
VTK_TREE = 21
VTK_SELECTION = 22
# These types define error codes for vtk functions
VTK_OK = 1
VTK_ERROR = 2
# These types define different text properties
VTK_ARIAL = 0
VTK_COURIER = 1
VTK_TIMES = 2
VTK_UNKNOWN_FONT = 3
VTK_TEXT_LEFT = 0
VTK_TEXT_CENTERED = 1
VTK_TEXT_RIGHT = 2
VTK_TEXT_BOTTOM = 0
VTK_TEXT_TOP = 2
VTK_TEXT_GLOBAL_ANTIALIASING_SOME = 0
VTK_TEXT_GLOBAL_ANTIALIASING_NONE = 1
VTK_TEXT_GLOBAL_ANTIALIASING_ALL = 2
VTK_LUMINANCE = 1
VTK_LUMINANCE_ALPHA = 2
VTK_RGB = 3
VTK_RGBA = 4
VTK_COLOR_MODE_DEFAULT = 0
VTK_COLOR_MODE_MAP_SCALARS = 1
# Constants for InterpolationType
VTK_NEAREST_INTERPOLATION = 0
VTK_LINEAR_INTERPOLATION = 1
# For volume rendering
VTK_MAX_VRCOMP = 4
# These types define the 17 linear VTK Cell Types
# See Filtering/vtkCellType.h
# Linear cells
VTK_EMPTY_CELL = 0
VTK_VERTEX = 1
VTK_POLY_VERTEX = 2
VTK_LINE = 3
VTK_POLY_LINE = 4
VTK_TRIANGLE = 5
VTK_TRIANGLE_STRIP = 6
VTK_POLYGON = 7
VTK_PIXEL = 8
VTK_QUAD = 9
VTK_TETRA = 10
VTK_VOXEL = 11
VTK_HEXAHEDRON = 12
VTK_WEDGE = 13
VTK_PYRAMID = 14
VTK_PENTAGONAL_PRISM = 15
VTK_HEXAGONAL_PRISM = 16
# Quadratic, isoparametric cells
VTK_QUADRATIC_EDGE = 21
VTK_QUADRATIC_TRIANGLE = 22
VTK_QUADRATIC_QUAD = 23
VTK_QUADRATIC_TETRA = 24
VTK_QUADRATIC_HEXAHEDRON = 25
VTK_QUADRATIC_WEDGE = 26
VTK_QUADRATIC_PYRAMID = 27
VTK_BIQUADRATIC_QUAD = 28
VTK_TRIQUADRATIC_HEXAHEDRON = 29
VTK_QUADRATIC_LINEAR_QUAD = 30
VTK_QUADRATIC_LINEAR_WEDGE = 31
VTK_BIQUADRATIC_QUADRATIC_WEDGE = 32
VTK_BIQUADRATIC_QUADRATIC_HEXAHEDRON = 33
# Special class of cells formed by convex group of points
VTK_CONVEX_POINT_SET = 41
# Higher order cells in parametric form
VTK_PARAMETRIC_CURVE = 51
VTK_PARAMETRIC_SURFACE = 52
VTK_PARAMETRIC_TRI_SURFACE = 53
VTK_PARAMETRIC_QUAD_SURFACE = 54
VTK_PARAMETRIC_TETRA_REGION = 55
VTK_PARAMETRIC_HEX_REGION = 56
# Higher order cells
VTK_HIGHER_ORDER_EDGE = 60
VTK_HIGHER_ORDER_TRIANGLE = 61
VTK_HIGHER_ORDER_QUAD = 62
VTK_HIGHER_ORDER_POLYGON = 63
VTK_HIGHER_ORDER_TETRAHEDRON = 64
VTK_HIGHER_ORDER_WEDGE = 65
VTK_HIGHER_ORDER_PYRAMID = 66
VTK_HIGHER_ORDER_HEXAHEDRON = 67
# A macro to get the name of a type
__vtkTypeNameDict = {VTK_VOID:"void",
VTK_DOUBLE:"double",
VTK_FLOAT:"float",
VTK_LONG:"long",
VTK_UNSIGNED_LONG:"unsigned long",
VTK_INT:"int",
VTK_UNSIGNED_INT:"unsigned int",
VTK_SHORT:"short",
VTK_UNSIGNED_SHORT:"unsigned short",
VTK_CHAR:"char",
VTK_UNSIGNED_CHAR:"unsigned char",
VTK_SIGNED_CHAR:"signed char",
VTK_BIT:"bit"}
def vtkImageScalarTypeNameMacro(type):
return __vtkTypeNameDict[type]
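# Minimal usage sketch (not part of the original module): the macro maps a VTK
# scalar type code to its C type name via the lookup dict above.
#
#     >>> vtkImageScalarTypeNameMacro(VTK_FLOAT)
#     'float'
#     >>> vtkImageScalarTypeNameMacro(VTK_UNSIGNED_CHAR)
#     'unsigned char'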
``` |
{
"source": "JiahanBro/DMD_Test",
"score": 3
} |
#### File: DMD_Test/IncompNST_Tools/FiniteDifferences.py
```python
import numpy as np
import numpy.linalg as npla
import scipy as sc
import scipy.sparse.linalg as la
import scipy.sparse as sp
import matplotlib.pyplot as plt
def Grad_x(dx, nx, ny):
Gx = sc.zeros([nx, nx])
# Build gradient matrix
for i in range(0, nx):
Gx[(i-2), i] = 1/6
Gx[(i-1), i] = -1
Gx[i % nx, i] = 1/2
Gx[(i+1) % nx, i] = 1/3
# Divide through increments
Gx /= dx
# Expand gradient matrix to whole field
Gx = sc.kron(sc.eye(ny), Gx)
return Gx
def Grad_y(dy, nx, ny):
Gy = sc.zeros([ny, ny])
# Build gradient matrix
for i in range(0, ny):
Gy[i, (i-2)] = 1/6
Gy[i, (i-1)] = -1
Gy[i, i % ny] = 1/2
Gy[i, (i+1) % ny] = 1/3
# Divide through increments
Gy /= dy
# Expand gradient matrix to whole field
Gy = sc.kron(Gy, sc.eye(nx))
return Gy
def Div_x(dx, nx, ny):
Dx = sc.zeros([nx, nx])
# Build divergence matrix
for i in range(0, nx):
Dx[(i-1), i] = -1/3
Dx[i % nx, i] = -1/2
Dx[(i+1) % nx, i] = 1
Dx[(i+2) % nx, i] = -1/6
# Divide through increments
Dx /= dx
# Expand divergence matrix to whole field
Dx = sc.kron(sc.eye(ny), Dx)
return Dx
def Div_y(dy, nx, ny):
Dy = sc.zeros([ny, ny])
# Build divergence matrix
for i in range(0, ny):
Dy[i, (i-1)] = -1/3
Dy[i, i % ny] = -1/2
Dy[i, (i+1) % ny] = 1
Dy[i, (i+2) % ny] = -1/6
# Divide through increments
Dy /= dy
# Expand divergence matrix to whole field
Dy = sc.kron(Dy, sc.eye(nx))
return Dy
def Laplace(Gx, Gy, Dx, Dy):
return sc.dot(Dx, Gx) + sc.dot(Dy, Gy)
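# Small sanity-check sketch (not part of the original script): on a periodic grid
# the discrete Laplacian of a constant field should vanish up to round-off.
#
#     nx = ny = 4
#     dx = dy = 2 * sc.pi / nx
#     L4 = Laplace(Grad_x(dx, nx, ny), Grad_y(dy, nx, ny),
#                  Div_x(dx, nx, ny), Div_y(dy, nx, ny))
#     assert abs(sc.dot(L4, sc.ones((nx * ny, 1)))).max() < 1e-10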
def Convective(Gx, Gy, Dx, Dy, U, V):
return (sc.dot(Dx, U) + sc.dot(U, Gx) + sc.dot(Dy, V) + sc.dot(V, Gy))/2
def rhs(u, p, G, Du, L, nu):
return nu*sc.dot(L, u) - sc.dot(Du, u) - sc.dot(G, p)
def RK4(ht, u, p, G, Du, L, nu):
    k1 = rhs(u, p, G, Du, L, nu)
    k2 = rhs(u + k1 * (ht / 2), p, G, Du, L, nu)
    k3 = rhs(u + k2 * (ht / 2), p, G, Du, L, nu)
    k4 = rhs(u + k3 * ht, p, G, Du, L, nu)
    # ht is a scalar time step, so plain scalar multiplication works for both
    # dense arrays and sparse matrices
    return u + ht * (1 / 6 * k1 + 1 / 3 * k2 + 1 / 3 * k3 + 1 / 6 * k4)
def solve_laplace(p, u, v, Gx, Gy, Dx, Dy, L, nx, ny):
s = sc.dot(Dx, u) + sc.dot(Dy, v)
dp = sp.csc_matrix(sc.reshape(la.spsolve(L, s), (nx*ny, 1), order="F"))
u -= sc.dot(Gx, dp)
v -= sc.dot(Gy, dp)
p += dp
return u, v, p
def taylor_green_vortex(a, b, nu, rho, nx, ny, dx, dy):
# Initialize velocity field
u = sc.zeros([nx, ny])
v = sc.zeros([nx, ny])
# Initialize pressure field
p = sc.zeros([nx, ny])
for i in range(0, nx):
for j in range(0, ny):
u[i, j] = sc.exp(-2*nu) * sc.cos(a*i*dx) * sc.sin(b*j*dy)
v[i, j] = sc.exp(-2*nu) * sc.sin(a*i*dx) * sc.cos(b*j*dy)
p[i, j] = -(pow(sc.exp(-2*nu), 2)) * (rho/4) * \
(sc.cos(2*i*dx) + sc.cos(2*j*dy))
# Plot pressure field
plt.imshow(p)
plt.colorbar()
plt.show()
# Plot velocity field
x = sc.linspace(dx, 2*sc.pi, nx)
y = sc.linspace(dy, 2*sc.pi, ny)
xv, yv = sc.meshgrid(x, y)
plt.quiver(xv, yv, u, v, units='width')
plt.show()
print("Initialized Taylor-Green vortex")
return u, v, p
def dancing_vortices(nx, ny, dx, dy):
# Initial vortex x-position
x0s = sc.array([sc.pi*0.75, sc.pi*1.25, sc.pi*1.25])
# Initial vortex y-position
y0s = sc.array([1, 1, 1+1/(2*sc.sqrt(2))]) * sc.pi
    # Vortex core size
betas = sc.array([1, 1, 1]) / sc.pi
# Strength
alphas = sc.array([1, 1, -1/2]) * sc.pi
# Build field
x = sc.linspace(dx, 2*sc.pi, nx)
y = sc.linspace(dx, 2*sc.pi, ny)
x, y = sc.meshgrid(x, y)
x = sc.transpose(x)
y = sc.transpose(y)
# Gradient operators
Gx = Grad_x(dx, nx, ny)
Gy = Grad_y(dy, nx, ny)
# Divergence operators
Dx = Div_x(dx, nx, ny)
Dy = Div_y(dy, nx, ny)
# Laplace-Operator in 2D
L = Laplace(Gx, Gy, Dx, Dy)
# Calculate omega
omega = sc.zeros([nx, ny])
for i in range(0, len(x0s)):
x0 = x0s[i]
y0 = y0s[i]
beta = betas[i]
alpha = alphas[i]
R2 = (sc.multiply((x-x0), (x-x0)) + sc.multiply((y-y0), (y-y0))) / \
pow(beta, 2)
omega_part = alpha * np.exp(-R2)
omega += omega_part
omega = sc.reshape(omega, (nx*ny, 1), order="F")
# Determine psi
psi = npla.solve(L, omega)
psi_x = np.dot(Gx, psi)
psi_y = np.dot(Gy, psi)
# Determine velocity components
u = -psi_y
v = psi_x
# Compensate numerical divergence
s = np.dot(Dx, u) + np.dot(Dy, v)
dp = sc.reshape(npla.solve(L, s), (nx*ny, 1), order="F")
u -= np.dot(Gx, dp)
v -= np.dot(Gy, dp)
# Initialize pressure field
p = sc.zeros([nx, ny])
# Plot rotation of velocity field
rot_uv = np.dot(Dx, v) - np.dot(Dy, u)
rot_uv = sc.reshape(rot_uv, (nx, ny), order="F")
print("Initialized three dancing vortices")
plt.imshow(rot_uv)
plt.colorbar()
plt.pause(0.5)
# Reshape velocity arrays back for output
u = sc.reshape(u, (nx, ny), order="F")
    v = sc.reshape(v, (nx, ny), order="F")
return u, v, p
def vortex_pair(nx, ny, dx, dy):
# Domain size
lx = nx * dx
ly = ny * dy
# Initial vortex x-position
x0s = sc.array([0.4, 0.6])*lx
# Initial vortex y-position
y0s = sc.array([0.5, 0.5])*ly
# Strength
alphas = sc.array([-299.5, 299.5])
# Build field
x = sc.linspace(dx, lx, nx)
y = sc.linspace(dx, ly, ny)
x, y = sc.meshgrid(x, y)
x = sc.transpose(x)
y = sc.transpose(y)
# Gradient operators
Gx = Grad_x(dx, nx, ny)
Gy = Grad_y(dy, nx, ny)
# Divergence operators
Dx = Div_x(dx, nx, ny)
Dy = Div_y(dy, nx, ny)
# Laplace-Operator in 2D
L = Laplace(Gx, Gy, Dx, Dy)
# Calculate omega
omega = sc.zeros([nx, ny], dtype='float64')
for i in range(0, len(x0s)):
x0 = x0s[i]
y0 = y0s[i]
alpha = alphas[i]
r = 10*sc.sqrt((x-x0)**2 + (y-y0)**2)
omega_part = alpha * (1-(r**2)) * sc.exp(-r**2)
omega += omega_part
omega = sc.reshape(omega, (nx*ny, 1), order="F")
# Determine psi
psi = npla.solve(L, omega)
psi_x = np.dot(Gx, psi)
psi_y = np.dot(Gy, psi)
# Determine velocity components
u = -psi_y
v = psi_x
# Compensate numerical divergence
s = np.dot(Dx, u) + np.dot(Dy, v)
dp = sc.reshape(npla.solve(L, s), (nx*ny, 1), order="F")
u -= np.dot(Gx, dp)
v -= np.dot(Gy, dp)
# Initialize pressure field
p = sc.zeros([nx, ny])
# Plot rotation of velocity field
rot_uv = np.dot(Dx, v) - np.dot(Dy, u)
rot_uv = sc.reshape(rot_uv, (nx, ny), order="F")
print("Initialized three dancing vortices")
plt.imshow(rot_uv)
plt.colorbar()
plt.pause(0.5)
# Reshape velocity arrays back for output
u = sc.reshape(u, (nx, ny), order="F")
    v = sc.reshape(v, (nx, ny), order="F")
return u, v, p
def vortex_pair2(nx, ny, dx, dy):
# Domain size
lx = nx * dx
ly = ny * dy
# Initial vortex x-position
x0s = sc.array([0.4, 0.6])*lx
# Initial vortex y-position
y0s = sc.array([0.5, 0.5])*ly
# Strength
alphas = sc.array([-299.5, 299.5])
# Build field
x = sc.linspace(dx, lx, nx)
y = sc.linspace(dx, ly, ny)
x, y = sc.meshgrid(x, y)
x = sc.transpose(x)
y = sc.transpose(y)
# Calculate omega
omega = sc.zeros([nx, ny], dtype='float64')
for i in range(0, len(x0s)):
x0 = x0s[i]
y0 = y0s[i]
alpha = alphas[i]
r = 10*sc.sqrt((x-x0)**2 + (y-y0)**2)
omega_part = alpha * (1-(r**2)) * sc.exp(-r**2)
omega += omega_part
# Initialize pressure field
p = sc.zeros([nx, ny])
print("Initialized vortex pair")
plt.imshow(omega)
plt.colorbar()
plt.pause(0.05)
return omega, p
``` |
{
"source": "JiahangGu/RFN",
"score": 3
} |
#### File: src/model/nebrn.py
```python
import torch
import torch.nn as nn
from model import common
def make_model(args, parent=False):
return NEBRN(args)
class ResBlock(nn.Module):
def __init__(
self, conv, n_feats, kernel_size,
bias=True, bn=False, act=nn.ReLU(True)):
super(ResBlock, self).__init__()
m = []
for i in range(2):
m.append(conv(n_feats, n_feats, kernel_size, bias=bias))
if bn:
m.append(nn.BatchNorm2d(n_feats))
if i == 0:
m.append(act)
self.body = nn.Sequential(*m)
def forward(self, x):
res = self.body(x)
res += x
return res
class SpaceToDepth(nn.Module):
def __init__(self, bs):
super().__init__()
self.bs = bs
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs)
return x
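# Shape illustration for SpaceToDepth (descriptive only): with bs=2 an input of
# shape (N, C, H, W) = (1, 64, 48, 48) is rearranged to (1, 256, 24, 24), i.e.
# every 2x2 spatial patch is folded into the channel axis before the residual
# subtraction in NEBRN.forward below.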
def up(in_channels, out_channels, kernel_size):
return nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=kernel_size)
class UpBlock(nn.Module):
def __init__(self, conv, in_ch, n_feats, kernel_size=3, act=nn.ReLU(True), scale=2, res_head_num=1, res_tail_num=1):
super(UpBlock, self).__init__()
self.conv_head = conv(in_ch, n_feats, 1)
self.res_head = nn.Sequential(*[ResBlock(conv, n_feats, kernel_size, act=act) for _ in range(res_head_num)])
self.mid = conv(n_feats, n_feats, kernel_size)
self.up = nn.Sequential(*[conv(n_feats, n_feats*4, 1), nn.PixelShuffle(2)])
# self.up = up(n_feats, n_feats, scale)
self.res_tail = nn.Sequential(*[ResBlock(conv, n_feats, kernel_size, act=act) for _ in range(res_tail_num)])
self.conv_tail = conv(n_feats, n_feats, 3)
def forward(self, x):
o1 = self.conv_head(x)
o2 = self.res_head(o1)
o3 = self.mid(o2)
sr = self.up(o3 + o1)
o3 = self.res_tail(sr)
out = self.conv_tail(o3)
return out + sr
class NEBRN(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(NEBRN, self).__init__()
n_feats = 64
kernel_size = 3
scale = args.scale[0]
act = nn.LeakyReLU(0.1, inplace=True)
num_blocks = 10
self.scale = scale
rgb_mean = (0.4488, 0.4371, 0.4040)
rgb_std = (1.0, 1.0, 1.0)
self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
self.head = conv(3, n_feats, kernel_size)
self.blocks = nn.ModuleList([UpBlock(conv, n_feats * scale * scale, n_feats, kernel_size=kernel_size, act=act, scale=scale, res_head_num=5, res_tail_num=5) for _ in range(num_blocks)])
self.pixelUnShuffle = SpaceToDepth(scale)
# self.pixelUnShuffle = nn.MaxPool2d(2)
self.tail = conv(n_feats * num_blocks, 3, 3)
self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
def forward(self, x):
x = self.sub_mean(x)
x = self.head(x)
origin = torch.cat([x for _ in range(self.scale ** 2)], dim=1)
# origin = x
o1 = self.blocks[0](origin)
lr1 = self.pixelUnShuffle(o1)
res1 = origin - lr1
o2 = self.blocks[1](res1)
lr2 = self.pixelUnShuffle(o2)
res2 = res1 - lr2
o3 = self.blocks[2](res2)
lr3 = self.pixelUnShuffle(o3)
res3 = res2 - lr3
o4 = self.blocks[3](res3)
lr4 = self.pixelUnShuffle(o4)
res4 = res3 - lr4
o5 = self.blocks[4](res4)
lr5 = self.pixelUnShuffle(o5)
res5 = res4 - lr5
o6 = self.blocks[5](res5)
lr6 = self.pixelUnShuffle(o6)
res6 = res5 - lr6
o7 = self.blocks[6](res6)
lr7 = self.pixelUnShuffle(o7)
res7 = res6 - lr7
o8 = self.blocks[7](res7)
lr8 = self.pixelUnShuffle(o8)
res8 = res7 - lr8
o9 = self.blocks[8](res8)
lr9 = self.pixelUnShuffle(o9)
res9 = res8 - lr9
o10 = self.blocks[9](res9)
x = self.tail(torch.cat([o1, o2, o3, o4, o5, o6, o7, o8, o9, o10], dim=1))
x = self.add_mean(x)
return x
def load_state_dict(self, state_dict, strict=True):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
if name.find('tail') == -1:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
if name.find('tail') == -1:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
```
#### File: code/data/fractsr.py
```python
import torch
import torch.nn as nn
from model import common
def make_model(args, parent=False):
return FRACTSR(args)
## Channel Attention (CA) Layer
class CALayer(nn.Module):
def __init__(self, channel, reduction=16):
super(CALayer, self).__init__()
# global average pooling: feature --> point
self.avg_pool = nn.AdaptiveAvgPool2d(1)
# feature channel downscale and upscale --> channel weight
self.conv_du = nn.Sequential(
nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
# nn.ReLU(inplace=True),
nn.ReLU(),
nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
nn.Sigmoid()
)
def forward(self, x):
y = self.avg_pool(x)
y = self.conv_du(y)
return x * y
## Residual Channel Attention Block (RCAB)
class RCAB(nn.Module):
def __init__(
self, conv, n_feat, kernel_size, reduction=16,
bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
super(RCAB, self).__init__()
modules_body = []
for i in range(2):
modules_body.append(conv(n_feat, n_feat, kernel_size))
if i == 0: modules_body.append(act)
# modules_body.append(CALayer(n_feat, reduction))
self.body = nn.Sequential(*modules_body)
self.res_scale = res_scale
def forward(self, x):
res = self.body(x)
# res = self.body(x).mul(self.res_scale)
res += x
return res
class SALayer(nn.Module):
def __init__(self, channels, kernel_size=7, act=nn.ReLU(True)):
super(SALayer, self).__init__()
padding = kernel_size // 2
self.conv1_1 = nn.Conv2d(channels, channels // 2, (1, kernel_size), padding=(0, padding))
self.conv1_2 = nn.Conv2d(channels // 2, 1, (kernel_size, 1), padding=(padding, 0))
self.conv2_1 = nn.Conv2d(channels, channels // 2, (kernel_size, 1), padding=(padding, 0))
self.conv2_2 = nn.Conv2d(channels // 2, 1, (1, kernel_size), padding=(0, padding))
self.conv3_1 = nn.Conv2d(channels, channels // 2, 3, padding=1)
self.conv3_2 = nn.Conv2d(channels // 2, 1, 3, padding=2, dilation=2)
self.conv1x1 = nn.Conv2d(3, 1, 1)
self.act = act
self.sig = nn.Sigmoid()
def forward(self, x):
attention1 = self.conv1_2(self.act(self.conv1_1(x)))
attention2 = self.conv2_2(self.act(self.conv2_1(x)))
attention3 = self.conv3_2(self.act(self.conv3_1(x)))
attention = self.conv1x1(torch.cat([attention1, attention2, attention3], dim=1))
sa = self.sig(attention)
return x * sa
# class SALayer(nn.Module):
# def __init__(self, kernel_size=7):
# super(SALayer, self).__init__()
# # global average pooling: feature --> point
# padding = kernel_size // 2
# self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=True)
# self.sigmoid = nn.Sigmoid()
#
# def forward(self, x):
# avg_out = torch.mean(x, dim=1, keepdim=True)
# max_out, _ = torch.max(x, dim=1, keepdim=True)
# a = torch.cat([avg_out,max_out], dim=1)
# a = self.conv1(a)
# a = self.sigmoid(a)
# return a * x
class GCB(nn.Module):
def __init__(self, channels, reduction=16, dilation=2, act=nn.ReLU(True)):
super(GCB, self).__init__()
self.conv1 = nn.Conv2d(channels, 1, kernel_size=1, padding=0, bias=True)
self.softmax = nn.Softmax(dim=2)
self.ca = nn.Sequential(
nn.Conv2d(channels, channels // reduction, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(channels // reduction, channels, kernel_size=1)
)
def forward(self, x):
b, c, h, w = x.size()
input_x = x
input_x = input_x.view(b, c, h * w).unsqueeze(1)
content_mask = self.conv1(x)
content_mask = content_mask.view(b, 1, h * w)
content_mask = self.softmax(content_mask)
content_mask = content_mask.unsqueeze(3)
res = torch.matmul(input_x, content_mask)
res = res.view(b, c, 1, 1)
ca_out = self.ca(res)
res = x * ca_out
return res
class RSAB(nn.Module):
def __init__(self, conv, n_feat, kernel_size, act=nn.ReLU(True)):
super(RSAB, self).__init__()
modules_body = []
for i in range(2):
modules_body.append(conv(n_feat, n_feat, kernel_size))
if i == 0: modules_body.append(act)
modules_body.append(SALayer(n_feat))
self.body = nn.Sequential(*modules_body)
def forward(self, x):
res = self.body(x)
res += x
return res
class fractIn2(nn.Module):
def __init__(self, conv, n_feats, kernel_size, block=RCAB, flag=False, act=nn.ReLU(True)):
super(fractIn2, self).__init__()
self.fract1 = block(conv, n_feats, kernel_size)
self.fract2 = block(conv, n_feats, kernel_size)
self.fract3 = block(conv, n_feats, kernel_size)
def forward(self, x):
res = self.fract1(x)
res = self.fract2(res)
out = self.fract3(x)
res = torch.cat([res + x, out], dim=1)
return res
class fractIn4(nn.Module):
def __init__(self, conv, n_feats, kernel_size, block=RCAB, flag=False, act=nn.ReLU(True)):
super(fractIn4, self).__init__()
self.fract1 = fractIn2(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.join = conv(n_feats * 2, n_feats, 1)
# self.join = LAB(n_feats * 2, n_feats)
self.fract2 = fractIn2(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.fract3 = block(conv, n_feats, kernel_size)
def forward(self, x):
res = self.fract1(x)
res = self.join(res)
ans = self.fract2(res)
out = self.fract3(x)
ans = torch.cat([ans + torch.cat([x, x], dim=1), out], dim=1)
return ans
class fractIn8(nn.Module):
def __init__(self, conv, n_feats, kernel_size, block=RCAB, flag=False, act=nn.ReLU(True)):
super(fractIn8, self).__init__()
self.fract1 = fractIn4(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.join = conv(n_feats * 3, n_feats, 1)
# self.join = LAB(n_feats * 3, n_feats)
self.fract2 = fractIn4(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.fract3 = block(conv, n_feats, kernel_size)
def forward(self, x):
res = self.fract1(x)
res = self.join(res)
res = self.fract2(res)
out = self.fract3(x)
res = torch.cat([res + torch.cat([x, x, x], dim=1), out], dim=1)
return res
class fractIn16(nn.Module):
def __init__(self, conv, n_feats, kernel_size, block=RCAB, flag=False, act=nn.ReLU(True)):
super(fractIn16, self).__init__()
self.fract1 = fractIn8(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.join = conv(n_feats * 4, n_feats, 1)
# self.join = LAB(n_feats * 4, n_feats)
self.fract2 = fractIn8(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.fract3 = block(conv, n_feats, kernel_size)
def forward(self, x):
res = self.fract1(x)
res = self.join(res)
res = self.fract2(res)
out = self.fract3(x)
res = torch.cat([res + torch.cat([x, x, x, x], dim=1), out], dim=1)
return res
class fractIn32(nn.Module):
def __init__(self, conv, n_feats, kernel_size, block=RCAB, flag=False, act=nn.ReLU(True)):
super(fractIn32, self).__init__()
self.fract1 = fractIn16(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.join = conv(n_feats * 5, n_feats, 1)
# self.join = LAB(n_feats * 5, n_feats)
self.fract2 = fractIn16(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.fract3 = block(conv, n_feats, kernel_size)
def forward(self, x):
res = self.fract1(x)
res = self.join(res)
res = self.fract2(res)
out = self.fract3(x)
res = torch.cat([res + torch.cat([x, x, x, x, x], dim=1), out], dim=1)
return res
class fractIn64(nn.Module):
def __init__(self, conv, n_feats, kernel_size, block=RCAB, flag=False, act=nn.ReLU(True)):
super(fractIn64, self).__init__()
self.fract1 = fractIn32(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.join = conv(n_feats * 6, n_feats, 1)
# self.join = LAB(n_feats * 6, n_feats)
self.fract2 = fractIn32(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.fract3 = block(conv, n_feats, kernel_size)
def forward(self, x):
res = self.fract1(x)
res = self.join(res)
res = self.fract2(res)
out = self.fract3(x)
res = torch.cat([res + torch.cat([x, x, x, x, x, x], dim=1), out], dim=1)
return res
class fractIn128(nn.Module):
def __init__(self, conv, n_feats, kernel_size, block=RCAB, flag=False, act=nn.ReLU(True)):
super(fractIn128, self).__init__()
self.fract1 = fractIn64(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.join1 = conv(n_feats * 7, n_feats, kernel_size=1)
self.fract2 = fractIn64(conv, n_feats, kernel_size, block=block, flag=flag, act=act)
self.fract3 = block(conv, n_feats, kernel_size)
def forward(self, x):
res = self.fract1(x)
res = self.join1(res)
res = self.fract2(res)
out = self.fract3(x)
res = torch.cat([res + torch.cat([x, x, x, x, x, x, x], dim=1), out], dim=1)
return res
class FRACTSR(nn.Module):
def __init__(self, args, conv=common.default_conv):
super(FRACTSR, self).__init__()
# dug_num = args.dug_num
n_feats = args.n_feats
kernel_size = 3
scale = args.scale[0]
act = nn.ReLU(True)
rgb_mean = (0.4488, 0.4371, 0.4040)
rgb_std = (1.0, 1.0, 1.0)
self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
# define head module
m_head = [conv(args.n_colors, n_feats, kernel_size)]
        # the input of the final join layer is log(number of layers) groups of features
self.body = nn.Sequential(fractIn64(conv, n_feats, kernel_size, block=RCAB, flag=False, act=act), \
conv(n_feats * 7, n_feats, 1))
# define tail module
m_tail = [
common.Upsampler(conv, scale, n_feats, act=False),
conv(n_feats, args.n_colors, kernel_size)]
# m_tail = [
# nn.Conv2d(n_feats*7, n_feats*scale*scale, 3, padding=1),
# nn.PixelShuffle(scale),
# nn.Conv2d(n_feats, args.n_colors, 3, padding=1)
# ]
self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
self.head = nn.Sequential(*m_head)
# self.body = nn.Sequential(*m_body)
self.tail = nn.Sequential(*m_tail)
def forward(self, x):
x = self.sub_mean(x)
x = self.head(x)
res = self.body(x)
res += x
x = self.tail(res)
x = self.add_mean(x)
return x
def load_state_dict(self, state_dict, strict=True):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
if name.find('tail') == -1:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
if name.find('tail') == -1:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
``` |
{
"source": "JiahangXu/nn-Meter",
"score": 2
} |
#### File: nn_meter/utils/export_op_latency.py
```python
import jsonlines
import nn_meter
from nn_meter.dataset import bench_dataset
def create_dummy_input(name):
dummy_input = {
"inbounds": [],
"attr": {
"name": name,
"type": "Placeholder",
"output_shape": [],
"attr": {},
"input_shape": []
},
"outbounds": []
}
return dummy_input
predictors = nn_meter.list_latency_predictors()
for p in predictors:
print(f"[Predictor] {p['name']}: version={p['version']}")
# load predictor
predictor_name = 'adreno640gpu_tflite21'
predictor_version = 1.0
predictor = nn_meter.load_latency_predictor(predictor_name, predictor_version)
datasets = bench_dataset()
test_data = datasets[0]
print(datasets)
with jsonlines.open(test_data) as data_reader:
    items = list(data_reader)  # jsonlines readers are not sized, so materialize first
    n = len(items)
    for i, item in enumerate(items):
print(f'{i}/{n}')
model = item['graph']
for node_name, node in model.items():
if node["inbounds"] == []:
continue
dummy_model = {}
for input in node["inbounds"]:
dummy_model[input] = create_dummy_input(input)
dummy_model[node_name] = node
latency = predictor.predict(dummy_model, model_type="nnmeter-ir")
if "latency" not in node["attr"]:
node["attr"]["latency"] = {}
node["attr"]["latency"][predictor_name] = latency
item['graph'] = model
with jsonlines.open('output.jsonl', mode='a') as writer:
writer.write(item)
```
#### File: nn_meter/utils/utils.py
```python
import os
from zipfile import ZipFile
from tqdm import tqdm
import requests
from packaging import version
import logging
def download_from_url(urladdr, ppath):
"""
download the kernel predictors from the url
@params:
urladdr: github release url address
ppath: the targeting dir to save the download data (usually hardware_inferenceframework)
"""
file_name = os.path.join(ppath, ".zip")
if not os.path.isdir(ppath):
os.makedirs(ppath)
# logging.keyinfo(f'Download from {urladdr}')
response = requests.get(urladdr, stream=True)
total_size_in_bytes = int(response.headers.get("content-length", 0))
block_size = 2048 # 2 Kibibyte
progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
with open(file_name, "wb") as file:
for data in response.iter_content(block_size):
progress_bar.update(len(data))
file.write(data)
zipfile = ZipFile(file_name)
zipfile.extractall(path=ppath)
zipfile.close()
progress_bar.close()
os.remove(file_name)
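# Usage sketch (the URL and target directory below are placeholders, not real
# release assets):
#
#     download_from_url(
#         "https://example.com/releases/cortexA76cpu_tflite21.zip",
#         os.path.expanduser("~/.nn_meter/data/predictor/cortexA76cpu_tflite21"),
#     )
#
# The helper streams the archive with a tqdm progress bar, unzips it into the
# target directory and deletes the downloaded .zip afterwards.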
def try_import_onnx(require_version = "1.9.0"):
try:
import onnx
if version.parse(onnx.__version__) != version.parse(require_version):
logging.warning(f'onnx=={onnx.__version__} is not well tested now, well tested version: onnx=={require_version}' )
return onnx
except ImportError:
logging.error(f'You have not install the onnx package, please install onnx=={require_version} and try again.')
exit()
def try_import_torch(require_version = "1.7.1"):
try:
import torch
if version.parse(torch.__version__) != version.parse(require_version):
logging.warning(f'torch=={torch.__version__} is not well tested now, well tested version: torch=={require_version}' )
return torch
except ImportError:
logging.error(f'You have not install the torch package, please install torch=={require_version} and try again.')
exit()
def try_import_tensorflow(require_version = "1.15.0"):
try:
import tensorflow
if version.parse(tensorflow.__version__) != version.parse(require_version):
logging.warning(f'tensorflow=={tensorflow.__version__} is not well tested now, well tested version: tensorflow=={require_version}' )
return tensorflow
except ImportError:
logging.error(f'You have not install the tensorflow package, please install tensorflow=={require_version} and try again.')
exit()
def try_import_torchvision_models():
try:
import torchvision
return torchvision.models
except ImportError:
logging.error(f'You have not install the torchvision package, please install torchvision and try again.')
exit()
def try_import_onnxsim():
try:
from onnxsim import simplify
return simplify
except ImportError:
logging.error(f'You have not install the onnx-simplifier package, please install onnx-simplifier and try again.')
exit()
def try_import_dgl():
try:
import dgl
return dgl
except ImportError:
logging.error(f'You have not install the dgl package, please install dgl and try again.')
exit()
``` |
{
"source": "jiahan-wu/app_icon_maker",
"score": 2
} |
#### File: jiahan-wu/app_icon_maker/app_icon_maker.py
```python
import argparse
import json
import os
from PIL import Image
formats = [
("iPhone Notification iOS 7 - 14 20pt_2x", (40, 40), 'iphone', '2x', '20x20'),
("iPhone Notification iOS 7 - 14 20pt_3x", (60, 60), 'iphone', '3x', '20x20'),
("iPhone Settings iOS 7 - 14 29pt_2x", (58, 58), 'iphone', '2x', '29x29'),
("iPhone Settings iOS 7 - 14 29pt_3x", (87, 87), 'iphone', '3x', '29x29'),
("iPhone Spotlight iOS 7 - 14 40pt_2x", (80, 80), 'iphone', '2x', '40x40'),
("iPhone Spotlight iOS 7 - 14 40pt_3x", (120, 120), 'iphone', '3x', '40x40'),
("iPhone App iOS 7 - 14 60pt_2x", (120, 120), 'iphone', '2x', '60x60'),
("iPhone App iOS 7 - 14 60pt_3x", (180, 180), 'iphone', '3x', '60x60'),
("iPad Notification iOS 7 - 14 20pt_1x", (20, 20), 'ipad', '1x', '20x20'),
("iPad Notification iOS 7 - 14 20pt_2x", (40, 40), 'ipad', '2x', '20x20'),
("iPad Settings iOS 7 - 14 29pt_1x", (29, 29), 'ipad', '1x', '29x29'),
("iPad Settings iOS 7 - 14 29pt_2x", (58, 58), 'ipad', '2x', '29x29'),
("iPad Spotlight iOS 7 - 14 40pt_1x", (40, 40), 'ipad', '1x', '40x40'),
("iPad Spotlight iOS 7 - 14 40pt_2x", (80, 80), 'ipad', '2x', '40x40'),
("iPad App iOS 7 - 14 76pt_1x", (76, 76), 'ipad', '1x', '76x76'),
("iPad App iOS 7 - 14 76pt_2x", (152, 152), 'ipad', '2x', '76x76'),
("iPad (12.9-inch) App iOS 9 - 14 83.5pt_2x", (167, 167), 'ipad', '2x', '83.5x83.5'),
("App Store iOS 1024pt_1x", (1024, 1024), 'ios-marketing', '1x', '1024x1024')
]
def make(input, output):
with Image.open(input) as image:
if image.size != (1024, 1024):
raise Exception("The image size must be 1024x1024.")
metadata_bus = []
for name, size, idiom, scale, size_class in formats:
duplication = image.copy()
duplication.thumbnail(size)
file_name = f'{name}.' + f'{image.format}'.lower()
file_path = os.path.join(output, file_name)
duplication.save(file_path, image.format)
metadata_bus.append({'filename': file_name, 'idiom': idiom, 'scale': scale, 'size': size_class})
contents_json_data = {
'images': metadata_bus,
'info': {
'author': 'xcode',
'version': 1
}
}
contents_json_file = os.path.join(output, 'Contents.json')
with open(contents_json_file, 'w', encoding='utf-8') as file:
json.dump(contents_json_data, file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog='aim',
usage='App Icon Maker will generate all required app icon sizes for iOS projects.')
parser.add_argument('input')
parser.add_argument('-o', '--output', default='./')
args = parser.parse_args()
make(args.input, args.output)
``` |
{
"source": "Jiahao000/ORL",
"score": 2
} |
#### File: datasets/data_sources/coco_orl_json.py
```python
import os
import random
from PIL import Image
import mmcv
from ..registry import DATASOURCES
from .utils import McLoader
@DATASOURCES.register_module
class COCOORLJson(object):
def __init__(self, root, json_file, topk_knn_image, memcached=False, mclient_path=None):
data = mmcv.load(json_file)
self.fns = data['images']['file_name']
self.intra_bboxes = data['pseudo_annotations']['bbox']
self.total_knn_image_num = data['info']['knn_image_num']
self.knn_image_ids = data['pseudo_annotations']['knn_image_id']
self.knn_bbox_pairs = data['pseudo_annotations']['knn_bbox_pair'] # NxKx(topk_bbox_num)x8
self.fns = [os.path.join(root, fn) for fn in self.fns]
self.topk_knn_image = topk_knn_image
assert self.topk_knn_image <= self.total_knn_image_num, \
"Top-k knn image number exceeds total number of knn images available. Abort!"
self.memcached = memcached
self.mclient_path = mclient_path
self.initialized = False
def _init_memcached(self):
if not self.initialized:
assert self.mclient_path is not None
self.mc_loader = McLoader(self.mclient_path)
self.initialized = True
def get_length(self):
return len(self.fns)
def get_sample(self, idx):
# randomly select one knn image
rnd = random.randint(0, self.topk_knn_image - 1)
target_id = self.knn_image_ids[idx][rnd]
if self.memcached:
self._init_memcached()
if self.memcached:
img = self.mc_loader(self.fns[idx])
knn_img = self.mc_loader(self.fns[target_id])
else:
img = Image.open(self.fns[idx])
knn_img = Image.open(self.fns[target_id])
img = img.convert('RGB')
knn_img = knn_img.convert('RGB')
# load proposals
intra_bbox = self.intra_bboxes[idx]
knn_bbox = self.knn_bbox_pairs[idx][rnd] # (topk_bbox_num)x8
return img, knn_img, intra_bbox, knn_bbox
```
#### File: openselfsup/datasets/selective_search.py
```python
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset
from openselfsup.utils import build_from_cfg
from torchvision.transforms import Compose
import torchvision.transforms.functional as TF
from .registry import DATASETS, PIPELINES
from .builder import build_datasource
from .utils import to_numpy
def selective_search(image, method="fast"):
# initialize OpenCV's selective search implementation
ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
# set the input image
ss.setBaseImage(image)
# check to see if we are using the *fast* but *less accurate* version
# of selective search
if method == "fast":
# print("[INFO] using *fast* selective search")
ss.switchToSelectiveSearchFast()
# otherwise we are using the *slower* but *more accurate* version
else:
# print("[INFO] using *quality* selective search")
ss.switchToSelectiveSearchQuality()
# run selective search on the input image
boxes = ss.process()
return boxes
def box_filter(boxes, min_size=None, max_ratio=None, topN=None):
proposal = []
for box in boxes:
# Calculate width and height of the box
w, h = box[2], box[3]
# Filter for size
if min_size:
if w < min_size or h < min_size:
continue
# Filter for box ratio
if max_ratio:
if w / h > max_ratio or h / w > max_ratio:
continue
proposal.append(box)
if topN:
if topN <= len(proposal):
return proposal[:topN]
else:
return proposal
else:
return proposal
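# Minimal usage sketch (the image path is hypothetical): run selective search on
# one BGR image and keep at most 100 proposals that are at least 96 px on each
# side and no more elongated than 3:1.
#
#     img = cv2.imread("example.jpg")
#     raw_boxes = selective_search(img, method="fast")   # rows are (x, y, w, h)
#     proposals = box_filter(raw_boxes, min_size=96, max_ratio=3, topN=100)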
@DATASETS.register_module
class SelectiveSearchDataset(Dataset):
"""Dataset for generating selective search proposals.
"""
def __init__(self,
data_source,
method='fast',
min_size=None,
max_ratio=None,
topN=None):
self.data_source = build_datasource(data_source)
self.method = method
self.min_size = min_size
self.max_ratio = max_ratio
self.topN = topN
def __len__(self):
return self.data_source.get_length()
def __getitem__(self, idx):
img = self.data_source.get_sample(idx)
img_cv2 = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
boxes = selective_search(img_cv2, self.method)
if self.topN is not None:
boxes = box_filter(boxes, self.min_size, self.max_ratio, self.topN)
boxes = torch.from_numpy(np.array(boxes))
# bbox: Bx4
# B is the total number of original/topN selective search bboxes
return dict(bbox=boxes)
def evaluate(self, bbox, **kwargs):
if not isinstance(bbox, list):
bbox = bbox.tolist()
# dict
data_ss = {}
data_ss['bbox'] = bbox
return data_ss
``` |
{
"source": "jiahao42/jiahao42.github.io",
"score": 2
} |
#### File: bug_template/week2/search.py
```python
import subprocess
import re
import sys
ID = r"""([_a-zA-Z][_a-zA-Z0-9]{0,30})"""
CONSTANT = r"""([A-Z]+)"""
DIGIT = r"""([0-9]+)"""
VALID = r"""(%s|%s)""" % (ID, DIGIT)
OP = r"""(\+|-|\*|/|\%|\+\+|--)"""
SPACES = r"""\s*"""
DB = {}
"""
prototype: php-src/ext/standard/http_fopen_wrapper.c
static php_stream *php_stream_url_wrap_http_ex(...) {
...
php_stream_write(stream, ZSTR_VAL(req_buf.s), ZSTR_LEN(req_buf.s));
...
if (php_stream_get_line(stream, tmp_line, sizeof(tmp_line) - 1, &tmp_line_len) != NULL) {
...
if (response_code >= 200 && response_code < 400) {
...
switch(response_code) {
case 403:
php_stream_notify_error(context, PHP_STREAM_NOTIFY_AUTH_RESULT,
tmp_line, response_code);
break;
default:
php_stream_notify_error(context, PHP_STREAM_NOTIFY_FAILURE,
tmp_line, response_code);
}
if (tmp_line_len >= 1 && tmp_line[tmp_line_len - 1] == '\n') {
--tmp_line_len;
if (tmp_line_len >= 1 &&tmp_line[tmp_line_len - 1] == '\r') {
--tmp_line_len;
}
}
...
}
}
}
"""
# Assume there are 80 chars at most in one line
# FOCUS_STMTS = r""".{0,80}write.{0,3200}response.{0,3200}403.{0,3200}==\s*\'\\n\'"""# % (ID, OP, DIGIT)
FOCUS_STMTS = r""".{0,80}write.{0,3200}response.{0,3200}==\s*\'\\n\'"""
TEMPLATE = (FOCUS_STMTS)
DB['url_wrap_http_ex'] = TEMPLATE
STEP = 10 # scan the +- STEP lines around FOCUS STMT
POSTFIX = r"""\\.c$\|\\.cpp$""" # only scan these files
def traverse_source(PATH = '.'):
CMD = 'find %s | grep -E %s | xargs grep -rn -e . ' % (PATH, POSTFIX)
# print(CMD)
child = subprocess.Popen(CMD,shell=True,stdout=subprocess.PIPE)
output = str(child.communicate()[0]).replace('b\'', '').replace('\\\'', '\'').replace('\\\\', '\\').replace('\\t', '\t')
output = re.sub(r'\\n\.', r'\n.', output)
# print(output)
for k in DB:
res = re.findall(DB[k], output, flags=re.S)
for s in res:
print(s)
print('-' * 77)
if __name__ == '__main__':
if (len(sys.argv) > 1):
traverse_source(sys.argv[1])
else:
traverse_source()
``` |
{
"source": "jiahao87/travel_chatbot",
"score": 3
} |
#### File: jiahao87/travel_chatbot/server.py
```python
from flask import Flask, request
from travel_planner_chatbot import reply_message
import json
import requests
import urllib.request
app = Flask(__name__)
FB_API_URL = 'https://graph.facebook.com/v2.6/me/messages'
VERIFY_TOKEN = "<VERIFY TOKEN>"  # <paste your verify token here>
PAGE_ACCESS_TOKEN = "<PAGE ACCESS TOKEN>"  # <paste your page access token here>
def get_name(sender):
profile_link = "https://graph.facebook.com/{}?fields=first_name&access_token={}".format(sender, PAGE_ACCESS_TOKEN)
with urllib.request.urlopen(profile_link) as url:
name = json.loads(url.read().decode())
name = name['first_name']
return name
def get_bot_response(message):
"""This function returns a response to what the user said."""
reply, type, state = reply_message(message)
return reply, type, state
def verify_webhook(req):
if req.args.get("hub.verify_token") == VERIFY_TOKEN:
return req.args.get("hub.challenge")
else:
return "incorrect"
def respond(sender, message):
"""Formulate a response to the user and
pass it on to a function that sends it."""
response, msg_type, state = get_bot_response(message)
if state == 0:
send_message(sender, "Hi {}!".format(get_name(sender)))
send_message(sender, response)
send_start(sender)
else:
if msg_type == "text":
send_message(sender, response)
elif msg_type == "url":
send_url(sender, response)
elif msg_type == "generic":
send_generic_template(sender, response)
elif msg_type == "quick":
send_quick_replies(sender, response)
def is_user_message(message):
"""Check if the message is a message from the user"""
return (message.get('message') and
message['message'].get('text') and
not message['message'].get("is_echo"))
def is_user_postback(message):
"""Check if the message is a message from the user"""
return (message.get('postback') and
message['postback'].get('payload'))
@app.route("/webhook",methods=['GET','POST'])
def listen():
"""This is the main function flask uses to
listen at the `/webhook` endpoint"""
if request.method == 'GET':
return verify_webhook(request)
if request.method == 'POST':
payload = request.json
event = payload['entry'][0]['messaging']
for x in event:
if is_user_message(x):
text = x['message']['text']
sender_id = x['sender']['id']
respond(sender_id, text)
elif is_user_postback(x):
text = x['postback']['payload']
sender_id = x['sender']['id']
respond(sender_id, text)
return "ok"
def send_message(recipient_id, text):
"""Send a response to Facebook"""
payload = {
'message': {
'text': text
},
'recipient': {
'id': recipient_id
},
'notification_type': 'regular'
}
auth = {
'access_token': PAGE_ACCESS_TOKEN
}
response = requests.post(
FB_API_URL,
params=auth,
json=payload
)
return response.json()
def send_url(recipient_id, url):
data = json.dumps({
"recipient": {"id": recipient_id},
"message": {
"attachment": {
"type": "image",
"payload": {
"url": url
}
}}
})
params = {
"access_token": PAGE_ACCESS_TOKEN
}
headers = {
"Content-Type": "application/json"
}
r = requests.post(FB_API_URL,
params=params,
headers=headers,
data=data)
def send_generic_template(recipient_id, elements):
data = json.dumps({
"recipient": {"id": recipient_id},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": elements
}
}}
})
params = {"access_token": PAGE_ACCESS_TOKEN}
headers = {"Content-Type": "application/json"}
r = requests.post(FB_API_URL,
params=params,
headers=headers,
data=data)
def send_start(recipient_id):
data = json.dumps({
"recipient": {"id": recipient_id},
"message": {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [
{
"title": "Gardens by the Bay",
"image_url": "https://res.klook.com/images/fl_lossy.progressive,q_65/c_fill,w_1295,h_720,f_auto/w_80,x_15,y_15,g_south_west,l_klook_water/activities/b26c5529-Gardens-by-the-Bay/GardensbytheBayTicketSingapore.jpg",
"subtitle": "Gardens by the Bay is a showpiece of horticulture and garden artistry in the city",
"buttons": [
{
"type": "postback",
"title": "Tell me more",
"payload": "tell_me_more gardens by the bay"
}
]
},
{
"title": "Universal Studios Singapore",
"image_url": "http://blog.raynatours.com/wp-content/uploads/2016/12/Universal-Studios-in-Singapore.jpg",
"subtitle": "Universal Studios is a theme park located within Resorts World Sentosa on Sentosa Island",
"buttons": [
{
"type": "postback",
"title": "Tell me more",
"payload": "tell_me_more universal studios"
}
]
},
{
"title": "<NAME>",
"image_url": "https://media.karousell.com/media/photos/products/2018/07/19/sea_aquarium_tickets_1532015525_d6347f03.jpg",
"subtitle": "Large aquarium & resort featuring 800 species of marine life",
"buttons": [
{
"type": "postback",
"title": "Tell me more",
"payload": "tell_me_more aquarium"
}
]
},
{
"title": "Jewel Changi Airport",
"image_url": "https://static.businessinsider.sg/2019/03/Jewel-Changi-Airport-image-1.jpg",
"subtitle": "Jewel is an airport mall with world's tallest indoor waterfall",
"buttons": [
{
"type": "postback",
"title": "Tell me more",
"payload": "tell_me_more jewel"
}
]
},
{
"title": "Singapore Zoo",
"image_url": "http://ttgasia.2017.ttgasia.com/wp-content/uploads/sites/2/2018/04/Singapore-Zoo.jpg",
"subtitle": "Set in a rainforest environment, Singapore Zoo adopts an open concept",
"buttons": [
{
"type": "postback",
"title": "Tell me more",
"payload": "tell_me_more zoo"
}
]
},
]
}
}}
})
params = {"access_token": PAGE_ACCESS_TOKEN}
headers = {"Content-Type": "application/json"}
r = requests.post(FB_API_URL,
params=params,
headers=headers,
data=data)
def send_quick_replies(recipient_id, text):
data = json.dumps({
"recipient": {"id": recipient_id},
"message": {
"text": text,
"quick_replies": [
{
"content_type": "text",
"title": "Indoor",
"payload": "find_activity indoor"
},
{
"content_type": "text",
"title": "Outdoor",
"payload": "find_activity outdoor"
},
{
"content_type": "text",
"title": "Shopping",
"payload": "find_activity shopping"
},
{
"content_type": "text",
"title": "Scenic",
"payload": "find_activity scenic"
},
{
"content_type": "text",
"title": "Nature",
"payload": "find_activity nature"
},
{
"content_type": "text",
"title": "Heritage",
"payload": "find_activity heritage"
}
]
}
})
params = {
"access_token": PAGE_ACCESS_TOKEN
}
headers = {
"Content-Type": "application/json"
}
r = requests.post(FB_API_URL,
params=params,
headers=headers,
data=data)
```
#### File: jiahao87/travel_chatbot/travel_planner_chatbot.py
```python
import re
import random
import urllib.request
import json
import numpy as np
from datetime import date
import joblib
import sqlite3
today = date.today()
intent_model = joblib.load('intent_classification.pkl')
count_vect = joblib.load('count_vect.pkl')
tf_transformer = joblib.load('tf_transformer.pkl')
labels = ['find_activity_type', 'tell_me_more']
def fetch_weather_forecast(today_date):
weather_api = "https://api.data.gov.sg/v1/environment/24-hour-weather-forecast?date={}".format(today_date)
with urllib.request.urlopen(weather_api) as url:
weather = json.loads(url.read().decode())
forecast = weather['items'][0]['periods'][0]['regions']
return forecast
#weather_forecast = fetch_weather_forecast(today)
def reply_message(message):
if match_intent(message) == 'greet':
reply = respond(message)
type = 'text'
state = 0
elif match_intent(message):
reply = respond(message)
type = 'text'
state = 3
elif message.lower() in activity_type.keys():
activity = find_activity_type(message.lower())
reply = combine_elements(activity)
type = 'generic'
state = 1
elif message[:12] == "tell_me_more":
reply = tell_me_more(message)
type = "text"
state = 1
elif intent_classification([message]) == 'find_activity_type':
if find_activity_type(message) is not None:
activity = find_activity_type(message)
reply = combine_elements(activity)
type = 'generic'
state = 1
else:
reply = "Sorry, I do not quite understand what activities you are finding. \n\n" \
"Could you choose one area below instead?"
type = 'quick'
state = 1
else:
reply = "Sorry, I do not quite understand what you are asking. \n\n" \
"Would you like to choose an area below to get started?"
type = 'quick'
state = 1
return reply, type, state
keywords = {'greet': ['hello', 'hi', 'hey', 'yo', 'greeting','whats up', 'gd morning', 'good morning', 'gd afternoon',
'good afternoon', 'gd evening', 'good evening', 'hi there'],
'goodbye': ['bye', 'farewell', 'goodbye', 'see you', 'see ya'],
'thanks': ['thank', 'thx', 'thks']
}
responses = {'greet': ["I'm Zapedo, your travel planner for day trip in Singapore. "
"Here are 5 suggestions for you. Feel free to also ask me for other suggestions, e.g., shopping"],
'goodbye': ['Have a good trip :)', 'Goodbye!', 'Enjoy your trip :)'],
'thanks': ['You are welcome', 'Thank you too']
}
# Define a dictionary of patterns
patterns = {}
# Iterate over the keywords dictionary
for intent, keys in keywords.items():
# Create regular expressions and compile them into pattern objects
patterns[intent] = re.compile('|'.join(keys))
# Define a function to find the intent of a message
def match_intent(message):
matched_intent = None
for intent, pattern in patterns.items():
# Check if the pattern occurs in the message
if re.search(pattern, message):
matched_intent = intent
return matched_intent
# Define a respond function
def respond(message):
# Call the match_intent function
intent = match_intent(message)
# Fall back to the default response
key = "default"
if intent in responses:
key = intent
return random.choice(responses[key])
# Use svm model to classify intent
def intent_classification(message):
p_count = count_vect.transform(message)
p_tfidf = tf_transformer.transform(p_count)
prob = intent_model.predict_proba(p_tfidf)
if np.max(prob) > 0.7:
intention = labels[int(np.argmax(prob))]
else:
intention = 'others'
return intention
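# Behaviour note (the sentence is illustrative, not from the training data):
# intent_classification(["where can I shop for souvenirs"]) only returns a label
# from `labels` when the classifier's top probability exceeds 0.7; anything less
# confident maps to 'others', so reply_message() falls back to the quick-reply menu.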
activity_type = {'indoor': ['indoor'],
'outdoor': ['outdoor'],
'shopping': ['shopping', 'shop', 'buy', 'souvenir', 'gift'],
'heritage': ['culture', 'museum'],
'scenic': ['scenic', 'view', 'scenery'],
'nature': ['nature', 'greenery']
}
# Define a dictionary of patterns
activity_keywords = {}
# Iterate over the keywords dictionary
for act_type, keys in activity_type.items():
# Create regular expressions and compile them into pattern objects
activity_keywords[act_type] = re.compile('|'.join(keys))
# if intention is to find suggestions for activity type, then find out activity type
def find_activity_type(message):
activity = None
for act_type, pattern in activity_keywords.items():
# Check if the pattern occurs in the message
if re.search(pattern, message):
activity = act_type
return activity
def query_criteria(activity):
criteria = {}
if activity == 'indoor':
criteria['indoor'] = 1
elif activity == 'outdoor':
criteria['outdoor'] = 1
elif activity == 'shopping':
criteria['shopping'] = 1
elif activity == 'heritage':
criteria['heritage'] = 1
elif activity == 'scenic':
criteria['scenic'] = 1
elif activity == 'nature':
criteria['nature'] = 1
return criteria
# Define find_hotels()
def find_places(criteria):
# Create the base query
query = 'SELECT * FROM places'
# Add filter clauses for each of the parameters
if len(criteria) > 0:
filters = ["{}=?".format(k) for k in criteria]
query += " WHERE " + " AND ".join(filters)
# Create the tuple of values
t = tuple(criteria.values())
# Open connection to DB
conn = sqlite3.connect("travel_places.db")
# Create a cursor
c = conn.cursor()
# Execute the query
c.execute(query, t)
# Return the results
return c.fetchall()
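# Illustration (assumes travel_places.db and its `places` table exist): a
# criteria dict such as {'shopping': 1} is expanded into
#
#     SELECT * FROM places WHERE shopping=?    with parameters (1,)
#
# so values are passed to sqlite3 as bound parameters rather than interpolated
# into the SQL string.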
def combine_elements(activity):
criteria = query_criteria(activity)
results = find_places(criteria)
elements = []
for result in results[:5]:
element = {}
element['title'] = result[1]
element['image_url'] = result[2]
element['subtitle'] = result[3]
element['buttons'] = [{"type": "postback", "title": "Tell me more", "payload": result[4]}]
elements.append(element)
return elements
def tell_me_more(message):
criteria = {'payload': message}
results = find_places(criteria)
description = results[0][5]
return description
``` |
{
"source": "jiahao95/hypertension_clustering",
"score": 3
} |
#### File: hypertension_clustering/clustering/cluster_analysis.py
```python
import pandas as pd
import numpy as np
from scipy import stats
from statistics import median
import os
path = ''
os.chdir(path)
clusters = pd.read_csv('clusters.csv')
#%%
def get_subgroups(df, n):
    dfs = []
    for i in range(1, n + 1):
        dfs.append(df.loc[df.labels == i])
    return dfs
# %%
def analyze_sex_race(df):
tot = len(df['rgnid'].unique())
male_percent = round((len(df.loc[df['sex']==0])/tot)*100,1)
female_percent = round((len(df.loc[df['sex']==1])/tot)*100,1)
male = len(df.loc[df['sex']==0])
female = len(df.loc[df['sex']==1])
Hispanic = len(df.loc[df['self_reported_race']=='Hispanic'])
African_American = len(df.loc[df['self_reported_race']=='African American'])
European_American = len(df.loc[df['self_reported_race']=='European American'])
Asian = len(df.loc[df['self_reported_race']=='Asian'])
Native_American = len(df.loc[df['self_reported_race']=='Native American'])
Other = len(df.loc[df['self_reported_race']=='Other'])
row = {'total':tot, 'male_in_%':male_percent, 'female_in_%': female_percent, 'male':male, 'female':female, 'Hispanic': Hispanic, 'African_American': African_American, 'Asian': Asian, 'European_American':European_American, 'Native_American': Native_American, 'Other':Other}
return row
# %%
def create_sex_race_table(dfs, n):
    l = []
    for df in dfs[:n]:
        l.append(analyze_sex_race(df))
    df = pd.DataFrame(l).T
    return df
# %%
dfs = get_subgroups(clusters, 6)
sex_df = create_sex_race_table(dfs, 6)
sex_df = sex_df.rename(columns={0:'cluster_1',1:'cluster_2',2:'cluster_3',3:'cluster_4',4:'cluster_5', 5:'cluster_6'})
sex_df.to_csv('gender_race_table.csv')
# %%
# load labs
labs_num = pd.read_csv('filtered_labs.csv')
# %%
def analyze_lab_res(df, label, path):
cluster_labs_num = pd.merge(labs_num,df, on='rgnid', how='inner').drop(columns=['reference_unit ','delta','sex', 'birth_date'])
labs = cluster_labs_num.groupby(["rgnid","test"])['value'].median().unstack().reset_index().rename_axis(None, axis=1)
converted_labs_median = df[['rgnid', 'self_reported_race']].merge(labs, on='rgnid')
converted_labs_median = converted_labs_median[['SODIUM-BLD','TRIGLYCERIDES','TROPONIN-I','TSH','U-CREATININE (CONC)', 'VENTRICULAR RATE', 'WB POTASSIUM , VEN', 'WB GLUCOSE , VEN','WHITE BLOOD CELL','WB SODIUM , VEN', 'WB UREA NITROGEN, VEN', 'WB NA , VEN (POCT)', 'WB LACTATE-VEN (POCT)', 'WB K , VEN (POCT)', 'WB GLUCOSE-VEN (POCT)', 'WB CO2, VEN', 'WB CHLORIDE, VEN', 'WB CA++ , VEN(POCT)', 'VITAMIN D, 25 HYDROXY', 'VITAMIN B12, SERUM', 'UROBILINOGEN', 'UREA NITROGEN-BLD', 'U-SPECIFIC GRAVITY', 'U-PH', 'TRANSFERRIN SAT', 'TOT CO2 , VEN(POCT)', 'TIBC', 'self_reported_race']]
converted_labs_median['labels'] = label
    col_medians = converted_labs_median.median(numeric_only=True)
converted_labs_median = converted_labs_median.fillna(col_medians)
converted_labs_median.to_csv(path+ label +'_labs.csv', index=False)
lab_summary = converted_labs_median.describe().T
lab_summary.to_csv(path + label +'_summary.csv')
return converted_labs_median
#%%
def prepare_for_anova(dfs, labels, path):
lab_final = []
for df,label in zip(dfs, labels):
res = analyze_lab_res(df = df, path = path, label = label)
lab_final.append(res)
df_anova = pd.concat(lab_final)
df_anova.to_csv(path+'lab_numerical_all_for_anova_w_etn.csv')
#%%
dfs = get_subgroups(clusters, 6)
labels = ['cluster_1', 'cluster_2', 'cluster_3', 'cluster_4', 'cluster_5', 'cluster_6']
prepare_for_anova(dfs = dfs, labels = labels, path = path)
# %%
import scipy.stats as stats
from statistics import mean
from sklearn import preprocessing
import pickle
from pickle import dump
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
import numpy as np
from scipy.stats import chi2_contingency
# %%
diag = pd.read_csv('diagnoses_w_age.csv')
diag = pd.merge(diag,clusters, on='rgnid', how='inner')
diag = diag[['rgnid','dx_name','labels','phecode', 'self_reported_race']]
diag = diag.drop_duplicates(subset=['rgnid', 'phecode'])
diag.to_csv('disease_cluster_table_w_etn.csv', index=False)
# medications
# %%
medication = pd.read_csv('/sc/arion/projects/rg_HPIMS/user/jiahao01/data_july/BioMe/BRSPD/meds_final.csv')
meds = pd.merge(medication,clusters, on='rgnid', how='inner')
meds = meds[['rgnid','event', 'labels', 'description']]
meds = meds.drop_duplicates(subset=['rgnid', 'event'])
meds.to_csv('medication_cluster_table_w_etn.csv', index=False)
# procedures
# %%
procedures = pd.read_csv('/sc/arion/projects/rg_HPIMS/user/jiahao01/data_july/BioMe/BRSPD/procedures.csv')
procedures = pd.merge(procedures,clusters, on='rgnid', how='inner')
procedures = procedures[['rgnid', 'proc_code', 'proc_description','labels','self_reported_race']]
procedures = procedures.drop_duplicates(subset=['rgnid','proc_code','proc_description'])
procedures.to_csv('procedure_table_w_etn.csv', index=False)
#blood pressure
# %%
bp = pd.read_csv('/sc/arion/projects/rg_HPIMS/user/jiahao01/data_july/BioMe/BRSPD/bp_w_age.csv')
#%%
def analyze_bp(df, path, i):
bp_num = pd.merge(bp,df, on='rgnid', how='inner').drop(columns=['diff','sex','birth_date'])
bp_num['sbp_median'] = bp_num.groupby(["rgnid"])['sbp'].transform('median')
bp_num['dbp_median'] = bp_num.groupby(["rgnid"])['dbp'].transform('median')
bp_num = bp_num[['rgnid', 'labels', 'sbp_median', 'dbp_median', 'self_reported_race']]
bp_num = bp_num.drop_duplicates()
bp_summary = bp_num.describe().T.rename(index={'sbp_median':'sbp_'+ str(i), 'dbp_median':'dbp_'+ str(i)})
bp_summary.to_csv(path +'/bp_summary_'+ str(i) +'.csv')
return bp_num
#%%
def prepare_bp_anova(dfs, path):
bps = []
for df, i in zip(dfs, range(1,(len(dfs)+1))):
bp_num = analyze_bp(df = df, path = path, i = i)
bps.append(bp_num)
df_anova = pd.concat(bps)
df_anova.to_csv(path+'blood_pressure_median_perperson.csv')
#%%
prepare_bp_anova(dfs=dfs, path = path)
#%%
from datetime import date
#%%
clusters['combined'] = pd.to_datetime('2020-07-15',format='%Y-%m-%d')
clusters['birth_date'] = pd.to_datetime(clusters['birth_date'], format='%Y-%m-%d')
clusters['delta'] = ((clusters['combined'] - clusters['birth_date'])/np.timedelta64(1,'D'))/365
age = clusters[['rgnid', 'labels', 'self_reported_race', 'delta']]
age.to_csv('age_wt_etn.csv', index = False)
# %%
bmi = pd.read_csv('/sc/arion/projects/rg_HPIMS/user/jiahao01/convae_architecture/recluster/bmi_reclustered.csv')
#%%
bmi_ = bmi[['rgnid', 'bmi', 'self_reported_race']].merge(clusters, on = 'rgnid')
#%%
bmi_.to_csv('/bmi.csv', index=False)
```
#### File: hypertension_clustering/multimodal model/data_loader.py
```python
from torch.utils.data import Dataset
from utils import ehr_len_padded, prs_len_padded
import random
import torch
import os
import csv
import pandas as pd
import ast
class EHRdata(Dataset):
def __init__(self, datadir, ehr_file, sampling):
self.ehr = {}
df = pd.read_csv(os.path.join(datadir, ehr_file), converters={'unique_concept_x':ast.literal_eval, 'unique_concept_y':ast.literal_eval})
df = df.sample(frac=1).reset_index(drop=True)
for row in df.itertuples():
seq = row.unique_concept_x
prs = row.unique_concept_y
ids = row.rgnid
if len(seq) < ehr_len_padded:
self.ehr[ids] = [seq + [0] * (ehr_len_padded - len(seq)),
prs + [0] * (prs_len_padded - len(prs))]
elif len(seq) % ehr_len_padded != 0:
nseq, nleft = divmod(len(seq), ehr_len_padded)
self.ehr[ids] = [seq + [0] * \
(ehr_len_padded - nleft),
prs + [0] * (prs_len_padded - len(prs))]
else:
self.ehr[ids] = [seq,prs + [0] * (prs_len_padded - len(prs))]
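        # Illustration (the real ehr_len_padded comes from utils; 32 is only an assumed
        # example value): a 20-code sequence is right-padded with zeros to length 32,
        # while a 40-code sequence falls into the elif branch and is padded to 64.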
# sub-sample dataset
if sampling is not None:
mrns = list(self.ehr.keys())
random.shuffle(mrns)
ehr = {}
for k in mrns[:sampling]:
ehr[k] = self.ehr[k]
self.ehr = ehr
self.ehr_list = [[mrn, term] for mrn, term in self.ehr.items()]
def __getitem__(self, index):
prs_seq = self.ehr_list[index][1][1]
ehr_seq = self.ehr_list[index][1][0]
pat = self.ehr_list[index][0]
return pat, ehr_seq, prs_seq
def __len__(self):
return len(self.ehr)
def ehr_collate(batch):
ehr_data = []
prs_data = []
mrn = []
for pat, ehr_seq, prs_seq in batch:
mrn.append(pat)
# print('length: ', len(ehr_seq))
# print(pat)
# print('ehr_seq: ', ehr_seq)
if len(ehr_seq) == ehr_len_padded and len(prs_seq) == prs_len_padded:
ehr_data.append(torch.tensor(
[ehr_seq], dtype=torch.long).view(-1, ehr_len_padded))
prs_data.append(torch.tensor([prs_seq], dtype=torch.long).view(-1, prs_len_padded))
elif len(ehr_seq) > ehr_len_padded and len(prs_seq) == prs_len_padded:
ps = []
for i in range(0, len(ehr_seq) - ehr_len_padded + 1,
ehr_len_padded + 1):
ps.append(ehr_seq[i:i + ehr_len_padded])
ehr_data.append(torch.tensor(
ps, dtype=torch.long).view(-1, ehr_len_padded))
prs_data.append(torch.tensor([prs_seq], dtype=torch.long).view(-1, prs_len_padded))
elif len(ehr_seq) == ehr_len_padded and len(prs_seq) > prs_len_padded:
pr = []
for j in range(0, len(prs_seq) - prs_len_padded + 1, prs_len_padded +1):
pr.append(prs_seq[j:j + prs_len_padded])
prs_data.append(torch.tensor(pr, dtype=torch.long).view(-1, prs_len_padded))
ehr_data.append(torch.tensor([ehr_seq], dtype=torch.long).view(-1, ehr_len_padded))
elif len(ehr_seq) > ehr_len_padded and len(prs_seq) > prs_len_padded:
ps = []
pr = []
for i in range(0, len(ehr_seq) - ehr_len_padded + 1, ehr_len_padded + 1):
ps.append(ehr_seq[i:i + ehr_len_padded])
ehr_data.append(torch.tensor(ps, dtype=torch.long).view(-1, ehr_len_padded))
for j in range(0, len(prs_seq) - prs_len_padded + 1, prs_len_padded +1):
pr.append(prs_seq[j:j + prs_len_padded])
prs_data.append(torch.tensor(pr, dtype=torch.long).view(-1, prs_len_padded))
else:
raise Warning(
'Not all sequences have length multiple than %d' % ehr_len_padded)
#print('prs_data ',prs_data)
#print('ehr_data ', ehr_data)
#print('mrn ', mrn)
return mrn, ehr_data, prs_data
``` |
{
"source": "jiahao95/project_lab-ss2020",
"score": 3
} |
#### File: project_lab-ss2020/Deep learning/convert_raw_spectra.py
```python
from pyteomics import mzml
import numpy as np
import pandas as pd
# %%
# define parsing function
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra = []
data = '/home/ubuntu/data/jiahao/trp/output/Run1_U4_2000ng.mzML'
file = mzml.MzML(data)
for i,entry in enumerate(file.map(_parse_mzml_entry)):
tupl = (data,)+entry
all_spectra.append(tupl)
# %%
# generate pandas dataframe
mzml_location, ids, mz, intensities = zip(*all_spectra)
spectra_df = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
# %%
#split the column id into 3
spectra_df[['controllertype', 'controllernumber', 'scan']] = spectra_df.id.str.split(expand=True)
# %%
#split scan into 2 columns to keep scan number in a single column
spectra_df[['SCAN', 'scan_number']] = spectra_df.scan.str.split("=", expand=True)
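# Illustration (a typical Thermo-style spectrum id, assumed here): an id such as
# "controllerType=0 controllerNumber=1 scan=5021" splits into the three columns above,
# and the second split leaves scan_number == "5021".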
#%%
# save data frame
spectra_df = spectra_df[['file', 'id', 'mz', 'intensities', 'scan_number']]
spectra_df.to_hdf('spectra_for_knn_search.hdf5', key="df", mode="w")
```
#### File: project_lab-ss2020/Deep learning/merge_train_set.py
```python
import os
from pyteomics import mzid, mzml
import pandas as pd
import numpy as np
import glob
"""
The downloaded files were manually split at random into several folders.
The code below is repeated once per folder with the same effect: it parses each folder's
mzid/mzML pairs into a pandas data frame and stores all of the frames in a single HDF5 file.
"""
#%%
os.chdir('./files/train')
mzid_files=glob.glob('*.mzid')
indexed_mzid = mzid.chain.from_iterable(mzid_files, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid = []
for entry in(indexed_mzid):
all_mzid.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid)
mzid_df = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra)
spectra_df = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df = pd.merge(mzid_df,spectra_df,how='left',on=['file','id'])
merged_df = merged_df[['id','seq','mz','intensities']]
#%%
hdf = pd.HDFStore('/home/ubuntu/data/jiahao/files/train.hdf5', mode="w")
hdf.put(value=merged_df, key="df")
#%%
os.chdir('./train_1')
mzid_files_1=glob.glob('*.mzid')
indexed_mzid_1 = mzid.chain.from_iterable(mzid_files_1, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_1 = []
for entry in(indexed_mzid_1):
all_mzid_1.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_1)
mzid_df_1 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_1 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_1.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_1)
spectra_df_1 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_1 = pd.merge(mzid_df_1,spectra_df_1,how='left',on=['file','id'])
merged_df_1 = merged_df_1[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_1, key="df1")
# %%
os.chdir('./train_2')
mzid_files_2=glob.glob('*.mzid')
indexed_mzid_2 = mzid.chain.from_iterable(mzid_files_2, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_2 = []
for entry in(indexed_mzid_2):
all_mzid_2.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_2)
mzid_df_2 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_2 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_2.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_2)
spectra_df_2 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_2 = pd.merge(mzid_df_2,spectra_df_2,how='left',on=['file','id'])
merged_df_2 = merged_df_2[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_2, key="df2")
#%%
os.chdir('./train_3')
mzid_files_3 = glob.glob('*.mzid')
indexed_mzid_3 = mzid.chain.from_iterable(mzid_files_3, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_3 = []
for entry in(indexed_mzid_3):
all_mzid_3.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_3)
mzid_df_3 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_3 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_3.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_3)
spectra_df_3 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_3 = pd.merge(mzid_df_3,spectra_df_3,how='left',on=['file','id'])
merged_df_3 = merged_df_3[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_3, key="df3")
#%%
os.chdir('./train_4')
mzid_files_4 = glob.glob('*.mzid')
indexed_mzid_4 = mzid.chain.from_iterable(mzid_files_4, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_4 = []
for entry in(indexed_mzid_4):
all_mzid_4.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_4)
mzid_df_4 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_4 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_4.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_4)
spectra_df_4= pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_4 = pd.merge(mzid_df_4,spectra_df_4,how='left',on=['file','id'])
merged_df_4 = merged_df_4[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_4, key="df4")
#%%
os.chdir('./train_5')
mzid_files_5 = glob.glob('*.mzid')
indexed_mzid_5 = mzid.chain.from_iterable(mzid_files_5, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_5 = []
for entry in(indexed_mzid_5):
all_mzid_5.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_5)
mzid_df_5 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_5 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_5.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_5)
spectra_df_5 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_5 = pd.merge(mzid_df_5,spectra_df_5,how='left',on=['file','id'])
merged_df_5 = merged_df_5[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_5, key="df5")
#%%
os.chdir('./train_6')
mzid_files_6 = glob.glob('*.mzid')
indexed_mzid_6 = mzid.chain.from_iterable(mzid_files_6, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_6 = []
for entry in(indexed_mzid_6):
all_mzid_6.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_6)
mzid_df_6 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_6 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_6.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_6)
spectra_df_6 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_6 = pd.merge(mzid_df_6,spectra_df_6,how='left',on=['file','id'])
merged_df_6 = merged_df_6[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_6, key="df6")
#%%
os.chdir('./train_7')
mzid_files_7 = glob.glob('*.mzid')
indexed_mzid_7 = mzid.chain.from_iterable(mzid_files_7, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_7 = []
for entry in(indexed_mzid_7):
all_mzid_7.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_7)
mzid_df_7 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_7 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_7.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_7)
spectra_df_7 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_7 = pd.merge(mzid_df_7,spectra_df_7,how='left',on=['file','id'])
merged_df_7 = merged_df_7[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_7, key="df7")
#%%
os.chdir('./train_8')
mzid_files_8 = glob.glob('*.mzid')
indexed_mzid_8 = mzid.chain.from_iterable(mzid_files_8, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_8 = []
for entry in(indexed_mzid_8):
all_mzid_8.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_8)
mzid_df_8 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_8 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_8.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_8)
spectra_df_8 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_8 = pd.merge(mzid_df_8,spectra_df_8,how='left',on=['file','id'])
merged_df_8 = merged_df_8[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_8, key="df8")
#%%
os.chdir('./train_9')
mzid_files_9 = glob.glob('*.mzid')
indexed_mzid_9 = mzid.chain.from_iterable(mzid_files_9, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_9 = []
for entry in(indexed_mzid_9):
all_mzid_9.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_9)
mzid_df_9 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_9 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_9.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_9)
spectra_df_9 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_9 = pd.merge(mzid_df_9,spectra_df_9,how='left',on=['file','id'])
merged_df_9 = merged_df_9[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_9, key="df9")
#%%
os.chdir('./train_10')
mzid_files_10 = glob.glob('*.mzid')
indexed_mzid_10 = mzid.chain.from_iterable(mzid_files_10, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_10 = []
for entry in(indexed_mzid_10):
all_mzid_10.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_10)
mzid_df_10 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_10 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_10.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_10)
spectra_df_10 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_10 = pd.merge(mzid_df_10,spectra_df_10,how='left',on=['file','id'])
merged_df_10 = merged_df_10[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_10, key="df10")
#%%
os.chdir('./train_11')
mzid_files_11 = glob.glob('*.mzid')
indexed_mzid_11 = mzid.chain.from_iterable(mzid_files_11, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_11 = []
for entry in(indexed_mzid_11):
all_mzid_11.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_11)
mzid_df_11 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_11 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_11.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_11)
spectra_df_11 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_11 = pd.merge(mzid_df_11,spectra_df_11,how='left',on=['file','id'])
merged_df_11 = merged_df_11[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_11, key="df11")
#%%
os.chdir('./train_12')
mzid_files_12 = glob.glob('*.mzid')
indexed_mzid_12 = mzid.chain.from_iterable(mzid_files_12, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_12 = []
for entry in(indexed_mzid_12):
all_mzid_12.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_12)
mzid_df_12 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_12 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_12.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_12)
spectra_df_12 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
merged_df_12 = pd.merge(mzid_df_12,spectra_df_12,how='left',on=['file','id'])
merged_df_12 = merged_df_12[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_12, key="df12")
# %%
os.chdir('./train_13')
mzid_files_13 = glob.glob('*.mzid')
indexed_mzid_13 = mzid.chain.from_iterable(mzid_files_13, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_13 = []
for entry in(indexed_mzid_13):
all_mzid_13.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_13)
mzid_df_13 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_13 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_13.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_13)
spectra_df_13 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
merged_df_13 = pd.merge(mzid_df_13,spectra_df_13,how='left',on=['file','id'])
merged_df_13 = merged_df_13[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_13, key="df13")
# %%
os.chdir('./train_14')
mzid_files_14 = glob.glob('*.mzid')
indexed_mzid_14 = mzid.chain.from_iterable(mzid_files_14, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_14 = []
for entry in(indexed_mzid_14):
all_mzid_14.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_14)
mzid_df_14 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_14 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_14.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_14)
spectra_df_14 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
merged_df_14 = pd.merge(mzid_df_14,spectra_df_14,how='left',on=['file','id'])
merged_df_14 = merged_df_14[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_14, key="df14")
# %%
os.chdir('./train_15')
mzid_files_15 = glob.glob('*.mzid')
indexed_mzid_15 = mzid.chain.from_iterable(mzid_files_15, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_15 = []
for entry in(indexed_mzid_15):
all_mzid_15.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_15)
mzid_df_15 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_15 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_15.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_15)
spectra_df_15 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
merged_df_15 = pd.merge(mzid_df_15,spectra_df_15,how='left',on=['file','id'])
merged_df_15 = merged_df_15[['id','seq','mz','intensities']]
#%%
hdf.put(value=merged_df_15, key="df15")
# %%
os.chdir('./train_16')
mzid_files_16 = glob.glob('*.mzid')
indexed_mzid_16 = mzid.chain.from_iterable(mzid_files_16, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_16 = []
for entry in(indexed_mzid_16):
all_mzid_16.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_16)
mzid_df_16 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_16 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_16.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_16)
spectra_df_16 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
merged_df_16 = pd.merge(mzid_df_16,spectra_df_16,how='left',on=['file','id'])
merged_df_16 = merged_df_16[['id','seq','mz','intensities']]
# %%
hdf.put(value=merged_df_16, key="df16")
# %%
os.chdir('./train_17')
mzid_files_17 = glob.glob('*.mzid')
indexed_mzid_17 = mzid.chain.from_iterable(mzid_files_17, use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_17 = []
for entry in(indexed_mzid_17):
all_mzid_17.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_17)
mzid_df_17 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_17 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_17.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_17)
spectra_df_17 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
merged_df_17 = pd.merge(mzid_df_17,spectra_df_17,how='left',on=['file','id'])
merged_df_17 = merged_df_17[['id','seq','mz','intensities']]
# %%
hdf.put(value=merged_df_17, key="df17")
```
#### File: project_lab-ss2020/Deep learning/merge_validation_set.py
```python
import os
from pyteomics import mzid, mzml
import pandas as pd
import numpy as np
import glob
"""
Exactly as with the training data set, we randomly divided the test files into different
folders, generated one data frame per folder, and stored all of them in a single HDF5 file
as our validation data set.
"""
#%%
os.chdir('./test')
mzid_files=glob.glob('*.mzid')
indexed_mzid = mzid.chain.from_iterable(mzid_files,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid = []
for entry in(indexed_mzid):
all_mzid.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid)
mzid_df = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra)
spectra_df = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df = pd.merge(mzid_df,spectra_df,how='left',on=['file','id'])
merged_df = merged_df[['id','seq','mz','intensities']]
#%%
hdf_test = pd.HDFStore('/home/ubuntu/data/jiahao/files/test.hdf5', mode='w')
#%%
hdf_test.put(value=merged_df, key="df")
#%%
os.chdir('./test_1')
mzid_files_1 = glob.glob('*.mzid')
indexed_mzid_1 = mzid.chain.from_iterable(mzid_files_1,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_1 = []
for entry in(indexed_mzid_1):
all_mzid_1.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_1)
mzid_df_1 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_1 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_1.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_1)
spectra_df_1 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_1 = pd.merge(mzid_df_1,spectra_df_1,how='left',on=['file','id'])
merged_df_1 = merged_df_1[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_1, key="df1")
#%%
os.chdir('./test_2')
mzid_files_2 = glob.glob('*.mzid')
indexed_mzid_2 = mzid.chain.from_iterable(mzid_files_2,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_2 = []
for entry in(indexed_mzid_2):
all_mzid_2.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_2)
mzid_df_2 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_2 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_2.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_2)
spectra_df_2 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_2 = pd.merge(mzid_df_2,spectra_df_2,how='left',on=['file','id'])
merged_df_2 = merged_df_2[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_2, key="df2")
#%%
os.chdir('./test_4')
mzid_files_4 = glob.glob('*.mzid')
indexed_mzid_4 = mzid.chain.from_iterable(mzid_files_4,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_4 = []
for entry in(indexed_mzid_4):
all_mzid_4.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_4)
mzid_df_4 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_4 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_4.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_4)
spectra_df_4 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_4 = pd.merge(mzid_df_4,spectra_df_4,how='left',on=['file','id'])
merged_df_4 = merged_df_4[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_4, key="df4")
#%%
os.chdir('./test_5')
mzid_files_5 = glob.glob('*.mzid')
indexed_mzid_5 = mzid.chain.from_iterable(mzid_files_5,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_5 = []
for entry in(indexed_mzid_5):
all_mzid_5.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_5)
mzid_df_5 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_5 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_5.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_5)
spectra_df_5 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_5 = pd.merge(mzid_df_5,spectra_df_5,how='left',on=['file','id'])
merged_df_5 = merged_df_5[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_5, key="df5")
#%%
os.chdir('./test_6')
mzid_files_6 = glob.glob('*.mzid')
indexed_mzid_6 = mzid.chain.from_iterable(mzid_files_6,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_6 = []
for entry in(indexed_mzid_6):
all_mzid_6.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_6)
mzid_df_6 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_6 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_6.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_6)
spectra_df_6 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_6 = pd.merge(mzid_df_6,spectra_df_6,how='left',on=['file','id'])
merged_df_6 = merged_df_6[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_6, key="df6")
# %%
os.chdir('./test_7')
mzid_files_7 = glob.glob('*.mzid')
indexed_mzid_7 = mzid.chain.from_iterable(mzid_files_7,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_7 = []
for entry in(indexed_mzid_7):
all_mzid_7.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_7)
mzid_df_7 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_7 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
all_spectra_7.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_7)
spectra_df_7 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
#### MERGE: mzid + mzml
merged_df_7 = pd.merge(mzid_df_7,spectra_df_7,how='left',on=['file','id'])
merged_df_7 = merged_df_7[['id','seq','mz','intensities']]
#%%
hdf_test.put(value=merged_df_7, key="df7")
# %%
os.chdir('./test_8')
mzid_files_8 = glob.glob('*.mzid')
indexed_mzid_8 = mzid.chain.from_iterable(mzid_files_8,use_index=True)
def _parse_mzid_entry(entry):
spectrum_id = str(entry['spectrumID'])
seq = str(entry['SpectrumIdentificationItem'][0]['PeptideSequence'])
try:
mods = entry['SpectrumIdentificationItem'][0]['Modification']
except:
mods = None
rank = int(entry['SpectrumIdentificationItem'][0]['rank'])
file_location = str(entry['name'])
return file_location,spectrum_id,seq,mods,rank
all_mzid_8 = []
for entry in(indexed_mzid_8):
all_mzid_8.append(_parse_mzid_entry(entry))
file_location,spectrum_ids,seq,mods,rank = zip(*all_mzid_8)
mzid_df_8 = pd.DataFrame({'file':file_location,'id':spectrum_ids,'seq':seq})
def _parse_mzml_entry(entry):
ID = str(entry['id'])
mz = np.array(entry['m/z array'])
intensities = np.array(entry['intensity array'])
return ID, mz, intensities
all_spectra_8 = []
for file in np.unique(file_location):
print(file)
indexed = mzml.MzML(file)
for i,entry in enumerate(indexed.map(_parse_mzml_entry)):
tupl = (file,)+entry
        all_spectra_8.append(tupl)
mzml_location, ids, mz, intensities = zip(*all_spectra_8)
spectra_df_8 = pd.DataFrame({'file':mzml_location,'id':ids,'mz':mz,'intensities':intensities})
merged_df_8 = pd.merge(mzid_df_8,spectra_df_8,how='left',on=['file','id'])
merged_df_8 = merged_df_8[['id','seq','mz','intensities']]
# %%
hdf.put(value=merged_df_8, key="df8")
```
#### File: project_lab-ss2020/KNN/spectra_embedder.py
```python
from pyteomics import mzml
import numpy as np
import pandas as pd
import tensorflow as tf
# %%
MZ_MAX=1900
SPECTRUM_RESOLUTION=2
k = 50
def set_k(new_k):
global k
k = new_k
return k
def tf_preprocess_spectrum(mz,intensity):
'''
converts a peaks list (mz,intensity) into a dense spectrum.
'''
#global MZ_MAX, SPECTRUM_RESOLUTION
n_spectrum = MZ_MAX * 10**SPECTRUM_RESOLUTION
mz = mz*10**SPECTRUM_RESOLUTION
# TODO: check this:
indices = tf.math.floor(mz)
indices = tf.cast(indices,tf.int64)
uniq_indices, i = tf.unique(indices)
# TODO: check what exactly to use here, sum, max, mean, ...
uniq_values = tf.math.segment_max(intensity,i)
# create as mask to truncate between min<mz<max
# eliminate zeros:
lower_bound = 100 * 10**SPECTRUM_RESOLUTION
notzero_mask = tf.math.greater(uniq_indices,tf.zeros_like(uniq_indices)+lower_bound)
# truncate :
trunc_mask = tf.math.less(uniq_indices,tf.zeros_like(uniq_indices)+n_spectrum)
# put into joint mask:
mask = tf.logical_and(notzero_mask,trunc_mask)
# apply mask:
uniq_indices = tf.boolean_mask(uniq_indices,mask)
uniq_indices = uniq_indices - lower_bound
uniq_values = tf.boolean_mask(uniq_values,mask)
#### workaroud, cause tf.SparseTensor only works with tuple indices, so with stack zeros
zeros = tf.zeros_like(uniq_indices)
uniq_indices_tuples = tf.stack([uniq_indices, zeros],axis = 1)
sparse_spectrum = tf.SparseTensor(indices = uniq_indices_tuples, values = uniq_values,dense_shape = [n_spectrum-lower_bound,1])
dense_spectrum = tf.sparse.to_dense(sparse_spectrum)
return dense_spectrum
def tf_maxpool(dense):
shape = dense.shape
dense = tf.reshape(dense,[1,-1,1,1])
#k = 100
n_spectrum = int(shape[0])
x, i = tf.nn.max_pool_with_argmax(dense,[1,k,1,1],[1,k,1,1],padding='SAME')
i0 = tf.constant(np.arange(0,n_spectrum,k))
i0 = tf.reshape(i0,[1,int(n_spectrum/k),1,1])
i = i-i0
return x,i
def tf_maxpool_with_argmax(dense,k):
dense = tf.reshape(dense,[-1,k])
x = tf.reduce_max(dense,axis=-1)
i = tf.math.argmax(dense,axis=-1)
return x,i
def ion_current_normalize(intensities):
total_sum = tf.reduce_sum(intensities**2)
normalized = intensities/total_sum
return normalized
def parse(mz,intensity):
'''
converts a peaks list (mz,intensity) into a two-vector spectrum
'''
intensity = ion_current_normalize(intensity)
spectrum_dense = tf_preprocess_spectrum(mz, intensity)
x,i = tf_maxpool_with_argmax(spectrum_dense,k=k)
x = tf.cast(x,tf.float32)
i = tf.cast(i,tf.float32)
i = i/tf.cast(k,tf.float32)
spectrum_two_vec = tf.stack([x,i],axis=1)
return spectrum_two_vec
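# Hedged usage sketch (assumes eager execution, which is disabled further below): with
# MZ_MAX=1900, SPECTRUM_RESOLUTION=2 and k=50, the dense spectrum spans
# 190000 - 10000 = 180000 bins, so parse() returns a [3600, 2] tensor of
# (windowed max intensity, normalised argmax offset) pairs, e.g.
#   parse(tf.constant([150.25, 300.5, 1200.75]), tf.constant([10.0, 5.0, 1.0]))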
#%%
# define data generator
def fire_up_generator(file_path="./ptm21.hdf5",n=1):
with pd.HDFStore(file_path) as hdf:
keys = np.array(hdf.keys())
df = pd.DataFrame()
for key in keys:
print(key)
df = df.append(hdf.select(key=key))
n = len(df)
print('n datapoints:',len(df))
global index
index = 0
def generator():
global index
r_entry = df
if index > (n-1):
index=0
mz,i = np.array(r_entry.iloc[index]['mz']), np.array(r_entry.iloc[index]['intensities'])
index+=1
yield mz,i
return generator
#%%
# create tf dataset
def get_dataset(generator, batch_size=1):
ds = tf.data.Dataset.from_generator(generator,output_types=(tf.float32,tf.float32),output_shapes=(None,None))
ds = ds.map(lambda mz,intensities: parse(mz,intensities), num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = ds.repeat(175921) #number of spectra contained (len(df))
ds = ds.batch(batch_size=batch_size)
return ds
#%%
# disable eager execution
tf.compat.v1.disable_eager_execution()
# %%
# read
spectra = get_dataset(fire_up_generator('/home/ubuntu/data/jiahao/my_spectra.hdf5'))
# %%
# recall the model
spectrum_model = tf.keras.models.load_model('/home/ubuntu/data/jiahao/files/_model_relu/spectrum_model',compile=False)
base_model = tf.keras.models.load_model('/home/ubuntu/data/jiahao/files/_model_relu/siamese_model',compile=False)
intermediate_out = spectrum_model.output
base_out = base_model(intermediate_out)
spec_model = tf.keras.Model(inputs=spectrum_model.input, outputs=base_out)
#%%
# compute spectra embeddings
spec_features = spec_model.predict(spectra)
# %%
np.save('spectra_embeddings.npy',spec_features)
``` |
{
"source": "JiahaoChenConor/INFO1110-Acorn",
"score": 4
} |
#### File: JiahaoChenConor/INFO1110-Acorn/game_parser.py
```python
from cells import (
Start,
End,
Air,
Wall,
Fire,
Water,
Teleport
)
def read_lines(filename):
"""Read in a file, process them
using parse(), and return the contents as a list of list of cells."""
try:
f = open(filename, 'r')
except FileNotFoundError:
print("{} does not exist!".format(filename))
exit()
lines = f.readlines()
f.close()
return parse(lines)
def parse(lines):
"""Transform the input into a grid.
Arguments:
lines -- list of strings representing the grid
Returns:
list -- contains list of lists of Cells
"""
# Transform the input into a nested list which can be changed
grid = [list(line.strip()) for line in lines]
char = {' ': 0, 'X': 0, 'Y': 0, '*': 0, 'F': 0, 'W': 0,
'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0, '7': 0, '8': 0, '9': 0}
for i in range(len(grid)):
for j in range(len(grid[i])):
c = str(grid[i][j])
# Invalid cases
if c not in char:
raise ValueError("Bad letter in configuration file: {}.".format(c))
else:
char[c] += 1
# initial the objects
# The purpose of initializing the coordinates of each object
# is to establish the adjacency list in solver.py
if c == ' ':
grid[i][j] = Air()
grid[i][j].x = i
grid[i][j].y = j
elif c == 'X':
grid[i][j] = Start()
grid[i][j].x = i
grid[i][j].y = j
elif c == 'Y':
grid[i][j] = End()
grid[i][j].x = i
grid[i][j].y = j
elif c == '*':
grid[i][j] = Wall()
grid[i][j].x = i
grid[i][j].y = j
elif c == 'F':
grid[i][j] = Fire()
grid[i][j].x = i
grid[i][j].y = j
elif c == 'W':
grid[i][j] = Water()
grid[i][j].x = i
grid[i][j].y = j
elif c in '123456789':
grid[i][j] = Teleport()
# change the attribute
teleport = grid[i][j]
teleport.display = int(c)
grid[i][j].x = i
grid[i][j].y = j
# other invalid cases
if char['X'] != 1:
raise ValueError("Expected 1 starting position, got {}.".format(char['X']))
if char['Y'] != 1:
raise ValueError("Expected 1 ending position, got {}.".format(char['Y']))
for t in '123456789':
if char[t] != 2 and char[t] != 0:
raise ValueError("Teleport pad {} does not have an exclusively matching pad.".format(t))
return grid
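# Hedged usage sketch (a hand-written level, not from the original test data):
#   parse(["*****", "*X Y*", "*****"]) returns a 3x5 grid of Wall/Start/Air/End cells,
#   while a second 'X' or an unmatched teleport digit raises ValueError as above.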
```
#### File: JiahaoChenConor/INFO1110-Acorn/solver.py
```python
from game_parser import read_lines
import sys
from cells import (
Start,
End,
Air,
Wall,
Fire,
Water,
Teleport
)
import itertools
# Adjacency list
class Graph:
def __init__(self):
self.vertList = {}
def addEdge(self, f, t):
if f not in self.vertList:
self.vertList[f] = []
if t not in self.vertList:
self.vertList[t] = []
self.vertList[f].append(t)
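# e.g. g.addEdge(cell_a, cell_b) records a directed edge cell_a -> cell_b, so after
# createDiagram() below builds the grid graph, vertList maps each cell to the cells
# reachable in one move.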
# achieve bfs
class Queue:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def enqueue(self, item):
self.items.insert(0, item)
def dequeue(self):
return self.items.pop()
def size(self):
return len(self.items)
# achieve dfs
class Stack:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peak(self):
return self.items[len(self.items) - 1]
def size(self):
return len(self.items)
if __name__ == "__main__":
grid = read_lines(sys.argv[1])
row = len(grid)
column = len(grid[0])
def findTheOtherTeleport(grid, i, j):
for m in range(row):
for n in range(column):
if grid[m][n].display == grid[i][j].display and grid[m][n] != grid[i][j]:
return grid[m][n]
def findStart():
for i in range(row):
for j in range(column):
if grid[i][j].display == 'X':
start = grid[i][j]
return start
def findEnd():
for i in range(row):
for j in range(column):
if grid[i][j].display == 'Y':
end = grid[i][j]
return end
# Initialization diagram
def createDiagram():
g = Graph()
dx = [0, 1, -1, 0]
dy = [1, 0, 0, -1]
for i in range(row):
for j in range(column):
# This is for the special case when a teleport is surrounded by walls
if isinstance(grid[i][j].display, int):
theOther = findTheOtherTeleport(grid, i, j)
g.addEdge(grid[i][j], theOther)
for k in range(4):
x = i + dx[k]
y = j + dy[k]
if 0 <= x <= row - 1 and 0 <= y <= column - 1 and grid[i][j].display != '*' \
and grid[x][y].display != '*' and grid[i][j].display != 'Y':
if isinstance(grid[x][y].display, int):
theOther = findTheOtherTeleport(grid, x, y)
g.addEdge(grid[i][j], theOther)
else:
g.addEdge(grid[i][j], grid[x][y])
return g
## these can check the coordinates in adjacency list
# g = createDiagram()
# for f in g.vertList.keys():
# print((f.x,f.y), [(t.x, t.y) for t in g.vertList[f]])
def judge(cur, next):
if next[0] - cur[0] == 1 and next[1] == cur[1]:
return 's'
elif next[0] - cur[0] == -1 and next[1] == cur[1]:
return 'w'
elif next[1] - cur[1] == 1 and next[0] == cur[0]:
return 'd'
elif next[1] - cur[1] == -1 and next[0] == cur[0]:
return 'a'
else:
return None
# change the coordinates into moves
def move(track):
dx = [0, 1, -1, 0]
dy = [1, 0, 0, -1]
moves = []
for i in range(len(track) - 1):
cur, next = track[i], track[i + 1]
m = judge(cur, next)
if m:
moves.append(m)
            # If the current coordinate and the next coordinate do not differ
            # by exactly one row or exactly one column, there are two possibilities
else:
# 1. the next coordinate is teleport
noWay = True
for j in range(4):
cell = grid[cur[0] + dx[j]][cur[1] + dy[j]]
if isinstance(cell.display, int):
next = [cell.x, cell.y]
m = judge(cur, next)
moves.append(m)
noWay = False
                # 2. the current coordinate is a teleport: wait ('e') because there is no way out except teleporting
if noWay == True:
moves.append('e')
return moves
    # returns +1 for water, -1 for fire, 0 otherwise
def waterFire(cell):
if cell.display == 'W':
return 1
elif cell.display == 'F':
return -1
else:
return 0
def dfs(start):
end = findEnd()
g = createDiagram()
stack = Stack()
water = 0
water += waterFire(start)
# Record the valid path
track = []
stack.push(start)
coordinate = (start.x, start.y)
track.append(coordinate)
# Record the points that have been passed
searched = [start]
next = None
while not stack.isEmpty():
# if I change cur, it will also change stack.items[-1]!!
cur = stack.items[-1]
water += waterFire(cur)
            # if we meet water, clear the searched set
if cur.display == 'W':
searched.clear()
cur.display = ' '
# for-else: If a "break" is executed in the "for" loop, then "else" will not be executed
for next in g.vertList[cur]:
if next not in searched:
# check the next node considering the fire
water += waterFire(next)
if water >= 0:
stack.push(next)
# update the track
track.append((next.x, next.y))
                        # mark this node as used
searched.append(next)
water -= waterFire(next)
break
water -= waterFire(next)
# backtrack
else:
water -= waterFire(cur)
stack.pop()
track.pop()
# if reach the end, break the loop
if next == end:
break
return track
    # First use bfs to find how many fires are on the shortest path,
    # then collect all water objects on the map into a list.
    # For example, if there are three fires on the shortest way to the End
    # and five waters on the map, try every permutation of three waters out of the five.
    # So it is essentially brute force + BFS
def bfs(start, end):
g = createDiagram()
queue = Queue()
queue.enqueue(start)
# record the points which have been searched
searched = [start]
while not queue.isEmpty():
cur = queue.dequeue()
for node in g.vertList[cur]:
if node not in searched:
if node == end:
searched.append(node)
# the shortest path
path = get_path(searched, g)
return path
# if the current point is not the end, enqueue all its adjacent points
else:
queue.enqueue(node)
searched.append(node)
return []
# Find the path
def get_path(searched, g):
end = searched[-1]
path = [(end.x, end.y)]
while end != searched[0]:
for i in searched:
if end in g.vertList[i]:
end = i
path.append((end.x, end.y))
break
return path[::-1]
def search():
global solution_found
global ans
if sys.argv[2] == 'DFS':
track = dfs(findStart())
ans = move(track)
solution_found = True
if sys.argv[2] == 'BFS':
fireNum = 0
track = bfs(findStart(), findEnd())
            # The first bfs finds how many fires are on the shortest path,
            # so we can decide how much water we need
for coord in track:
if grid[coord[0]][coord[1]].display == 'F':
fireNum += 1
waterObjects = []
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j].display == 'W':
waterObjects.append(grid[i][j])
# Is there enough water?
if fireNum > len(waterObjects):
solution_found = False
return
# Full arrangement
comb = list(itertools.permutations(waterObjects, fireNum))
paths = []
for waters in comb:
# waters is the combination of water objects, such as (water2, water3, water1)
start = findStart()
end = findEnd()
waters = list(waters)
waters.append(end)
path = []
path += bfs(start, waters[0])
for i in range(len(waters)-1):
start, end = waters[i], waters[i+1]
                # If we directly connect the two paths, the water's coordinates would be repeated, so we drop the first element with [1:]
path += bfs(start, end)[1:]
paths.append(path)
# Find the shortest path
shortest = float('inf')
ans = None
for way in paths:
if len(way) < shortest:
shortest = len(way)
ans = way
ans = move(ans)
solution_found = True
solution_found = False
ans = []
search()
if len(ans) == 0:
solution_found = False
if solution_found:
print("Path has {} moves.".format(len(ans)))
print("Path: " + ', '.join(ans))
else:
print("There is no possible path.")
```
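The solver encodes each step as a keyboard move (`w`/`a`/`s`/`d`, plus `e` for waiting on a teleport pad), and the script itself is presumably run as `python solver.py <map.txt> DFS|BFS`. A small self-contained sketch (illustrative, not repository code) of the delta-to-move mapping that `judge()` implements:
```python
# Rows grow downwards, so +1 row is 's' (down) and -1 row is 'w' (up);
# 'e' (wait) is emitted when the only way forward is a teleport.
deltas = {(1, 0): 's', (-1, 0): 'w', (0, 1): 'd', (0, -1): 'a'}

cur, nxt = (2, 3), (2, 4)
print(deltas[(nxt[0] - cur[0], nxt[1] - cur[1])])  # -> 'd' (one column to the right)
```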
#### File: JiahaoChenConor/INFO1110-Acorn/test_game.py
```python
from game import Game
def test_game():
game = Game('test_game.txt')
game.setStart()
assert [game.player.row, game.player.col] == [0, 2], 'Method setStart failed'
print('test1 passed: Method setStart')
assert game.row == 4, 'game.row failed'
print('test2 passed: game.row passed')
assert game.column == 6, 'game.column failed'
print('test3 passed: game.column passed')
"""Positive test cases"""
# step into air
game.coordinateUpdate('s', game)
new = game.showGrid()
assert game.moves == ['s'], 'game.moves failed'
assert game.movesNum == 1, 'game.movesNum failed'
print('test4 passed: game attributes are correct')
assert new == '**X***\n' \
'*1A *\n' \
'*W F1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', 'Step into Air failed'
print('test5 passed: Step into Air')
# step into teleport
game.coordinateUpdate('a', game)
new = game.showGrid()
assert game.moves == ['s', 'a'], 'game.moves failed'
assert game.movesNum == 2, 'game.movesNum failed'
print('test6 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'*W FA*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', 'Step into Teleport failed'
print('test7 passed: Step into Teleport')
# step into water
for move in ['w', 'a', 'a', 's', 'a']:
game.coordinateUpdate(move, game)
new = game.showGrid()
assert game.moves == ['s', 'a', 'w', 'a', 'a', 's', 'a'], 'game.moves failed'
assert game.movesNum == 7, 'game.movesNum failed'
print('test8 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'*A F1*\n' \
'* Y**\n\n' \
'You have 1 water bucket.', 'Step into Water failed'
print('test9 passed: Step into Water')
# step into fire with water
for move in ['d', 'd']:
game.coordinateUpdate(move, game)
new = game.showGrid()
assert game.moves == ['s', 'a', 'w', 'a', 'a', 's', 'a', 'd', 'd'], 'game.moves failed'
assert game.movesNum == 9, 'game.movesNum failed'
print('test10 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'* A1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', 'Step into Fire with water failed'
print('test11 passed: Step into Fire with water passed')
# reach the end successfully
game.coordinateUpdate('s', game)
new = game.showGrid()
assert game.moves == ['s', 'a', 'w', 'a', 'a', 's', 'a', 'd', 'd', 's'], 'game.moves failed'
assert game.movesNum == 10, 'game.movesNum failed'
print('test12 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'* 1*\n' \
'* A**\n\n' \
'You have 0 water buckets.', 'Step into End failed'
print('test13 passed: Step into End')
# step into fire without water
game2 = Game('test_game.txt')
game2.setStart()
for move in ['s', 'd', 's']:
game2.coordinateUpdate(move, game2)
new = game2.showGrid()
assert game2.moves == ['s', 'd', 's'], 'game.moves failed'
assert game2.movesNum == 3, 'game.movesNum failed'
print('test14 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'*W A1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', 'Step into Fire without water failed'
print('test15 passed: Step into Fire without water')
# test command 'e'
game = Game('test_game.txt')
game.setStart()
for move in ['s', 'e']:
game.coordinateUpdate(move, game)
new = game.showGrid()
assert game.moves == ['s', 'e'], 'game.moves failed'
assert game.movesNum == 2, 'game.movesNum failed'
print('test16 passed: game attributes are correct')
assert new == '**X***\n' \
'*1A *\n' \
'*W F1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', "Command 'e' failed"
print("test17 passed: Command 'e' passed")
# test command 'q'
game.coordinateUpdate('q', game)
new = game.showGrid()
assert game.moves == ['s', 'e'], 'game.moves failed'
assert game.movesNum == 2, 'game.movesNum failed'
print('test18 passed: game attributes are correct')
assert new == '**X***\n' \
'*1A *\n' \
'*W F1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', "Command 'q' failed"
print("test19 passed: Command 'q' passed")
# test uppercase
    # the recorded moves stay uppercase, because they are only converted to lowercase inside cell.step()
for move in ['D']:
game.coordinateUpdate(move, game)
new = game.showGrid()
assert game.moves == ['s', 'e', 'D'], 'game.moves failed'
assert game.movesNum == 3, 'game.movesNum failed'
print('test20 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 A *\n' \
'*W F1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', "uppercase failed"
print("test21 passed: uppercase passed")
print('positive tests all passed')
"""Negative cases"""
# invalid input
game.coordinateUpdate('asc', game)
new = game.showGrid()
assert game.moves == ['s', 'e', 'D'], 'game.moves failed'
assert game.movesNum == 3, 'game.movesNum failed'
print('test22 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 A *\n' \
'*W F1*\n' \
'* Y**\n\n' \
'You have 0 water buckets.', "invalid input failed"
print("test23 passed: invalid input passed")
print('negative tests all passed')
"""Edge case"""
# go beyond the map
for move in ['a', 's', 's', 's']:
game.coordinateUpdate(move, game)
new = game.showGrid()
assert game.moves == ['s', 'e', 'D', 'a', 's', 's'], 'game.moves failed'
assert game.movesNum == 6, 'game.movesNum failed'
print('test24 passed: game attributes are correct')
assert new == '**X***\n' \
'*1 *\n' \
'*W F1*\n' \
'* AY**\n\n' \
'You have 0 water buckets.', "go beyond the map failed"
print("test25 passed: go beyond the map passed")
print('edge tests all passed')
def run_tests():
test_game()
print('test_game all passed \n')
```
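Both test modules in this record (`test_game.py` above and `test_parser.py` below) expose a `run_tests()` helper; a minimal driver sketch (not part of the repository), assuming the repository's `test_game.txt` map file is present in the working directory:
```python
# Illustrative test driver; the repository presumably has its own runner.
from test_game import run_tests as run_game_tests
from test_parser import run_tests as run_parser_tests

run_game_tests()
run_parser_tests()
```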
#### File: JiahaoChenConor/INFO1110-Acorn/test_parser.py
```python
from game_parser import parse
from cells import (
Start,
End,
Air,
Wall,
Fire,
Water,
Teleport
)
def test_parse():
"""Positive test"""
lines1 = ['**X**',
'*1 1*',
'**Y**']
grid = parse(lines1)
for i in range(3):
for j in range(5):
if i == 0 and j == 2:
assert isinstance(grid[i][j], Start), 'test1 Start failed'
elif (i == 1 and j == 1) or (i == 1 and j == 3):
assert isinstance(grid[i][j], Teleport), 'test1 Teleport failed'
assert grid[i][j].display == 1, 'test1 Teleport failed'
elif i == 2 and j == 2:
assert isinstance(grid[i][j], End), 'test1 End failed'
elif i == 1 and j == 2:
assert isinstance(grid[i][j], Air), 'test1 Air failed'
else:
assert isinstance(grid[i][j], Wall), 'test1 Wall failed'
print('test1 passed')
    print('positive tests all passed')
"""Negative tests"""
# without Start
lines2 = ['***',
'* *',
'*Y*']
excepted = ValueError("Expected 1 starting position, got 0.")
try:
parse(lines2)
except ValueError as e:
assert str(excepted) == str(e), 'test2 failed.'
print('test2 passed')
# two instances of End class
lines3 = ['*X*',
'* *',
'*YY']
excepted = ValueError("Expected 1 ending position, got 2.")
try:
parse(lines3)
except ValueError as e:
assert str(excepted) == str(e), 'test3 failed.'
print('test3 passed')
# display of teleport is not in 1-9
lines4 = ['**X**',
'*0 0*',
'**Y**']
excepted = ValueError('Bad letter in configuration file: 0.')
try:
parse(lines4)
except ValueError as e:
assert str(excepted) == str(e), 'test4 failed.'
print('test4 passed')
# teleports don't come in pairs
lines5 = ['**X**',
'*1 1*',
'*1 *',
'**Y**']
excepted = ValueError("Teleport pad 1 does not have an exclusively matching pad.")
try:
parse(lines5)
except ValueError as e:
assert str(excepted) == str(e), 'test5 failed.'
print('test5 passed')
    print('negative tests all passed')
"""Edge tests"""
    # if the file is empty, parse raises the first error (zero starting positions)
lines6 = []
excepted = ValueError("Expected 1 starting position, got 0.")
try:
parse(lines6)
except ValueError as e:
assert str(excepted) == str(e), 'test6 failed.'
print('test6 passed')
    # not all elements in the list are strings
line7 = ['**X*',
1122,
'**Y*']
excepted = AttributeError("'int' object has no attribute 'strip'")
try:
parse(line7)
except AttributeError as e:
assert str(excepted) == str(e), 'test7 failed'
print('test7 passed')
print('edge tests all passed')
def run_tests():
test_parse()
print('test_parse all passed\n')
``` |
{
"source": "JiahaoChenConor/INFO2222-Simple_website",
"score": 3
} |
#### File: INFO2222-Simple_website/project_src/controller.py
```python
import bottle
import model
from verification import validate_picture
from io import BytesIO
import base64
app = application = bottle.Bottle()
cookie_secret = '1B32E674-443E-4602-89EA-643ACF6FD637'
# -----------------------------------------------------------------------------
# Static file paths
# -----------------------------------------------------------------------------
# Allow image loading
@app.route('/img/<picture:path>')
def serve_pictures(picture):
"""
serve_pictures
Serves images from static/img/
:: picture :: A path to the bottle.requested picture
Returns a static file object containing the bottle.requested picture
"""
return bottle.static_file(picture, root='static/img/')
# -----------------------------------------------------------------------------
# Allow CSS
@app.route('/css/<css:path>')
def serve_css(css):
"""
serve_css
Serves css from static/css/
:: css :: A path to the bottle.requested css
Returns a static file object containing the bottle.requested css
"""
return bottle.static_file(css, root='static/css/')
# -----------------------------------------------------------------------------
# Allow javascript
@app.route('/js/<js:path>')
def serve_js(js):
"""
serve_js
Serves js from static/js/
:: js :: A path to the bottle.requested javascript
Returns a static file object containing the bottle.requested javascript
"""
return bottle.static_file(js, root='static/js/')
# -----------------------------------------------------------------------------
# Pages
# -----------------------------------------------------------------------------
@app.get('/knowledge')
def get_knowledge():
return model.header(bottle.request, model.knowledge)
# Redirect to login
@app.get('/')
@app.get('/home')
def get_index():
"""
get_index
Serves the index page
"""
return model.header(bottle.request, model.index)
# -----------------------------------------------------------------------------
# Display the login page
@app.get('/login')
def get_login_controller():
"""
get_login
Serves the login page
"""
data = set_verification(bottle.response)
return model.login_form(data)
# -----------------------------------------------------------------------------
# Attempt the login
@app.post('/login')
def post_login():
"""
post_login
Handles login attempts
Expects a form containing 'username' and 'password' fields
"""
# Handle the form processing
username = bottle.request.forms.get('username')
if len(username) > 255:
return model.header(bottle.request, model.custom_error, 'Username too long! ', '/login')
password = bottle.request.forms.get('password')
verification = bottle.request.forms.get('verification')
true_verification = bottle.request.get_cookie('verification', secret=cookie_secret)
if verification != true_verification:
return model.header(bottle.request, model.custom_error, 'Wrong Verification! ', '/login')
result = model.login_check(username, password)
if result is not None:
bottle.response.set_cookie("login", result[1], secret=cookie_secret)
return model.header(bottle.request, model.custom_error,
'This username-password combination is valid, welcome! ', '/')
else:
return model.header(bottle.request, model.custom_error,
'This username-password combination is invalid, try again! ',
'/login')
# -----------------------------------------------------------------------------
# Display the register page
@app.get('/register')
def get_register_controller():
"""
    get_register
    Serves the register page
"""
data = set_verification(bottle.response)
return model.register_form(data)
# -----------------------------------------------------------------------------
# Attempt the register
@app.post('/register')
def post_register():
# Handle the form processing
username = bottle.request.forms.get('username')
if len(username) > 255:
        return model.header(bottle.request, model.custom_error, 'Username too long! ', '/register')
password = bottle.request.forms.get('password')
password_confirmation = bottle.request.forms.get('password-confirmation')
verification = bottle.request.forms.get('verification')
true_verification = bottle.request.get_cookie('verification', secret=cookie_secret)
if verification != true_verification:
return model.header(bottle.request, model.custom_error, 'Wrong Verification! ', '/register')
return model.register(username, password, password_confirmation)
# -----------------------------------------------------------------------------
@app.get('/about')
def get_about():
"""
get_about
Serves the about page
"""
return model.header(bottle.request, model.about)
# -----------------------------------------------------------------------------
# Help with debugging
@app.post('/debug/<cmd:path>')
def post_debug(cmd):
return model.debug(cmd)
# -----------------------------------------------------------------------------
# 404 errors, use the same trick for other types of errors
@app.error(404)
def error(error):
return model.handle_errors(error)
# -----------------------------------------------------------------------------
@app.get('/forum')
def forum_page():
thread_id = bottle.request.query.thread_id
data = set_verification(bottle.response)
return model.header(bottle.request, model.forum_thread, thread_id, data)
@app.post('/forum')
def form_reply():
verification = bottle.request.forms.get('verification')
true_verification = bottle.request.get_cookie('verification', secret=cookie_secret)
if verification != true_verification:
return model.header(bottle.request, model.custom_error, 'Wrong Verification! ', '/forumtopic')
cookie = bottle.request.get_cookie("login", secret=cookie_secret)
user_id = model.verify_cookie(cookie)
if user_id is None:
return model.header(bottle.request, model.custom_error, 'You cannot reply if not logged in!', '/login')
user_id = user_id[0]
thread_id = bottle.request.query.thread_id
comment = bottle.request.forms.get("comment")
result = model.reply(thread_id, user_id, comment)
if result:
return model.header(bottle.request, model.custom_error, 'Reply successful!', '/forumtopic')
else:
return model.header(bottle.request, model.custom_error, 'Reply failed!', '/forumtopic')
# -----------------------------------------------------------------------------
@app.get('/forumtopic')
def forum_topic_page():
topic_id = bottle.request.query.topic_id
return model.header(bottle.request, model.forum_topic, bottle.request, topic_id)
# admin function delete post
@app.post('/forumtopic')
def delete_forum():
is_admin = model.is_admin(bottle.request)
if is_admin:
thread_id = bottle.request.forms.get('thread_id')
res = model.delete_post_by_id(thread_id)
if res:
return model.header(bottle.request, model.custom_error, 'Restoration/Deletion successful', '/forumtopic')
else:
return model.header(bottle.request, model.custom_error, 'Restoration/Deletion failed', '/forumtopic')
return model.header(bottle.request, model.custom_error, 'Restoration/Deletion failed', '/forumtopic')
# -----------------------------------------------------------------------------
@app.get('/allusers')
def all_users_page():
return model.header(bottle.request, model.all_users, bottle.request)
# admin function mute user
@app.post('/allusers')
def mute_user():
is_admin = model.is_admin(bottle.request)
if is_admin:
user_id = bottle.request.forms.get('user_id')
res = model.mute_user(user_id)
if res:
return model.header(bottle.request, model.custom_error, 'Mute/unmute successful', '/allusers')
else:
return model.header(bottle.request, model.custom_error, 'Mute/unmute failed', '/allusers')
return model.header(bottle.request, model.custom_error, 'Mute/unmute failed', '/allusers')
# -----------------------------------------------------------------------------
@app.get('/post')
def post_page():
data = set_verification(bottle.response)
return model.header(bottle.request, model.post_get, data)
@app.post('/post')
def post_request():
verification = bottle.request.forms.get('verification')
true_verification = bottle.request.get_cookie('verification', secret=cookie_secret)
if verification != true_verification:
return model.header(bottle.request, model.custom_error, 'Wrong Verification! ', '/post')
cookie = bottle.request.get_cookie("login", secret=cookie_secret)
result = model.verify_cookie(cookie)
if result is None:
return model.header(bottle.request, model.custom_error, 'You cannot post if not logged in!', '/login')
user_id = result[0]
muted = result[4]
if muted == 1:
return model.header(bottle.request, model.custom_error, "You're Muted", '/post')
thread_id = bottle.request.forms.get('thread_id')
subject = bottle.request.forms.get('subject')
if len(subject) > 255:
return model.header(bottle.request, model.custom_error, "Subject too long", '/post')
content = bottle.request.forms.get('content')
result = model.post_post(user_id, thread_id, subject, content)
if result:
return model.header(bottle.request, model.custom_error, 'Post successful', '/forumtopic')
else:
return model.header(bottle.request, model.custom_error, 'Post failed', '/post')
def set_verification(response):
img, code = validate_picture()
buf = BytesIO()
img.save(buf, 'jpeg')
buf_str = buf.getvalue()
    data = base64.b64encode(buf_str).decode('ascii')
response.set_cookie("verification", code, secret=cookie_secret)
return data
```
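The base64 JPEG string returned by `set_verification()` is presumably embedded by the login/register templates as an inline data URI; a small illustrative sketch of that idea (the HTML snippet is an assumption, not repository code):
```python
# Illustrative only: how a template could inline the captcha image.
captcha_b64 = "<value returned by set_verification()>"  # base64-encoded JPEG bytes
img_tag = '<img src="data:image/jpeg;base64,{}" alt="verification code">'.format(captcha_b64)
print(img_tag)
```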
#### File: INFO2222-Simple_website/project_src/waf.py
```python
from bottle import run, request, post, get
import re
import string
# Important globals
host = "localhost"
port = "8081"
# Debug mode to check whether or not attacks are working
# Start with it as "True", try the attack, flip it to false, try the attack again and see if your WAF blocked it
# Debug should be set to false when launching the final version
debug = False
@post('/waf/detect/<string_in:path>')
def detect_attack(string_in):
if not debug:
if 'attack' in string_in:
return 'False'
return 'True'
return 'False'
@post('/waf/email/<email:path>')
def verify_email(email):
if '@' in email:
return 'True'
else:
return "Not an email address"
@post('/waf/password/<password:path>')
def verify_password(password):
if len(password) < 8:
return "Password is too short"
if not any(c in string.ascii_lowercase for c in password):
return "Password must contain at least one lowercase character"
if not any(c in string.ascii_uppercase for c in password):
return "Password must contain at least one uppercase character"
return 'True'
# Rather than using URL paths, you could send the requests with form data filled in
# using the requests module and extract it here. Alternatively you could use JSON objects.
# Custom definition waf
@post('/waf/custom/field=<field:path>%20test=<test:path>')
def custom_waf(field, test):
if re.search(test, field) is not None:
return "True"
return "False"
# Debug toggle
@post('/waf/debug')
def enable_debugger():
global debug
if debug:
debug = False
else:
debug = True
# Run the server
run(host=host, port=port)
```
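An illustrative client for the WAF endpoints above (not part of the repository); it assumes `waf.py` is running on `localhost:8081` and that the `requests` package is installed.
```python
import requests

base = "http://localhost:8081"
print(requests.post(base + "/waf/email/user@example.com").text)   # 'True'
print(requests.post(base + "/waf/password/weak").text)            # 'Password is too short'
print(requests.post(base + "/waf/detect/attack%20payload").text)  # 'False' while debug is off
```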
#### File: JiahaoChenConor/INFO2222-Simple_website/server.py
```python
from bottle import route, run, static_file, template
@route('/')
@route('/hello/<name>')
def greet(name='Stranger'):
return template('hello_template', name=name)
@route('/aboutus')
def aboutus():
return static_file('aboutus', root='/')
if __name__ == '__main__':
run(host='localhost', port=8000, debug=True)
```
#### File: INFO2222-Simple_website/webapp/webapp.py
```python
from bottle import route, run, template, static_file, url, default_app, request, get, post
import bottle
import os
import sys
def calculate_something(input_data):
return "the answer"
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
print(dir_path)
if sys.platform == 'win32':
templates_dir = os.path.join(dir_path, 'views')
if templates_dir not in bottle.TEMPLATE_PATH:
bottle.TEMPLATE_PATH.insert(0, templates_dir)
# the code above allows the same file layout to be used on the localhost and
# pythonanywhere site. In the app directory is wsgi.py and two directories
# static and views. Static has the css/js/images, views contains index.html
# on pythonanywhere the bottle.TEMPLATE_PATH is set in the app_wsgi.py file
# located at /var/www
@route('/')
def home():
''' A bit of documentation
'''
return template('index.html')
@route('/static/<filename:path>')
def send_static(filename):
''' This makes the extant template start working
Woo-Hoo!
'''
return static_file(filename, root= dir_path + '/static/')
# the dir_path+'/'+ needed to be added to get this to serve static pages on PythonAnywhere
# also I had to create a 'views' directory and put the index.html file into the views directory
@route('/hello')
def hello():
''' A bit of documentation
'''
return '<h1>Hello World!</h1>'
@route('/hello/', method='GET')
def hello():
''' A bit of documentation
'''
return '<h1>Hello World (two slash...) !</h1>'
@route('/location', method = 'POST')
def location():
    # 'input_data' was not defined in the original handler; read it from the POSTed form data
    input_data = request.forms
    return calculate_something(input_data)
#
# the lines below recommended by PythonAnywhere forum for Bottle webapp
#
application = default_app()
if __name__ == '__main__':
bottle.debug(True) # remove this for production
bottle.run(host='0.0.0.0', port=8080)
``` |
{
"source": "jiahaoliang/cool_finance",
"score": 2
} |
#### File: cool_finance/data_sources/manager.py
```python
from cool_finance.data_sources import constants as const
from cool_finance.data_sources.vendors import (common, google_finance,
google_finance_batch)
class DataSourceManager(object):
_supported_vendors = {
const.BASE_VENDOR: common.BaseSource,
const.GOOGLE_FINANCE_VENDOR: google_finance.GoogleFinance,
const.GOOGLE_FINANCE_BATCH_VENDOR:
google_finance_batch.GoogleFinanceBatch
}
_support_batch_query_vendor = {
const.GOOGLE_FINANCE_BATCH_VENDOR:
google_finance_batch.GoogleFinanceBatch
}
def __init__(self, default_vendor=const.DEFAULT_DATASOURCE):
self._default_vendor = default_vendor
def get_vendor(self, vendor=None):
# return the vendor class
if not vendor:
vendor = self._default_vendor
return self._supported_vendors[vendor]
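# Example usage (illustrative, not part of the original file):
#   manager = DataSourceManager()
#   vendor_cls = manager.get_vendor()                             # class for const.DEFAULT_DATASOURCE
#   vendor_cls = manager.get_vendor(const.GOOGLE_FINANCE_VENDOR)  # explicit vendor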
``` |
{
"source": "jiahaoLjh/HDNet",
"score": 3
} |
#### File: HDNet/data/dataset.py
```python
import logging
import copy
import cv2
import numpy as np
import torch
import torch.utils.data
from config import cfg
class Dataset(torch.utils.data.Dataset):
def __init__(self, db):
self.logger = logging.getLogger(self.__class__.__name__)
self.db = db
self.mode = db.mode
self.n_joints = db.n_joints
self.joint_name = db.joint_name
self.bin_start = db.bin_start
self.bin_end = db.bin_end
self.n_bins = db.n_bins
self.adj = db.adj
self.data = db.get_data()
def evaluate(self, preds):
return self.db.evaluate(preds)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
data = copy.deepcopy(self.data[idx])
img_path = data["image_path"]
f = data["f"]
c = data["c"]
joints_2d = data["joints_2d"] # [J, 2]
bins = data["bins"] # [K]
bin_idx = data["bin_idx"] # []
img = cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
if not isinstance(img, np.ndarray):
raise IOError("Fail to read {}".format(img_path))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_height, img_width, img_channels = img.shape
scale, rot, do_flip = 1.0, 0.0, False
# transform image
cx, cy, pw, ph = c[0], c[1], data["img_effective_width"], data["img_effective_height"]
mat_input = get_trans_from_patch((cx, cy), (pw, ph), cfg.input_size, do_flip, scale, rot)
trans_img = cv2.warpAffine(img, mat_input, cfg.input_size, flags=cv2.INTER_LINEAR)
mat_output = get_trans_from_patch((cx, cy), (pw, ph), cfg.output_size, do_flip, scale, rot)
# transform coord_map
coord_map = np.stack(np.meshgrid(np.arange(pw), np.arange(ph)), axis=-1).astype(np.float32) # [H, W, 2]
coord_map = (coord_map - np.array([pw / 2., ph / 2.])) / f
mat_coord = get_trans_from_patch((pw / 2., ph / 2.), (pw, ph), cfg.output_size, do_flip, scale, rot)
trans_coord_map = cv2.warpAffine(coord_map, mat_coord, cfg.output_size, flags=cv2.INTER_LINEAR)
# transform 2d joints to output space
trans_joints_2d = np.zeros_like(joints_2d)
for j in range(self.n_joints):
trans_joints_2d[j] = trans_point2d(joints_2d[j], mat_output)
# generate heatmap
trans_heatmap = get_heatmap(trans_joints_2d, cfg.output_size, sigma=cfg.hm_sigma) # [H, W, J]
# bbox mask
bbox_mask = np.ones([cfg.output_size[0], cfg.output_size[1]])
# visibility mask
joints_vis = np.ones([self.n_joints])
# transform 2d pose
trans_pose = (joints_2d - c) / f
out_img = resnet_normalize(trans_img)
out_img = torch.as_tensor(out_img.transpose((2, 0, 1))).float()
out_coord_map = torch.as_tensor(trans_coord_map.transpose((2, 0, 1))).float()
out_heatmap = torch.as_tensor(trans_heatmap.transpose((2, 0, 1))).float()
out_pose = torch.as_tensor(trans_pose.reshape([-1])).float()
out_bins = torch.as_tensor(bins).float()
out_bin_idx = torch.as_tensor(bin_idx).float()
out_bbox_mask = torch.as_tensor(bbox_mask).float()
out_vis_mask = torch.as_tensor(joints_vis).float()
return {
"img": out_img, # [3, H, W]
"coord_map": out_coord_map, # [2, H, W]
"heatmap": out_heatmap, # [J, H, W]
"pose": out_pose, # [J*2]
"bins": out_bins, # [K+1]
"bin_idx": out_bin_idx, # []
"bbox_mask": out_bbox_mask, # [H, W]
"vis_mask": out_vis_mask, # [J]
}
def get_heatmap(joints_2d, output_size, sigma):
"""
joints_2d: [J, 2] in output space
output_size: (H, W) of output space
"""
H, W = output_size
n_joints, _ = joints_2d.shape
hm = np.stack(np.meshgrid(np.arange(W), np.arange(H)), axis=-1).astype(np.float32) # [H, W, 2]
hm = hm.reshape([H, W, 1, 2]).repeat(n_joints, axis=2) # [H, W, J, 2]
j2d = joints_2d.reshape([1, 1, n_joints, 2]) # [1, 1, J, 2]
j2d = j2d.repeat(H, axis=0).repeat(W, axis=1) # [H, W, J, 2]
    # prob = exp(-(dx * dx + dy * dy) / (2 * sigma^2))
hm = np.exp(-1 * np.sum((hm - j2d) ** 2, axis=-1) / (2 * sigma ** 2)) # [H, W, J]
hm = hm / (np.sum(hm, axis=(0, 1)) + 1e-10)
return hm
def trans_point2d(pt_2d, trans):
src_pt = np.array([pt_2d[0], pt_2d[1], 1.]).T
dst_pt = np.dot(trans, src_pt)
return dst_pt[0:2]
def rotate_2d(pt_2d, rot_rad):
x = pt_2d[0]
y = pt_2d[1]
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
xx = x * cs - y * sn
yy = x * sn + y * cs
return np.array([xx, yy], dtype=np.float32)
def get_trans_from_patch(center, input_size, output_size, do_flip, scale, rot, inv=False):
c_x, c_y = center
src_width, src_height = input_size
dst_width, dst_height = output_size
# augment size with scale
src_w = src_width * scale
src_h = src_height * scale
src_center = np.array([c_x, c_y], dtype=np.float32)
# augment rotation
rot_rad = np.pi * rot / 180
src_downdir = rotate_2d(np.array([0, src_h * 0.5], dtype=np.float32), rot_rad)
if do_flip:
src_rightdir = rotate_2d(np.array([src_w * -0.5, 0], dtype=np.float32), rot_rad)
else:
src_rightdir = rotate_2d(np.array([src_w * 0.5, 0], dtype=np.float32), rot_rad)
dst_w = dst_width
dst_h = dst_height
dst_center = np.array([dst_w * 0.5, dst_h * 0.5], dtype=np.float32)
dst_downdir = np.array([0, dst_h * 0.5], dtype=np.float32)
dst_rightdir = np.array([dst_w * 0.5, 0], dtype=np.float32)
src = np.zeros((3, 2), dtype=np.float32)
src[0, :] = src_center
src[1, :] = src_center + src_downdir
src[2, :] = src_center + src_rightdir
dst = np.zeros((3, 2), dtype=np.float32)
dst[0, :] = dst_center
dst[1, :] = dst_center + dst_downdir
dst[2, :] = dst_center + dst_rightdir
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def resnet_normalize(image):
image = image.astype(np.float32) / 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
preprocessed_img = image.copy()
for i in range(3):
preprocessed_img[:, :, i] = (preprocessed_img[:, :, i] - mean[i]) / std[i]
return preprocessed_img
```
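A small numeric sketch (not from the repository) of how the patch transform above maps points; it assumes `dataset.py` and its `config` module are importable, plus numpy and OpenCV.
```python
import numpy as np
from dataset import get_trans_from_patch, trans_point2d

center, in_size, out_size = (320.0, 240.0), (200.0, 200.0), (64, 64)
fwd = get_trans_from_patch(center, in_size, out_size, do_flip=False, scale=1.0, rot=0.0)
inv = get_trans_from_patch(center, in_size, out_size, do_flip=False, scale=1.0, rot=0.0, inv=True)

p = np.array([320.0, 240.0])     # the patch centre
q = trans_point2d(p, fwd)        # -> approximately (32, 32), the output centre
print(q, trans_point2d(q, inv))  # the inverse maps it back to ~ (320, 240)
```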
#### File: jiahaoLjh/HDNet/test.py
```python
import os.path as osp
import sys
import tqdm
import argparse
import logging
import logging.config
import coloredlogs
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from config import cfg
from model import get_model
from dataset import Dataset
# create logger and direct log to file
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
coloredlogs.install(level=logging.INFO, logger=logger)
fh = logging.FileHandler(osp.join(cfg.log_dir, "log.txt"))
fmt = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s - %(message)s")
fh.setFormatter(fmt)
logger.addHandler(fh)
# dynamically import dataset
for i in range(len(cfg.trainset)):
exec("from {} import {}".format(cfg.trainset[i], cfg.trainset[i]))
if cfg.testset not in cfg.trainset:
exec("from {} import {}".format(cfg.testset, cfg.testset))
class Tester:
def __init__(self, load_tag):
self.logger = logging.getLogger(self.__class__.__name__)
self.dataset, self.data_loader, self.n_steps = self.prepare_data()
self.model = self.create_model(load_tag)
def create_model(self, load_tag):
self.logger.info("Creating model...")
model = get_model(self.dataset.n_joints, self.dataset.n_bins, self.dataset.adj)
model = nn.DataParallel(model).cuda()
self.load_model(model, load_tag)
return model
def load_model(self, model, load_tag):
load_path = osp.join(cfg.exp_root, load_tag, "saved_models", "ckpt.pth.tar")
assert osp.isfile(load_path), "Pre-trained model {} not exist".format(load_path)
ckpt = torch.load(load_path)
self.logger.info("Loading model from {} epoch {}".format(load_path, ckpt["epoch"]))
if isinstance(model, nn.DataParallel):
model.module.load_state_dict(ckpt["model"])
else:
model.load_state_dict(ckpt["model"])
def prepare_data(self):
self.logger.info("Creating test dataset...")
dataset = Dataset(eval(cfg.testset)("test"))
data_loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.num_gpus * cfg.batch_size_per_gpu,
shuffle=False,
num_workers=cfg.num_workers,
pin_memory=True)
n_steps = len(data_loader)
return dataset, data_loader, n_steps
def test_an_epoch(self):
self.model.eval()
preds_pose = []
preds_bin_idx = []
tbar = tqdm.tqdm(total=self.n_steps, ncols=100)
with torch.no_grad():
for step, batch_data in enumerate(self.data_loader):
img = batch_data["img"]
coord_map = batch_data["coord_map"]
heatmap = batch_data["heatmap"]
pose = batch_data["pose"]
bins = batch_data["bins"]
bin_idx = batch_data["bin_idx"]
bbox_mask = batch_data["bbox_mask"]
vis_mask = batch_data["vis_mask"]
out, _ = self.model(img, coord_map, bbox_mask, vis_mask, epoch=0, target=(heatmap, pose, bins, bin_idx))
out_pose = out["pose"] # [B, J*2]
out_bin_idx = out["bin_idx"] # [B]
preds_pose.append(out_pose.cpu().data.numpy())
preds_bin_idx.append(out_bin_idx.cpu().data.numpy())
tbar.update(1)
tbar.close()
preds_pose = np.concatenate(preds_pose, axis=0) # [N, J*2]
preds_bin_idx = np.concatenate(preds_bin_idx, axis=0) # [N]
err_pose, err_pose_root, error_x, error_y, error_z, mrpe = self.dataset.evaluate((preds_pose, preds_bin_idx))
def parse_command_line():
parser = argparse.ArgumentParser()
parser.add_argument("--gpu", type=str, help="gpu(s) to use")
parser.add_argument("--bs", type=int, help="batch size per gpu")
parser.add_argument("--tag", type=str, required=True, help="experiment to load for evaluation")
args, _ = parser.parse_known_args()
return args
def main():
args = parse_command_line()
cfg.update_args(vars(args))
logger.debug("COMMAND LINE: {}".format(str(sys.argv)))
logger.info("CONFIG:")
for k in sorted(vars(cfg)):
logger.info("\t{}: {}".format(k, vars(cfg)[k]))
cudnn.enabled = True
cudnn.benchmark = True
tester = Tester(cfg.tag)
logger.info("Evaluating exp {} on gpu {}".format(cfg.tag, cfg.gpus))
tester.test_an_epoch()
if __name__ == "__main__":
main()
``` |
{
"source": "jiahaoLjh/PoseGrouping",
"score": 3
} |
#### File: jiahaoLjh/PoseGrouping/utils.py
```python
import numpy as np
import os
def mkdir(path):
if not os.path.isdir(path):
os.makedirs(path)
return True
return False
class AverageMeter(object):
def __init__(self):
self.tot = 0.0
self.n = 0
def add(self, value, n=1):
self.tot += value * n
self.n += n
def avg(self):
n = self.n
if n == 0:
mean = np.nan
else:
mean = self.tot / n
return mean
def sum(self):
return self.tot
def reset(self):
self.tot = 0.0
self.n = 0
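# Example usage (illustrative, not part of the original file):
#   m = AverageMeter()
#   m.add(2.0, n=3)   # three samples with value 2.0
#   m.add(5.0)        # one more sample
#   m.avg()           # -> (2.0 * 3 + 5.0) / 4 = 2.75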
``` |
{
"source": "jiahaoLjh/trajectory-pose-3d",
"score": 2
} |
#### File: jiahaoLjh/trajectory-pose-3d/test.py
```python
import os
import glob
import argparse
import logging
import logging.config
import coloredlogs
import numpy as np
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from model import PoseNet
from dataset import H36M_Dataset
from evaluate import h36m_evaluate
config = {
"exp_root": "./log",
"cameras_path": "data/cameras.h5",
"bases_path": "data/bases.npy",
"bases_to_use": "dct",
"input_data": "det",
"n_bases": 8,
"n_frames": 50,
"window_slide": 5,
"n_joints": 17,
"num_workers": 0,
"gpus": [0],
"batch_size_per_gpu": 256,
"args": {},
}
def parse_command_line():
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--exp", required=True, type=str, help="experiment to load")
parser.add_argument("-g", "--gpu", type=int, help="gpu to use")
args, _ = parser.parse_known_args()
return args
def load_ckpt(model, exp_path):
load_path = os.path.join(exp_path, "ckpt.pth.tar")
ckpt = torch.load(load_path)
config["logger"].info("Load model from {}".format(load_path))
if isinstance(model, nn.DataParallel):
model.module.load_state_dict(ckpt["model_state_dict"], strict=False)
else:
model.load_state_dict(ckpt["model_state_dict"], strict=False)
def main():
cl_args = parse_command_line()
config["args"].update(vars(cl_args))
# override config with command line args
if config["args"]["gpu"] is not None:
config["gpus"] = [config["args"]["gpu"]]
# exp folder
dir_name = glob.glob(os.path.join(config["exp_root"], "{}*".format(config["args"]["exp"])))
assert len(dir_name) == 1, "Invalid exp folder to load: {}".format(config["args"]["exp"])
exp_tag = os.path.basename(dir_name[0])
exp_path = os.path.join(config["exp_root"], exp_tag)
config["exp_tag"] = exp_tag
config["exp_path"] = exp_path
# readout config from exp tag
_, _, exp_f, exp_k, exp_bases, exp_input = exp_tag.split("_")
exp_f = int(exp_f[1:])
exp_k = int(exp_k[1:])
config["bases_to_use"] = exp_bases
config["input_data"] = exp_input
config["n_bases"] = exp_k
config["n_frames"] = exp_f
# logger
logger = logging.getLogger()
coloredlogs.install(level="DEBUG", logger=logger)
config["logger"] = logger
# setup gpus
gpus = ','.join([str(x) for x in config["gpus"]])
os.environ["CUDA_VISIBLE_DEVICES"] = gpus
logger.info("Set CUDA_VISIBLE_DEVICES to {}".format(gpus))
# model
model = PoseNet(config)
model = nn.DataParallel(model)
model = model.cuda()
# load bases
if config["bases_to_use"] == "svd":
fixed_bases = np.load(config["bases_path"])
assert config["n_bases"] <= fixed_bases.shape[0] and config["n_frames"] == fixed_bases.shape[1], fixed_bases.shape
fixed_bases = fixed_bases[:config["n_bases"]]
# scale svd bases to the same magnitude as dct bases
# the scaling factor here is for F=50
fixed_bases *= np.sqrt(25)
elif config["bases_to_use"] == "dct":
x = np.arange(config["n_frames"])
fixed_bases = [np.ones([config["n_frames"]]) * np.sqrt(0.5)]
for i in range(1, config["n_bases"]):
fixed_bases.append(np.cos(i * np.pi * ((x + 0.5) / config["n_frames"])))
fixed_bases = np.array(fixed_bases)
else:
assert False, config["bases_to_use"]
config["bases"] = fixed_bases
fixed_bases = torch.from_numpy(fixed_bases).float() # (K, F)
fixed_bases = fixed_bases.view(1, config["n_bases"], config["n_frames"]) # (1, K, F)
# dataset & dataloader
train_dataset = H36M_Dataset(config, "train") # training set must be loaded first to compute stats
test_dataset = H36M_Dataset(config, "test")
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=config["batch_size_per_gpu"]*len(config["gpus"]), shuffle=False, num_workers=config["num_workers"], pin_memory=True)
load_ckpt(model, exp_path)
model.eval()
logger.info("Inference on test set...")
gts_3d = []
preds_3d = []
indices = []
with torch.no_grad():
for step, batch in enumerate(test_loader):
# parse batch data
data_2d_gt = batch["data_2d_gt"] # (B, Jx2, F)
data_2d_cpn = batch["data_2d_cpn"] # (B, Jx2, F)
if config["input_data"] == "det":
data_2d = data_2d_cpn
elif config["input_data"] == "gt":
data_2d = data_2d_gt
else:
assert False, config["input_data"]
data_2d_gt_flip = batch["data_2d_gt_flip"] # (B, Jx2, F)
data_2d_cpn_flip = batch["data_2d_cpn_flip"] # (B, Jx2, F)
if config["input_data"] == "det":
data_2d_flip = data_2d_cpn_flip
elif config["input_data"] == "gt":
data_2d_flip = data_2d_gt_flip
else:
assert False, config["input_data"]
data_3d = batch["data_3d"] # (B, Jx3, F)
data_3d_flip = batch["data_3d_flip"] # (B, Jx3, F)
mean_3d = batch["mean_3d"] # (B, Jx3)
std_3d = batch["std_3d"] # (B, Jx3)
idx = batch["idx"] # (B,)
B = data_3d.shape[0]
batch_bases = fixed_bases.repeat(B, 1, 1) # (B, K, F)
data_2d = data_2d.cuda()
data_2d_flip = data_2d_flip.cuda()
data_3d = data_3d.cuda()
data_3d_flip = data_3d_flip.cuda()
batch_bases = batch_bases.cuda()
mean_3d = mean_3d.cuda()
std_3d = std_3d.cuda()
# forward pass
coeff = model(data_2d, batch_bases)
coeff_flip = model(data_2d_flip, batch_bases)
_, res = model.module.build_loss_test((coeff, coeff_flip), batch_bases, (data_3d, data_3d_flip), mean_3d, std_3d)
pred_3d, gt_3d = res
preds_3d.append(pred_3d)
gts_3d.append(gt_3d)
indices.append(idx.data.numpy())
preds_3d = np.concatenate(preds_3d, 0)
gts_3d = np.concatenate(gts_3d, 0)
indices = np.concatenate(indices, 0)
h36m_evaluate(preds_3d, gts_3d, indices, test_dataset, config)
if __name__ == "__main__":
main()
```
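A standalone sketch (not repository code) of the fixed DCT bases built in `test.py`/`train.py`; it shows that for F = 50 the rows are orthogonal with squared norm F/2 = 25, which is why the SVD bases are rescaled by sqrt(25) to match their magnitude.
```python
import numpy as np

F, K = 50, 8
x = np.arange(F)
bases = [np.ones(F) * np.sqrt(0.5)]
for i in range(1, K):
    bases.append(np.cos(i * np.pi * (x + 0.5) / F))
bases = np.array(bases)                                   # (K, F)

gram = bases @ bases.T                                    # ~ (F / 2) * identity
print(np.allclose(gram, (F / 2) * np.eye(K), atol=1e-8))  # True
```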
#### File: jiahaoLjh/trajectory-pose-3d/train.py
```python
import os
import sys
import datetime
import argparse
import logging
import logging.config
import coloredlogs
import numpy as np
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from model import PoseNet
from dataset import H36M_Dataset
from evaluate import h36m_evaluate
from utils import mkdir, AverageMeter
config = {
"exp_root": "./log",
"cameras_path": "data/cameras.h5",
"bases_path": "data/bases.npy",
"bases_to_use": "dct",
"input_data": "det",
"n_bases": 8,
"n_frames": 50,
"window_slide": 5,
"n_joints": 17,
"num_workers": 8,
"gpus": [0],
"batch_size_per_gpu": 256,
"train": {
"init_lr": 1e-4,
"lr_decay": 0.1,
"num_epochs": 100,
"log_per_n_iterations": 10,
},
"args": {},
}
def parse_command_line():
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--bases", type=str, help="bases to use (dct or svd)")
parser.add_argument("-i", "--input", type=str, help="input data to the model (det or gt)")
parser.add_argument("-f", "--nframes", type=int, help="number of frames")
parser.add_argument("-k", "--nbases", type=int, help="number of bases")
parser.add_argument("-g", "--gpu", type=int, help="gpu to use")
args, _ = parser.parse_known_args()
return args
def save_ckpt(model, exp_path):
save_path = os.path.join(exp_path, "ckpt.pth.tar")
torch.save({
"model_state_dict": model.module.state_dict() if isinstance(model, nn.DataParallel) else model.state_dict(),
}, save_path)
config["logger"].info("Save model to {}".format(save_path))
def main():
cl_args = parse_command_line()
config["args"].update(vars(cl_args))
# override config with command line args
if config["args"]["bases"] is not None:
assert config["args"]["bases"] in ["dct", "svd"], "Invalid bases: {}".format(config["args"]["bases"])
config["bases_to_use"] = config["args"]["bases"]
if config["args"]["input"] is not None:
assert config["args"]["input"] in ["det", "gt"], "Invalid input: {}".format(config["args"]["input"])
config["input_data"] = config["args"]["input"]
if config["args"]["nframes"] is not None:
config["n_frames"] = config["args"]["nframes"]
if config["args"]["nbases"] is not None:
config["n_bases"] = config["args"]["nbases"]
if config["args"]["gpu"] is not None:
config["gpus"] = [config["args"]["gpu"]]
# exp folder
exp_tag = "{}_F{}_k{}_{}_{}".format(datetime.datetime.now().strftime("%m%d_%H%M%S"), config["n_frames"], config["n_bases"], config["bases_to_use"], config["input_data"])
exp_path = os.path.join(config["exp_root"], exp_tag)
config["exp_tag"] = exp_tag
config["exp_path"] = exp_path
mkdir(exp_path)
# logger
logger = logging.getLogger()
coloredlogs.install(level="DEBUG", logger=logger)
fileHandler = logging.FileHandler(os.path.join(exp_path, "log.txt"))
logFormatter = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s - %(message)s")
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
config["logger"] = logger
logger.info(sys.argv)
# setup gpus
gpus = ','.join([str(x) for x in config["gpus"]])
os.environ["CUDA_VISIBLE_DEVICES"] = gpus
logger.info("Set CUDA_VISIBLE_DEVICES to {}".format(gpus))
# model
model = PoseNet(config)
model = nn.DataParallel(model)
model = model.cuda()
# optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=config["train"]["init_lr"])
# load bases
if config["bases_to_use"] == "svd":
fixed_bases = np.load(config["bases_path"])
assert config["n_bases"] <= fixed_bases.shape[0] and config["n_frames"] == fixed_bases.shape[1], fixed_bases.shape
fixed_bases = fixed_bases[:config["n_bases"]]
# scale svd bases to the same magnitude as dct bases
# the scaling factor here is for F=50
fixed_bases *= np.sqrt(25)
elif config["bases_to_use"] == "dct":
x = np.arange(config["n_frames"])
fixed_bases = [np.ones([config["n_frames"]]) * np.sqrt(0.5)]
for i in range(1, config["n_bases"]):
fixed_bases.append(np.cos(i * np.pi * ((x + 0.5) / config["n_frames"])))
fixed_bases = np.array(fixed_bases)
else:
assert False, config["bases_to_use"]
config["bases"] = fixed_bases
fixed_bases = torch.from_numpy(fixed_bases).float() # (K, F)
fixed_bases = fixed_bases.view(1, config["n_bases"], config["n_frames"]) # (1, K, F)
# dataset & dataloader
train_dataset = H36M_Dataset(config, "train")
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config["batch_size_per_gpu"]*len(config["gpus"]), shuffle=True, num_workers=config["num_workers"], pin_memory=True)
test_dataset = H36M_Dataset(config, "test")
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=config["batch_size_per_gpu"]*len(config["gpus"]), shuffle=False, num_workers=config["num_workers"], pin_memory=True)
tot_step = 0
for epoch in range(config["train"]["num_epochs"]):
# learning rate decay
if epoch in [60, 85]:
optimizer.param_groups[0]["lr"] = optimizer.param_groups[0]["lr"] * config["train"]["lr_decay"]
logger.info("Learning rate set to {}".format(optimizer.param_groups[0]["lr"]))
# train one epoch
model.train()
for step, batch in enumerate(train_loader):
# parse batch data
data_2d_gt = batch["data_2d_gt"] # (B, Jx2, F)
data_2d_cpn = batch["data_2d_cpn"] # (B, Jx2, F)
if config["input_data"] == "det":
data_2d = data_2d_cpn
elif config["input_data"] == "gt":
data_2d = data_2d_gt
else:
assert False, config["input_data"]
data_3d = batch["data_3d"] # (B, Jx3, F)
mean_3d = batch["mean_3d"] # (B, Jx3)
std_3d = batch["std_3d"] # (B, Jx3)
B = data_3d.shape[0]
batch_bases = fixed_bases.repeat(B, 1, 1) # (B, K, F)
data_2d = data_2d.cuda()
data_3d = data_3d.cuda()
batch_bases = batch_bases.cuda()
mean_3d = mean_3d.cuda()
std_3d = std_3d.cuda()
# forward pass
coeff = model(data_2d, batch_bases)
# compute loss
loss = model.module.build_loss_training(coeff, batch_bases, data_3d, mean_3d, std_3d)
# backward pass
optimizer.zero_grad()
loss.backward()
optimizer.step()
tot_step += 1
if "log_per_n_iterations" in config["train"] and (step + 1) % config["train"]["log_per_n_iterations"] == 0:
logger.info("TRAIN Epoch {}, step {}/{} ({}): loss = {:.6f}".format(
epoch + 1,
step + 1,
len(train_loader),
tot_step,
loss.item()))
# testing
model.eval()
logger.info("Testing on test set...")
total_loss = AverageMeter()
gts_3d = []
preds_3d = []
indices = []
with torch.no_grad():
for step, batch in enumerate(test_loader):
# parse batch data
data_2d_gt = batch["data_2d_gt"] # (B, Jx2, F)
data_2d_cpn = batch["data_2d_cpn"] # (B, Jx2, F)
if config["input_data"] == "det":
data_2d = data_2d_cpn
elif config["input_data"] == "gt":
data_2d = data_2d_gt
else:
assert False, config["input_data"]
data_2d_gt_flip = batch["data_2d_gt_flip"] # (B, Jx2, F)
data_2d_cpn_flip = batch["data_2d_cpn_flip"] # (B, Jx2, F)
if config["input_data"] == "det":
data_2d_flip = data_2d_cpn_flip
elif config["input_data"] == "gt":
data_2d_flip = data_2d_gt_flip
else:
assert False, config["input_data"]
data_3d = batch["data_3d"] # (B, Jx3, F)
data_3d_flip = batch["data_3d_flip"] # (B, Jx3, F)
mean_3d = batch["mean_3d"] # (B, Jx3)
std_3d = batch["std_3d"] # (B, Jx3)
idx = batch["idx"] # (B,)
B = data_3d.shape[0]
batch_bases = fixed_bases.repeat(B, 1, 1) # (B, K, F)
data_2d = data_2d.cuda()
data_2d_flip = data_2d_flip.cuda()
data_3d = data_3d.cuda()
data_3d_flip = data_3d_flip.cuda()
batch_bases = batch_bases.cuda()
mean_3d = mean_3d.cuda()
std_3d = std_3d.cuda()
# forward pass
coeff = model(data_2d, batch_bases)
coeff_flip = model(data_2d_flip, batch_bases)
# compute loss
loss, res = model.module.build_loss_test((coeff, coeff_flip), batch_bases, (data_3d, data_3d_flip), mean_3d, std_3d)
pred_3d, gt_3d = res
total_loss.add(loss.item())
preds_3d.append(pred_3d)
gts_3d.append(gt_3d)
indices.append(idx.data.numpy())
avg_loss = total_loss.value()
logger.info("Test loss: {}".format(avg_loss))
if epoch == config["train"]["num_epochs"] - 1:
preds_3d = np.concatenate(preds_3d, 0)
gts_3d = np.concatenate(gts_3d, 0)
indices = np.concatenate(indices, 0)
h36m_evaluate(preds_3d, gts_3d, indices, test_dataset, config)
save_ckpt(model, exp_path)
if __name__ == "__main__":
main()
``` |
{
"source": "jiahaooo/SimpleSegModel",
"score": 2
} |
#### File: SimpleSegModel/utils/utils_option.py
```python
import os
from collections import OrderedDict
import json
import re
import glob
import sys
import datetime
import logging
'''
# --------------------------------------------
# https://github.com/cszn
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
# --------------------------------------------
# report the training process in a log file
# --------------------------------------------
def log(*args, **kwargs):
print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S:"), *args, **kwargs)
def logger_info(logger_name, log_path='default_logger.log'):
log = logging.getLogger(logger_name)
if log.hasHandlers():
print('LogHandlers exist!')
else:
print('LogHandlers setup!')
level = logging.INFO
formatter = logging.Formatter('%(asctime)s.%(msecs)03d : %(message)s', datefmt='%y-%m-%d %H:%M:%S')
fh = logging.FileHandler(log_path, mode='a')
fh.setFormatter(formatter)
log.setLevel(level)
log.addHandler(fh)
# sh = logging.StreamHandler()
# sh.setFormatter(formatter)
# log.addHandler(sh)
# --------------------------------------------
# get time
# --------------------------------------------
def get_timestamp():
return datetime.datetime.now().strftime('_%y%m%d_%H%M%S')
# --------------------------------------------
# process opt
# --------------------------------------------
def parse(opt_path, is_train=True):
# ----------------------------------------
# remove comments starting with '//'
# ----------------------------------------
json_str = ''
with open(opt_path, 'r') as f:
for line in f:
line = line.split('//')[0] + '\n'
json_str += line
# ----------------------------------------
# initialize opt
# ----------------------------------------
opt = json.loads(json_str, object_pairs_hook=OrderedDict)
opt['opt_path'] = opt_path
opt['is_train'] = is_train
# ----------------------------------------
# datasets
# ----------------------------------------
for phase, dataset in opt['datasets'].items():
phase = phase.split('_')[0]
dataset['phase'] = phase
dataset['n_channels'] = opt['n_channels'] # broadcast
if phase == 'train':
dataset['dataroot_Label'] = os.path.join(opt['dataroot'], 'train_Label')
dataset['dataroot_Input'] = os.path.join(opt['dataroot'], 'train_Input')
if phase == 'test':
dataset['dataroot_Label'] = os.path.join(opt['dataroot'], 'val_Label')
dataset['dataroot_Input'] = os.path.join(opt['dataroot'], 'val_Input')
if 'dataroot_Label' in dataset and dataset['dataroot_Label'] is not None:
dataset['dataroot_Label'] = os.path.expanduser(dataset['dataroot_Label']) # use 'expanduser' to get the ~ path
if 'dataroot_Input' in dataset and dataset['dataroot_Input'] is not None:
dataset['dataroot_Input'] = os.path.expanduser(dataset['dataroot_Input'])
# ----------------------------------------
# path
# ----------------------------------------
for key, path in opt['path'].items():
if path and key in opt['path']:
opt['path'][key] = os.path.expanduser(path)
path_task = os.path.join(opt['path']['root'], opt['task'])
opt['path']['task'] = path_task
opt['path']['log'] = path_task
opt['path']['options'] = os.path.join(path_task, 'options')
if is_train:
opt['path']['models'] = os.path.join(path_task, 'models')
opt['path']['images'] = os.path.join(path_task, 'images')
else: # test
opt['path']['images'] = os.path.join(path_task, 'test_images')
# ----------------------------------------
# GPU devices
# ----------------------------------------
# gpu_list = ','.join(str(x) for x in opt['gpu_ids'])
# os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
# print('export CUDA_VISIBLE_DEVICES=' + gpu_list)
return opt
# --------------------------------------------
# continue training from the last checkpoint
# --------------------------------------------
def find_last_checkpoint(save_dir):
"""
Args:
save_dir: model folder
Return:
init_iter: iteration number
init_path: model path
"""
file_list = glob.glob(os.path.join(save_dir, '*.pth'))
if file_list:
iter_exist = []
for file_ in file_list:
iter_current = re.findall(r"(\d+).pth", file_)
iter_exist.append(int(iter_current[0]))
init_iter = max(iter_exist)
init_path = os.path.join(save_dir, '{}.pth'.format(init_iter))
else:
init_iter = 0
init_path = None
return init_iter, init_path
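# Example (illustrative): with save_dir containing '5000.pth' and '10000.pth',
# find_last_checkpoint(save_dir) returns (10000, os.path.join(save_dir, '10000.pth')).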
# --------------------------------------------
# convert the opt into json file
# --------------------------------------------
def save(opt):
opt_path = opt['opt_path']
opt_path_copy = opt['path']['options']
dirname, filename_ext = os.path.split(opt_path)
filename, ext = os.path.splitext(filename_ext)
dump_path = os.path.join(opt_path_copy, filename+get_timestamp()+ext)
with open(dump_path, 'w') as dump_file:
json.dump(opt, dump_file, indent=2)
# --------------------------------------------
# dict to string for logger
# --------------------------------------------
def dict2str(opt, indent_l=1):
msg = ''
for k, v in opt.items():
if isinstance(v, dict):
msg += ' ' * (indent_l * 2) + k + ':[\n'
msg += dict2str(v, indent_l + 1)
msg += ' ' * (indent_l * 2) + ']\n'
else:
msg += ' ' * (indent_l * 2) + k + ': ' + str(v) + '\n'
return msg
# --------------------------------------------
# convert OrderedDict to NoneDict,
# return None for missing key
# --------------------------------------------
def dict_to_nonedict(opt):
if isinstance(opt, dict):
new_opt = dict()
for key, sub_opt in opt.items():
new_opt[key] = dict_to_nonedict(sub_opt)
return NoneDict(**new_opt)
elif isinstance(opt, list):
return [dict_to_nonedict(sub_opt) for sub_opt in opt]
else:
return opt
class NoneDict(dict):
def __missing__(self, key):
return None
``` |
{
"source": "jiahaowork/code2pdf",
"score": 3
} |
#### File: jiahaowork/code2pdf/code2pdf.py
```python
import os
import argparse
import shutil
from collections import OrderedDict
# main.tex content
MAIN_TEX = \
r"""
\documentclass[letterpaper]{article}
\usepackage[utf8]{inputenc}
\usepackage{multicol}
\usepackage[margin=0.8in]{geometry} % set margin
\usepackage{minted} % code formatting
\usepackage[colorlinks=true,linkcolor=red]{hyperref}
\usepackage{seqsplit} % wrap lines by character
\usepackage{listings} % used to include text file verbatim
\usemintedstyle{vs} % set minted style
\begin{document}
\setlength{\parindent}{0pt} % remove all indents
\begin{multicols}{2}
\subsubsection*{Directories}
\input{dir.tex}
\subsubsection*{Files}
\input{file.tex}
\input{code.tex}
\end{multicols}
\end{document}
"""
# escape some characters for latex
def escape(string):
string = '\_'.join(string.split('_'))
return string
# wrap the text with \seqsplit
def seqsplit(string):
string = string.split('\n')
for i in range(len(string)):
if string[i]:
string[i] = '\\seqsplit{%s}' % string[i]
return '\n'.join(string)
# wrap the text with \textbf
def textbf(string):
return '\\textbf{%s}' % string
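# Examples (illustrative, not part of the original file):
#   escape('my_file.py')        -> 'my\_file.py'
#   textbf(escape('dir_name'))  -> '\textbf{dir\_name}'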
def main(out_path, code_path, language):
# make output dir and copy code into it
os.mkdir(out_path)
shutil.copytree(code_path, os.path.join(out_path, code_path.strip('/').split('/')[-1]))
os.chdir(out_path) # change working directory
code_path = code_path.strip('/').split('/')[-1]
# parse code directory
dirs = {}
files = {}
for root, _, fnames in os.walk(code_path):
# skip if it is hidden dir
if sum([x.startswith('.') for x in root.split('/')]) > 0: continue
# get dir name
dname = root.split('/')[-1]
# get parent path
parent = '/'.join(root.split('/')[:-1])
# record directories
entry = textbf(escape(dname)) + ' \\pageref{' + root + '} ' + seqsplit(escape(parent)) + '/\n\n'
dirs[root] = entry
# record file names, eliminating hidden files
files[root] = [x for x in fnames if not x.startswith('.')]
# write dir.tex
with open('dir.tex', 'w') as f:
for x in sorted(dirs.keys()):
f.write(dirs[x])
# write file.tex
with open('file.tex', 'w') as f:
for path in sorted(files.keys()):
f.write(textbf(escape(path.split('/')[-1])))
f.write(' \\label{' + path + '}')
f.write('\n\n')
for filename in sorted(files[path]):
f.write(seqsplit('~~' + escape(filename)) + ' \\pageref{' + os.path.join(path, filename) + '}\n\n')
    # write code.tex
fullpaths = []
for path in files:
for filename in files[path]:
fp = os.path.join(path, filename)
fullpaths.append(fp)
fullpaths.sort()
with open('code.tex', 'w') as f:
for fp in fullpaths:
filename = fp.split('/')[-1]
            f.write('\\subsubsection*{%s}\n\\label{%s}\n\\inputminted[fontsize=\\scriptsize, breaklines]{%s}{%s}\n' % (escape(filename), fp, language, fp))
# write main.tex
with open('main.tex', 'w') as f:
f.write(MAIN_TEX)
# shutil.copy2('../resources/main.tex', '.')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='code2pdf')
parser.add_argument('outpath', help='Path of output')
parser.add_argument('codepath', help='Path of code')
parser.add_argument('language', help='Programming language, see https://www.overleaf.com/learn/latex/Code_Highlighting_with_minted for supported languages.')
args = parser.parse_args()
main(args.outpath, args.codepath, args.language)
``` |
{
"source": "jiahaowork/gif2text",
"score": 3
} |
#### File: jiahaowork/gif2text/gif2text.py
```python
import argparse
import os
import numpy as np
from PIL import Image, ImageFont, ImageDraw, ImageOps, ImageFilter
from tqdm import tqdm
import imageio
def get_ascii_chars():
return ''.join(chr(x) for x in range(32, 127))
def get_chinese_chars():
return open('assets/cn_charset.txt').read().strip() + ' '
def is_ascii(s):
try:
s.encode('ascii')
except UnicodeEncodeError:
return False
else:
return True
def read_gif(path):
im_list = imageio.mimread(path)
im_list = [im[:, :, :3] for im in im_list]
for i in range(1, len(im_list)):
im_list[i] = np.where(im_list[i]>0, im_list[i], im_list[i-1])
duration = Image.open(path).info['duration'] / 1000.
return im_list, duration
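# Note on read_gif: frames read via imageio.mimread can be partial when the GIF
# uses frame differencing, so each frame is composited over the previous one
# with np.where before use; the per-frame duration is returned in seconds.
# A rough usage sketch (the file name is hypothetical):
#
#     frames, frame_duration = read_gif('examples/cat.gif')
#     print(len(frames), frames[0].shape, frame_duration)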
class Font():
def __init__(self, path, size):
self.font = ImageFont.truetype(font=path, size=size)
width, height = self.font.getsize('A')
self.patch_size = max(width, height)
self.x_offset = (self.patch_size-width)//2
self.y_offset = (self.patch_size-height)//2
def get_patches(self, chars):
size = self.patch_size
patches = np.zeros([len(chars), size, size], dtype=np.uint8)
if len(set(chars)) != len(chars):
raise Exception('Duplicate characters exist')
for i, c in enumerate(chars):
p = np.zeros([size, size], dtype=np.uint8)
p = Image.fromarray(p)
draw = ImageDraw.Draw(p)
draw.text([self.x_offset, self.y_offset], c, fill='white', font=self.font)
patches[i] = p
return patches
def get_rank(arr):
temp = np.argsort(arr)
ranks = np.empty_like(temp)
ranks[temp] = np.arange(len(arr))
return ranks
# get intensities for patches
def get_intensities(patches):
densities = np.mean(patches, (1, 2)) / 255.
intensities = get_rank(densities)
scale = 255./np.max(intensities)
intensities = intensities.astype(np.float32)
intensities *= scale
intensities = intensities.astype(np.uint8)
return intensities
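# Worked example for the rank-based mapping above: for three glyph patches with
# mean densities [0.9, 0.1, 0.5], get_rank returns [2, 0, 1]; scaling by
# 255 / max(rank) and casting to uint8 gives intensities [255, 0, 127], so the
# sparsest glyph maps to 0 and the densest glyph maps to 255.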
# get a 256-element numpy array containing the index of characters
def get_intensity2idx(chars, intensities):
d = {}
for idx, intensity in zip(range(len(chars)), intensities):
if intensity in d:
d[intensity].append(idx)
else:
d[intensity] = [idx]
unique_intensities = []
char_idx = []
for intensity in d:
unique_intensities.append(intensity)
char_idx.append(np.random.choice(d[intensity]))
unique_intensities = np.array(unique_intensities, dtype=np.uint8)
char_idx = np.array(char_idx, dtype=np.int64)
intensity2idx = np.arange(256, dtype=np.int64)
intensity2idx = intensity2idx[:, np.newaxis] - unique_intensities[np.newaxis, :]
intensity2idx = np.abs(intensity2idx)
intensity2idx = np.argmin(intensity2idx, -1)
intensity2idx = char_idx[intensity2idx]
return intensity2idx
# convert one frame to text image
def im2text(im, patches, intensity2idx, grayscale=False):
im = np.array(im, dtype=np.uint8)
patch_size = patches.shape[-1]
im_h, im_w, im_c = im.shape
text_im_w = im_w * patch_size
text_im_h = im_h * patch_size
gray_im = Image.fromarray(im.copy())
gray_im = np.array(gray_im.convert('L'))
idx = intensity2idx[gray_im]
text_im = patches[idx]
if grayscale:
text_im = text_im.transpose([0, 2, 1, 3])
text_im = text_im.reshape([text_im_h, text_im_w])
else:
text_im = text_im[..., np.newaxis].astype(np.float32)
im = im[:, :, np.newaxis, np.newaxis, :].astype(np.float32)
text_im = text_im * im / 255.
text_im = text_im.astype(np.uint8)
text_im = text_im.transpose([0, 2, 1, 3, 4])
text_im = text_im.reshape([text_im_h, text_im_w, im_c])
return text_im
# convert a list of frames to text image
def images2text(im_list, chars, font, grayscale=False):
chars = ''.join(list(set(chars)))
patches = font.get_patches(chars)
intensities = get_intensities(patches)
intensity2idx = get_intensity2idx(chars, intensities)
text_images = []
print('Converting GIF to text animations...')
for im in tqdm(im_list):
text_images.append(im2text(im, patches, intensity2idx, grayscale=grayscale))
print('Conversion done.')
return text_images
if __name__ == '__main__':
# parse arguments
arg_bool = lambda x: x.lower() in ['true', 't', '1']
parser = argparse.ArgumentParser()
parser.add_argument('--gif_path', type=str, help='Path to the GIF file.')
parser.add_argument('--out_path', type=str, help='Path of the output gif file (including the filename).')
parser.add_argument('--width', type=int, help='Number of characters per row. You can specify only width or height. The other will be automatically set to maintain the aspect ratio.', default=None)
parser.add_argument('--height', type=int, help='Number of characters per column. You can specify only width or height. The other will be automatically set to maintain the aspect ratio.', default=None)
parser.add_argument('--charset', type=str, help='"ascii", "chinese", or a path to a .txt file containing all the characters to be shown.', default='ascii')
parser.add_argument('--font', type=str, help='Path to a .ttf or .ttc font file you want to use.', default=None)
parser.add_argument('--font_size', type=int, help='Font size. Default is 15.', default=15)
parser.add_argument('--reverse_color', type=arg_bool, help='Reverse colors (black to white and white to black) before generating text animations.', default='False')
parser.add_argument('--equalization', type=arg_bool, help='Perform histogram equalization. It helps when the constrast of the original GIF is low, especially when grayscale=True. If grayscale=False, this should not be used since the colors may be changed drastically.', default='False')
parser.add_argument('--denoise', type=arg_bool, help='Perform denoising before converting to text. Sometimes it may help to reduce noise due to downsampling, but some fine details may be lost', default='False')
parser.add_argument('--grayscale', type=arg_bool, help='Output grayscale text GIF. If you set it to True, almost always you also want to set equalization=True.', default='False')
opts = parser.parse_args()
# check path
if not os.path.exists(opts.gif_path):
        raise Exception('GIF file does not exist.')
# read gif
im_list, duration = read_gif(opts.gif_path)
# set width and height
im_height, im_width, _ = im_list[0].shape
cond1 = opts.width is None
cond2 = opts.height is None
if cond1 and cond2:
scale = 30. / max(im_width, im_height)
width = im_width * scale
height = im_height * scale
elif cond1 and not cond2:
height = opts.height
width = float(height) / im_height * im_width
elif not cond1 and cond2:
width = opts.width
height = float(width) / im_width * im_height
else:
width = opts.width
height = opts.height
width = int(width)
height = int(height)
for i, im in enumerate(im_list):
im = Image.fromarray(im)
im = im.resize([width, height])
im = np.array(im)
im_list[i] = im
# reverse intensities
if opts.reverse_color:
im_list = [255-im for im in im_list]
# denoise
if opts.denoise:
median_filter = ImageFilter.MedianFilter()
for i, im in enumerate(im_list):
im = Image.fromarray(im)
im = im.filter(median_filter)
im = np.array(im, dtype=np.uint8)
im_list[i] = im
# histogram equalization
if opts.equalization:
for i, im in enumerate(im_list):
for channel in range(im.shape[-1]):
equalized = ImageOps.equalize(Image.fromarray(im[:, :, channel]))
im[:, :, channel] = equalized
im = np.array(im, dtype=np.uint8)
im_list[i] = im
# set charset
opts.charset = opts.charset.lower()
if opts.charset == 'ascii':
chars = get_ascii_chars()
elif opts.charset == 'chinese':
chars = get_chinese_chars()
else:
chars = open(opts.charset).read().strip()
# set font
if opts.font is None:
if opts.charset == 'ascii':
font_path = 'assets/Inconsolata-Bold.ttf'
elif opts.charset == 'chinese':
font_path = 'assets/SourceHanSans-Bold.ttc'
elif is_ascii(chars):
font_path = 'assets/Inconsolata-Bold.ttf'
else:
font_path = 'assets/SourceHanSans-Bold.ttc'
else:
font_path = opts.font
font = Font(font_path, opts.font_size)
# convert
text_images = images2text(im_list, chars, font, grayscale=opts.grayscale)
print('Writing to %s' % opts.out_path)
imageio.mimwrite(opts.out_path, text_images, duration=duration)
print('All Done!')
``` |
{
"source": "jiahaowork/idam",
"score": 2
} |
#### File: jiahaowork/idam/util.py
```python
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from scipy.spatial.transform import Rotation
def quat2mat(quat):
x, y, z, w = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)
return rotMat
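# Sanity-check sketch for quat2mat: the quaternion layout is (x, y, z, w) and a
# unit quaternion is assumed (no normalization is applied inside the function).
#
#     identity = torch.tensor([[0., 0., 0., 1.]])   # zero rotation
#     assert torch.allclose(quat2mat(identity), torch.eye(3).unsqueeze(0))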
def transform_point_cloud(point_cloud, rotation, translation):
if len(rotation.size()) == 2:
rot_mat = quat2mat(rotation)
else:
rot_mat = rotation
return torch.matmul(rot_mat, point_cloud) + translation.unsqueeze(2)
def npmat2euler(mats, seq='zyx'):
eulers = []
for i in range(mats.shape[0]):
r = Rotation.from_dcm(mats[i])
eulers.append(r.as_euler(seq, degrees=True))
return np.asarray(eulers, dtype='float32')
def batch_choice(data, k, p=None, replace=False):
# data is [B, N]
out = []
for i in range(len(data)):
        out.append(np.random.choice(data[i], size=k, p=p[i] if p is not None else None, replace=replace))
out = np.stack(out, 0)
return out
def pairwise_distance(src, dst):
# square of distance
inner = 2 * torch.matmul(src.transpose(-1, -2).contiguous(), dst) # src, dst (num_dims, num_points)
distances = torch.sum(src ** 2, dim=-2, keepdim=True).transpose(-1, -2).contiguous() - inner + torch.sum(dst ** 2,
dim=-2,
keepdim=True)
return distances
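# pairwise_distance (and nearest_neighbor below) rely on the identity
#     ||a - b||^2 = ||a||^2 - 2 * a.b + ||b||^2
# so all pairwise squared distances come from a single matrix multiply.
# A small sketch with points stored column-wise as (num_dims, num_points):
#
#     src = torch.tensor([[0., 1.], [0., 0.], [0., 0.]])   # points (0,0,0) and (1,0,0)
#     dst = torch.tensor([[0., 3.], [0., 0.], [0., 0.]])   # points (0,0,0) and (3,0,0)
#     pairwise_distance(src, dst)   # tensor([[0., 9.], [1., 4.]])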
def nearest_neighbor(src, dst):
inner = -2 * torch.matmul(src.transpose(1, 0).contiguous(), dst) # src, dst (num_dims, num_points)
distances = -torch.sum(src ** 2, dim=0, keepdim=True).transpose(1, 0).contiguous() - inner - torch.sum(dst ** 2,
dim=0,
keepdim=True)
distances, indices = distances.topk(k=1, dim=-1)
return distances, indices
def knn(x, k):
inner = -2 * torch.matmul(x.transpose(2, 1).contiguous(), x)
xx = torch.sum(x ** 2, dim=1, keepdim=True)
pairwise_distance = -xx - inner - xx.transpose(2, 1).contiguous()
idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
return idx
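# Usage sketch for knn; x follows the (batch_size, num_dims, num_points) layout
# noted above, and each point's own index is included since its self-distance is zero.
#
#     x = torch.randn(8, 3, 1024)   # 8 point clouds with 1024 points each
#     idx = knn(x, k=16)            # shape (8, 1024, 16): neighbour indices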
``` |
{
"source": "JiahaoYao/Cirq",
"score": 2
} |
#### File: cirq_google/calibration/workflow_test.py
```python
from typing import Optional
from unittest import mock
import itertools
import numpy as np
import pytest
import cirq
import cirq_google
import cirq_google.calibration.workflow as workflow
from cirq_google.calibration.engine_simulator import PhasedFSimEngineSimulator
from cirq_google.calibration.phased_fsim import (
ALL_ANGLES_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
FloquetPhasedFSimCalibrationOptions,
FloquetPhasedFSimCalibrationRequest,
PhaseCalibratedFSimGate,
PhasedFSimCharacterization,
PhasedFSimCalibrationResult,
WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION,
)
SQRT_ISWAP_INV_PARAMETERS = cirq_google.PhasedFSimCharacterization(
theta=np.pi / 4, zeta=0.0, chi=0.0, gamma=0.0, phi=0.0
)
SQRT_ISWAP_INV_GATE = cirq.FSimGate(np.pi / 4, 0.0)
def _fsim_identity_converter(gate: cirq.Gate) -> Optional[PhaseCalibratedFSimGate]:
if isinstance(gate, cirq.FSimGate):
return PhaseCalibratedFSimGate(gate, 0.0)
return None
def test_prepare_floquet_characterization_for_moment_none_for_measurements():
a, b, c, d = cirq.LineQubit.range(4)
moment = cirq.Moment(cirq.measure(a, b, c, d))
assert (
workflow.prepare_floquet_characterization_for_moment(
moment, WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION
)
is None
)
@pytest.mark.parametrize(
'options',
[WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION, ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION],
)
def test_prepare_characterization_for_moment_none_for_measurements(options):
a, b, c, d = cirq.LineQubit.range(4)
moment = cirq.Moment(cirq.measure(a, b, c, d))
assert workflow.prepare_characterization_for_moment(moment, options) is None
def test_prepare_floquet_characterization_for_moment_fails_for_non_gate_operation():
moment = cirq.Moment(cirq.GlobalPhaseOperation(coefficient=1.0))
with pytest.raises(workflow.IncompatibleMomentError):
workflow.prepare_floquet_characterization_for_moment(
moment, WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION
)
@pytest.mark.parametrize(
'options',
[WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION, ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION],
)
def test_prepare_characterization_for_moment_fails_for_non_gate_operation(options):
moment = cirq.Moment(cirq.GlobalPhaseOperation(coefficient=1.0))
with pytest.raises(workflow.IncompatibleMomentError):
workflow.prepare_characterization_for_moment(moment, options)
def test_prepare_floquet_characterization_for_moment_fails_for_unsupported_gate():
a, b = cirq.LineQubit.range(2)
moment = cirq.Moment(cirq.CZ(a, b))
with pytest.raises(workflow.IncompatibleMomentError):
workflow.prepare_floquet_characterization_for_moment(
moment,
WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
gates_translator=_fsim_identity_converter,
)
@pytest.mark.parametrize(
'options',
[WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION, ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION],
)
def test_prepare_characterization_for_moment_fails_for_unsupported_gate(options):
a, b = cirq.LineQubit.range(2)
moment = cirq.Moment(cirq.CZ(a, b))
with pytest.raises(workflow.IncompatibleMomentError):
workflow.prepare_characterization_for_moment(
moment,
options,
gates_translator=_fsim_identity_converter,
)
def test_prepare_floquet_characterization_for_moment_fails_for_mixed_gates():
a, b, c, d = cirq.LineQubit.range(4)
moment = cirq.Moment(
[
cirq.FSimGate(theta=np.pi / 4, phi=0.0).on(a, b),
cirq.FSimGate(theta=np.pi / 8, phi=0.0).on(c, d),
]
)
with pytest.raises(workflow.IncompatibleMomentError):
workflow.prepare_floquet_characterization_for_moment(
moment,
WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
gates_translator=_fsim_identity_converter,
)
@pytest.mark.parametrize(
'options',
[WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION, ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION],
)
def test_prepare_characterization_for_moment_fails_for_mixed_gates(options):
a, b, c, d = cirq.LineQubit.range(4)
moment = cirq.Moment(
[
cirq.FSimGate(theta=np.pi / 4, phi=0.0).on(a, b),
cirq.FSimGate(theta=np.pi / 8, phi=0.0).on(c, d),
]
)
with pytest.raises(workflow.IncompatibleMomentError):
workflow.prepare_characterization_for_moment(
moment,
            options,
gates_translator=_fsim_identity_converter,
)
def test_prepare_floquet_characterization_for_moment_fails_for_mixed_moment():
a, b, c = cirq.LineQubit.range(3)
moment = cirq.Moment([cirq.FSimGate(theta=np.pi / 4, phi=0.0).on(a, b), cirq.Z.on(c)])
with pytest.raises(workflow.IncompatibleMomentError):
workflow.prepare_floquet_characterization_for_moment(
moment, WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION
)
@pytest.mark.parametrize(
'options',
[WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION, ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION],
)
def test_prepare_characterization_for_moment_fails_for_mixed_moment(options):
a, b, c = cirq.LineQubit.range(3)
moment = cirq.Moment([cirq.FSimGate(theta=np.pi / 4, phi=0.0).on(a, b), cirq.Z.on(c)])
with pytest.raises(workflow.IncompatibleMomentError):
workflow.prepare_characterization_for_moment(
            moment, options
)
def test_prepare_floquet_characterization_for_moments():
a, b, c, d = cirq.LineQubit.range(4)
circuit = cirq.Circuit(
[
[cirq.X(a), cirq.Y(c)],
[SQRT_ISWAP_INV_GATE.on(a, b), SQRT_ISWAP_INV_GATE.on(c, d)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
[cirq.WaitGate(duration=cirq.Duration(micros=5.0)).on(b)],
]
)
options = WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION
circuit_with_calibration, requests = workflow.prepare_floquet_characterization_for_moments(
circuit, options=options
)
assert requests == [
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((b, c),), gate=SQRT_ISWAP_INV_GATE, options=options
),
]
assert circuit_with_calibration.circuit == circuit
assert circuit_with_calibration.moment_to_calibration == [None, 0, 1, None]
@pytest.mark.parametrize(
'options_cls',
[
(
WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
cirq_google.FloquetPhasedFSimCalibrationRequest,
),
(ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION, cirq_google.XEBPhasedFSimCalibrationRequest),
],
)
def test_prepare_characterization_for_moments(options_cls):
options, cls = options_cls
a, b, c, d = cirq.LineQubit.range(4)
circuit = cirq.Circuit(
[
[cirq.X(a), cirq.Y(c)],
[SQRT_ISWAP_INV_GATE.on(a, b), SQRT_ISWAP_INV_GATE.on(c, d)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
[cirq.WaitGate(duration=cirq.Duration(micros=5.0)).on(b)],
]
)
circuit_with_calibration, requests = workflow.prepare_characterization_for_moments(
circuit, options=options
)
assert requests == [
cls(pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((b, c),), gate=SQRT_ISWAP_INV_GATE, options=options),
]
assert circuit_with_calibration.circuit == circuit
assert circuit_with_calibration.moment_to_calibration == [None, 0, 1, None]
def test_prepare_floquet_characterization_for_moments_merges_sub_sets():
a, b, c, d, e = cirq.LineQubit.range(5)
circuit = cirq.Circuit(
[
[cirq.X(a), cirq.Y(c)],
[SQRT_ISWAP_INV_GATE.on(a, b), SQRT_ISWAP_INV_GATE.on(c, d)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
[SQRT_ISWAP_INV_GATE.on(a, b)],
]
)
circuit += cirq.Moment([SQRT_ISWAP_INV_GATE.on(b, c), SQRT_ISWAP_INV_GATE.on(d, e)])
options = WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION
circuit_with_calibration, requests = workflow.prepare_floquet_characterization_for_moments(
circuit, options=options
)
assert requests == [
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((b, c), (d, e)), gate=SQRT_ISWAP_INV_GATE, options=options
),
]
assert circuit_with_calibration.circuit == circuit
assert circuit_with_calibration.moment_to_calibration == [None, 0, 1, 0, 1]
@pytest.mark.parametrize(
'options_cls',
[
(
WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
cirq_google.FloquetPhasedFSimCalibrationRequest,
),
(ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION, cirq_google.XEBPhasedFSimCalibrationRequest),
],
)
def test_prepare_characterization_for_moments_merges_sub_sets(options_cls):
options, cls = options_cls
a, b, c, d, e = cirq.LineQubit.range(5)
circuit = cirq.Circuit(
[
[cirq.X(a), cirq.Y(c)],
[SQRT_ISWAP_INV_GATE.on(a, b), SQRT_ISWAP_INV_GATE.on(c, d)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
[SQRT_ISWAP_INV_GATE.on(a, b)],
]
)
circuit += cirq.Moment([SQRT_ISWAP_INV_GATE.on(b, c), SQRT_ISWAP_INV_GATE.on(d, e)])
circuit_with_calibration, requests = workflow.prepare_characterization_for_moments(
circuit, options=options
)
assert requests == [
cls(pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((b, c), (d, e)), gate=SQRT_ISWAP_INV_GATE, options=options),
]
assert circuit_with_calibration.circuit == circuit
assert circuit_with_calibration.moment_to_calibration == [None, 0, 1, 0, 1]
def test_prepare_floquet_characterization_for_moments_merges_many_circuits():
options = WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION
a, b, c, d, e = cirq.LineQubit.range(5)
circuit_1 = cirq.Circuit(
[
[cirq.X(a), cirq.Y(c)],
[SQRT_ISWAP_INV_GATE.on(a, b), SQRT_ISWAP_INV_GATE.on(c, d)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
[SQRT_ISWAP_INV_GATE.on(a, b)],
]
)
circuit_with_calibration_1, requests_1 = workflow.prepare_floquet_characterization_for_moments(
circuit_1, options=options
)
assert requests_1 == [
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((b, c),), gate=SQRT_ISWAP_INV_GATE, options=options
),
]
assert circuit_with_calibration_1.circuit == circuit_1
assert circuit_with_calibration_1.moment_to_calibration == [None, 0, 1, 0]
circuit_2 = cirq.Circuit([SQRT_ISWAP_INV_GATE.on(b, c), SQRT_ISWAP_INV_GATE.on(d, e)])
circuit_with_calibration_2, requests_2 = workflow.prepare_floquet_characterization_for_moments(
circuit_2, options=options, initial=requests_1
)
assert requests_2 == [
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((b, c), (d, e)), gate=SQRT_ISWAP_INV_GATE, options=options
),
]
assert circuit_with_calibration_2.circuit == circuit_2
assert circuit_with_calibration_2.moment_to_calibration == [1]
@pytest.mark.parametrize(
'options_cls',
[
(
WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
cirq_google.FloquetPhasedFSimCalibrationRequest,
),
(ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION, cirq_google.XEBPhasedFSimCalibrationRequest),
],
)
def test_prepare_characterization_for_moments_merges_many_circuits(options_cls):
options, cls = options_cls
a, b, c, d, e = cirq.LineQubit.range(5)
circuit_1 = cirq.Circuit(
[
[cirq.X(a), cirq.Y(c)],
[SQRT_ISWAP_INV_GATE.on(a, b), SQRT_ISWAP_INV_GATE.on(c, d)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
[SQRT_ISWAP_INV_GATE.on(a, b)],
]
)
circuit_with_calibration_1, requests_1 = workflow.prepare_characterization_for_moments(
circuit_1, options=options
)
assert requests_1 == [
cls(pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((b, c),), gate=SQRT_ISWAP_INV_GATE, options=options),
]
assert circuit_with_calibration_1.circuit == circuit_1
assert circuit_with_calibration_1.moment_to_calibration == [None, 0, 1, 0]
circuit_2 = cirq.Circuit([SQRT_ISWAP_INV_GATE.on(b, c), SQRT_ISWAP_INV_GATE.on(d, e)])
circuit_with_calibration_2, requests_2 = workflow.prepare_characterization_for_moments(
circuit_2, options=options, initial=requests_1
)
assert requests_2 == [
cls(pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((b, c), (d, e)), gate=SQRT_ISWAP_INV_GATE, options=options),
]
assert circuit_with_calibration_2.circuit == circuit_2
assert circuit_with_calibration_2.moment_to_calibration == [1]
def test_prepare_floquet_characterization_for_moments_does_not_merge_sub_sets_when_disabled():
a, b, c, d, e = cirq.LineQubit.range(5)
circuit = cirq.Circuit(
[
[cirq.X(a), cirq.Y(c)],
[SQRT_ISWAP_INV_GATE.on(a, b), SQRT_ISWAP_INV_GATE.on(c, d)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
[SQRT_ISWAP_INV_GATE.on(a, b)],
]
)
circuit += cirq.Circuit(
[SQRT_ISWAP_INV_GATE.on(b, c), SQRT_ISWAP_INV_GATE.on(d, e)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
)
options = WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION
circuit_with_calibration, requests = workflow.prepare_floquet_characterization_for_moments(
circuit, options=options, merge_subsets=False
)
assert requests == [
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((b, c),), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((a, b),), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((b, c), (d, e)), gate=SQRT_ISWAP_INV_GATE, options=options
),
]
assert circuit_with_calibration.circuit == circuit
assert circuit_with_calibration.moment_to_calibration == [None, 0, 1, 2, 3, 1]
@pytest.mark.parametrize(
'options_cls',
[
(
WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
cirq_google.FloquetPhasedFSimCalibrationRequest,
),
(ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION, cirq_google.XEBPhasedFSimCalibrationRequest),
],
)
def test_prepare_characterization_for_moments_does_not_merge_sub_sets_when_disabled(options_cls):
options, cls = options_cls
a, b, c, d, e = cirq.LineQubit.range(5)
circuit = cirq.Circuit(
[
[cirq.X(a), cirq.Y(c)],
[SQRT_ISWAP_INV_GATE.on(a, b), SQRT_ISWAP_INV_GATE.on(c, d)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
[SQRT_ISWAP_INV_GATE.on(a, b)],
]
)
circuit += cirq.Circuit(
[SQRT_ISWAP_INV_GATE.on(b, c), SQRT_ISWAP_INV_GATE.on(d, e)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
)
circuit_with_calibration, requests = workflow.prepare_characterization_for_moments(
circuit, options=options, merge_subsets=False
)
assert requests == [
cls(pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((b, c),), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((a, b),), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((b, c), (d, e)), gate=SQRT_ISWAP_INV_GATE, options=options),
]
assert circuit_with_calibration.circuit == circuit
assert circuit_with_calibration.moment_to_calibration == [None, 0, 1, 2, 3, 1]
def test_prepare_floquet_characterization_for_moments_merges_compatible_sets():
a, b, c, d, e, f = cirq.LineQubit.range(6)
circuit = cirq.Circuit([cirq.X(a), cirq.Y(c)])
circuit += cirq.Moment([SQRT_ISWAP_INV_GATE.on(a, b)])
circuit += cirq.Moment([SQRT_ISWAP_INV_GATE.on(b, c), SQRT_ISWAP_INV_GATE.on(d, e)])
circuit += cirq.Moment([SQRT_ISWAP_INV_GATE.on(c, d)])
circuit += cirq.Moment([SQRT_ISWAP_INV_GATE.on(a, f), SQRT_ISWAP_INV_GATE.on(d, e)])
options = WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION
circuit_with_calibration, requests = workflow.prepare_floquet_characterization_for_moments(
circuit, options=options
)
assert requests == [
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((a, f), (b, c), (d, e)), gate=SQRT_ISWAP_INV_GATE, options=options
),
]
assert circuit_with_calibration.circuit == circuit
assert circuit_with_calibration.moment_to_calibration == [None, 0, 1, 0, 1]
@pytest.mark.parametrize(
'options_cls',
[
(
WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
cirq_google.FloquetPhasedFSimCalibrationRequest,
),
(ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION, cirq_google.XEBPhasedFSimCalibrationRequest),
],
)
def test_prepare_characterization_for_moments_merges_compatible_sets(options_cls):
options, cls = options_cls
a, b, c, d, e, f = cirq.LineQubit.range(6)
circuit = cirq.Circuit([cirq.X(a), cirq.Y(c)])
circuit += cirq.Moment([SQRT_ISWAP_INV_GATE.on(a, b)])
circuit += cirq.Moment([SQRT_ISWAP_INV_GATE.on(b, c), SQRT_ISWAP_INV_GATE.on(d, e)])
circuit += cirq.Moment([SQRT_ISWAP_INV_GATE.on(c, d)])
circuit += cirq.Moment([SQRT_ISWAP_INV_GATE.on(a, f), SQRT_ISWAP_INV_GATE.on(d, e)])
circuit_with_calibration, requests = workflow.prepare_characterization_for_moments(
circuit, options=options
)
assert requests == [
cls(pairs=((a, b), (c, d)), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((a, f), (b, c), (d, e)), gate=SQRT_ISWAP_INV_GATE, options=options),
]
assert circuit_with_calibration.circuit == circuit
assert circuit_with_calibration.moment_to_calibration == [None, 0, 1, 0, 1]
def test_prepare_floquet_characterization_for_operations():
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
q11 = cirq.GridQubit(1, 1)
q20 = cirq.GridQubit(2, 0)
q21 = cirq.GridQubit(2, 1)
options = WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION
# Prepare characterizations for a single circuit.
circuit_1 = cirq.Circuit(
[
[cirq.X(q00), cirq.Y(q11)],
[SQRT_ISWAP_INV_GATE.on(q00, q01), SQRT_ISWAP_INV_GATE.on(q10, q11)],
[cirq.WaitGate(duration=cirq.Duration(micros=5.0)).on(q01)],
]
)
requests_1 = workflow.prepare_floquet_characterization_for_operations(
circuit_1, options=options
)
assert requests_1 == [
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((q10, q11),), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((q00, q01),), gate=SQRT_ISWAP_INV_GATE, options=options
),
]
# Prepare characterizations for a list of circuits.
circuit_2 = cirq.Circuit(
[
[SQRT_ISWAP_INV_GATE.on(q00, q01), SQRT_ISWAP_INV_GATE.on(q10, q11)],
[SQRT_ISWAP_INV_GATE.on(q00, q10), SQRT_ISWAP_INV_GATE.on(q01, q11)],
[SQRT_ISWAP_INV_GATE.on(q10, q20), SQRT_ISWAP_INV_GATE.on(q11, q21)],
]
)
requests_2 = workflow.prepare_floquet_characterization_for_operations(
[circuit_1, circuit_2], options=options
)
# The order of moments originates from HALF_GRID_STAGGERED_PATTERN.
assert requests_2 == [
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((q00, q10), (q11, q21)), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((q01, q11), (q10, q20)), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((q10, q11),), gate=SQRT_ISWAP_INV_GATE, options=options
),
cirq_google.calibration.FloquetPhasedFSimCalibrationRequest(
pairs=((q00, q01),), gate=SQRT_ISWAP_INV_GATE, options=options
),
]
@pytest.mark.parametrize(
'options_cls',
[
(
WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
cirq_google.FloquetPhasedFSimCalibrationRequest,
),
(ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION, cirq_google.XEBPhasedFSimCalibrationRequest),
],
)
def test_prepare_characterization_for_operations(options_cls):
options, cls = options_cls
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
q11 = cirq.GridQubit(1, 1)
q20 = cirq.GridQubit(2, 0)
q21 = cirq.GridQubit(2, 1)
# Prepare characterizations for a single circuit.
circuit_1 = cirq.Circuit(
[
[cirq.X(q00), cirq.Y(q11)],
[SQRT_ISWAP_INV_GATE.on(q00, q01), SQRT_ISWAP_INV_GATE.on(q10, q11)],
[cirq.WaitGate(duration=cirq.Duration(micros=5.0)).on(q01)],
]
)
requests_1 = workflow.prepare_characterization_for_operations(circuit_1, options=options)
assert requests_1 == [
cls(pairs=((q10, q11),), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((q00, q01),), gate=SQRT_ISWAP_INV_GATE, options=options),
]
# Prepare characterizations for a list of circuits.
circuit_2 = cirq.Circuit(
[
[SQRT_ISWAP_INV_GATE.on(q00, q01), SQRT_ISWAP_INV_GATE.on(q10, q11)],
[SQRT_ISWAP_INV_GATE.on(q00, q10), SQRT_ISWAP_INV_GATE.on(q01, q11)],
[SQRT_ISWAP_INV_GATE.on(q10, q20), SQRT_ISWAP_INV_GATE.on(q11, q21)],
]
)
requests_2 = workflow.prepare_characterization_for_operations(
[circuit_1, circuit_2], options=options
)
# The order of moments originates from HALF_GRID_STAGGERED_PATTERN.
assert requests_2 == [
cls(pairs=((q00, q10), (q11, q21)), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((q01, q11), (q10, q20)), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((q10, q11),), gate=SQRT_ISWAP_INV_GATE, options=options),
cls(pairs=((q00, q01),), gate=SQRT_ISWAP_INV_GATE, options=options),
]
def test_prepare_floquet_characterization_for_operations_when_no_interactions():
q00 = cirq.GridQubit(0, 0)
q11 = cirq.GridQubit(1, 1)
circuit = cirq.Circuit([cirq.X(q00), cirq.X(q11)])
assert workflow.prepare_floquet_characterization_for_operations(circuit) == []
@pytest.mark.parametrize(
'options',
[WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION, ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION],
)
def test_prepare_characterization_for_operations_when_no_interactions(options):
q00 = cirq.GridQubit(0, 0)
q11 = cirq.GridQubit(1, 1)
circuit = cirq.Circuit([cirq.X(q00), cirq.X(q11)])
assert workflow.prepare_characterization_for_operations(circuit, options) == []
def test_prepare_floquet_characterization_for_operations_when_non_grid_fails():
q00 = cirq.GridQubit(0, 0)
q11 = cirq.GridQubit(1, 1)
circuit = cirq.Circuit(SQRT_ISWAP_INV_GATE.on(q00, q11))
with pytest.raises(ValueError):
workflow.prepare_floquet_characterization_for_operations(circuit)
@pytest.mark.parametrize(
'options',
[WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION, ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION],
)
def test_prepare_characterization_for_operations_when_non_grid_fails(options):
q00 = cirq.GridQubit(0, 0)
q11 = cirq.GridQubit(1, 1)
circuit = cirq.Circuit(SQRT_ISWAP_INV_GATE.on(q00, q11))
with pytest.raises(ValueError):
workflow.prepare_characterization_for_operations(circuit, options)
def test_prepare_floquet_characterization_for_operations_when_multiple_gates_fails():
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
circuit = cirq.Circuit(
[SQRT_ISWAP_INV_GATE.on(q00, q01), cirq.FSimGate(theta=0.0, phi=np.pi).on(q00, q01)]
)
with pytest.raises(ValueError):
workflow.prepare_floquet_characterization_for_operations(
circuit, gates_translator=_fsim_identity_converter
)
@pytest.mark.parametrize(
'options',
[WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION, ALL_ANGLES_XEB_PHASED_FSIM_CHARACTERIZATION],
)
def test_prepare_characterization_for_operations_when_multiple_gates_fails(options):
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
circuit = cirq.Circuit(
[SQRT_ISWAP_INV_GATE.on(q00, q01), cirq.FSimGate(theta=0.0, phi=np.pi).on(q00, q01)]
)
with pytest.raises(ValueError):
workflow.prepare_characterization_for_operations(
circuit, gates_translator=_fsim_identity_converter, options=options
)
def test_make_zeta_chi_gamma_compensation_for_operations():
a, b, c, d = cirq.LineQubit.range(4)
parameters_ab = cirq_google.PhasedFSimCharacterization(zeta=0.5, chi=0.4, gamma=0.3)
parameters_bc = cirq_google.PhasedFSimCharacterization(zeta=-0.5, chi=-0.4, gamma=-0.3)
parameters_cd = cirq_google.PhasedFSimCharacterization(zeta=0.2, chi=0.3, gamma=0.4)
parameters_dict = {(a, b): parameters_ab, (b, c): parameters_bc, (c, d): parameters_cd}
engine_simulator = cirq_google.PhasedFSimEngineSimulator.create_from_dictionary_sqrt_iswap(
parameters={
pair: parameters.merge_with(SQRT_ISWAP_INV_PARAMETERS)
for pair, parameters in parameters_dict.items()
}
)
circuit = cirq.Circuit(
[
[cirq.X(a), cirq.Y(c)],
[SQRT_ISWAP_INV_GATE.on(a, b), SQRT_ISWAP_INV_GATE.on(c, d)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
]
)
options = cirq_google.FloquetPhasedFSimCalibrationOptions(
characterize_theta=False,
characterize_zeta=True,
characterize_chi=True,
characterize_gamma=True,
characterize_phi=False,
)
characterizations = [
PhasedFSimCalibrationResult(
parameters={pair: parameters}, gate=SQRT_ISWAP_INV_GATE, options=options
)
for pair, parameters in parameters_dict.items()
]
calibrated_circuit = workflow.make_zeta_chi_gamma_compensation_for_operations(
circuit,
characterizations,
)
assert cirq.allclose_up_to_global_phase(
engine_simulator.final_state_vector(calibrated_circuit),
cirq.final_state_vector(circuit),
)
def test_make_zeta_chi_gamma_compensation_for_operations_permit_mixed_moments():
with pytest.raises(NotImplementedError):
workflow.make_zeta_chi_gamma_compensation_for_operations(
cirq.Circuit(), [], permit_mixed_moments=True
)
def test_run_characterization():
q_00, q_01, q_02, q_03 = [cirq.GridQubit(0, index) for index in range(4)]
gate = cirq.FSimGate(theta=np.pi / 4, phi=0.0)
request = FloquetPhasedFSimCalibrationRequest(
gate=gate,
pairs=((q_00, q_01), (q_02, q_03)),
options=FloquetPhasedFSimCalibrationOptions(
characterize_theta=True,
characterize_zeta=True,
characterize_chi=False,
characterize_gamma=False,
characterize_phi=True,
),
)
result = cirq_google.CalibrationResult(
code=cirq_google.api.v2.calibration_pb2.SUCCESS,
error_message=None,
token=None,
valid_until=None,
metrics=cirq_google.Calibration(
cirq_google.api.v2.metrics_pb2.MetricsSnapshot(
metrics=[
cirq_google.api.v2.metrics_pb2.Metric(
name='angles',
targets=[
'0_qubit_a',
'0_qubit_b',
'0_theta_est',
'0_zeta_est',
'0_phi_est',
'1_qubit_a',
'1_qubit_b',
'1_theta_est',
'1_zeta_est',
'1_phi_est',
],
values=[
cirq_google.api.v2.metrics_pb2.Value(str_val='0_0'),
cirq_google.api.v2.metrics_pb2.Value(str_val='0_1'),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.1),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.2),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.3),
cirq_google.api.v2.metrics_pb2.Value(str_val='0_2'),
cirq_google.api.v2.metrics_pb2.Value(str_val='0_3'),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.4),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.5),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.6),
],
)
]
)
),
)
job = cirq_google.engine.EngineJob('', '', '', None)
job._calibration_results = [result]
engine = mock.MagicMock(spec=cirq_google.Engine)
engine.run_calibration.return_value = job
sampler = cirq_google.QuantumEngineSampler(
engine=engine, processor_id='qproc', gate_set=cirq_google.FSIM_GATESET
)
progress_calls = []
def progress(step: int, steps: int) -> None:
progress_calls.append((step, steps))
actual = workflow.run_calibrations([request], sampler, progress_func=progress)
expected = [
PhasedFSimCalibrationResult(
parameters={
(q_00, q_01): PhasedFSimCharacterization(
theta=0.1, zeta=0.2, chi=None, gamma=None, phi=0.3
),
(q_02, q_03): PhasedFSimCharacterization(
theta=0.4, zeta=0.5, chi=None, gamma=None, phi=0.6
),
},
gate=gate,
options=FloquetPhasedFSimCalibrationOptions(
characterize_theta=True,
characterize_zeta=True,
characterize_chi=False,
characterize_gamma=False,
characterize_phi=True,
),
)
]
assert actual == expected
assert progress_calls == [(1, 1)]
def test_run_characterization_with_engine():
q_00, q_01, q_02, q_03 = [cirq.GridQubit(0, index) for index in range(4)]
gate = cirq.FSimGate(theta=np.pi / 4, phi=0.0)
request = FloquetPhasedFSimCalibrationRequest(
gate=gate,
pairs=((q_00, q_01), (q_02, q_03)),
options=FloquetPhasedFSimCalibrationOptions(
characterize_theta=True,
characterize_zeta=True,
characterize_chi=False,
characterize_gamma=False,
characterize_phi=True,
),
)
result = cirq_google.CalibrationResult(
code=cirq_google.api.v2.calibration_pb2.SUCCESS,
error_message=None,
token=None,
valid_until=None,
metrics=cirq_google.Calibration(
cirq_google.api.v2.metrics_pb2.MetricsSnapshot(
metrics=[
cirq_google.api.v2.metrics_pb2.Metric(
name='angles',
targets=[
'0_qubit_a',
'0_qubit_b',
'0_theta_est',
'0_zeta_est',
'0_phi_est',
'1_qubit_a',
'1_qubit_b',
'1_theta_est',
'1_zeta_est',
'1_phi_est',
],
values=[
cirq_google.api.v2.metrics_pb2.Value(str_val='0_0'),
cirq_google.api.v2.metrics_pb2.Value(str_val='0_1'),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.1),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.2),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.3),
cirq_google.api.v2.metrics_pb2.Value(str_val='0_2'),
cirq_google.api.v2.metrics_pb2.Value(str_val='0_3'),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.4),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.5),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.6),
],
)
]
)
),
)
job = cirq_google.engine.EngineJob('', '', '', None)
job._calibration_results = [result]
engine = mock.MagicMock(spec=cirq_google.Engine)
engine.run_calibration.return_value = job
progress_calls = []
def progress(step: int, steps: int) -> None:
progress_calls.append((step, steps))
actual = workflow.run_calibrations(
[request], engine, 'qproc', cirq_google.FSIM_GATESET, progress_func=progress
)
expected = [
PhasedFSimCalibrationResult(
parameters={
(q_00, q_01): PhasedFSimCharacterization(
theta=0.1, zeta=0.2, chi=None, gamma=None, phi=0.3
),
(q_02, q_03): PhasedFSimCharacterization(
theta=0.4, zeta=0.5, chi=None, gamma=None, phi=0.6
),
},
gate=gate,
options=FloquetPhasedFSimCalibrationOptions(
characterize_theta=True,
characterize_zeta=True,
characterize_chi=False,
characterize_gamma=False,
characterize_phi=True,
),
)
]
assert actual == expected
assert progress_calls == [(1, 1)]
def test_run_characterization_empty():
assert workflow.run_calibrations([], None, 'qproc', cirq_google.FSIM_GATESET) == []
def test_run_characterization_fails_when_invalid_arguments():
with pytest.raises(ValueError):
assert workflow.run_calibrations(
[], None, 'qproc', cirq_google.FSIM_GATESET, max_layers_per_request=0
)
request = FloquetPhasedFSimCalibrationRequest(
gate=SQRT_ISWAP_INV_GATE,
pairs=(),
options=WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
)
engine = mock.MagicMock(spec=cirq_google.Engine)
with pytest.raises(ValueError):
assert workflow.run_calibrations([request], engine, None, cirq_google.FSIM_GATESET)
with pytest.raises(ValueError):
assert workflow.run_calibrations([request], engine, 'qproc', None)
with pytest.raises(ValueError):
assert workflow.run_calibrations([request], 0, 'qproc', cirq_google.FSIM_GATESET)
def test_run_characterization_with_simulator():
q_00, q_01, q_02, q_03 = [cirq.GridQubit(0, index) for index in range(4)]
gate = SQRT_ISWAP_INV_GATE
request = FloquetPhasedFSimCalibrationRequest(
gate=gate,
pairs=((q_00, q_01), (q_02, q_03)),
options=FloquetPhasedFSimCalibrationOptions(
characterize_theta=True,
characterize_zeta=True,
characterize_chi=False,
characterize_gamma=False,
characterize_phi=True,
),
)
simulator = PhasedFSimEngineSimulator.create_with_ideal_sqrt_iswap()
actual = workflow.run_calibrations([request], simulator)
assert actual == [
PhasedFSimCalibrationResult(
parameters={
(q_00, q_01): PhasedFSimCharacterization(
theta=np.pi / 4, zeta=0.0, chi=None, gamma=None, phi=0.0
),
(q_02, q_03): PhasedFSimCharacterization(
theta=np.pi / 4, zeta=0.0, chi=None, gamma=None, phi=0.0
),
},
gate=SQRT_ISWAP_INV_GATE,
options=FloquetPhasedFSimCalibrationOptions(
characterize_theta=True,
characterize_zeta=True,
characterize_chi=False,
characterize_gamma=False,
characterize_phi=True,
),
)
]
def test_run_floquet_characterization_for_moments():
q_00, q_01, q_02, q_03 = [cirq.GridQubit(0, index) for index in range(4)]
gate = cirq.FSimGate(theta=np.pi / 4, phi=0.0)
circuit = cirq.Circuit([gate.on(q_00, q_01), gate.on(q_02, q_03)])
options = FloquetPhasedFSimCalibrationOptions(
characterize_theta=True,
characterize_zeta=True,
characterize_chi=False,
characterize_gamma=False,
characterize_phi=True,
)
job = cirq_google.engine.EngineJob('', '', '', None)
job._calibration_results = [
cirq_google.CalibrationResult(
code=cirq_google.api.v2.calibration_pb2.SUCCESS,
error_message=None,
token=None,
valid_until=None,
metrics=cirq_google.Calibration(
cirq_google.api.v2.metrics_pb2.MetricsSnapshot(
metrics=[
cirq_google.api.v2.metrics_pb2.Metric(
name='angles',
targets=[
'0_qubit_a',
'0_qubit_b',
'0_theta_est',
'0_zeta_est',
'0_phi_est',
'1_qubit_a',
'1_qubit_b',
'1_theta_est',
'1_zeta_est',
'1_phi_est',
],
values=[
cirq_google.api.v2.metrics_pb2.Value(str_val='0_0'),
cirq_google.api.v2.metrics_pb2.Value(str_val='0_1'),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.1),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.2),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.3),
cirq_google.api.v2.metrics_pb2.Value(str_val='0_2'),
cirq_google.api.v2.metrics_pb2.Value(str_val='0_3'),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.4),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.5),
cirq_google.api.v2.metrics_pb2.Value(double_val=0.6),
],
)
]
)
),
)
]
engine = mock.MagicMock(spec=cirq_google.Engine)
engine.run_calibration.return_value = job
circuit_with_calibration, requests = workflow.run_floquet_characterization_for_moments(
circuit, engine, 'qproc', cirq_google.FSIM_GATESET, options=options
)
assert requests == [
PhasedFSimCalibrationResult(
parameters={
(q_00, q_01): PhasedFSimCharacterization(
theta=0.1, zeta=0.2, chi=None, gamma=None, phi=0.3
),
(q_02, q_03): PhasedFSimCharacterization(
theta=0.4, zeta=0.5, chi=None, gamma=None, phi=0.6
),
},
gate=gate,
options=options,
)
]
assert circuit_with_calibration.circuit == circuit
assert circuit_with_calibration.moment_to_calibration == [0]
@pytest.mark.parametrize(
'theta,zeta,chi,gamma,phi',
itertools.product([0.1, 0.7], [-0.3, 0.1, 0.5], [-0.3, 0.2, 0.4], [-0.6, 0.1, 0.6], [0.2, 0.6]),
)
def test_fsim_phase_corrections(
theta: float, zeta: float, chi: float, gamma: float, phi: float
) -> None:
a, b = cirq.LineQubit.range(2)
expected_gate = cirq.PhasedFSimGate(theta=theta, zeta=-zeta, chi=-chi, gamma=-gamma, phi=phi)
expected = cirq.unitary(expected_gate)
corrected = workflow.FSimPhaseCorrections.from_characterization(
(a, b),
PhaseCalibratedFSimGate(cirq.FSimGate(theta=theta, phi=phi), 0.0),
cirq_google.PhasedFSimCharacterization(
theta=theta, zeta=zeta, chi=chi, gamma=gamma, phi=phi
),
characterization_index=5,
)
actual = cirq.unitary(corrected.as_circuit())
assert cirq.equal_up_to_global_phase(actual, expected)
assert corrected.moment_to_calibration == [None, 5, None]
@pytest.mark.parametrize(
'theta,zeta,chi,gamma,phi',
itertools.product(
[np.pi / 4, -0.2], [-0.3, 0.1, 0.5], [-0.3, 0.2, 0.4], [-0.6, 0.1, 0.6], [0.2, 0.6]
),
)
def test_phase_corrected_fsim_operations_with_phase_exponent(
theta: float, zeta: float, chi: float, gamma: float, phi: float
) -> None:
a, b = cirq.LineQubit.range(2)
phase_exponent = 0.5
# Theta is negated to match the phase exponent of 0.5.
expected_gate = cirq.PhasedFSimGate(theta=-theta, zeta=-zeta, chi=-chi, gamma=-gamma, phi=phi)
expected = cirq.unitary(expected_gate)
corrected = workflow.FSimPhaseCorrections.from_characterization(
(a, b),
PhaseCalibratedFSimGate(cirq.FSimGate(theta=theta, phi=phi), phase_exponent),
cirq_google.PhasedFSimCharacterization(
theta=theta, zeta=zeta, chi=chi, gamma=gamma, phi=phi
),
characterization_index=5,
)
actual = cirq.unitary(corrected.as_circuit())
assert cirq.equal_up_to_global_phase(actual, expected)
assert corrected.moment_to_calibration == [None, 5, None]
def test_zeta_chi_gamma_calibration_for_moments():
a, b = cirq.LineQubit.range(2)
characterizations = [
PhasedFSimCalibrationResult(
parameters={(a, b): SQRT_ISWAP_INV_PARAMETERS},
gate=SQRT_ISWAP_INV_GATE,
options=ALL_ANGLES_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
)
]
moment_allocations = [0]
for circuit in [
cirq.Circuit(cirq.FSimGate(theta=np.pi / 4, phi=0.0).on(a, b)),
cirq.Circuit(cirq.FSimGate(theta=-np.pi / 4, phi=0.0).on(a, b)),
]:
calibrated_circuit = workflow.make_zeta_chi_gamma_compensation_for_moments(
workflow.CircuitWithCalibration(circuit, moment_allocations), characterizations
)
assert np.allclose(cirq.unitary(circuit), cirq.unitary(calibrated_circuit.circuit))
assert calibrated_circuit.moment_to_calibration == [None, 0, None]
def test_zeta_chi_gamma_calibration_for_moments_invalid_argument_fails() -> None:
a, b, c = cirq.LineQubit.range(3)
with pytest.raises(ValueError):
circuit_with_calibration = workflow.CircuitWithCalibration(cirq.Circuit(), [1])
workflow.make_zeta_chi_gamma_compensation_for_moments(circuit_with_calibration, [])
with pytest.raises(ValueError):
circuit_with_calibration = workflow.CircuitWithCalibration(
cirq.Circuit(SQRT_ISWAP_INV_GATE.on(a, b)), [None]
)
workflow.make_zeta_chi_gamma_compensation_for_moments(circuit_with_calibration, [])
with pytest.raises(ValueError):
circuit_with_calibration = workflow.CircuitWithCalibration(
cirq.Circuit(SQRT_ISWAP_INV_GATE.on(a, b)), [0]
)
characterizations = [
PhasedFSimCalibrationResult(
parameters={},
gate=SQRT_ISWAP_INV_GATE,
options=WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
)
]
workflow.make_zeta_chi_gamma_compensation_for_moments(
circuit_with_calibration, characterizations
)
with pytest.raises(workflow.IncompatibleMomentError):
circuit_with_calibration = workflow.CircuitWithCalibration(
cirq.Circuit(cirq.GlobalPhaseOperation(coefficient=1.0)), [None]
)
workflow.make_zeta_chi_gamma_compensation_for_moments(circuit_with_calibration, [])
with pytest.raises(workflow.IncompatibleMomentError):
circuit_with_calibration = workflow.CircuitWithCalibration(
cirq.Circuit(cirq.CZ.on(a, b)), [None]
)
workflow.make_zeta_chi_gamma_compensation_for_moments(circuit_with_calibration, [])
with pytest.raises(workflow.IncompatibleMomentError):
circuit_with_calibration = workflow.CircuitWithCalibration(
cirq.Circuit([SQRT_ISWAP_INV_GATE.on(a, b), cirq.Z.on(c)]), [0]
)
characterizations = [
PhasedFSimCalibrationResult(
parameters={
(a, b): PhasedFSimCharacterization(
theta=0.1, zeta=0.2, chi=0.3, gamma=0.4, phi=0.5
)
},
gate=SQRT_ISWAP_INV_GATE,
options=WITHOUT_CHI_FLOQUET_PHASED_FSIM_CHARACTERIZATION,
)
]
workflow.make_zeta_chi_gamma_compensation_for_moments(
circuit_with_calibration, characterizations
)
def test_run_zeta_chi_gamma_calibration_for_moments() -> None:
parameters_ab = cirq_google.PhasedFSimCharacterization(zeta=0.5, chi=0.4, gamma=0.3)
parameters_bc = cirq_google.PhasedFSimCharacterization(zeta=-0.5, chi=-0.4, gamma=-0.3)
parameters_cd = cirq_google.PhasedFSimCharacterization(zeta=0.2, chi=0.3, gamma=0.4)
a, b, c, d = cirq.LineQubit.range(4)
engine_simulator = cirq_google.PhasedFSimEngineSimulator.create_from_dictionary_sqrt_iswap(
parameters={
(a, b): parameters_ab.merge_with(SQRT_ISWAP_INV_PARAMETERS),
(b, c): parameters_bc.merge_with(SQRT_ISWAP_INV_PARAMETERS),
(c, d): parameters_cd.merge_with(SQRT_ISWAP_INV_PARAMETERS),
}
)
circuit = cirq.Circuit(
[
[cirq.X(a), cirq.Y(c)],
[SQRT_ISWAP_INV_GATE.on(a, b), SQRT_ISWAP_INV_GATE.on(c, d)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
]
)
options = cirq_google.FloquetPhasedFSimCalibrationOptions(
characterize_theta=False,
characterize_zeta=True,
characterize_chi=True,
characterize_gamma=True,
characterize_phi=False,
)
calibrated_circuit, calibrations = workflow.run_zeta_chi_gamma_compensation_for_moments(
circuit,
engine_simulator,
processor_id=None,
gate_set=cirq_google.SQRT_ISWAP_GATESET,
options=options,
)
assert cirq.allclose_up_to_global_phase(
engine_simulator.final_state_vector(calibrated_circuit.circuit),
cirq.final_state_vector(circuit),
)
assert calibrations == [
cirq_google.PhasedFSimCalibrationResult(
gate=SQRT_ISWAP_INV_GATE,
parameters={(a, b): parameters_ab, (c, d): parameters_cd},
options=options,
),
cirq_google.PhasedFSimCalibrationResult(
gate=SQRT_ISWAP_INV_GATE, parameters={(b, c): parameters_bc}, options=options
),
]
assert calibrated_circuit.moment_to_calibration == [None, None, 0, None, None, 1, None]
def test_run_zeta_chi_gamma_calibration_for_moments_no_chi() -> None:
parameters_ab = cirq_google.PhasedFSimCharacterization(theta=np.pi / 4, zeta=0.5, gamma=0.3)
parameters_bc = cirq_google.PhasedFSimCharacterization(theta=np.pi / 4, zeta=-0.5, gamma=-0.3)
parameters_cd = cirq_google.PhasedFSimCharacterization(theta=np.pi / 4, zeta=0.2, gamma=0.4)
a, b, c, d = cirq.LineQubit.range(4)
engine_simulator = cirq_google.PhasedFSimEngineSimulator.create_from_dictionary_sqrt_iswap(
parameters={(a, b): parameters_ab, (b, c): parameters_bc, (c, d): parameters_cd},
ideal_when_missing_parameter=True,
)
circuit = cirq.Circuit(
[
[cirq.X(a), cirq.Y(c)],
[SQRT_ISWAP_INV_GATE.on(a, b), SQRT_ISWAP_INV_GATE.on(c, d)],
[SQRT_ISWAP_INV_GATE.on(b, c)],
]
)
calibrated_circuit, *_ = workflow.run_zeta_chi_gamma_compensation_for_moments(
circuit, engine_simulator, processor_id=None, gate_set=cirq_google.SQRT_ISWAP_GATESET
)
assert cirq.allclose_up_to_global_phase(
engine_simulator.final_state_vector(calibrated_circuit.circuit),
cirq.final_state_vector(circuit),
)
``` |
{
"source": "JiahaoYao/torchdrug",
"score": 3
} |
#### File: torchdrug/layers/common.py
```python
import inspect
import warnings
import torch
from torch import nn
from torch._six import container_abcs
from torch.nn import functional as F
from torch_scatter import scatter_mean
from torchdrug.layers import functional
class MultiLayerPerceptron(nn.Module):
"""
Multi-layer Perceptron.
Note there is no batch normalization, activation or dropout in the last layer.
Parameters:
input_dim (int): input dimension
        hidden_dims (list of int): hidden dimensions
short_cut (bool, optional): use short cut or not
batch_norm (bool, optional): apply batch normalization or not
activation (str or function, optional): activation function
dropout (float, optional): dropout rate
"""
def __init__(self, input_dim, hidden_dims, short_cut=False, batch_norm=False, activation="relu", dropout=0):
super(MultiLayerPerceptron, self).__init__()
if not isinstance(hidden_dims, container_abcs.Sequence):
hidden_dims = [hidden_dims]
self.dims = [input_dim] + hidden_dims
self.short_cut = short_cut
if isinstance(activation, str):
self.activation = getattr(F, activation)
else:
self.activation = activation
if dropout:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.layers = nn.ModuleList()
for i in range(len(self.dims) - 1):
self.layers.append(nn.Linear(self.dims[i], self.dims[i + 1]))
if batch_norm:
self.batch_norms = nn.ModuleList()
for i in range(len(self.dims) - 2):
self.batch_norms.append(nn.BatchNorm1d(self.dims[i + 1]))
else:
self.batch_norms = None
def forward(self, input):
""""""
layer_input = input
for i, layer in enumerate(self.layers):
hidden = layer(layer_input)
if i < len(self.layers) - 1:
if self.batch_norms:
x = hidden.flatten(0, -2)
hidden = self.batch_norms[i](x).view_as(hidden)
hidden = self.activation(hidden)
if self.dropout:
hidden = self.dropout(hidden)
if self.short_cut and hidden.shape == layer_input.shape:
hidden = hidden + layer_input
layer_input = hidden
return hidden
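# A minimal usage sketch for MultiLayerPerceptron (sizes are illustrative):
#
#     mlp = MultiLayerPerceptron(16, [32, 32, 4], batch_norm=True, dropout=0.1)
#     out = mlp(torch.randn(8, 16))   # shape (8, 4); last layer has no norm/activation/dropout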
class GaussianSmearing(nn.Module):
r"""
Gaussian smearing from
`SchNet: A continuous-filter convolutional neural network for modeling quantum interactions`_.
There are two modes for Gaussian smearing.
Non-centered mode:
.. math::
\mu = [0, 1, ..., n], \sigma = [1, 1, ..., 1]
Centered mode:
.. math::
\mu = [0, 0, ..., 0], \sigma = [0, 1, ..., n]
.. _SchNet\: A continuous-filter convolutional neural network for modeling quantum interactions:
https://arxiv.org/pdf/1706.08566.pdf
Parameters:
start (int, optional): minimal input value
stop (int, optional): maximal input value
num_kernel (int, optional): number of RBF kernels
centered (bool, optional): centered mode or not
learnable (bool, optional): learnable gaussian parameters or not
"""
def __init__(self, start=0, stop=5, num_kernel=100, centered=False, learnable=False):
super(GaussianSmearing, self).__init__()
if centered:
mu = torch.zeros(num_kernel)
sigma = torch.linspace(start, stop, num_kernel)
else:
mu = torch.linspace(start, stop, num_kernel)
sigma = torch.ones(num_kernel) * (mu[1] - mu[0])
if learnable:
self.mu = nn.Parameter(mu)
self.sigma = nn.Parameter(sigma)
else:
self.register_buffer("mu", mu)
self.register_buffer("sigma", sigma)
def forward(self, x, y):
"""
Compute smeared gaussian features between data.
Parameters:
x (Tensor): data of shape :math:`(..., d)`
y (Tensor): data of shape :math:`(..., d)`
Returns:
Tensor: features of shape :math:`(..., num\_kernel)`
"""
distance = (x - y).norm(2, dim=-1, keepdim=True)
z = (distance - self.mu) / self.sigma
prob = torch.exp(-0.5 * z * z)
return prob
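# A minimal usage sketch for GaussianSmearing (sizes are illustrative):
#
#     rbf = GaussianSmearing(start=0, stop=5, num_kernel=50)
#     x, y = torch.randn(32, 3), torch.randn(32, 3)   # e.g. 3-D node coordinates
#     feat = rbf(x, y)   # shape (32, 50): RBF features of the pairwise distances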
class PairNorm(nn.Module):
"""
Pair normalization layer proposed in `PairNorm: Tackling Oversmoothing in GNNs`_.
.. _PairNorm\: Tackling Oversmoothing in GNNs:
https://openreview.net/pdf?id=rkecl1rtwB
Parameters:
scale_individual (bool, optional): additionally normalize each node representation to have the same L2-norm
"""
eps = 1e-8
def __init__(self, scale_individual=False):
super(PairNorm, self).__init__()
self.scale_individual = scale_individual
def forward(self, graph, input):
""""""
if graph.batch_size > 1:
warnings.warn("PairNorm is proposed for a single graph, but now applied to a batch of graphs.")
x = input.flatten(1)
x = x - x.mean(dim=0)
if self.scale_individual:
output = x / (x.norm(dim=-1, keepdim=True) + self.eps)
else:
output = x * x.shape[0] ** 0.5 / (x.norm() + self.eps)
return output.view_as(input)
class InstanceNorm(nn.modules.instancenorm._InstanceNorm):
"""
Instance normalization for graphs. This layer follows the definition in
`GraphNorm: A Principled Approach to Accelerating Graph Neural Network Training`.
.. _GraphNorm\: A Principled Approach to Accelerating Graph Neural Network Training:
https://arxiv.org/pdf/2009.03294.pdf
Parameters:
input_dim (int): input dimension
eps (float, optional): epsilon added to the denominator
affine (bool, optional): use learnable affine parameters or not
"""
def __init__(self, input_dim, eps=1e-5, affine=False):
super(InstanceNorm, self).__init__(input_dim, eps, affine=affine)
def forward(self, graph, input):
""""""
assert (graph.num_nodes >= 1).all()
mean = scatter_mean(input, graph.node2graph, dim=0, dim_size=graph.batch_size)
centered = input - mean[graph.node2graph]
var = scatter_mean(centered ** 2, graph.node2graph, dim=0, dim_size=graph.batch_size)
std = (var + self.eps).sqrt()
output = centered / std[graph.node2graph]
if self.affine:
output = torch.addcmul(self.bias, self.weight, output)
return output
class MutualInformation(nn.Module):
"""
Mutual information estimator from
`Learning deep representations by mutual information estimation and maximization`_.
.. _Learning deep representations by mutual information estimation and maximization:
https://arxiv.org/pdf/1808.06670.pdf
Parameters:
input_dim (int): input dimension
num_mlp_layer (int, optional): number of MLP layers
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, num_mlp_layer=2, activation="relu"):
super(MutualInformation, self).__init__()
self.x_mlp = MultiLayerPerceptron(input_dim, [input_dim] * num_mlp_layer, activation=activation)
self.y_mlp = MultiLayerPerceptron(input_dim, [input_dim] * num_mlp_layer, activation=activation)
def forward(self, x, y, pair_index=None):
""""""
x = self.x_mlp(x)
y = self.y_mlp(y)
score = x @ y.t()
score = score.flatten()
if pair_index is None:
assert len(x) == len(y)
pair_index = torch.arange(len(x), device=x.device).unsqueeze(-1).expand(-1, 2)
index = pair_index[:, 0] * len(y) + pair_index[:, 1]
positive = torch.zeros_like(score, dtype=torch.bool)
positive[index] = 1
negative = ~positive
mutual_info = - functional.shifted_softplus(-score[positive]).mean() \
- functional.shifted_softplus(score[negative]).mean()
return mutual_info
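# Illustrative note (an assumption about typical usage, not from the original file):
# with x and y of shape (B, input_dim) and pair_index=None, the B aligned pairs (i, i)
# are scored as positives and the remaining B*B - B entries of the x @ y.t() matrix act
# as negatives, so the estimator contrasts matched representations against mismatched ones.
# mi_estimator = MutualInformation(input_dim=128)
# mi = mi_estimator(torch.randn(16, 128), torch.randn(16, 128)) # scalar estimate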
class Sequential(nn.Sequential):
"""
Improved sequential container.
Modules will be called in the order they are passed to the constructor.
Compared to the vanilla nn.Sequential, this layer additionally supports the following features.
1. Multiple input / output arguments.
>>> # layer1 signature: (...) -> (a, b)
>>> # layer2 signature: (a, b) -> (...)
>>> layer = layers.Sequential(layer1, layer2)
2. Global arguments.
>>> # layer1 signature: (graph, a) -> b
>>> # layer2 signature: (graph, b) -> c
>>> layer = layers.Sequential(layer1, layer2, global_args=("graph",))
Note the global arguments don't need to be present in every layer.
>>> # layer1 signature: (graph, a) -> b
>>> # layer2 signature: b -> c
>>> # layer3 signature: (graph, c) -> d
>>> layer = layers.Sequential(layer1, layer2, global_args=("graph",))
3. Dict outputs.
>>> # layer1 signature: a -> {"b": b, "c": c}
>>> # layer2 signature: b -> d
>>> layer = layers.Sequential(layer1, layer2, allow_unused=True)
When dict outputs are used with global arguments, the global arguments can be explicitly
overwritten by any layer outputs.
>>> # layer1 signature: (graph, a) -> {"graph": graph, "b": b}
>>> # layer2 signature: (graph, b) -> c
>>> # layer2 takes in the graph output by layer1
>>> layer = layers.Sequential(layer1, layer2, global_args=("graph",))
"""
def __init__(self, *args, global_args=None, allow_unused=False):
super(Sequential, self).__init__(*args)
if global_args is not None:
self.global_args = set(global_args)
else:
self.global_args = set()
self.allow_unused = allow_unused
def forward(self, *args, **kwargs):
""""""
global_kwargs = {}
for i, module in enumerate(self._modules.values()):
sig = inspect.signature(module.forward)
parameters = list(sig.parameters.values())
param_names = [param.name for param in parameters]
j = 0
for name in param_names:
if j == len(args):
break
if name in kwargs:
continue
if name in global_kwargs and name not in kwargs:
kwargs[name] = global_kwargs[name]
continue
kwargs[name] = args[j]
j += 1
if self.allow_unused:
param_names = set(param_names)
# pop unused kwargs
kwargs = {k: v for k, v in kwargs.items() if k in param_names}
if j < len(args):
raise TypeError("too many positional arguments")
output = module(**kwargs)
global_kwargs.update({k: v for k, v in kwargs.items() if k in self.global_args})
args = []
kwargs = {}
if isinstance(output, dict):
kwargs.update(output)
elif isinstance(output, container_abcs.Sequence):
args += list(output)
else:
args.append(output)
return output
```
#### File: torchdrug/layers/distribution.py
```python
import math
import torch
from torch._six import container_abcs
from torch import nn
class IndependentGaussian(nn.Module):
"""
Independent Gaussian distribution.
Parameters:
mu (Tensor): mean of shape :math:`(N,)`
sigma2 (Tensor): variance of shape :math:`(N,)`
learnable (bool, optional): learnable parameters or not
"""
def __init__(self, mu, sigma2, learnable=False):
super(IndependentGaussian, self).__init__()
if learnable:
self.mu = nn.Parameter(torch.as_tensor(mu))
self.sigma2 = nn.Parameter(torch.as_tensor(sigma2))
else:
self.register_buffer("mu", torch.as_tensor(mu))
self.register_buffer("sigma2", torch.as_tensor(sigma2))
self.dim = len(mu)
def forward(self, input):
"""
Compute the likelihood of input data.
Parameters:
input (Tensor): input data of shape :math:`(..., N)`
"""
log_likelihood = -0.5 * (math.log(2 * math.pi) + self.sigma2.log() + (input - self.mu) ** 2 / self.sigma2)
return log_likelihood
def sample(self, *size):
"""
Draw samples from the distribution.
Parameters:
size (tuple of int): shape of the samples
"""
if len(size) == 1 and isinstance(size[0], container_abcs.Sequence):
size = size[0]
size = list(size) + [self.dim]
sample = torch.randn(size, device=self.mu.device) * self.sigma2.sqrt() + self.mu
return sample
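# Minimal usage sketch (comment only; the shapes are assumptions for illustration):
# prior = IndependentGaussian(torch.zeros(10), torch.ones(10))
# z = prior.sample(32) # shape (32, 10)
# log_p = prior(z) # element-wise log-likelihood, shape (32, 10)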
``` |
{
"source": "jiahe23/SCAMPy",
"score": 2
} |
#### File: tests/unit/test_eos.py
```python
import sys
sys.path.insert(0, "./../")
import os
import subprocess
import json
import warnings
from netCDF4 import Dataset
import pprint as pp
import numpy as np
import math as mt
import pytest
import pytest_wrapper as wrp
# https://hypothesis.readthedocs.io/en/latest/
from hypothesis import given, strategies as st
@given(p0 = st.floats(min_value = 12000, max_value = 101300), # pressure between 1013hPa - 120hPa
qt = st.floats(min_value = 0, max_value = 0.040), # total water specific humidity between 0 - 40 g/kg
frac = st.floats(min_value = 0, max_value = 1)) # fraction of qt that is water vapor specific humidity
def test_pressure(p0, qt, frac):
"""
Tests functions pd_c and pv_c from thermodynamic_functions.pyx
by checking if the total pressure = dry air pressure + water vapor pressure.
"""
qv = frac * qt
pd = wrp.pd_c(p0, qt, qv)
pv = wrp.pv_c(p0, qt, qv)
assert(np.isclose(p0, pd + pv, rtol = 1e-12))
@given(p0 = st.floats(min_value = 12000, max_value = 101300), # pressure between 1013hPa - 120hPa
qt = st.floats(min_value = 0, max_value = 0.040), # total water specific humidity between 0 - 40 g/kg
T = st.floats(min_value = 273.15, max_value = 273.15 + 40)) # temperature between 0 - 40 C
def test_eos_first_guess(p0, qt, T):
"""
Tests functions t_to_thetali and eos_first_guess from thermodynamic_functions.pyx
by checking if calculating thetali using T and then calculating T using thetali
results in the same temperature
checks subsaturated cases
"""
qv = qt
ql = 0.
qi = 0.
pd = wrp.pd_c(p0, qt, qv)
pv = wrp.pv_c(p0, qt, qv)
# calculate thetali(T) assuming no liquid water or ice are present
thetali = wrp.t_to_thetali_c(p0, T, qt, ql, qi)
# calculate T(thetali) assuming no liquid water or ice are present
T_new = wrp.eos_first_guess_thetal(thetali, pd, pv, qt)
assert(np.isclose(T, T_new, rtol = 1e-12))
@given(z = st.floats(min_value = 0, max_value = 18000), # height in the atmosphere around which we will test
rnd = st.floats(min_value = -0.05, max_value = 0.05), # random factor
ql = st.floats(min_value = 0, max_value = 0.010)) # liquid water specific humidity between 0 - 10 g/kg
def test_eos_saturated(z, rnd, ql):
"""
Check if the saturation adjustment scheme finds the correct T and ql
"""
# constructing initial condition such that
# a) it's saturated
# b) there is some ql
# c) p and T are close to conditions possible in the atmosphere
# scampy constants
const = wrp.scampy_constants()
# "standard atmosphere" profile around which we will test
# https://en.wikipedia.org/wiki/International_Standard_Atmosphere
dTdz = -0.0065
T0 = 273.15 + 15
T1 = 273.15 - 56.5
p0 = 101300.
p1 = 22632
z1 = 11000
def T_standard(z):
T = T1
if z < z1:
T = T0 + dTdz * z
return T
def p_standard(z):
p = p0
if z < z1:
p = p0 * mt.exp(-const.g / const.Rd / dTdz * mt.log(1. + dTdz / T0 * z))
else:
p = p1 * mt.exp(-const.g / const.Rd / T1 * (z - z1))
return p
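# sanity check on the profile above (comment only, assuming the usual constants
# g ~ 9.81 m/s^2 and Rd ~ 287 J/(kg K) in scampy_constants): p_standard(0) returns
# p0 = 101300 Pa and the two branches agree closely at the tropopause z1 (~ 22632 Pa)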
T = T_standard(z) / (1. - rnd)
p = p_standard(z) / (1. - rnd)
pv_s = wrp.pv_star(T)
# qv_star assuming that qt = qv + ql
qv_s = wrp.qv_star_t(p, T) * (1 - ql)
qt = qv_s + ql
# initial prognostic variable thetali
thetali = wrp.t_to_thetali_c(p, T, qt, ql, 0)
# saturation adjustment
res = wrp.eos(p, qt, thetali)
assert(np.isclose(T, res['T'], rtol = 1e-3))
if (ql > 1e-7):
assert(np.isclose(ql, res['ql'], rtol = 1e-2))
``` |
{
"source": "JiaHeng-DLUT/EfficientNet-PyTorch",
"score": 2
} |
#### File: JiaHeng-DLUT/EfficientNet-PyTorch/Untitled.py
```python
import torch
#from efficientnet_pytorch import EfficientNet
# In[20]:
#model = EfficientNet.from_pretrained('efficientnet-b7')
# print(model)
# In[21]:
from efficientnet_pytorch.utils import get_model_params
blocks_args, global_params = get_model_params('efficientnet-b7', None)
print(blocks_args)
#[BlockArgs(num_repeat=1, kernel_size=3, stride=[1], expand_ratio=1, input_filters=32, output_filters=16, se_ratio=0.25, id_skip=True),
# BlockArgs(num_repeat=2, kernel_size=3, stride=[2], expand_ratio=6, input_filters=16, output_filters=24, se_ratio=0.25, id_skip=True),
# BlockArgs(num_repeat=2, kernel_size=5, stride=[2], expand_ratio=6, input_filters=24, output_filters=40, se_ratio=0.25, id_skip=True),
# BlockArgs(num_repeat=3, kernel_size=3, stride=[2], expand_ratio=6, input_filters=40, output_filters=80, se_ratio=0.25, id_skip=True),
# BlockArgs(num_repeat=3, kernel_size=5, stride=[1], expand_ratio=6, input_filters=80, output_filters=112, se_ratio=0.25, id_skip=True),
# BlockArgs(num_repeat=4, kernel_size=5, stride=[2], expand_ratio=6, input_filters=112, output_filters=192, se_ratio=0.25, id_skip=True),
# BlockArgs(num_repeat=1, kernel_size=3, stride=[1], expand_ratio=6, input_filters=192, output_filters=320, se_ratio=0.25, id_skip=True)]
print(global_params)
# In[23]:
from efficientnet_pytorch.model import EfficientNet
net = EfficientNet(blocks_args, global_params)
img = torch.ones((1, 3, 224, 224))
print(img.shape)
out = net(img)
print(out.shape)
# In[16]:
class SimpleNet:
def __init__(self):
pass
def forward(self, x):
pass
# In[ ]:
``` |
{
"source": "JiaHeng-DLUT/minGPT",
"score": 3
} |
#### File: mingpt/data/fly_dataset.py
```python
import numpy as np
import torch
import torch.utils.data as data
class FlyDataset(data.Dataset):
"""Fruit fly dataset.
"""
def __init__(self, opt):
super(FlyDataset, self).__init__()
self.opt = opt
data_path = opt['data_path']
meta_path = opt['meta_path']
data = np.load(data_path, allow_pickle=True).item()
self.seqs = data['sequences']
id_list = open(meta_path).readlines()
self.id_list = [id.strip() for id in id_list]
self.num_frame = opt['num_frame'] # Number of frames per clip
self.num_repeat = int(opt['total_frame'] / opt['num_frame']) # Number of clips per video
def __getitem__(self, index):
id = self.id_list[index // self.num_repeat]
keypoints = torch.from_numpy(self.seqs[id]['keypoints']) # (4500, 11, 24, 2)
keypoints = torch.flatten(keypoints, 1) # (4500, 528)
keypoints = torch.nan_to_num(keypoints, nan=0)
if 'annotations' in self.seqs[id]:
labels = torch.from_numpy(self.seqs[id]['annotations'])
labels = torch.nan_to_num(labels, nan=-100)
else:
labels = None
pos = index % self.num_repeat * self.num_frame
if labels is None:
return {
'keypoints': keypoints[pos : pos + self.num_frame],
'pos': pos,
'id': id,
}
else:
return {
'keypoints': keypoints[pos : pos + self.num_frame],
'labels': labels[:, pos : pos + self.num_frame],
'pos': pos,
'id': id,
}
def __len__(self):
return len(self.id_list) * self.num_repeat
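# Worked indexing example (comment only, derived from the logic above): with num_frame=150
# and total_frame=4500 each video yields num_repeat=30 clips, so index 61 maps to video
# id_list[61 // 30] = id_list[2] and to frames [1 * 150 : 2 * 150] of that video.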
if __name__ == '__main__':
# train
train_dataset = {
'data_path': '../../Fruit_Fly_Groups/Notebooks/data/user_train.npy',
'meta_path': 'meta_info/meta_info_train_0.txt',
'num_frame': 150,
'total_frame': 4500,
}
dataset = FlyDataset(train_dataset)
dataloader = data.DataLoader(dataset, batch_size=1, shuffle=False)
for i, data in enumerate(dataloader):
print(i, data['pos'], data['id'])
# val
# dataset = FlyDataset(opt['datasets']['val'])
# dataloader = data.DataLoader(dataset, batch_size=1, shuffle=False)
# for i, data in enumerate(dataloader):
# print(i)
```
#### File: minGPT/mingpt/test_fly.py
```python
import math
import os
from tqdm import tqdm
import numpy as np
import torch
from torch.utils.data.dataloader import DataLoader
from data.fly_dataset import FlyDataset
from model import GPT, GPT1Config
from utils.misc import set_random_seed
class TesterConfig:
# model
input_dim = 528
output_dim = 256
total_frames = 4500
clip_frames = 150
# data
test_dataset = {
'data_path': '../../Fruit_Fly_Groups/Notebooks/data/submission_data.npy',
'meta_path': 'meta_info/meta_info_test.txt',
'num_frame': clip_frames,
'total_frame': total_frames,
}
batch_size = int(total_frames // clip_frames)
num_workers = 4
# checkpoint setting
ckpt_path = f'./experiments/fly/01_max_epoch_100/epoch1.pth'
feat_path = ckpt_path.replace('.pth', '_submission.npy')
# CUDA_VISIBLE_DEVICES=0 python test_fly.py
def __init__(self, **kwargs):
for k,v in kwargs.items():
setattr(self, k, v)
def validate_submission(submission, submission_clips):
if not isinstance(submission, dict):
print("Submission should be dict")
return False
if 'frame_number_map' not in submission:
print("Frame number map missing")
return False
if 'embeddings' not in submission:
print('Embeddings array missing')
return False
elif not isinstance(submission['embeddings'], np.ndarray):
print("Embeddings should be a numpy array")
return False
elif not len(submission['embeddings'].shape) == 2:
print("Embeddings should be 2D array")
return False
elif not submission['embeddings'].shape[1] <= 256:
print("Embeddings too large, max allowed is 256")
return False
elif not isinstance(submission['embeddings'][0, 0], np.float32):
print(f"Embeddings are not float32")
return False
total_clip_length = 0
for key in submission_clips['sequences']:
start, end = submission['frame_number_map'][key]
clip_length = submission_clips['sequences'][key]['keypoints'].shape[0]
total_clip_length += clip_length
if not end-start == clip_length:
print(f"Frame number map for clip {key} doesn't match clip length")
return False
if not len(submission['embeddings']) == total_clip_length:
print(f"Emebddings length doesn't match submission clips total length")
return False
if not np.isfinite(submission['embeddings']).all():
print(f"Emebddings contains NaN or infinity")
return False
print("All checks passed")
return True
class Tester:
def __init__(self, model, test_dataset, config):
self.model = model
self.test_dataset = test_dataset
self.config = config
# take over whatever gpus are on the system
self.device = 'cpu'
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
self.model = torch.nn.DataParallel(self.model).to(self.device)
def test(self):
model, config = self.model, self.config
is_train = False
model.train(is_train)
data = self.test_dataset
loader = DataLoader(data, shuffle=False, pin_memory=True,
batch_size=config.batch_size,
num_workers=config.num_workers)
pbar = tqdm(enumerate(loader), total=len(loader)) if is_train else enumerate(loader)
feats = []
frame_number_map = {}
for it, data in pbar:
# place data on the correct device
x = data['keypoints'].to(self.device) #(b, clip_frame, 528)
pos = data['pos']
id = data['id'][0]
# print(pos, id)
if id not in frame_number_map:
st = it * self.config.total_frames
ed = st + self.config.total_frames
frame_number_map[id] = (st, ed)
# forward the model
with torch.set_grad_enabled(is_train):
feat = model(x, pos, y=None).view(-1, self.config.output_dim)
feats.append(feat)
feats = torch.cat(feats, dim=0).detach().cpu().numpy()
# print(1, feats.shape)
# for k in frame_number_map:
# print(k, frame_number_map[k])
submission_dict = {
"frame_number_map": frame_number_map,
"embeddings": feats
}
submission_clips = np.load(self.config.test_dataset['data_path'], allow_pickle=True).item()
validate_submission(submission_dict, submission_clips)
np.save(self.config.feat_path, submission_dict)
if __name__ == '__main__':
set_random_seed(0)
config = TesterConfig()
test_set = FlyDataset(config.test_dataset)
print(len(test_set))
gpt_config = GPT1Config(block_size=config.total_frames,
input_dim=config.input_dim,
output_dim=config.output_dim,
num_tokens=config.clip_frames)
model = GPT(gpt_config)
print(model)
# x = torch.randn((2, 4500, 528))
# print(model(x).shape)
tester = Tester(model, test_set, config)
tester.test()
```
#### File: mingpt/utils/misc.py
```python
import numpy as np
import random
import torch
def set_random_seed(seed):
"""Set random seeds."""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
``` |
{
"source": "JiaHeng-DLUT/Webshell",
"score": 3
} |
#### File: JiaHeng-DLUT/Webshell/neopi.py
```python
import math
import sys
import os
import re
import csv
import zlib
import time
from collections import defaultdict
from optparse import OptionParser
#
# Globals
#
# Smallest filesize to check for in bytes.
SMALLEST = 60
class LanguageIC:
"""Class that calculates a file's Index of Coincidence as
as well as a a subset of files average Index of Coincidence.
"""
def __init__(self):
"""Initialize results arrays as well as character counters."""
self.char_count = defaultdict(int)
self.total_char_count = 0
self.results = []
self.ic_total_results = ""
def calculate_char_count(self,data):
"""Method to calculate character counts for a particular data file."""
if not data:
return 0
for x in range(256):
char = chr(x)
charcount = data.count(char)
self.char_count[char] += charcount
self.total_char_count += charcount
return
def calculate_IC(self):
"""Calculate the Index of Coincidence for the self variables"""
total = 0
for val in self.char_count.values():
if val == 0:
continue
total += val * (val-1)
try:
ic_total = float(total)/(self.total_char_count * (self.total_char_count - 1))
except:
ic_total = 0
self.ic_total_results = ic_total
return
def calculate(self,data,filename):
"""Calculate the Index of Coincidence for a file and append to self.ic_results array"""
if not data:
return 0
char_count = 0
total_char_count = 0
for x in range(256):
char = chr(x)
charcount = data.count(char)
char_count += charcount * (charcount - 1)
total_char_count += charcount
ic = float(char_count)/(total_char_count * (total_char_count - 1))
self.results.append({"filename":filename, "value":ic})
# Call method to calculate_char_count and append to total_char_count
self.calculate_char_count(data)
return ic
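# Rough reference values (comment only): plain English text has an IC of about 0.065-0.07,
# while uniformly random bytes approach 1/256 (~0.0039), which is why heavily encoded or
# encrypted webshells tend to surface near the top of the "lowest IC" ranking below.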
def sort(self):
self.results.sort(key=lambda item: item["value"])
self.results = resultsAddRank(self.results)
def printer(self, count):
"""Print the top signature count match files for a given search"""
# Calculate the Total IC for a Search
self.calculate_IC()
print "\n[[ Average IC for Search ]]"
print self.ic_total_results
print "\n[[ Top %i lowest IC files ]]" % (count)
if (count > len(self.results)): count = len(self.results)
for x in range(count):
print ' {0:>7.4f} {1}'.format(self.results[x]["value"], self.results[x]["filename"])
return
class Entropy:
"""Class that calculates a file's Entropy."""
def __init__(self):
"""Instantiate the entropy_results array."""
self.results = []
def calculate(self,data,filename):
"""Calculate the entropy for 'data' and append result to entropy_results array."""
if not data:
return 0
entropy = 0
self.stripped_data =data.replace(' ', '')
for x in range(256):
p_x = float(self.stripped_data.count(chr(x)))/len(self.stripped_data)
if p_x > 0:
entropy += - p_x * math.log(p_x, 2)
self.results.append({"filename":filename, "value":entropy})
return entropy
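# Rough reference values (comment only): the per-character Shannon entropy computed here is
# roughly 4-5 bits for ordinary source code or English text, close to 6 bits for base64
# blobs, and approaches 8 bits for compressed or encrypted payloads, hence the ranking of
# the most entropic files below.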
def sort(self):
self.results.sort(key=lambda item: item["value"])
self.results.reverse()
self.results = resultsAddRank(self.results)
def printer(self, count):
"""Print the top signature count match files for a given search"""
print "\n[[ Top %i entropic files for a given search ]]" % (count)
if (count > len(self.results)): count = len(self.results)
for x in range(count):
print ' {0:>7.4f} {1}'.format(self.results[x]["value"], self.results[x]["filename"])
return
class LongestWord:
"""Class that determines the longest word for a particular file."""
def __init__(self):
"""Instantiate the longestword_results array."""
self.results = []
def calculate(self,data,filename):
"""Find the longest word in a string and append to longestword_results array"""
if not data:
return "", 0
longest = 0
longest_word = ""
words = re.split("[\s,\n,\r]", data)
if words:
for word in words:
length = len(word)
if length > longest:
longest = length
longest_word = word
self.results.append({"filename":filename, "value":longest})
return longest
def sort(self):
self.results.sort(key=lambda item: item["value"])
self.results.reverse()
self.results = resultsAddRank(self.results)
def printer(self, count):
"""Print the top signature count match files for a given search"""
print "\n[[ Top %i longest word files ]]" % (count)
if (count > len(self.results)): count = len(self.results)
for x in range(count):
print ' {0:>7} {1}'.format(self.results[x]["value"], self.results[x]["filename"])
return
class SignatureNasty:
"""Generator that searches a given file for nasty expressions"""
def __init__(self):
"""Instantiate the results array."""
self.results = []
def calculate(self, data, filename):
if not data:
return "", 0
# Lots taken from the wonderful post at http://stackoverflow.com/questions/3115559/exploitable-php-functions
valid_regex = re.compile('(eval\(|file_put_contents|base64_decode|python_eval|exec\(|passthru|popen|proc_open|pcntl|assert\(|system\(|shell)', re.I)
matches = re.findall(valid_regex, data)
self.results.append({"filename":filename, "value":len(matches)})
return len(matches)
def sort(self):
self.results.sort(key=lambda item: item["value"])
self.results.reverse()
self.results = resultsAddRank(self.results)
def printer(self, count):
"""Print the top signature count match files for a given search"""
print "\n[[ Top %i signature match counts ]]" % (count)
if (count > len(self.results)): count = len(self.results)
for x in range(count):
print ' {0:>7} {1}'.format(self.results[x]["value"], self.results[x]["filename"])
return
class SignatureSuperNasty:
"""Generator that searches a given file for SUPER-nasty expressions (These are almost always bad!)"""
def __init__(self):
"""Instantiate the results array."""
self.results = []
def calculate(self, data, filename):
if not data:
return "", 0
valid_regex = re.compile('(@\$_\[\]=|\$_=@\$_GET|\$_\[\+""\]=)', re.I)
matches = re.findall(valid_regex, data)
self.results.append({"filename":filename, "value":len(matches)})
return len(matches)
def sort(self):
self.results.sort(key=lambda item: item["value"])
self.results.reverse()
self.results = resultsAddRank(self.results)
def printer(self, count):
"""Print the top signature count match files for a given search"""
print "\n[[ Top %i SUPER-signature match counts (These are usually bad!) ]]" % (count)
if (count > len(self.results)): count = len(self.results)
for x in range(count):
print ' {0:>7} {1}'.format(self.results[x]["value"], self.results[x]["filename"])
return
class UsesEval:
"""Generator that searches a given file for nasty eval with variable"""
def __init__(self):
"""Instantiate the eval_results array."""
self.results = []
def calculate(self, data, filename):
if not data:
return "", 0
# Lots taken from the wonderful post at http://stackoverflow.com/questions/3115559/exploitable-php-functions
valid_regex = re.compile('(eval\(\$(\w|\d))', re.I)
matches = re.findall(valid_regex, data)
self.results.append({"filename":filename, "value":len(matches)})
return len(matches)
def sort(self):
self.results.sort(key=lambda item: item["value"])
self.results.reverse()
self.results = resultsAddRank(self.results)
def printer(self, count):
"""Print the files that use eval"""
print "\n[[ Top %i eval match counts ]]" % (count)
if (count > len(self.results)): count = len(self.results)
for x in range(count):
print ' {0:>7} {1}'.format(self.results[x]["value"], self.results[x]["filename"])
return
class Compression:
"""Generator finds compression ratio"""
def __init__(self):
"""Instantiate the results array."""
self.results = []
def calculate(self, data, filename):
if not data:
return "", 0
compressed = zlib.compress(data)
ratio = float(len(compressed)) / float(len(data))
self.results.append({"filename":filename, "value":ratio})
return ratio
def sort(self):
self.results.sort(key=lambda item: item["value"])
self.results.reverse()
self.results = resultsAddRank(self.results)
def printer(self, count):
"""Print the top files for a given search"""
print "\n[[ Top %i compression match counts ]]" % (count)
if (count > len(self.results)): count = len(self.results)
for x in range(count):
print ' {0:>7.4f} {1}'.format(self.results[x]["value"], self.results[x]["filename"])
return
def resultsAddRank(results):
rank = 1
offset = 1
previousValue = False
newList = []
for file in results:
if (previousValue and previousValue != file["value"]):
rank = offset
file["rank"] = rank
newList.append(file)
previousValue = file["value"]
offset = offset + 1
return newList
class SearchFile:
"""Generator that searches a given filepath with an optional regular
expression and returns the filepath and filename"""
def search_file_path(self, args, valid_regex):
for root, dirs, files in os.walk(args[0]):
for file in files:
filename = os.path.join(root, file)
if (valid_regex.search(file) and os.path.getsize(filename) > SMALLEST):
try:
data = open(root + "/" + file, 'rb').read()
except:
data = False
print "Could not read file :: %s/%s" % (root, file)
yield data, filename
if __name__ == "__main__":
"""Parse all the options"""
timeStart = time.clock()
print """
) ( (
( /( )\ ))\ )
)\()) ( (()/(()/(
((_)\ ))\ ( /(_))(_))
_((_)/((_))\(_))(_))
| \| (_)) ((_) _ \_ _|
| .` / -_) _ \ _/| |
|_|\_\___\___/_| |___| Ver. *.USEGIT
"""
parser = OptionParser(usage="usage: %prog [options] <start directory> <OPTIONAL: filename regex>",
version="%prog 1.0")
parser.add_option("-c", "--csv",
action="store",
dest="is_csv",
default=False,
help="generate CSV outfile",
metavar="FILECSV")
parser.add_option("-a", "--all",
action="store_true",
dest="is_all",
default=False,
help="Run all (useful) tests [Entropy, Longest Word, IC, Signature]",)
parser.add_option("-z", "--zlib",
action="store_true",
dest="is_zlib",
default=False,
help="Run compression Test",)
parser.add_option("-e", "--entropy",
action="store_true",
dest="is_entropy",
default=False,
help="Run entropy Test",)
parser.add_option("-E", "--eval",
action="store_true",
dest="is_eval",
default=False,
help="Run signiture test for the eval",)
parser.add_option("-l", "--longestword",
action="store_true",
dest="is_longest",
default=False,
help="Run longest word test",)
parser.add_option("-i", "--ic",
action="store_true",
dest="is_ic",
default=False,
help="Run IC test",)
parser.add_option("-s", "--signature",
action="store_true",
dest="is_signature",
default=False,
help="Run signature test",)
parser.add_option("-S", "--supersignature",
action="store_true",
dest="is_supersignature",
default=False,
help="Run SUPER-signature test",)
parser.add_option("-A", "--auto",
action="store_true",
dest="is_auto",
default=False,
help="Run auto file extension tests",)
parser.add_option("-u", "--unicode",
action="store_true",
dest="ignore_unicode",
default=False,
help="Skip over unicode-y/UTF'y files",)
(options, args) = parser.parse_args()
# Error on invalid number of arguments
if len(args) < 1:
parser.print_help()
print ""
sys.exit()
# Error on an invalid path
if os.path.exists(args[0]) == False:
parser.error("Invalid path")
valid_regex = ""
if (len(args) == 2 and options.is_auto is False):
try:
valid_regex = re.compile(args[1])
except:
parser.error("Invalid regular expression")
else:
valid_regex = re.compile('.*')
tests = []
if options.is_auto:
valid_regex = re.compile('(\.php|\.asp|\.aspx|\.scath|\.bash|\.zsh|\.csh|\.tsch|\.pl|\.py|\.txt|\.cgi|\.cfm|\.htaccess)$')
if options.is_all:
tests.append(LanguageIC())
tests.append(Entropy())
tests.append(LongestWord())
tests.append(SignatureNasty())
tests.append(SignatureSuperNasty())
else:
if options.is_entropy:
tests.append(Entropy())
if options.is_longest:
tests.append(LongestWord())
if options.is_ic:
tests.append(LanguageIC())
if options.is_signature:
tests.append(SignatureNasty())
if options.is_supersignature:
tests.append(SignatureSuperNasty())
if options.is_eval:
tests.append(UsesEval())
if options.is_zlib:
tests.append(Compression())
# Instantiate the Generator Class used for searching, opening, and reading files
locator = SearchFile()
# CSV file output array
csv_array = []
csv_header = ["filename"]
# Grab the file and calculate each test against file
fileCount = 0
fileIgnoreCount = 0
for data, filename in locator.search_file_path(args, valid_regex):
if data:
# a row array for the CSV
csv_row = []
csv_row.append(filename)
if options.ignore_unicode:
asciiHighCount = 0
for character in data:
if ord(character) > 127:
asciiHighCount = asciiHighCount + 1
fileAsciiHighRatio = float(asciiHighCount) / float(len(data))
if (options.ignore_unicode == False or fileAsciiHighRatio < .1):
for test in tests:
calculated_value = test.calculate(data, filename)
# Make the header row if it hasn't been fully populated, +1 here to account for filename column
if len(csv_header) < len(tests) + 1:
csv_header.append(test.__class__.__name__)
csv_row.append(calculated_value)
fileCount = fileCount + 1
csv_array.append(csv_row)
else:
fileIgnoreCount = fileIgnoreCount + 1
if options.is_csv:
csv_array.insert(0,csv_header)
fileOutput = csv.writer(open(options.is_csv, "wb"))
fileOutput.writerows(csv_array)
timeFinish = time.clock()
# Print some stats
print "\n[[ Total files scanned: %i ]]" % (fileCount)
print "[[ Total files ignored: %i ]]" % (fileIgnoreCount)
print "[[ Scan Time: %f seconds ]]" % (timeFinish - timeStart)
# Print top rank lists
rank_list = {}
for test in tests:
test.sort()
test.printer(10)
for file in test.results:
rank_list[file["filename"]] = rank_list.setdefault(file["filename"], 0) + file["rank"]
rank_sorted = sorted(rank_list.items(), key=lambda x: x[1])
print "\n[[ Top cumulative ranked files ]]"
count = 10
if (count > len(rank_sorted)): count = len(rank_sorted)
for x in range(count):
print ' {0:>7} {1}'.format(rank_sorted[x][1], rank_sorted[x][0])
``` |
{
"source": "jiahenghuang/text_classification",
"score": 3
} |
#### File: text_classification/a00_Bert/utils.py
```python
import pickle
import h5py
import os
import numpy as np
import random
random_number=300
def load_data(cache_file_h5py,cache_file_pickle):
"""
load data from h5py and pickle cache files, which are generated by running pre-processing.ipynb step by step.
:param cache_file_h5py:
:param cache_file_pickle:
:return:
"""
if not os.path.exists(cache_file_h5py) or not os.path.exists(cache_file_pickle):
raise RuntimeError("############################ERROR##############################\n. "
"please download cache file, it include training data and vocabulary & labels. "
"link can be found in README.md\n download zip file, unzip it, then put cache files as FLAGS."
"cache_file_h5py and FLAGS.cache_file_pickle suggested location.")
print("INFO. cache file exists. going to load cache file")
f_data = h5py.File(cache_file_h5py, 'r')
print("f_data.keys:",list(f_data.keys()))
train_X=f_data['train_X'] # np.array(
print("train_X.shape:",train_X.shape)
train_Y=f_data['train_Y'] # np.array(
print("train_Y.shape:",train_Y.shape,";")
vaild_X=f_data['vaild_X'] # np.array(
valid_Y=f_data['valid_Y'] # np.array(
test_X=f_data['test_X'] # np.array(
test_Y=f_data['test_Y'] # np.array(
#f_data.close()
word2index, label2index=None,None
with open(cache_file_pickle, 'rb') as data_f_pickle:
word2index, label2index=pickle.load(data_f_pickle)
print("INFO. cache file load successful...")
return word2index, label2index,train_X,train_Y,vaild_X,valid_Y,test_X,test_Y
#######################################
def compute_f1_score(predict_y,eval_y):
"""
compute f1_score.
:param logits: [batch_size,label_size]
:param evalY: [batch_size,label_size]
:return:
"""
f1_score=0.0
p_5=0.0
r_5=0.0
return f1_score,p_5,r_5
def compute_f1_score_removed(label_list_top5,eval_y):
"""
compute f1_score.
:param logits: [batch_size,label_size]
:param evalY: [batch_size,label_size]
:return:
"""
num_correct_label=0
eval_y_short=get_target_label_short(eval_y)
for label_predict in label_list_top5:
if label_predict in eval_y_short:
num_correct_label=num_correct_label+1
#P@5=Precision@5
num_labels_predicted=len(label_list_top5)
all_real_labels=len(eval_y_short)
p_5=num_correct_label/num_labels_predicted
#R@5=Recall@5
r_5=num_correct_label/all_real_labels
f1_score=2.0*p_5*r_5/(p_5+r_5+0.000001)
return f1_score,p_5,r_5
def compute_confuse_matrix(target_y,predict_y,label_dict,name='default'):
"""
compute true positive (TP), false positive (FP), false negative (FN) given target label and predicted label
:param target_y:
:param predict_y:
:param label_dict {label:(TP,FP,FN)}
:return: macro_f1(a scalar),micro_f1(a scalar)
"""
#1.get target label and predict label
if random.choice([x for x in range(random_number)]) ==1:
print(name+".target_y:",target_y,";predict_y:",predict_y) #debug purpose
#2.count number of TP,FP,FN for each class
y_labels_unique=[]
y_labels_unique.extend(target_y)
y_labels_unique.extend(predict_y)
y_labels_unique=list(set(y_labels_unique))
for i,label in enumerate(y_labels_unique): #e.g. label=2
TP, FP, FN = label_dict[label]
if label in predict_y and label in target_y:#predict=1,truth=1 (TP)
TP=TP+1
elif label in predict_y and label not in target_y:#predict=1,truth=0(FP)
FP=FP+1
elif label not in predict_y and label in target_y:#predict=0,truth=1(FN)
FN=FN+1
label_dict[label] = (TP, FP, FN)
return label_dict
def compute_micro_macro(label_dict):
"""
compute f1 of micro and macro
:param label_dict:
:return: f1_micro,f1_macro: scalar, scalar
"""
f1_micro = compute_f1_micro_use_TFFPFN(label_dict)
f1_macro= compute_f1_macro_use_TFFPFN(label_dict)
return f1_micro,f1_macro
def compute_TF_FP_FN_micro(label_dict):
"""
compute micro FP,FP,FN
:param label_dict_accusation: a dict. {label:(TP, FP, FN)}
:return:TP_micro,FP_micro,FN_micro
"""
TP_micro,FP_micro,FN_micro=0.0,0.0,0.0
for label,tuplee in label_dict.items():
TP,FP,FN=tuplee
TP_micro=TP_micro+TP
FP_micro=FP_micro+FP
FN_micro=FN_micro+FN
return TP_micro,FP_micro,FN_micro
def compute_f1_micro_use_TFFPFN(label_dict):
"""
compute f1_micro
:param label_dict: {label:(TP,FP,FN)}
:return: f1_micro: a scalar
"""
TF_micro_accusation, FP_micro_accusation, FN_micro_accusation =compute_TF_FP_FN_micro(label_dict)
f1_micro_accusation = compute_f1(TF_micro_accusation, FP_micro_accusation, FN_micro_accusation,'micro')
return f1_micro_accusation
def compute_f1_macro_use_TFFPFN(label_dict):
"""
compute f1_macro
:param label_dict: {label:(TP,FP,FN)}
:return: f1_macro
"""
f1_dict= {}
num_classes=len(label_dict)
for label, tuplee in label_dict.items():
TP,FP,FN=tuplee
f1_score_onelabel=compute_f1(TP,FP,FN,'macro')
f1_dict[label]=f1_score_onelabel
f1_score_sum=0.0
for label,f1_score in f1_dict.items():
f1_score_sum=f1_score_sum+f1_score
f1_score=f1_score_sum/float(num_classes)
return f1_score
small_value=0.00001
def compute_f1(TP,FP,FN,compute_type):
"""
compute f1
:param TP: number, e.g. 200
:param FP: number, e.g. 200
:param FN: number, e.g. 200
:return: f1_score: a scalar
"""
precison=TP/(TP+FP+small_value)
recall=TP/(TP+FN+small_value)
f1_score=(2*precison*recall)/(precison+recall+small_value)
if random.choice([x for x in range(500)]) == 1:print(compute_type,"precison:",str(precison),";recall:",str(recall),";f1_score:",f1_score)
return f1_score
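# worked example (comment only): TP=8, FP=2, FN=4 gives precision = 8/10 = 0.8,
# recall = 8/12 ~ 0.667 and f1 = 2*0.8*0.667/(0.8+0.667) ~ 0.727; the small_value terms
# only guard against division by zero and barely change the result.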
def init_label_dict(num_classes):
"""
init label dict. this dict will be used to save TP,FP,FN
:param num_classes:
:return: label_dict: a dict. {label_index:(0,0,0)}
"""
label_dict={}
for i in range(num_classes):
label_dict[i]=(0,0,0)
return label_dict
def get_target_label_short(eval_y):
eval_y_short=[] #will be like:[22,642,1391]
for index,label in enumerate(eval_y):
if label>0:
eval_y_short.append(index)
return eval_y_short
def get_target_label_short_batch(eval_y_big): # tested.
eval_y_short_big=[] #will be like:[22,642,1391]
for ind, eval_y in enumerate(eval_y_big):
eval_y_short=[]
for index,label in enumerate(eval_y):
if label>0:
eval_y_short.append(index)
eval_y_short_big.append(eval_y_short)
return eval_y_short_big
#get top5 predicted labels
def get_label_using_logits(logits,top_number=5):
y_predict_labels = [i for i in range(len(logits)) if logits[i] >= 0.50] # TODO 0.5PW e.g.[2,12,13,10]
if len(y_predict_labels) < 1: y_predict_labels = [np.argmax(logits)]
return y_predict_labels
def get_label_using_logits_batch(logits,top_number=5): # tested.
result_labels=[]
for i in range(len(logits)):
single_logit=logits[i]
labels=get_label_using_logits(single_logit)
result_labels.append(labels)
return result_labels
# compute the accuracy of the predictions
def calculate_accuracy(labels_predicted, labels,eval_counter):
label_nozero=[]
#print("labels:",labels)
labels=list(labels)
for index,label in enumerate(labels):
if label>0:
label_nozero.append(index)
if eval_counter<2:
print("labels_predicted:",labels_predicted," ;labels_nozero:",label_nozero)
count = 0
label_dict = {x: x for x in label_nozero}
for label_predict in labels_predicted:
flag = label_dict.get(label_predict, None)
if flag is not None:
count = count + 1
return count / len(labels)
def compute_confuse_matrix_batch(y_targetlabel_list,y_logits_array,label_dict,name='default'):
"""
compute confuse matrix for a batch
:param y_targetlabel_list: a list; each element is a mulit-hot,e.g. [1,0,0,1,...]
:param y_logits_array: a 2-d array. [batch_size,num_class]
:param label_dict:{label:(TP, FP, FN)}
:param name: a string for debug purpose
:return:label_dict:{label:(TP, FP, FN)}
"""
for i,y_targetlabel_list_single in enumerate(y_targetlabel_list):
label_dict=compute_confuse_matrix(y_targetlabel_list_single,y_logits_array[i],label_dict,name=name)
return label_dict
``` |
{
"source": "jiahenglu/VeblenWedderburn",
"score": 4
} |
#### File: jiahenglu/VeblenWedderburn/NumberSystem.py
```python
listoflists = []
list = []
for i in range(0,10):
list.append(i)
if len(list)>3:
list.remove(list[0])
listoflists.append((list, list[0]))
def numeric_compare(alist1, alist2):
list1 = alist1[0]
list2 = alist2[0]
for i in range(0,len(list1)):
if list1[i] != list2[i]:
return list1[i] - list2[i]
return 0
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
list1 = [4,4,8]
list2 = [1,2,3]
list3 = [1,3,9]
alist1 = [list1,-1]
alist2 = [list2,1]
alist3 = [list3,-7]
listoflists = [alist1,alist2,alist3]
listoflists.sort(key=cmp_to_key(numeric_compare))
def multiply(onelist,varlist):
for i in range(0,len(onelist)):
partlist1 = (onelist[i])[0]
partlist1.append(varlist[0])
partlist1.sort()
(onelist[i])[1] = (onelist[i])[1] * varlist[1]
return onelist
varlist = [6, 2]
n = multiply(listoflists,varlist)
print(n)
``` |
{
"source": "JiaheXu/CIS",
"score": 3
} |
#### File: CIS/CIS1_PA2/cartesian.py
```python
import numpy as np
import scipy
import math
class frame:
def __init__(self, R, t):
self.R = R
self.t = t
r_t = np.concatenate((R, t), 1)
bot = np.array([0, 0, 0, 1])
self.F = np.concatenate((r_t, bot))
def get_R(self):
return self.R
def get_t(self):
return self.t
def get_F(self):
return self.F
#Get the transpose of Rotation matrix
def Ri(R):
return np.transpose(R)
#get the Rotation matrix
def get_R(F):
return F[0:3, 0:3]
#get the translation matrix
def get_t(F):
return F[0:3, 3].reshape(3, 1)
#Determines whether the given matrix is a rotation matrix
def isRot(R):
det = np.linalg.det(R)
# a proper rotation matrix has determinant +1
return np.isclose(det, 1.0)
#Given Rotation and translation, return a homogeneous matrix
def concat_frame(R, t):
t = t.reshape(3,1)
r_t = np.concatenate((R, t), 1)
bot = np.array([0, 0, 0, 1]).reshape(1,4)
return np.concatenate((r_t, bot))
#Get the inverse of a frame
def Fi(F):
tmp_ri = Ri(get_R(F))
tmp_ti = np.dot(-tmp_ri, get_t(F))
return concat_frame(tmp_ri, tmp_ti)
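# Note (comment only): Fi builds [R^T, -R^T t; 0 0 0 1], so Fi(F) @ F should equal the
# 4x4 identity (up to floating point error) for any valid frame F.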
# points: n*3 F:4*4
def points_transform(F,points):
n = np.shape(points)[0]
t = np.ones((n,1))
points = np.concatenate((points,t) , 1)
points = points.T
points = F@points
points = (points.T)[:,0:3]
return points
def skew(vec):
# print(vec.shape)
x = vec[0]
y = vec[1]
z = vec[2]
tmp_m = [[0, -z, y],
[z, 0, -x],
[-y, x, 0]]
return np.array(tmp_m)
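# Property check (comment only): skew(a) @ b equals np.cross(a, b) for 3-vectors a and b,
# which is how this operator is typically used when linearizing small rotations.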
def combinations(N, k):
return (math.factorial(N)) / (math.factorial(k) * math.factorial(N-k))
```
#### File: CIS/CIS1_PA2/eval_all.py
```python
from genericpath import exists
import numpy as np
import glob
from distort_calibration import *
from cartesian import *
from registration_3d import *
from optical_tracking import *
from em_tracking import *
from eval import *
from pathlib import Path
import argparse
import csv
def main():
# args = parse_args()
runtype = 0
run = 0
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']
type = ['debug', 'unknown']
data_dir = "DATA/"
# first read in Calbody (Nd, Na, Nc)
# then readings (ND, NA, NC, nframes)
tmp_all = np.array([0, 0, 0])
for i in range(7):
run = i
# the last output determines whether to show the result in terminal
em_pivot = em_tracking( data_dir , type[runtype] , letters[run] , output = 0)
optical_pivot = optical_tracking( data_dir , type[runtype] , letters[run] , output = 0)
tmp_ce = distort_calibration( data_dir , type[runtype] , letters[run] ,output = 0)
C_exp = tmp_ce[0]
Nc = tmp_ce[1]
Nframes = tmp_ce[2]
# print(optical_pivot.shape)
ep = np.transpose(em_pivot)
op = np.transpose(optical_pivot)
em_rounded = np.round(em_pivot.reshape(3), decimals=2)
opt_rounded = np.round(optical_pivot.reshape(3), decimals=2)
C_exp_rounded = np.round(C_exp, decimals=2)
tmp = eval( data_dir , type[runtype] , letters[run] , C_exp_rounded, em_rounded.reshape(3, 1), opt_rounded.reshape(3, 1))
np_avg = np.array([tmp[0], tmp[4], tmp[8]])
tmp_all = tmp_all + np_avg
runtype = 1
for i in range(4):
run = i + 7
# the last output determines whether to show the result in terminal
em_pivot = em_tracking( data_dir , type[runtype] , letters[run] , output = 0)
optical_pivot = optical_tracking( data_dir , type[runtype] , letters[run] , output = 0)
tmp_ce = distort_calibration( data_dir , type[runtype] , letters[run] ,output = 0)
C_exp = tmp_ce[0]
Nc = tmp_ce[1]
Nframes = tmp_ce[2]
# print(optical_pivot.shape)
ep = np.transpose(em_pivot)
op = np.transpose(optical_pivot)
em_rounded = np.round(em_pivot.reshape(3), decimals=2)
opt_rounded = np.round(optical_pivot.reshape(3), decimals=2)
C_exp_rounded = np.round(C_exp, decimals=2)
tmp = eval( data_dir , type[runtype] , letters[run] , C_exp_rounded, em_rounded.reshape(3, 1), opt_rounded.reshape(3, 1))
np_avg = [tmp[0], tmp[4], tmp[8]]
tmp_all = tmp_all + np.array(np_avg)
eval_results = tmp_all / 11
print(eval_results)
# print(em_pivot, optical_pivot)
# print(Nc, Nframes)
if __name__ == '__main__':
main()
```
#### File: CIS/CIS1_PA2/eval.py
```python
import glob
import numpy as np
import cartesian
from registration_3d import *
from pivot_calibration import pivot_calibration
import copy
import os
from improved_em_tracking import *
from distortion_correction import *
from fiducials import *
#Evaluation for all data
#data_dir, data_type, letter: Values used for composing the data file names.
# Cexp: The C_exp data calculated from distortion_calibration
# empivot: data calculated from em_tracking
#optpivot: calculated from op-tracking
#ct: calculated from em_nav2ct
def eval( data_dir , data_type , letter, Cexp, empivot, optpivot, ct, F_reg, pivot_set):
# file_path = glob.glob(data_dir + 'pa1-' + data_type + '-' + letter + '-output')
################################################################################
#For output1, this part is mostly the same as in PA1
file_path = 'pa2-' + data_type + '-' + letter + '-output1'
path_lst = []
for file in os.listdir(data_dir):
if file.startswith(file_path):
path_lst.append(file)
output_data = []
for file in path_lst:
read_file = open(os.path.join(data_dir, file), mode='r')
lines = read_file.read().splitlines()
for num in range(len(lines)):
output_data.append( lines[num].split(',') )
for i in range(len(output_data[-1])):
output_data[-1][i] = output_data[-1][i].strip()
Nc = int(output_data[0][0])
Nframes = int(output_data[0][1])
output_name = output_data[0][2]
output_data = np.asarray(output_data[1:]).astype(float)
em_pivot = (output_data[0, :].reshape(3, 1))
# print(em_pivot)
opt_pivot = output_data[1, :].reshape(3, 1)
output_data = output_data[2:]
row = output_data.shape[0]
col = output_data.shape[1]
assert output_data.shape == Cexp.shape, "Dimensions of the input Cexp and file data shape must be the same!"
# print(em_pivot.shape)
diff_arr_ce = np.abs(output_data - Cexp).reshape(row * col)
# print(np.abs(em_pivot - empivot).shape)
diff_arr_em = np.abs(em_pivot - empivot).reshape(3)
diff_arr_opt = np.abs(opt_pivot - optpivot).reshape(3)
################################################################################
#For output2, this part was added. It extracts the data from output2 to evaluate F_reg and ct_points
output_path = glob.glob(data_dir + "pa2-" + data_type + '-' + letter + '-output2.txt')
output_data = []
read_file = open(output_path[0], mode='r')
lines = read_file.read().splitlines()
for num in range(len(lines)):
output_data.append( lines[num].split(',') )
for i in range(len(output_data[-1])):
output_data[-1][i] = output_data[-1][i].strip()
Nframes = int(output_data[0][0])
output_name = output_data[0][1]
#Ground truth answers
ct_points = np.asarray(output_data[1:]).astype(float)
rows, cols = ct_points.shape
# print(output_data.shape)
#Calculating the answer for F_reg which should be closer to ground truth,
#acting as a source for evaluation.
F_reg_gt = registration_3d(pivot_set, ct_points)
#Differences in both CT_points and F_reg
diff_arr_ct = np.abs(ct_points - ct).reshape(rows * cols)
diff_freg = np.abs(F_reg - F_reg_gt)
return (np.average(diff_arr_ce), np.var(diff_arr_ce), np.max(diff_arr_ce), np.min(diff_arr_ce),
np.average(diff_arr_em), np.var(diff_arr_em), np.max(diff_arr_em), np.min(diff_arr_em),
np.average(diff_arr_opt), np.var(diff_arr_opt), np.max(diff_arr_opt), np.min(diff_arr_opt),
np.average(diff_arr_ct), np.var(diff_arr_ct), np.max(diff_arr_ct), np.min(diff_arr_ct),
np.average(diff_freg), np.var(diff_freg), np.max(diff_freg), np.min(diff_freg))
``` |
{
"source": "JiaheXu/CS285_2020_pytorch",
"score": 2
} |
#### File: cs285/scripts/run_hw3_dqn.py
```python
import os
import time
from cs285.infrastructure.rl_trainer import RL_Trainer
from cs285.agents.dqn_agent import DQNAgent
from cs285.infrastructure.dqn_utils import get_env_kwargs
class Q_Trainer(object):
def __init__(self, params):
self.params = params
train_args = {
'num_agent_train_steps_per_iter': params['num_agent_train_steps_per_iter'],
'num_critic_updates_per_agent_update': params['num_critic_updates_per_agent_update'],
'train_batch_size': params['batch_size'],
'double_q': params['double_q'],
}
env_args = get_env_kwargs(params['env_name'])
self.agent_params = {**train_args, **env_args, **params}
self.params['agent_class'] = DQNAgent
self.params['agent_params'] = self.agent_params
self.params['train_batch_size'] = params['batch_size']
self.params['env_wrappers'] = self.agent_params['env_wrappers']
self.rl_trainer = RL_Trainer(self.params)
def run_training_loop(self):
self.rl_trainer.run_training_loop(
self.agent_params['num_timesteps'],
collect_policy = self.rl_trainer.agent.actor,
eval_policy = self.rl_trainer.agent.actor,
)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--env_name',
default='MsPacman-v0',
choices=('PongNoFrameskip-v4', 'LunarLander-v3', 'MsPacman-v0')
)
parser.add_argument('--ep_len', type=int, default=200)
parser.add_argument('--exp_name', type=str, default='todo')
parser.add_argument('--eval_batch_size', type=int, default=1000)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1)
parser.add_argument('--num_critic_updates_per_agent_update', type=int, default=1)
parser.add_argument('--double_q', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--no_gpu', '-ngpu', action='store_true')
parser.add_argument('--which_gpu', '-gpu_id', default=0)
parser.add_argument('--scalar_log_freq', type=int, default=int(1e4))
parser.add_argument('--video_log_freq', type=int, default=-1)
parser.add_argument('--save_params', action='store_true')
args = parser.parse_args()
# convert to dictionary
params = vars(args)
params['video_log_freq'] = -1 # This param is not used for DQN
##################################
### CREATE DIRECTORY FOR LOGGING
##################################
data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data')
if not (os.path.exists(data_path)):
os.makedirs(data_path)
logdir = 'hw3_' + args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join(data_path, logdir)
params['logdir'] = logdir
if not(os.path.exists(logdir)):
os.makedirs(logdir)
print("\n\n\nLOGGING TO: ", logdir, "\n\n\n")
trainer = Q_Trainer(params)
trainer.run_training_loop()
if __name__ == "__main__":
main()
# python cs285/scripts/run_hw3_dqn.py --env_name LunarLander-v3 --exp_name q2_doubledqn_1 --double_q --seed 1
# python cs285/scripts/run_hw3_dqn.py --env_name LunarLander-v3 --exp_name q2_doubledqn_2 --double_q --seed 2
# python cs285/scripts/run_hw3_dqn.py --env_name LunarLander-v3 --exp_name q2_doubledqn_3 --double_q --seed 3
# python cs285/scripts/run_hw3_dqn.py --env_name LunarLander-v3 --exp_name q3_hparam1 [added an extra layer to network]
# python cs285/scripts/run_hw3_dqn.py --env_name LunarLander-v3 --exp_name q3_hparam2 [used layer size of 128]
# python cs285/scripts/run_hw3_dqn.py --env_name LunarLander-v3 --exp_name q3_hparam3 [used layer size + extra layer of 128]
```
#### File: cs285/policies/MPC_policy.py
```python
import numpy as np
from .base_policy import BasePolicy
class MPCPolicy(BasePolicy):
def __init__(self,
env,
ac_dim,
dyn_models,
horizon,
N,
sample_strategy='random',
cem_iterations=4,
cem_num_elites=5,
cem_alpha=1,
**kwargs
):
super().__init__(**kwargs)
# init vars
self.env = env
self.dyn_models = dyn_models
self.horizon = horizon
self.N = N
self.data_statistics = None # NOTE must be updated from elsewhere
self.ob_dim = self.env.observation_space.shape[0]
# action space
self.ac_space = self.env.action_space
self.ac_dim = ac_dim
self.low = self.ac_space.low
self.high = self.ac_space.high
# Sampling strategy
allowed_sampling = ('random', 'cem')
assert sample_strategy in allowed_sampling, f"sample_strategy must be one of the following: {allowed_sampling}"
self.sample_strategy = sample_strategy
self.cem_iterations = cem_iterations
self.cem_num_elites = cem_num_elites
self.cem_alpha = cem_alpha
print(f"Using action sampling strategy: {self.sample_strategy}")
if self.sample_strategy == 'cem':
print(f"CEM params: alpha={self.cem_alpha}, "
+ f"num_elites={self.cem_num_elites}, iterations={self.cem_iterations}")
def sample_action_sequences(self, num_sequences, horizon, obs=None):
act_seq_size = (num_sequences, horizon, self.ac_dim)
if self.sample_strategy == 'random' \
or (self.sample_strategy == 'cem' and obs is None):
# TODO(Q1) uniformly sample trajectories and return an array of
# dimensions (num_sequences, horizon, self.ac_dim) in the range
# [self.low, self.high]
random_action_sequences = np.random.uniform(low=self.low, high=self.high, size=act_seq_size)
return random_action_sequences
elif self.sample_strategy == 'cem':
# TODO(Q5): Implement action selection using CEM.
# Begin with randomly selected actions, then refine the sampling distribution
# iteratively as described in Section 3.3, "Iterative Random-Shooting with Refinement" of
# https://arxiv.org/pdf/1909.11652.pdf
cem_mu, cem_var = np.zeros(horizon * self.ac_dim), np.zeros(horizon * self.ac_dim)
for i in range(self.cem_iterations):
# - Sample candidate sequences from a Gaussian with the current
# elite mean and variance
# (Hint: remember that for the first iteration, we instead sample
# uniformly at random just like we do for random-shooting)
# - Get the top `self.cem_num_elites` elites
# (Hint: what existing function can we use to compute rewards for
# our candidate sequences in order to rank them?)
# - Update the elite mean and variance
if i == 0:
action_sequences = \
np.random.uniform(low=self.low, high=self.high, size=act_seq_size)
else:
action_sequences = \
np.random.multivariate_normal(
mean=cem_mu,
cov=np.diag(cem_var),
size=num_sequences-self.cem_num_elites
)
action_sequences = np.concatenate((action_sequences,elites), axis=0).reshape(*act_seq_size)
ensemble_sum_of_rewards = self.evaluate_candidate_sequences(action_sequences, obs)
elites_idx = np.argpartition(ensemble_sum_of_rewards, -self.cem_num_elites)[-self.cem_num_elites:]
elites = action_sequences[elites_idx].reshape(self.cem_num_elites, -1)
elites_mu, elites_var = elites.mean(axis=0), elites.var(axis=0)
cem_mu = self.cem_alpha * elites_mu + (1 - self.cem_alpha) * cem_mu
cem_var = self.cem_alpha * elites_var + (1 - self.cem_alpha) * cem_var
# TODO(Q5): Set `cem_action` to the appropriate action sequence chosen by CEM.
# The shape should be (horizon, self.ac_dim)
cem_action = action_sequences[ensemble_sum_of_rewards.argmax()]
return cem_action[None]
else:
raise Exception(f"Invalid sample_strategy: {self.sample_strategy}")
def evaluate_candidate_sequences(self, candidate_action_sequences, obs) -> np.ndarray:
# TODO(Q2): for each model in ensemble, compute the predicted sum of rewards
# for each candidate action sequence.
#
# Then, return the mean predictions across all ensembles.
# Hint: the return value should be an array of shape (N,)
ensemble_sum_of_rewards = np.zeros(self.N)
for model in self.dyn_models:
candidate_seq_rews = self.calculate_sum_of_rewards(obs, candidate_action_sequences, model)
ensemble_sum_of_rewards += candidate_seq_rews
ensemble_sum_of_rewards /= len(self.dyn_models)
return ensemble_sum_of_rewards
def get_action(self, obs):
if self.data_statistics is None:
return self.sample_action_sequences(num_sequences=1, horizon=1)[0]
# sample random actions (N x horizon)
candidate_action_sequences = self.sample_action_sequences(
num_sequences=self.N, horizon=self.horizon, obs=obs)
if candidate_action_sequences.shape[0] == 1:
# CEM: only a single action sequence to consider; return the first action
return candidate_action_sequences[0][0][None]
else:
predicted_rewards = self.evaluate_candidate_sequences(candidate_action_sequences, obs)
# pick the action sequence and return the 1st element of that sequence
best_action_sequence = candidate_action_sequences[np.argmax(predicted_rewards)] # TODO (Q2)
action_to_take = best_action_sequence[0] # TODO (Q2)
return action_to_take[None] # Unsqueeze the first index
def calculate_sum_of_rewards(self, obs, candidate_action_sequences, model):
"""
:param obs: numpy array with the current observation. Shape [D_obs]
:param candidate_action_sequences: numpy array with the candidate action
sequences. Shape [N, H, D_action] where
- N is the number of action sequences considered
- H is the horizon
- D_action is the action of the dimension
:param model: The current dynamics model.
:return: numpy array with the sum of rewards for each action sequence.
The array should have shape [N].
"""
assert candidate_action_sequences.shape[0] == self.N \
and candidate_action_sequences.shape[1] == self.horizon
sum_of_rewards = np.zeros(self.N) # TODO (Q2)
batch_obs = np.tile(obs, (self.N, 1))
for idx in range(self.horizon):
batch_act = candidate_action_sequences[:,idx,:]
batch_rew, batch_done = self.env.get_reward(batch_obs, batch_act)
sum_of_rewards += batch_rew
batch_obs = model.get_prediction(batch_obs, batch_act, self.data_statistics)
# For each candidate action sequence, predict a sequence of
# states for each dynamics model in your ensemble.
# Once you have a sequence of predicted states from each model in
# your ensemble, calculate the sum of rewards for each sequence
# using `self.env.get_reward(predicted_obs, action)` at each step.
            # You should sum across `self.horizon` time steps.
# Hint: you should use model.get_prediction and you shouldn't need
# to import pytorch in this file.
# Hint: Remember that the model can process observations and actions
# in batch, which can be much faster than looping through each
# action sequence.
return sum_of_rewards
```
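The CEM refinement above can be exercised in isolation. The following is a minimal sketch (with a made-up quadratic reward function and illustrative hyperparameters, not part of the homework code) of the same elite-mean/variance smoothing:
```python
import numpy as np

def cem_plan(reward_fn, dim, n_samples=500, n_elites=50, n_iters=4, alpha=1.0, low=-1.0, high=1.0):
    # iteration 0 samples uniformly; later iterations sample around the smoothed elite statistics
    mu, var = np.zeros(dim), np.ones(dim)
    for i in range(n_iters):
        if i == 0:
            samples = np.random.uniform(low, high, size=(n_samples, dim))
        else:
            samples = np.random.multivariate_normal(mu, np.diag(var), size=n_samples)
        rewards = reward_fn(samples)                        # shape (n_samples,)
        elites = samples[np.argsort(rewards)[-n_elites:]]   # top-k candidates by reward
        mu = alpha * elites.mean(axis=0) + (1 - alpha) * mu
        var = alpha * elites.var(axis=0) + (1 - alpha) * var
    return samples[rewards.argmax()]

best = cem_plan(lambda x: -np.sum((x - 0.3) ** 2, axis=1), dim=2)
print(best)  # converges towards [0.3, 0.3]
```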
#### File: cs285/policies/argmax_policy.py
```python
import numpy as np
import pdb
class ArgMaxPolicy(object):
def __init__(self, critic):
self.critic = critic
def set_critic(self, critic):
self.critic = critic
def get_action(self, obs):
# MJ: changed the dimension check to a 3
if len(obs.shape) > 3:
observation = obs
else:
observation = obs[None]
# TODO/Done: get this from hw3
action = np.argmax(self.critic.qa_values(observation), axis=1)
return action.squeeze().item()
####################################
####################################
```
#### File: cs285/policies/MLP_policy.py
```python
import abc
import itertools
from torch import nn
from torch.nn import functional as F
from torch import optim
import numpy as np
import torch
from torch import distributions
from cs285.infrastructure import pytorch_util as ptu
from cs285.policies.base_policy import BasePolicy
class MLPPolicy(BasePolicy, nn.Module, metaclass=abc.ABCMeta):
def __init__(self,
ac_dim,
ob_dim,
n_layers,
size,
discrete=False,
learning_rate=1e-4,
training=True,
nn_baseline=False,
**kwargs
):
super().__init__(**kwargs)
# init vars
self.ac_dim = ac_dim
self.ob_dim = ob_dim
self.n_layers = n_layers
self.discrete = discrete
self.size = size
self.learning_rate = learning_rate
self.training = training
self.nn_baseline = nn_baseline
if self.discrete:
self.logits_na = ptu.build_mlp(input_size=self.ob_dim,
output_size=self.ac_dim,
n_layers=self.n_layers,
size=self.size)
self.logits_na.to(ptu.device)
self.mean_net = None
self.logstd = None
self.optimizer = optim.Adam(self.logits_na.parameters(),
self.learning_rate)
else:
self.logits_na = None
self.mean_net = ptu.build_mlp(input_size=self.ob_dim,
output_size=self.ac_dim,
n_layers=self.n_layers, size=self.size)
self.logstd = nn.Parameter(
torch.zeros(self.ac_dim, dtype=torch.float32, device=ptu.device)
)
self.mean_net.to(ptu.device)
self.logstd.to(ptu.device)
self.optimizer = optim.Adam(
itertools.chain([self.logstd], self.mean_net.parameters()),
self.learning_rate
)
if nn_baseline:
self.baseline = ptu.build_mlp(
input_size=self.ob_dim,
output_size=1,
n_layers=self.n_layers,
size=self.size,
)
self.baseline.to(ptu.device)
self.baseline_optimizer = optim.Adam(
self.baseline.parameters(),
self.learning_rate,
)
else:
self.baseline = None
##################################
def save(self, filepath):
torch.save(self.state_dict(), filepath)
##################################
# query the policy with observation(s) to get selected action(s)
def get_action(self, obs: np.ndarray) -> np.ndarray:
# TODO/Done: get this from hw1
if len(obs.shape) > 1:
observation = obs
else:
observation = obs[None]
        observation = ptu.from_numpy(observation.astype(np.float32))
act_dist = self.forward(observation)
act = act_dist.sample()
return ptu.to_numpy(act)
####################################
####################################
# update/train this policy
def update(self, observations, actions, **kwargs):
raise NotImplementedError
# This function defines the forward pass of the network.
# You can return anything you want, but you should be able to differentiate
# through it. For example, you can return a torch.FloatTensor. You can also
# return more flexible objects, such as a
# `torch.distributions.Distribution` object. It's up to you!
def forward(self, observation: torch.FloatTensor):
# TODO/Done: get this from hw1
if self.discrete:
logits_na = self.logits_na(observation)
act_dist = distributions.Categorical(logits=logits_na)
else:
mean_na = self.mean_net(observation)
std_na = torch.exp(self.logstd)
# helpful: difference between multivariatenormal and normal sample/batch/event shapes:
# https://bochang.me/blog/posts/pytorch-distributions/
# https://ericmjl.github.io/blog/2019/5/29/reasoning-about-shapes-and-probability-distributions/
act_dist = distributions.MultivariateNormal(loc=mean_na, scale_tril=torch.diag(std_na))
return act_dist
####################################
####################################
#####################################################
#####################################################
class MLPPolicyAC(MLPPolicy):
# MJ: cut acs_labels_na and qvals from the signature if they are not used
def update(
self, observations, actions,
adv_n=None, acs_labels_na=None, qvals=None
):
raise NotImplementedError
# Not needed for this homework
####################################
####################################
``` |
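As a side note to the shape links referenced in `forward`, here is a standalone illustration (not part of the homework code) of why `MultivariateNormal` with `scale_tril` yields one log-probability per batched action while `Categorical` returns index-valued samples:
```python
import torch
from torch import distributions

mean = torch.zeros(5, 2)                      # batch of 5 observations, ac_dim = 2
std = torch.ones(2)
mvn = distributions.MultivariateNormal(loc=mean, scale_tril=torch.diag(std))
print(mvn.batch_shape, mvn.event_shape)       # torch.Size([5]) torch.Size([2])
print(mvn.sample().shape)                     # torch.Size([5, 2])
print(mvn.log_prob(torch.zeros(5, 2)).shape)  # torch.Size([5]) -- one log-prob per action

cat = distributions.Categorical(logits=torch.zeros(5, 3))
print(cat.sample().shape)                     # torch.Size([5]) -- discrete action indices
```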
{
"source": "JiaheXu/MATH",
"score": 3
} |
#### File: MATH/error_and_fp/test_norms.py
```python
import numpy as np
def test_norm_1():
from norms import norm_1
for i in range(10):
A = np.random.randn(20, 20)
x = np.random.randn(20)
assert norm_1(A@x) <= norm_1(A) * norm_1(x)
``` |
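For reference, a `norm_1` consistent with this test (an assumption about what `norms.py` implements) is the vector 1-norm together with the induced matrix 1-norm, i.e. the maximum absolute column sum, for which the submultiplicativity checked above holds:
```python
import numpy as np

def norm_1(a):
    a = np.asarray(a)
    if a.ndim == 1:
        return np.sum(np.abs(a))               # vector 1-norm
    return np.max(np.sum(np.abs(a), axis=0))   # induced matrix 1-norm: max absolute column sum
```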
{
"source": "JiaHe-yogurt/GNN",
"score": 2
} |
#### File: GNN/data_loader/data_helper.py
```python
import os
import collections
import random
import pickle
import copy
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from sklearn import preprocessing
from itertools import permutations, combinations
from numpy.linalg import matrix_power
from scipy import sparse
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
NUM_LABELS = {'ENZYMES': 3, 'COLLAB': 0, 'IMDBBINARY': 0, 'IMDBMULTI': 0, 'MUTAG': 7, 'NCI1': 37, 'NCI109': 38, 'PROTEINS': 3, 'PTC': 22, 'DD': 89}
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def normalize(graph):
D_inv = np.diag(np.sum(graph, axis=0) ** -0.5)
graph = np.matmul(np.matmul(D_inv, graph), D_inv)
return graph
def A_power(graph_adj):
top = graph_adj.shape[0]-1
D_inv = np.diag(np.sum(graph_adj, axis=0) ** -0.5)
graph_adj = np.matmul(np.matmul(D_inv, graph_adj), D_inv)
adj_powers=[matrix_power(graph_adj,i+1) - matrix_power(graph_adj,i) for i in range(1, top+1)]
adj_powers.insert(0,graph_adj)
return np.array(adj_powers)
#top = graph_adj.shape[0]
#adj_powers, diffs = [],[]
#adj_powers.append(graph_adj)
#diffs.append(graph_adj)
#for p in range(2,top+1):
# power, diff = correct_A_power(p, graph_adj, adj_powers)
# adj_powers.append(power), diffs.append(diff)
    # return np.array(diffs)  # unreachable: belongs to the commented-out alternative above
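# Example (sanity check): for a 3-node path graph, A_power returns an array of
# shape (3, 3, 3) -- the symmetrically normalized adjacency D^{-1/2} A D^{-1/2}
# followed by the differences of its successive matrix powers.
# >>> A_power(np.array([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]])).shape
# (3, 3, 3)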
def correct_A_power(p,graph_adj,adj_powers):
adj_power = matrix_power(graph_adj,p) + adj_powers[-1]
np.fill_diagonal(adj_power, 0)
adj_power[np.where(adj_power > 0)] = 1
diff = adj_power - adj_powers[-1]
return adj_power, diff
def load_dataset_ori(ds_name):
"""
construct graphs and labels from dataset text in data folder
:param ds_name: name of data set you want to load
:return: two numpy arrays of shape (num_of_graphs).
    the graphs array contains in each entry an ndarray representing the adjacency matrix of a graph, of shape (num_vertex, num_vertex, num_vertex_labels)
    the labels array at index i holds the class of graphs[i]
"""
directory = BASE_DIR + "/data/benchmark_graphs/{0}/{0}.txt".format(ds_name)
graphs = []
labels = []
with open(directory, "r") as data:
num_graphs = int(data.readline().rstrip().split(" ")[0])
for i in range(num_graphs):
graph_meta = data.readline().rstrip().split(" ")
num_vertex = int(graph_meta[0])
curr_graph = np.zeros(shape=(num_vertex, num_vertex, NUM_LABELS[ds_name]+1), dtype=np.float32)
labels.append(int(graph_meta[1]))
for j in range(num_vertex):
vertex = data.readline().rstrip().split(" ")
if NUM_LABELS[ds_name] != 0:
curr_graph[j, j, int(vertex[0])+1] = 1.
for k in range(2,len(vertex)):
curr_graph[j, int(vertex[k]), 0] = 1.
# curr_graph = noramlize_graph(curr_graph)
graphs.append(curr_graph)
graphs = np.array(graphs)
for i in range(graphs.shape[0]):
graphs[i] = np.transpose(graphs[i], [2,0,1])
return graphs, np.array(labels)
def load_dataset(ds_name):
directory = BASE_DIR + "/data/benchmark_graphs/{0}/{0}.txt".format(ds_name)
graphs = []
labels = []
with open(directory, "r") as data:
num_graphs = int(data.readline().rstrip().split(" ")[0])
for i in range(num_graphs):
graph_meta = data.readline().rstrip().split(" ")
num_vertex = int(graph_meta[0])
curr_graph = np.zeros(shape=(num_vertex, num_vertex, NUM_LABELS[ds_name] + 1), dtype=np.float32)
labels.append(int(graph_meta[1])) # ori
for j in range(num_vertex):
vertex = data.readline().rstrip().split(" ")
if NUM_LABELS[ds_name] != 0:
curr_graph[j, j, int(vertex[0]) + 1] = int(vertex[0]) + 1
for k in range(2, len(vertex)):
curr_graph[j, int(vertex[k]), 0] = 1.
# curr_graph = noramlize_graph(curr_graph)
graphs.append(curr_graph)
graphs = np.array(graphs)
labels = np.array(labels) # ori
# dim = [graph.shape[0] for graph in graphs]
# sort = (sorted([(x, i) for (i, x) in enumerate(dim)], reverse=True)[:110])
# graphs = np.delete(graphs, ([sort[i][1] for i in range(len(sort))]), axis=0)
# labels = np.delete(labels, ([sort[i][1] for i in range(len(sort))]), axis=0)
for i in range(graphs.shape[0]):
graphs[i] = np.transpose(graphs[i], [2, 0, 1]) ## ori: use all features
# edge_feature = Edge_Label(graphs[i])
# adj_powers = A_power(graphs[i][0])
# graphs[i] = np.concatenate((adj_powers, edge_feature), axis=0)
adj_powers = A_power(graphs[i][0])
graphs[i] = np.concatenate((graphs[i], adj_powers[1:]), axis=0)
# max_dim = max([graph.shape[0] for graph in graphs])
# for i in range(graphs.shape[0]):
# padded = np.zeros((max_dim - graphs[i].shape[0], graphs[i].shape[1], graphs[i].shape[2]))
# graphs[i] = np.concatenate((graphs[i], padded), axis=0)
return graphs, labels
def load_dataset2s(ds_name):
graph_dict=dict(zip([5,6,9,12,15,16,25], [0.7,0.7,0.6,0.8,0.8,0.8,0.7]))
num_rep=[100,100,100,200,200,200,200]
# graph_dict=dict(zip([5,6,9,12,15,16], [0.7,0.7,0.6,0.8, 0.8,0.8]))
# num_rep=[100,100,100,200,200,200]
graphs = []
labels = []
for num, (k,v) in zip(num_rep, graph_dict.items()):
G = nx.erdos_renyi_graph(k, v, seed=1, directed=False)
#plt.subplot(121)
#nx.draw(G,with_labels=True)
label=nx.clique.graph_clique_number(G)
A=nx.to_numpy_matrix(G,nodelist=list(range(len(G.nodes))))
graphs.append(A)
labels.append(label)
for graph in range(num):
node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random())))
G_new = nx.relabel_nodes(G, node_mapping)
A_new=nx.to_numpy_matrix(G_new, nodelist=list(range(len(G_new.nodes))))
graphs.append(A_new)
labels.append(label)
graphs = np.array(graphs)
labels = np.array(labels)
for i in range(graphs.shape[0]):
# graphs[i] = A_power(graphs[i])
graphs[i] = np.expand_dims(graphs[i], axis=0) # use only A
# max_dim = max([graph.shape[0] for graph in graphs])
# for i in range(graphs.shape[0]):
# padded = np.zeros((max_dim-graphs[i].shape[0], graphs[i].shape[1], graphs[i].shape[1]))
# graphs[i] =np.concatenate([graphs[i], padded], axis=0)
le = preprocessing.LabelEncoder() # to find clique
le.fit(labels) # to find clique
labels = le.transform(labels) # to find clique
return graphs, labels
def load_dataset_2s_val(ds_name):
"""
construct graphs and labels from dataset text in data folder
:param ds_name: name of data set you want to load
:return: two numpy arrays of shape (num_of_graphs).
    the graphs array contains in each entry an ndarray representing the adjacency matrix of a graph, of shape (num_vertex, num_vertex, num_vertex_labels)
    the labels array at index i holds the class of graphs[i]
"""
graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7]))
num_rep = [20, 20, 20, 30, 30, 30, 30]
# graph_dict=dict(zip([5,6,9], [0.6,0.7,0.6]))
# num_rep=[3,3,3]
graphs = []
labels = []
for num, (k, v) in zip(num_rep, graph_dict.items()):
G = nx.erdos_renyi_graph(k, v, seed=1, directed=False)
# plt.subplot(121)
# nx.draw(G,with_labels=True)
label = nx.clique.graph_clique_number(G)
A = nx.to_numpy_matrix(G, nodelist=list(range(len(G.nodes))))
for graph in range(num):
node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random())))
G_new = nx.relabel_nodes(G, node_mapping)
u, v = random.sample(range(G_new.number_of_nodes() + 1), 2)
G_new.add_edge(u, v)
if G_new.number_of_edges() == G.number_of_edges() + 1:
if nx.clique.graph_clique_number(G_new) == label:
A_new = nx.to_numpy_matrix(G_new, nodelist=list(range(len(G_new.nodes))))
graphs.append(A_new)
labels.append(label)
graphs = np.array(graphs)
labels = np.array(labels)
for i in range(graphs.shape[0]):
# graphs[i] = np.transpose(graphs[i], [2,0,1]) ## ori: use all features
graphs[i] = np.expand_dims(np.expand_dims(graphs[i], axis=0), axis=0) # use only A
le = preprocessing.LabelEncoder() # to find clique
le.fit(labels) # to find clique
labels = le.transform(labels) # to find clique
return graphs, labels
def load_dataset2m(ds_name):
graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7]))
num_rep = [100, 100, 100, 200, 200, 200, 200]
# graph_dict=dict(zip([5,6,9], [0.6,0.7,0.6]))
# num_rep=[3,3,3]
graphs = []
labels = []
for num, (k, v) in zip(num_rep, graph_dict.items()):
G = nx.erdos_renyi_graph(k, v, seed=1, directed=False)
# plt.subplot(121)
# nx.draw(G,with_labels=True)
label = nx.clique.graph_clique_number(G)
A = nx.to_numpy_matrix(G, nodelist=list(range(len(G.nodes))))
graphs.append(A)
labels.append(label)
for graph in range(num):
node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random())))
G_new = nx.relabel_nodes(G, node_mapping)
u, v = random.sample(range(G_new.number_of_nodes() + 1), 2)
G_new.add_edge(u, v)
if G_new.number_of_edges() == G.number_of_edges() + 1:
if nx.clique.graph_clique_number(G_new) == label:
A_new = nx.to_numpy_matrix(G_new, nodelist=list(range(len(G_new.nodes))))
graphs.append(A_new)
labels.append(label)
graphs = np.array(graphs)
labels = np.array(labels)
for i in range(graphs.shape[0]):
# graphs[i] = np.transpose(graphs[i], [2,0,1]) ## ori: use all features
graphs[i] = np.expand_dims(graphs[i], axis=0) # use only A
le = preprocessing.LabelEncoder() # to find clique
le.fit(labels) # to find clique
labels = le.transform(labels) # to find clique
# idx = np.where(labels == 4)[0] # balance data
# labels = np.delete(labels, idx[:700]) # labels = labels[:2000]
# graphs = np.delete(graphs, idx[:700], axis=0) # graphs= graphs[:2000]
return graphs, labels
def get_train_val_indexes(num_val, ds_name):
"""
reads the indexes of a specific split to train and validation sets from data folder
:param num_val: number of the split
:param ds_name: name of data set
:return: indexes of the train and test graphs
"""
directory = BASE_DIR + "/data/benchmark_graphs/{0}/10fold_idx".format(ds_name)
train_file = "train_idx-{0}.txt".format(num_val)
train_idx = []
with open(os.path.join(directory, train_file), 'r') as file:
for line in file:
train_idx.append(int(line.rstrip()))
test_file = "test_idx-{0}.txt".format(num_val)
test_idx = []
with open(os.path.join(directory, test_file), 'r') as file:
for line in file:
test_idx.append(int(line.rstrip()))
return train_idx, test_idx
def get_parameter_split(ds_name):
"""
reads the indexes of a specific split to train and validation sets from data folder
:param ds_name: name of data set
:return: indexes of the train and test graphs
"""
directory = BASE_DIR + "/data/benchmark_graphs/{0}/".format(ds_name)
train_file = "tests_train_split.txt"
train_idx = []
with open(os.path.join(directory, train_file), 'r') as file:
for line in file:
train_idx.append(int(line.rstrip()))
test_file = "tests_val_split.txt"
test_idx = []
with open(os.path.join(directory, test_file), 'r') as file:
for line in file:
test_idx.append(int(line.rstrip()))
return train_idx, test_idx
def group_same_size(graphs, labels, graphs3d):
"""
group graphs of same size to same array
:param graphs: numpy array of shape (num_of_graphs) of numpy arrays of graphs adjacency matrix
:param labels: numpy array of labels
    :return: grouped graphs, labels and graphs3d. graphs has one entry per distinct graph size,
    where each entry is a numpy array of shape (number of graphs with this size, num vertex, num vertex, num vertex labels);
    labels and graphs3d are grouped the same way
"""
sizes = list(map(lambda t: t.shape[1], graphs))
indexes = np.argsort(sizes)
graphs = graphs[indexes]
labels = labels[indexes]
graphs3d = graphs3d[indexes]
r_graphs = []
r_labels = []
r_graphs3d = []
one_size = []
one_size_node = []
start = 0
size = graphs[0].shape[1]
for i in range(len(graphs)):
if graphs[i].shape[1] == size:
one_size.append(np.expand_dims(graphs[i], axis=0))
one_size_node.append(np.expand_dims(graphs3d[i], axis=0))
else:
r_graphs.append(np.concatenate(one_size, axis=0))
r_graphs3d.append(np.concatenate(one_size_node, axis=0))
r_labels.append(np.array(labels[start:i]))
start = i
one_size = []
one_size_node = []
size = graphs[i].shape[1]
one_size.append(np.expand_dims(graphs[i], axis=0))
one_size_node.append(np.expand_dims(graphs3d[i], axis=0))
r_graphs.append(np.concatenate(one_size, axis=0))
r_graphs3d.append(np.concatenate(one_size_node, axis=0))
r_labels.append(np.array(labels[start:]))
return r_graphs, r_labels, r_graphs3d
def QM9_group_same_size(graphs1d, graphs2d, graphs3d, labels):
"""
group graphs of same size to same array
:param graphs: numpy array of shape (num_of_graphs) of numpy arrays of graphs adjacency matrix
:param labels: numpy array of labels
    :return: grouped graphs1d, graphs2d, graphs3d and labels, each with one entry per distinct graph size,
    where each entry stacks the graphs (or labels) of that size along a new leading axis
"""
sizes = list(map(lambda t: t.shape[1], graphs2d))
indexes = np.argsort(sizes)
graphs1d = graphs1d[indexes]
graphs2d = graphs2d[indexes]
graphs3d = graphs3d[indexes]
labels = labels[indexes]
r_graphs1d, r_graphs2d ,r_graphs3d = [], [], []
r_labels = []
one_size1d, one_size2d, one_size3d = [],[],[]
start = 0
size = graphs2d[0].shape[-1]
for i in range(len(graphs2d)):
if graphs2d[i].shape[-1] == size:
one_size1d.append(np.expand_dims(graphs1d[i], axis=0))
one_size2d.append(np.expand_dims(graphs2d[i], axis=0))
one_size3d.append(np.expand_dims(graphs3d[i], axis=0))
else:
r_graphs1d.append(np.concatenate(one_size1d, axis=0))
r_graphs2d.append(np.concatenate(one_size2d, axis=0))
r_graphs3d.append(np.concatenate(one_size3d, axis=0))
r_labels.append(np.array(labels[start:i]))
start = i
one_size1d ,one_size2d ,one_size3d = [], [], []
size = graphs2d[i].shape[-1]
one_size1d.append(np.expand_dims(graphs1d[i], axis=0))
one_size2d.append(np.expand_dims(graphs2d[i], axis=0))
one_size3d.append(np.expand_dims(graphs3d[i], axis=0))
r_graphs1d.append(np.concatenate(one_size1d, axis=0))
r_graphs2d.append(np.concatenate(one_size2d, axis=0))
r_graphs3d.append(np.concatenate(one_size3d, axis=0))
r_labels.append(np.array(labels[start:]))
return r_graphs1d, r_graphs2d, r_graphs3d, r_labels
# helper method to shuffle each same size graphs array
def shuffle_same_size(graphs, labels, graphs3d):
r_graphs, r_labels, r_graphs3d = [], [], []
for i in range(len(labels)):
curr_graph, curr_labels, curr_nodefeature = shuffle(graphs[i], labels[i], graphs3d[i])
r_graphs.append(curr_graph)
r_graphs3d.append(curr_nodefeature )
r_labels.append(curr_labels)
return r_graphs, r_labels, r_graphs3d
def QM9_shuffle_same_size(graphs1d, graphs2d, graphs3d, labels):
r_graphs1d, r_graphs2d, r_labels, r_graphs3d = [], [], [], []
for i in range(len(labels)):
curr_graph1d, curr_graph2d,curr_graph3d, curr_labels = QM9_shuffle(graphs1d[i], graphs2d[i], graphs3d[i],labels[i])
r_graphs1d.append(curr_graph1d)
r_graphs2d.append(curr_graph2d)
r_graphs3d.append(curr_graph3d )
r_labels.append(curr_labels)
return r_graphs1d, r_graphs2d, r_graphs3d, r_labels
def split_to_batches(graphs, labels, graphs3d, size):
"""
    split each same-size graphs array into batches of the specified size;
    the last batch has size num_of_graphs_this_size % size
    :param graphs: array of arrays of same size graphs
    :param labels: the corresponding labels of the graphs
    :param size: batch size
    :return: three arrays (graphs, labels, graphs3d). graphs is an array of batches of shape
    (batch, num vertex, num vertex, num vertex labels), together with the corresponding labels
"""
r_graphs = []
r_labels = []
r_graphs3d = []
for k in range(len(graphs)):
r_graphs = r_graphs + np.split(graphs[k], [j for j in range(size, graphs[k].shape[0], size)])
r_graphs3d = r_graphs3d + np.split(graphs3d[k], [j for j in range(size, graphs3d[k].shape[0], size)])
r_labels = r_labels + np.split(labels[k], [j for j in range(size, labels[k].shape[0], size)])
return np.array(r_graphs), np.array(r_labels), np.array(r_graphs3d)
def QM9_split_to_batches(graphs1d, graphs2d, graphs3d, labels, size):
"""
    split each same-size graphs array into batches of the specified size;
    the last batch has size num_of_graphs_this_size % size
    :param graphs: array of arrays of same size graphs
    :param labels: the corresponding labels of the graphs
    :param size: batch size
    :return: four arrays (graphs1d, graphs2d, graphs3d, labels), each split into batches of the same graphs,
    together with the corresponding labels
"""
r_graphs1d, r_graphs2d, r_graphs3d = [],[],[]
r_labels = []
for k in range(len(graphs2d)):
r_graphs1d = r_graphs1d + np.split(graphs1d[k], [j for j in range(size, graphs1d[k].shape[0], size)])
r_graphs2d = r_graphs2d + np.split(graphs2d[k], [j for j in range(size, graphs2d[k].shape[0], size)])
r_graphs3d = r_graphs3d + np.split(graphs3d[k], [j for j in range(size, graphs3d[k].shape[0], size)])
r_labels = r_labels + np.split(labels[k], [j for j in range(size, labels[k].shape[0], size)])
return np.array(r_graphs1d), np.array(r_graphs2d), np.array(r_graphs3d), np.array(r_labels)
# helper method to shuffle the same way graphs and labels arrays
def shuffle(graphs, labels, graphs3d):
shf = np.arange(labels.shape[0], dtype=np.int32)
#np.random.seed(1)
np.random.shuffle(shf)
return np.array(graphs)[shf], labels[shf], np.array(graphs3d)[shf]
def QM9_shuffle(graphs1d,graphs2d,graphs3d, labels):
shf = np.arange(labels.shape[0], dtype=np.int32)
#np.random.seed(1)
np.random.shuffle(shf)
return np.array(graphs1d)[shf],np.array(graphs2d)[shf] , np.array(graphs3d)[shf], labels[shf]
def noramlize_graph(curr_graph):
split = np.split(curr_graph, [1], axis=2)
adj = np.squeeze(split[0], axis=2)
deg = np.sqrt(np.sum(adj, 0))
deg = np.divide(1., deg, out=np.zeros_like(deg), where=deg != 0)
normal = np.diag(deg)
norm_adj = np.expand_dims(np.matmul(np.matmul(normal, adj), normal), axis=2)
ones = np.ones(shape=(curr_graph.shape[0], curr_graph.shape[1], curr_graph.shape[2]), dtype=np.float32)
spred_adj = np.multiply(ones, norm_adj)
labels = np.append(np.zeros(shape=(curr_graph.shape[0], curr_graph.shape[1], 1)), split[1], axis=2)
return np.add(spred_adj, labels)
def load_dataset3s(ds_name, upper=True):
graphs, adj_powers, graphs3d = [], [], []
labels = []
if ds_name == 'syn':
graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7]))
# graph_dict=dict(zip([5,6,9,12], [0.7,0.7,0.6,0.8,0.8,0.8,0.7]))
num_rep = [100, 100, 100, 200, 200, 200, 200]
for num, (k, v) in zip(num_rep, graph_dict.items()):
G = nx.erdos_renyi_graph(k, v, seed=1, directed=False)
adj = nx.linalg.graphmatrix.adjacency_matrix(G).toarray()
graphs.append(adj)
label = nx.clique.graph_clique_number(G)
if upper == False:
A = construct_A3(G)
adj_power = A_power(adj)
else:
A = construct_upperA3(G)
adj_power = A_power(adj)
graphs3d.append(A)
adj_powers.append(adj_power)
labels.append(label)
for graph in range(num):
node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random())))
G_new = nx.relabel_nodes(G, node_mapping)
adj_new = nx.linalg.graphmatrix.adjacency_matrix(G_new).toarray()
if upper == False:
A_new = construct_A3(G_new)
adj_power = A_power(adj_new)
else:
A_new = construct_upperA3(G_new)
adj_power = A_power(adj_new)
graphs.append(adj)
graphs3d.append(A_new)
adj_powers.append(adj_power)
labels.append(label)
if k == list(graph_dict.keys())[-1]:
zero = np.zeros((k + 1, k + 1, k + 1))
graphs.append(zero)
adj_powers.append(zero)
graphs3d.append(zero)
graphs = np.array(graphs)
labels = np.array(labels)
graphs3d = np.array(graphs3d)
adj_powers = np.array(adj_powers)
for i in range(graphs.shape[0]):
# graphs[i] = np.expand_dims(graphs[i], axis=0)
graphs[i] = adj_powers[i]
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
graphs = tf.ragged.constant(graphs).to_tensor().eval(session=tf.Session())
graphs3d = tf.ragged.constant(graphs3d).to_tensor().eval(session=tf.Session())
graphs = np.delete(graphs, -1, axis=0)
graphs3d = np.delete(graphs3d, -1, axis=0)
# graphs = np.delete(graphs, -1, axis=0)
le = preprocessing.LabelEncoder() # to find clique
le.fit(labels) # to find clique
labels = le.transform(labels) # to find clique
else:
directory = BASE_DIR + "/data/benchmark_graphs/{0}/{0}.txt".format(ds_name)
with open(directory, "r") as data:
num_graphs = int(data.readline().rstrip().split(" ")[0])
for i in range(num_graphs):
graph_meta = data.readline().rstrip().split(" ")
num_vertex = int(graph_meta[0])
curr_graph = np.zeros(shape=(num_vertex, num_vertex, NUM_LABELS[ds_name] + 1), dtype=np.float32)
labels.append(int(graph_meta[1])) # ori
for j in range(num_vertex):
vertex = data.readline().rstrip().split(" ")
if NUM_LABELS[ds_name] != 0:
curr_graph[j, j, int(vertex[0]) + 1] = 1.
for k in range(2, len(vertex)):
curr_graph[j, int(vertex[k]), 0] = 1.
# curr_graph = noramlize_graph(curr_graph)
graphs.append(curr_graph)
graphs = np.array(graphs)
for i in range(graphs.shape[0]):
graphs[i] = np.expand_dims(np.transpose(graphs[i], [2, 0, 1])[0], axis=0) # use only A
G = nx.from_numpy_array(graphs[i][0])
graphs[i] = construct_upperA3(G)
graphs[i] = np.expand_dims(graphs[i], axis=0)
labels = np.array(labels)
return graphs, labels, graphs3d
def load_dataset_3s_val(ds_name, upper):
graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7]))
# graph_dict=dict(zip([5,6,9,12], [0.7,0.7,0.6,0.8,0.8,0.8,0.7]))
num_rep = [20, 20, 20, 30, 30, 30, 30]
# graph_dict=dict(zip([5,6,9], [0.6,0.7,0.6]))
# num_rep=[3,3,3]
graphs, adj_powers, graphs3d = [], [], []
labels = []
for num, (k, v) in zip(num_rep, graph_dict.items()):
G = nx.erdos_renyi_graph(k, v, seed=1, directed=False)
label = nx.clique.graph_clique_number(G)
if upper == False:
A = construct_A3(G)
else:
A = construct_upperA3(G)
for graph in range(num):
node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random())))
G_new = nx.relabel_nodes(G, node_mapping)
u, v = random.sample(range(G_new.number_of_nodes() + 1), 2)
G_new.add_edge(u, v)
if G_new.number_of_edges() == G.number_of_edges() + 1:
if nx.clique.graph_clique_number(G_new) == label:
adj = nx.linalg.graphmatrix.adjacency_matrix(G_new).toarray()
if upper == False:
A_new = construct_A3(G_new)
adj_power = A_power(adj)
else:
A_new = construct_upperA3(G_new)
adj_power = A_power(adj)
graphs3d.append(A_new)
labels.append(label)
graphs.append(adj_power)
graphs = np.array(graphs)
labels = np.array(labels)
graphs3d = np.array(graphs3d)
# graphs = tf.ragged.constant(graphs).to_tensor().eval(session=tf.Session())
for i in range(graphs.shape[0]):
graphs[i] = np.expand_dims(graphs[i], axis=0)
graphs3d[i] = np.expand_dims(np.expand_dims(graphs3d[i], axis=0), axis=0)
graphs = tf.ragged.constant(graphs).to_tensor().eval(session=tf.Session())
graphs3d = tf.ragged.constant(graphs3d).to_tensor().eval(session=tf.Session())
# for i in range(graphs.shape[0]):
# graphs[i] = np.expand_dims(np.expand_dims(graphs[i], axis=0), axis=0)
# graphs3d[i] = np.expand_dims( np.expand_dims(graphs3d[i], axis=0), axis=0)
le = preprocessing.LabelEncoder() # to find clique
le.fit(labels) # to find clique
labels = le.transform(labels) # to find clique
return graphs, labels, graphs3d
def load_dataset3m(ds_name, upper):
import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()
graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7]))
num_rep = [100, 100, 100, 200, 200, 200, 200]
# graph_dict=dict(zip([5,6,9], [0.6,0.7,0.6]))
# num_rep=[3,3,3]
graphs = []
labels = []
for num, (k, v) in zip(num_rep, graph_dict.items()):
G = nx.erdos_renyi_graph(k, v, seed=1, directed=False)
label = nx.clique.graph_clique_number(G)
if upper == False:
A = construct_A3(G)
else:
A = construct_upperA3(G)
graphs.append(A)
labels.append(label)
for graph in range(num):
node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random())))
G_new = nx.relabel_nodes(G, node_mapping)
u, v = random.sample(range(G_new.number_of_nodes() + 1), 2)
G_new.add_edge(u, v)
if G_new.number_of_edges() == G.number_of_edges() + 1:
if nx.clique.graph_clique_number(G_new) == label:
if upper == False:
A_new = construct_A3(G_new)
else:
A_new = construct_upperA3(G_new)
graphs.append(A_new)
labels.append(label)
graphs = np.array(graphs)
labels = np.array(labels)
graphs = tf.ragged.constant(graphs).to_tensor().eval(session=tf.Session())
le = preprocessing.LabelEncoder() # to find clique
le.fit(labels) # to find clique
labels = le.transform(labels) # to find clique
# idx = np.where(labels == 4)[0] # balance data
# labels = np.delete(labels, idx[:700]) # labels = labels[:2000]
# graphs = np.delete(graphs, idx[:700], axis=0) # graphs= graphs[:2000]
return graphs, labels
def load_dataset3s_large(ds_name, upper):
graph_dict = dict(zip([7, 8, 9], [1, 1, 1, 1, 1, 1, 1]))
num_rep = [20, 20, 20, 50, 50, 200, 200]
graphs = []
labels = []
for num, (k, v) in zip(num_rep, graph_dict.items()):
G, label = construct_graph(k, v, sub_size=1)
if upper == False:
A = construct_A3(G)
else:
A = construct_upperA3(G)
graphs.append(A)
labels.append(label)
for graph in range(num):
node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random())))
G_new = nx.relabel_nodes(G, node_mapping)
if upper == False:
A_new = construct_A3(G_new)
else:
A_new = construct_upperA3(G_new)
graphs.append(A_new)
labels.append(label)
graphs = np.array(graphs)
labels = np.array(labels)
max_dim = max([graph.shape[0] for graph in graphs]) + 1
for i in range(graphs.shape[0]):
padded = np.zeros((max_dim, max_dim, max_dim))
padded[:graphs[i].shape[0], :graphs[i].shape[1], :graphs[i].shape[2]] = graphs[i]
graphs[i] = padded
le = preprocessing.LabelEncoder() # to find clique
le.fit(labels) # to find clique
labels = le.transform(labels) # to find clique
return graphs, labels
def load_dataset_3s_large_val(ds_name, upper):
graph_dict = dict(zip([7, 8, 9], [1, 1, 1, 1, 1, 1, 1]))
num_rep = [15, 15, 15, 50, 50, 200, 200]
graphs = []
labels = []
for num, (k, v) in zip(num_rep, graph_dict.items()):
G, label = construct_graph(k, v, sub_size=1)
for graph in range(num):
node_mapping = dict(zip(G.nodes(), sorted(G.nodes(), key=lambda k: random.random())))
G_new = nx.relabel_nodes(G, node_mapping)
f, t = random.sample(range(G_new.number_of_nodes() + 1), 2)
G_new.add_edge(f, t)
f, t = random.sample(range(G_new.number_of_nodes() + 1), 2)
G_new.add_edge(f, t)
if G_new.number_of_edges() >= G.number_of_edges() + 1:
if upper == False:
A_new = construct_A3(G_new)
else:
A_new = construct_upperA3(G_new)
graphs.append(A_new)
labels.append(label)
graphs = np.array(graphs)
labels = np.array(labels)
max_dim = max([graph.shape[0] for graph in graphs])
for i in range(graphs.shape[0]):
padded = np.zeros((max_dim, max_dim, max_dim))
padded[:graphs[i].shape[0], :graphs[i].shape[1], :graphs[i].shape[2]] = graphs[i]
graphs[i] = padded
graphs = list(graphs)
for i in range(len(graphs)):
# graphs[i] = np.transpose(graphs[i], [2,0,1]) ## ori: use all features
graphs[i] = np.expand_dims(graphs[i], axis=0)
le = preprocessing.LabelEncoder() # to find clique
le.fit(labels) # to find clique
labels = le.transform(labels) # to find clique
return graphs, labels
def construct_graph(k, v, sub_size):
G = nx.erdos_renyi_graph(k, v, directed=False)
    sub_k, sub_v = int(k * sub_size), 0.1
G2 = nx.erdos_renyi_graph(sub_k, sub_v, directed=False)
G3 = nx.disjoint_union(G, G2)
G3.add_edge(G.number_of_nodes() - 1, G.number_of_nodes())
label = nx.clique.graph_clique_number(G3)
return G3, label
def get_cliques_by_length(G, length_clique):
""" Return the list of all cliques in an undirected graph G with length
equal to length_clique. """
cliques = []
for c in nx.enumerate_all_cliques(G):
if len(c) <= length_clique:
if len(c) == length_clique:
cliques.append(c)
else:
return cliques
# return empty list if nothing is found
return cliques
def construct_A3(G, length_clique=3):
tri = get_cliques_by_length(G, 3)
# print(tri)
nn = G.number_of_nodes()
A3 = np.zeros((nn, nn, nn), dtype='float32')
for i in tri:
perm = permutations(i)
for j in list(perm):
A3[j] = 1
return A3
def construct_upperA3(G, length_clique=3):
tri = get_cliques_by_length(G, 3)
# print(tri)
nn = G.number_of_nodes()
A3 = np.zeros((nn, nn, nn), dtype='float32')
for i in tri:
A3[tuple(i)] = 1
return A3
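# Example: for a graph that is a single triangle on nodes {0, 1, 2},
# construct_upperA3 sets only A3[0, 1, 2] = 1, whereas construct_A3 sets all
# 3! = 6 permutations of that index to 1.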
def motif(shape):
target = nx.Graph()
if shape == 'tree':
target.add_edge(1, 2)
target.add_edge(2, 3)
if shape == 'triangle':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(1, 3)
if shape == 'tail_triangle':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(1, 3)
target.add_edge(1, 4)
if shape == 'star':
target.add_edge(1, 2)
target.add_edge(1, 3)
target.add_edge(1, 4)
if shape == 'chain':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(3, 4)
if shape == 'box':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(3, 4)
target.add_edge(1, 4)
if shape == 'semi_clique':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(3, 4)
target.add_edge(1, 4)
target.add_edge(1, 3)
if shape == '4_clique':
target.add_edge(1, 2)
target.add_edge(2, 3)
target.add_edge(3, 4)
target.add_edge(1, 4)
target.add_edge(1, 3)
target.add_edge(2, 4)
return target
def high_order(g, target):
nn = g.number_of_nodes()
sub_node = []
if target.number_of_nodes() == 3:
A = np.zeros((nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
A[tuple(subg.nodes())] = 1
sub_node.append(tuple(subg.nodes()))
if target.number_of_nodes() == 4:
A = np.zeros((nn, nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
A[tuple(subg.nodes())] = 1
sub_node.append(tuple(subg.nodes()))
label = len(sub_node)
return A, label, sub_node
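# Example: with target = motif('triangle'), a 4-cycle gives label 0 (no induced
# triangle), while a triangle with one pendant node gives label 1, and A marks
# the node indices of each occurrence.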
def high_order2(g, target):
nn = g.number_of_nodes()
sub_node = []
if target.number_of_nodes() == 3:
A = np.zeros((nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
center_node = list(set(list(subg.edges)[0]).intersection(set(list(subg.edges)[1])))
edge_nodes = list(set(tuple(subg.nodes())).difference(set((center_node))))
A[center_node[0], edge_nodes[0], edge_nodes[1]] = 1
A[center_node[0], edge_nodes[1], edge_nodes[0]] = 1
A[edge_nodes[0], center_node[0], edge_nodes[1]] = 1
A[edge_nodes[1], center_node[0], edge_nodes[0]] = 1
sub_node.append(tuple(subg.nodes()))
if target.number_of_nodes() == 4:
A = np.zeros((nn, nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
A[tuple(subg.nodes())] = 1
sub_node.append(tuple(subg.nodes()))
label = len(sub_node)
return A, label, sub_node
def high_order3(g, target):
nn = g.number_of_nodes()
sub_node = []
if target.number_of_nodes() == 3:
A1, A2 = np.zeros((nn, nn, nn), dtype='float32'), np.zeros((nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
center_node = list(set(list(subg.edges)[0]).intersection(set(list(subg.edges)[1])))
edge_nodes = list(set(tuple(subg.nodes())).difference(set((center_node))))
A1[center_node[0], edge_nodes[0], edge_nodes[1]] = 1
A1[center_node[0], edge_nodes[1], edge_nodes[0]] = 1
A2[edge_nodes[0], center_node[0], edge_nodes[1]] = 2
A2[edge_nodes[1], center_node[0], edge_nodes[0]] = 2
sub_node.append(tuple(subg.nodes()))
if target.number_of_nodes() == 4:
A = np.zeros((nn, nn, nn, nn), dtype='float32')
for sub_nodes in combinations(g.nodes(), len(target.nodes())):
subg = g.subgraph(sub_nodes)
if nx.is_connected(subg) and nx.is_isomorphic(subg, target):
A[tuple(subg.nodes())] = 1
sub_node.append(tuple(subg.nodes()))
label = len(sub_node)
return A1, A2, label, sub_node
def multihead(ds_name, target_shape):
graphs, graphs3d, labels = [], [], []
if ds_name == 'syn':
target = motif(target_shape)
# graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7]))
# num_rep = [100, 100, 100, 200, 200, 200, 200]
graph_dict = dict(zip([8, 9, 9, 10, 10, 11, 11, 12, 13], [0.3, 0.3, 0.3, 0.3, 0.4, 0.3, 0.4, 0.2, 0.2]))
num_rep = [50, 50, 50, 50, 100, 100, 100, 100, 100, 100]
for num, (k, v) in zip(num_rep, graph_dict.items()):
for s in range(num):
G = nx.erdos_renyi_graph(k, v, seed=s, directed=False)
if nx.is_connected(G):
graph3d, label, _ = high_order(G, target)
# label = nx.clique.graph_clique_number(G)
labels.append(label)
graphs3d.append(graph3d)
adj = nx.linalg.graphmatrix.adjacency_matrix(G).toarray()
graphs.append(adj)
graphs = np.array(graphs)
graphs3d = np.array(graphs3d)
for i in range(graphs.shape[0]):
graphs[i] = np.expand_dims(graphs[i], axis=0)
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
# le = preprocessing.LabelEncoder() # to find clique
# le.fit(labels) # to find clique
# labels = le.transform(labels) # to find clique
else:
target = motif(target_shape)
directory = BASE_DIR + "/data/benchmark_graphs/{0}/{0}.txt".format(ds_name)
with open(directory, "r") as data:
num_graphs = int(data.readline().rstrip().split(" ")[0])
for i in range(num_graphs):
graph_meta = data.readline().rstrip().split(" ")
num_vertex = int(graph_meta[0])
curr_graph = np.zeros(shape=(num_vertex, num_vertex, NUM_LABELS[ds_name] + 1), dtype=np.float32)
labels.append(int(graph_meta[1])) # ori
for j in range(num_vertex):
vertex = data.readline().rstrip().split(" ")
if NUM_LABELS[ds_name] != 0:
curr_graph[j, j, int(vertex[0]) + 1] = 1.
for k in range(2, len(vertex)):
curr_graph[j, int(vertex[k]), 0] = 1.
# curr_graph = noramlize_graph(curr_graph)
graphs.append(curr_graph)
graphs = np.array(graphs)
labels = np.array(labels)
# dim = [graph.shape[0] for graph in graphs]
# sort = (sorted([(x, i) for (i, x) in enumerate(dim)], reverse=True)[:100])
# graphs = np.delete(graphs, ([sort[i][1] for i in range(len(sort))]), axis=0)
# labels = np.delete(labels, ([sort[i][1] for i in range(len(sort))]), axis=0)
for i in range(graphs.shape[0]):
graphs[i] = np.transpose(graphs[i], [2, 0, 1]) # use only A
G = nx.from_numpy_array(graphs[i][0])
graph3d, _, _ = high_order(G, target)
graphs3d.append(graph3d)
adj_powers = A_power(graphs[i][0])
graphs[i] = np.concatenate((graphs[i], adj_powers[1:]), axis=0)
graphs3d = np.array(graphs3d)
for i in range(graphs3d.shape[0]):
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
return graphs, np.array(labels), graphs3d
def gnn3(ds_name, target_shape):
graphs, graphs3d, labels, adj_powers =[], [], [], []
if ds_name == 'syn':
target = motif(target_shape)
# graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7]))
# num_rep = [100, 100, 100, 200, 200, 200, 200]
graph_dict = dict(zip([8, 9, 9, 10, 10, 11, 11, 12, 13], [0.3, 0.3, 0.3, 0.3, 0.4, 0.3, 0.4, 0.2, 0.2]))
num_rep = [50, 50, 50, 50, 100, 100, 100, 100, 100, 100]
for num, (k, v) in zip(num_rep, graph_dict.items()):
for s in range(num):
G = nx.erdos_renyi_graph(k, v, seed=s, directed=False)
if nx.is_connected(G):
graph3d, label, _ = high_order(G, target)
# label = nx.clique.graph_clique_number(G)
labels.append(label)
graphs3d.append(graph3d)
adj = nx.linalg.graphmatrix.adjacency_matrix(G).toarray()
graphs.append(adj)
graphs = np.array(graphs)
graphs3d = np.array(graphs3d)
for i in range(graphs.shape[0]):
graphs[i] = np.expand_dims(graphs[i], axis=0)
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
# le = preprocessing.LabelEncoder() # to find clique
# le.fit(labels) # to find clique
# labels = le.transform(labels) # to find clique
else:
target = motif(target_shape)
directory = BASE_DIR + "/data/benchmark_graphs/{0}/{0}.txt".format(ds_name)
with open(directory, "r") as data:
num_graphs = int(data.readline().rstrip().split(" ")[0])
for i in range(num_graphs):
graph_meta = data.readline().rstrip().split(" ")
num_vertex = int(graph_meta[0])
curr_graph = np.zeros(shape=(num_vertex, num_vertex, NUM_LABELS[ds_name] + 1), dtype=np.float32)
labels.append(int(graph_meta[1])) # ori
for j in range(num_vertex):
vertex = data.readline().rstrip().split(" ")
if NUM_LABELS[ds_name] != 0:
curr_graph[j, j, int(vertex[0]) + 1] = 1.
for k in range(2, len(vertex)):
curr_graph[j, int(vertex[k]), 0] = 1.
curr_graph = noramlize_graph(curr_graph)
graphs.append(curr_graph)
graphs = np.array(graphs)
labels = np.array(labels)
# dim = [graph.shape[0] for graph in graphs]
# sort = (sorted([(x, i) for (i, x) in enumerate(dim)], reverse=True)[:100])
# graphs = np.delete(graphs, ([sort[i][1] for i in range(len(sort))]), axis=0)
# labels = np.delete(labels, ([sort[i][1] for i in range(len(sort))]), axis=0)
for i in range(graphs.shape[0]):
graphs[i] = np.transpose(graphs[i], [2, 0, 1]) # use only A
G = nx.from_numpy_array(graphs[i][0])
graph3d, _, _ = high_order(G, target)
adj_power = A_power(graphs[i][0])
graphs3d.append(graph3d)
adj_powers.append(adj_power)
graphs3d = np.array(graphs3d)
adj_powers = np.array(adj_powers)
for i in range(graphs3d.shape[0]):
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
adj_powers[i] = np.expand_dims(adj_powers[i], axis=0)
graphs3d[i] = np.concatenate((graphs3d[i], adj_powers[i]), axis=0)
# graphs = tf.ragged.constant(graphs).to_tensor().eval(session=tf.Session())
# graphs3d = tf.ragged.constant(graphs3d).to_tensor().eval(session=tf.Session())
return graphs, np.array(labels), graphs3d
def gnn4(ds_name, target_shape):
graphs, graphs3d, labels, adj_powers =[], [], [], []
if ds_name == 'syn':
target = motif(target_shape)
# graph_dict = dict(zip([5, 6, 9, 12, 15, 16, 25], [0.7, 0.7, 0.6, 0.8, 0.8, 0.8, 0.7]))
# num_rep = [100, 100, 100, 200, 200, 200, 200]
graph_dict = dict(zip([8, 9, 9, 10, 10, 11, 11, 12, 13], [0.3, 0.3, 0.3, 0.3, 0.4, 0.3, 0.4, 0.2, 0.2]))
num_rep = [50, 50, 50, 50, 100, 100, 100, 100, 100, 100]
for num, (k, v) in zip(num_rep, graph_dict.items()):
for s in range(num):
G = nx.erdos_renyi_graph(k, v, seed=s, directed=False)
if nx.is_connected(G):
graph3d, label, _ = high_order(G, target)
# label = nx.clique.graph_clique_number(G)
labels.append(label)
graphs3d.append(graph3d)
adj = nx.linalg.graphmatrix.adjacency_matrix(G).toarray()
graphs.append(adj)
graphs = np.array(graphs)
graphs3d = np.array(graphs3d)
for i in range(graphs.shape[0]):
graphs[i] = np.expand_dims(graphs[i], axis=0)
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
# le = preprocessing.LabelEncoder() # to find clique
# le.fit(labels) # to find clique
# labels = le.transform(labels) # to find clique
else:
target = motif(target_shape)
directory = BASE_DIR + "/data/benchmark_graphs/{0}/{0}.txt".format(ds_name)
with open(directory, "r") as data:
num_graphs = int(data.readline().rstrip().split(" ")[0])
for i in range(num_graphs):
graph_meta = data.readline().rstrip().split(" ")
num_vertex = int(graph_meta[0])
curr_graph = np.zeros(shape=(num_vertex, num_vertex, NUM_LABELS[ds_name] + 1), dtype=np.float32)
labels.append(int(graph_meta[1])) # ori
for j in range(num_vertex):
vertex = data.readline().rstrip().split(" ")
if NUM_LABELS[ds_name] != 0:
curr_graph[j, j, int(vertex[0]) + 1] = 1.
for k in range(2, len(vertex)):
curr_graph[j, int(vertex[k]), 0] = 1.
#curr_graph = noramlize_graph(curr_graph)
graphs.append(curr_graph)
graphs = np.array(graphs)
labels = np.array(labels)
dim = [graph.shape[0] for graph in graphs]
sort = (sorted([(x, i) for (i, x) in enumerate(dim)], reverse=True)[:100])
graphs = np.delete(graphs, ([sort[i][1] for i in range(len(sort))]), axis=0)
labels = np.delete(labels, ([sort[i][1] for i in range(len(sort))]), axis=0)
for i in range(graphs.shape[0]):
graphs[i] = np.transpose(graphs[i], [2, 0, 1]) # use only A
G = nx.from_numpy_array(graphs[i][0])
graph3d, _, _ = high_order(G, target)
adj_power = A_power(graphs[i][0])
graphs3d.append(graph3d)
adj_powers.append(adj_power)
graphs3d = np.array(graphs3d)
adj_powers = np.array(adj_powers)
for i in range(graphs3d.shape[0]):
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
adj_powers[i] = np.expand_dims(adj_powers[i], axis=0)
graphs3d[i] = np.concatenate((graphs3d[i], adj_powers[i]), axis=0)
graphs[i] = np.einsum('ijj->ij', graphs[i][1:])
# graphs = tf.ragged.constant(graphs).to_tensor().eval(session=tf.Session())
# graphs3d = tf.ragged.constant(graphs3d).to_tensor().eval(session=tf.Session())
return graphs, np.array(labels), graphs3d
def load_qm9_aux(which_set, target_param,target_shape):
target = motif(target_shape)
base_path = BASE_DIR + "/data/QM9/QM9_{}.p".format(which_set)
graphs, graphs1d, graphs2d, graphs3d, labels, adj_powers =[], [], [], [],[], []
counter = 0
with open(base_path, 'rb') as f:
data = pickle.load(f)
for instance in data:
counter += 1
if counter == 100:
break
labels.append(instance['y'])
nodes_num = instance['usable_features']['x'].shape[0]
graph = np.empty((nodes_num, nodes_num, 19))
for i in range(13):
# 13 features per node - for each, create a diag matrix of it as a feature
graph[:, :, i] = np.diag(instance['usable_features']['x'][:, i])
graph[:, :, 13] = instance['usable_features']['distance_mat']
graph[:, :, 14] = instance['usable_features']['affinity']
graph[:, :, 15:] = instance['usable_features']['edge_features'] # shape n x n x 4
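            # Resulting channel layout: 0-12 node features placed on the diagonal,
            # 13 pairwise distance matrix, 14 affinity (adjacency),
            # 15-18 the four edge-feature channels.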
graphs.append(graph)
graphs = np.array(graphs)
graphs_copy = copy.deepcopy(graphs)
labels = np.array(labels).squeeze() # shape N x 12
# if target_param is not False: # regression over a specific target, not all 12 elements
# labels = labels[:, target_param].reshape(-1, 1) # shape N x 1
for i in range(graphs.shape[0]):
graphs[i] = np.transpose(graphs[i], [2, 0, 1]) # use only A
G = nx.from_numpy_array(graphs[i][14])
graph3d, _, _ = high_order2(G, target)
adj_power = A_power(graphs[i][14])
graphs3d.append(graph3d)
adj_powers.append(adj_power)
graph1d = graphs[i][:13]
graph1d = np.einsum('ijj->ij', graph1d)
graphs_copy[i] = graph1d
# graphs[i] = graphs[i]
graphs[i] = graphs[i][13:]
# graphs[i][0] = normalize(graphs[i][0])
# graphs[i][1] = normalize(graphs[i][1])
graphs3d = np.array(graphs3d)
adj_powers = np.array(adj_powers)
for i in range(graphs3d.shape[0]):
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
adj_powers[i] = np.expand_dims(adj_powers[i], axis=0)
graphs3d[i] = np.concatenate((graphs3d[i], adj_powers[i]), axis=0)
return graphs_copy, graphs, graphs3d, labels
def load_qm9(target_param,target_shape):
"""
Constructs the graphs and labels of QM9 data set, already split to train, val and test sets
:return: 6 numpy arrays:
train_graphs: N_train,
train_labels: N_train x 12, (or Nx1 is target_param is not False)
val_graphs: N_val,
val_labels: N_train x 12, (or Nx1 is target_param is not False)
test_graphs: N_test,
test_labels: N_test x 12, (or Nx1 is target_param is not False)
each graph of shape: 19 x Nodes x Nodes (CHW representation)
"""
train_graphs1d, train_graphs2d, train_graphs3d, train_labels = load_qm9_aux('train', target_param,target_shape)
val_graphs1d, val_graphs2d, val_graphs3d, val_labels = load_qm9_aux('val', target_param,target_shape)
test_graphs1d, test_graphs2d, test_graphs3d, test_labels = load_qm9_aux('test', target_param,target_shape)
return train_graphs1d, train_graphs2d, train_graphs3d, train_labels, val_graphs1d, val_graphs2d, val_graphs3d, val_labels, test_graphs1d, test_graphs2d, test_graphs3d, test_labels
def load_qm9_aux_gnn3(which_set, target_param, target_shape):
target = motif(target_shape)
base_path = BASE_DIR + "/data/QM9/QM9_{}.p".format(which_set)
graphs, graphs3d, graphs3d2,labels, adj_powers =[], [], [], [],[]
counter = 0
with open(base_path, 'rb') as f:
data = pickle.load(f)
for instance in data:
#counter += 1
#if counter == 10000:
# break
labels.append(instance['y'])
nodes_num = instance['usable_features']['x'].shape[0]
graph = np.empty((nodes_num, nodes_num, 19))
for i in range(13):
# 13 features per node - for each, create a diag matrix of it as a feature
graph[:, :, i] = np.diag(instance['usable_features']['x'][:, i])
graph[:, :, 13] = instance['usable_features']['distance_mat']
graph[:, :, 14] = instance['usable_features']['affinity']
graph[:, :, 15:] = instance['usable_features']['edge_features'] # shape n x n x 4
#for i in range(4):
# graph[:,:,i] += graph[:,:,i+15]
            #graphs.append(graph[:,:,:15])
            graphs.append(graph)  # keep the full 19-channel graph so the loop below (channels 13/14) has data
graphs = np.array(graphs)
labels = np.array(labels).squeeze() # shape N x 12
# if target_param is not False: # regression over a specific target, not all 12 elements
# labels = labels[:, target_param].reshape(-1, 1) # shape N x 1
for i in range(graphs.shape[0]):
graphs[i] = np.transpose(graphs[i], [2, 0, 1]) # use only A
G = nx.from_numpy_array(graphs[i][14])
# graph3d, graph3d2, _, _ = high_order3(G, target)
graph3d, _, _ = high_order2(G, target)
adj_power = A_power(graphs[i][14])
graphs3d.append(graph3d)
# graphs3d2.append(graph3d2)
adj_powers.append(adj_power)
# graphs[i][13] = normalize(graphs[i][13])
# graphs[i][14] = normalize(graphs[i][14])
graphs3d = np.array(graphs3d)
#graphs3d2 = np.array(graphs3d2)
adj_powers = np.array(adj_powers)
for i in range(graphs3d.shape[0]):
graphs3d[i] = np.expand_dims(graphs3d[i], axis=0)
adj_powers[i] = np.expand_dims(adj_powers[i], axis=0)
# graphs3d2[i] = np.expand_dims(graphs3d2[i], axis=0)
# graphs3d[i] = np.concatenate((graphs3d[i], graphs3d2[i], adj_powers[i]), axis=0)
graphs3d[i] = np.concatenate((graphs3d[i], adj_powers[i]), axis=0)
return graphs, labels, graphs3d
def load_qm9_gnn3(target_param,target_shape):
train_graphs2d, train_labels , train_graphs3d= load_qm9_aux_gnn3('train', target_param,target_shape)
val_graphs2d, val_labels, val_graphs3d = load_qm9_aux_gnn3('val', target_param,target_shape)
test_graphs2d, test_labels, test_graphs3d = load_qm9_aux_gnn3('test', target_param,target_shape)
return train_graphs2d, train_labels, train_graphs3d, val_graphs2d,val_labels, val_graphs3d, test_graphs2d, test_labels, test_graphs3d
def gnn1(ds_name, target_shape):
graphs = []
labels = []
if ds_name == 'syn':
target = motif(target_shape)
# graph_dict=dict(zip([5,6,6, 6, 7,8, 9, 9, 10,10], [0.7,0.4,0.5, 0.6, 0.4,0.4,0.4, 0.3, 0.4, 0.3]))
graph_dict = dict(zip([8, 9, 9, 10, 10, 11, 11, 12, 13], [0.3, 0.3, 0.3, 0.3, 0.4, 0.3, 0.4, 0.2, 0.2]))
num_rep = [50, 50, 50, 50, 100, 100, 100, 100, 100, 100]
for num, (k, v) in zip(num_rep, graph_dict.items()):
for s in range(num):
G = nx.erdos_renyi_graph(k, v, seed=s, directed=False)
if nx.is_connected(G):
graph, label, _ = high_order(G, target)
graphs.append(graph)
labels.append(label)
graphs = np.array(graphs)
labels = np.array(labels)
graphs = tf.ragged.constant(graphs).to_tensor().eval(session=tf.Session())
else:
target = motif(target_shape)
directory = BASE_DIR + "/data/benchmark_graphs/{0}/{0}.txt".format(ds_name)
with open(directory, "r") as data:
num_graphs = int(data.readline().rstrip().split(" ")[0])
for i in range(num_graphs):
graph_meta = data.readline().rstrip().split(" ")
num_vertex = int(graph_meta[0])
curr_graph = np.zeros(shape=(num_vertex, num_vertex, NUM_LABELS[ds_name] + 1), dtype=np.float32)
labels.append(int(graph_meta[1])) # ori
for j in range(num_vertex):
vertex = data.readline().rstrip().split(" ")
if NUM_LABELS[ds_name] != 0:
curr_graph[j, j, int(vertex[0]) + 1] = 1.
for k in range(2, len(vertex)):
curr_graph[j, int(vertex[k]), 0] = 1.
# curr_graph = noramlize_graph(curr_graph)
graphs.append(curr_graph)
graphs = np.array(graphs)
for i in range(graphs.shape[0]):
graphs[i] = np.transpose(graphs[i], [2, 0, 1]) # use only A
G = nx.from_numpy_array(graphs[i][0])
graph, _, _ = high_order(G, target)
graphs[i] = np.expand_dims(graph, axis=0)
return graphs, np.array(labels)
if __name__ == '__main__':
graphs, labels = load_dataset("MUTAG")
a, b = get_train_val_indexes(1, "MUTAG")
print(np.transpose(graphs[a[0]], [1, 2, 0])[0])
```
#### File: GNN/main_scripts/main_qm9_experiment.py
```python
import os
import sys
import copy
"""
How To:
Example for running from command line:
python <path_to>/ProvablyPowerfulGraphNetworks/main_scripts/main_qm9_experiment.py --config=configs/qm9_config.json
"""
# Change working directory to project's main directory, and add it to path - for library and config usages
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(project_dir)
os.chdir(project_dir)
import importlib
from data_loader.data_generator import DataGenerator, QM9_DataGenerator, QM9_DataGenerator_gnn3
from models.invariant_basic import invariant_basic, QM9_invariant_basic,QM9_invariant_basic2, QM9_invariant_basic_gnn3
from trainers.trainer import Trainer, QM9_Trainer, QM9_Trainer_gnn3
import trainers.trainer as trainers
importlib.reload(trainers)
from Utils.config import process_config
from Utils.dirs import create_dirs
from Utils import doc_utils
from Utils.utils import get_args
import tensorflow.compat.v1 as tf
import pandas as pd
tf.disable_eager_execution()
import random
def parametersearch():
# capture the config path from the run arguments
# then process the json configuration file
config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json')
data = QM9_DataGenerator(config)
    train_labels_ori, val_labels_ori, test_labels_ori = copy.deepcopy(data.train_labels), copy.deepcopy(data.val_labels), copy.deepcopy(data.test_labels)
data.train_labels, data.val_labels, data.test_labels = train_labels_ori[:, config.target_param].reshape(-1, 1),\
val_labels_ori[:, config.target_param].reshape(-1, 1), test_labels_ori[:, config.target_param].reshape(-1, 1)
base_summary_folder = config.summary_dir
base_exp_name = config.exp_name
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import numpy as np
tf.set_random_seed(1)
# for lr in [0.00008 * (2 ** i) for i in range(2, 8)]:
param_grid = {
'learning_rate': list(np.logspace(np.log10(0.00005), np.log10(0.1), base=10, num=1000)),
'architecture1d': list(range(5, 500, 10)),
'architecture2d': list(range(5, 500, 10)),
'architecture3d': list(range(5, 500, 10)),
}
LR, A1, A2, A3 = [], [], [], []
for expe in range(5) :
hyperparameters = {k: random.sample(v, 1)[0] for k, v in param_grid.items()}
lr, a1, a2, a3 = hyperparameters['learning_rate'], hyperparameters['architecture1d'], hyperparameters['architecture2d'], hyperparameters['architecture3d']
LR.append(lr), A1.append(a1), A2.append(a2), A3.append(a3)
config.exp_name = base_exp_name + "lr={0}_a1={1}_a2={2}=_a3={3}".format(lr, a1, a2, a3)
curr_dir = os.path.join(base_summary_folder,
"lr={0}_a1={1}_a2={2}=_a3={3}".format(lr, a1, a2, a3))
config.summary_dir = curr_dir
# create your data generator
data.config.learning_rate = lr
data.config.architecture1d = [a1]
data.config.architecture2d = [a2]
data.config.architecture3d = [a3]
create_dirs([config.summary_dir, config.checkpoint_dir])
doc_utils.doc_used_config(config)
# create your data generator
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = QM9_invariant_basic_gnn3(config, data)
# create trainer and pass all the previous components to it
trainer = QM9_Trainer_gnn3(sess, model, data, config)
# here you train your model
trainer.train()
sess.close()
tf.reset_default_graph()
import pandas as pd
def summary_10fold_results(summary_dir):
df = pd.read_csv(summary_dir+"/per_epoch_stats.csv")
acc = np.array(df["val_accuracy"])
for i in range(len(acc)):
acc[i] = float(''.join(list(acc[i])[1:-1]))
print("Results")
print("Mean MAR = {0}".format(np.mean(acc)))
# print("Mean std = {0}".format(np.std(acc)))
#for lr in [0.00008 * (2 ** i) for i in range(2, 8)]:
for lr in [0.00008*(2**i) for i in range(2,8)]:
dir = base_exp_name + "lr={0}".format(lr)
print('lr:' + str(lr))
summary_10fold_results(dir)
def main():
# capture the config path from the run arguments
# then process the json configuration file
config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/example.json')
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import numpy as np
tf.set_random_seed(1)
print("lr = {0}".format(config.learning_rate))
print("decay = {0}".format(config.decay_rate))
if config.target_param is not False: # (0 == False) while (0 is not False)
print("target parameter: {0}".format(config.target_param))
# create the experiments dirs
create_dirs([config.summary_dir, config.checkpoint_dir])
doc_utils.doc_used_config(config)
data = QM9_DataGenerator(config)
    train_labels_ori, val_labels_ori, test_labels_ori = copy.deepcopy(data.train_labels), copy.deepcopy(data.val_labels), copy.deepcopy(data.test_labels)
    data.train_labels, data.val_labels, data.test_labels = train_labels_ori[:, config.target_param].reshape(-1, 1),\
        val_labels_ori[:, config.target_param].reshape(-1, 1), test_labels_ori[:, config.target_param].reshape(-1, 1)
val_labels_ori[:, config.target_param].reshape(-1, 1), test_labels_ori[:, config.target_param].reshape(-1, 1)
# create your data generator
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = QM9_invariant_basic(config, data)
# create trainer and pass all the previous components to it
trainer = trainers.QM9_Trainer(sess, model, data, config)
# here you train your model
trainer.train()
# test model, restore best model
test_dists, test_loss, pred= trainer.test(load_best_model=True)
sess.close()
tf.reset_default_graph()
doc_utils.summary_qm9_results(config.summary_dir, test_dists, test_loss, trainer.best_epoch)
############## gnn 3 ###############
config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/example.json')
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import numpy as np
tf.set_random_seed(1)
print("lr = {0}".format(config.learning_rate))
print("decay = {0}".format(config.decay_rate))
if config.target_param is not False: # (0 == False) while (0 is not False)
print("target parameter: {0}".format(config.target_param))
# create the experiments dirs
create_dirs([config.summary_dir, config.checkpoint_dir])
doc_utils.doc_used_config(config)
data = QM9_DataGenerator_gnn3(config)
train_labels_ori, val_labels_ori, test_labels_ori = copy.deepcopy(data.train_labels), copy.deepcopy(data.val_labels), copy.deepcopy(data.test_labels),
data.train_labels, data.val_labels, data.test_labels = train_labels_ori[:, config.target_param].reshape(-1, 1),\
val_labels_ori[:, config.target_param].reshape(-1, 1), test_labels_ori[:, config.target_param].reshape(-1, 1)
# create your data generator
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = QM9_invariant_basic_gnn3(config, data)
# create trainer and pass all the previous components to it
trainer = trainers.QM9_Trainer_gnn3(sess, model, data, config)
# here you train your model
trainer.train()
# test model, restore best model
test_dists, test_loss, pred= trainer.test(load_best_model=True)
sess.close()
tf.reset_default_graph()
def parametersearch_gnn3():
# capture the config path from the run arguments
# then process the json configuration file
config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json')
data = QM9_DataGenerator_gnn3(config)
    train_labels_ori, val_labels_ori, test_labels_ori = copy.deepcopy(data.train_labels), copy.deepcopy(data.val_labels), copy.deepcopy(data.test_labels)
    data.train_labels, data.val_labels, data.test_labels = train_labels_ori[:, config.target_param].reshape(-1, 1),\
        val_labels_ori[:, config.target_param].reshape(-1, 1), test_labels_ori[:, config.target_param].reshape(-1, 1)
base_summary_folder = config.summary_dir
base_exp_name = config.exp_name
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import numpy as np
tf.set_random_seed(1)
# for lr in [0.00008 * (2 ** i) for i in range(2, 8)]:
param_grid = {
'learning_rate': list(np.logspace(np.log10(0.00005), np.log10(0.1), base=10, num=1000)),
'architecture1d': [100],
'architecture2d': [100],
'architecture3d': [100],
}
for lr in [0.00008 * (2 ** i) for i in range(2, 8)]:
config.exp_name = base_exp_name + "lr={0}".format(lr)
curr_dir = os.path.join(base_summary_folder,
"lr={0}".format(lr))
config.summary_dir = curr_dir
# create your data generator
data.config.learning_rate = lr
create_dirs([config.summary_dir, config.checkpoint_dir])
doc_utils.doc_used_config(config)
# create your data generator
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = QM9_invariant_basic_gnn3(config, data)
# create trainer and pass all the previous components to it
trainer = trainers.QM9_Trainer_gnn3(sess, model, data, config)
# here you train your model
trainer.train()
sess.close()
tf.reset_default_graph()
for lr in [0.00008*(2**i) for i in range(2,8)]:
dir = base_exp_name + "lr={0}".format(lr)
print('lr:' + str(lr))
summary_10fold_results(dir)
```
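The random search in `parametersearch` draws one value per hyperparameter from `param_grid` for every trial. A minimal, self-contained sketch of that sampling pattern (the grid values here are illustrative, not the ones used above):

```python
import random

import numpy as np

param_grid = {
    "learning_rate": list(np.logspace(np.log10(5e-5), np.log10(0.1), base=10, num=100)),
    "architecture1d": list(range(5, 500, 10)),
    "architecture2d": list(range(5, 500, 10)),
}

random.seed(0)
for trial in range(3):
    # draw one value per hyperparameter independently, as the loop above does
    sampled = {k: random.sample(v, 1)[0] for k, v in param_grid.items()}
    print(trial, sampled)
```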
#### File: GNN/Utils/utils.py
```python
import argparse
def get_args():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-c', '--config',
metavar='C',
default='None',
help='The Configuration file')
argparser.add_argument("--mode", default='client')
argparser.add_argument("--port", default=50722)
args = argparser.parse_args(args=[])
return args
``` |
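A short usage sketch for `get_args`. Pairing it with `process_config` from `Utils.config` (as the experiment scripts do) is shown with a placeholder path, since `parse_args(args=[])` means the `--config` default is always used:

```python
from Utils.utils import get_args
from Utils.config import process_config

args = get_args()                 # argv is ignored, so defaults apply: args.config == 'None'
config = process_config("configs/qm9_config.json")   # placeholder path, passed explicitly instead
```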
{
"source": "jiahfong/alr",
"score": 3
} |
#### File: alr/data/datasets.py
```python
import torch.utils.data as torchdata
import torchvision as tv
from torch import nn
from torch.nn import functional as F
from dataclasses import dataclass
from torch.nn.utils import weight_norm
from enum import Enum
from typing import Optional, Tuple
from torchvision import transforms
from pathlib import Path
@dataclass(frozen=True)
class DataDescription:
"""
Describes the attributes of this dataset.
"""
n_class: int
width: int
height: int
channels: int
class Dataset(Enum):
r"""
An enum class that provides convenient data retrieval.
Example:
>>> train, test = Dataset.MNIST.get()
>>> train_load = torch.utils.data.DataLoader(train, batch_size=32)
"""
MNIST = "MNIST"
FashionMNIST = "FashionMNIST"
# https://arxiv.org/pdf/1702.05373v1.pdf (Cohen et. al 2017)
# byMerge: a 47-class classification task.
# The merged classes, as suggested by
# the NIST, are for the letters C, I, J, K, L, M, O, P, S, U,
# V, W, X, Y and Z
EMNISTMerge = "EMNISTMerge"
# subset of bymerge
EMNISTBalanced = "EMNISTBalanced"
CIFAR10 = "CIFAR10"
CIFAR100 = "CIFAR100"
RepeatedMNIST = "RepeatedMNIST"
CINIC10 = "CINIC10"
def get(
self,
root: Optional[str] = "data",
raw: Optional[bool] = False,
augmentation: Optional[bool] = False,
) -> Tuple[torchdata.Dataset, torchdata.Dataset]:
r"""
Return (train, test) tuple of datasets.
Args:
root (str, optional): root path where data will be read from or downloaded to
raw (bool, optional): if `True`, then training set will not be transformed (i.e.
no normalisation, ToTensor, etc.); note, the test set *WILL* be transformed.
augmentation (bool, optional): whether to add standard augmentation: horizontal flips and
random cropping.
Returns:
tuple: a 2-tuple of (train, test) datasets
"""
assert not raw or not augmentation, "Cannot enable augmentation on raw dataset!"
regular_transform = [
transforms.ToTensor(),
transforms.Normalize(*self.normalisation_params),
]
test_transform = transforms.Compose(regular_transform)
standard_augmentation = [
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
]
if augmentation:
regular_transform = standard_augmentation + regular_transform
train_transform = transforms.Compose(regular_transform)
if raw:
train_params = dict(root=root, train=True, download=True)
else:
train_params = dict(
root=root, transform=train_transform, train=True, download=True
)
test_params = dict(
root=root, transform=test_transform, train=False, download=True
)
if self in {
Dataset.MNIST,
Dataset.FashionMNIST,
Dataset.CIFAR10,
Dataset.CIFAR100,
}:
train = getattr(tv.datasets, self.value)(**train_params)
test = getattr(tv.datasets, self.value)(**test_params)
elif self in {Dataset.EMNISTBalanced, Dataset.EMNISTMerge}:
split = "balanced" if self is Dataset.EMNISTBalanced else "bymerge"
train = tv.datasets.EMNIST(**train_params, split=split)
test = tv.datasets.EMNIST(**test_params, split=split)
elif self is Dataset.RepeatedMNIST:
train = tv.datasets.MNIST(**train_params)
test = tv.datasets.MNIST(**test_params)
train = torchdata.ConcatDataset([train] * 3)
elif self is Dataset.CINIC10:
from alr.data._cinic_indices import (
_cinic_test_indices,
_cinic_train_indices,
)
if raw:
train_transform = None
cinic_root = Path().home() / "data" / "cinic-10"
train = tv.datasets.ImageFolder(
str(cinic_root / "train"),
transform=train_transform,
)
valid = tv.datasets.ImageFolder(
str(cinic_root / "valid"),
transform=train_transform,
)
test = tv.datasets.ImageFolder(
str(cinic_root / "test"),
transform=test_transform,
)
train = torchdata.ConcatDataset((train, valid))
train = torchdata.Subset(train, _cinic_train_indices)
test = torchdata.Subset(test, _cinic_test_indices)
else:
raise ValueError(f"{self} dataset hasn't been implemented.")
return train, test
@property
def normalisation_params(self) -> Tuple[Tuple[float], Tuple[float]]:
r"""
Returns a tuple of channel mean and standard deviation of 0-1-scaled inputs.
I.e. the input is assumed to be in the range of 0-1.
Returns:
tuple: a 2-tuple of mean and standard deviation
"""
params = {
Dataset.MNIST: ((0.1307,), (0.3081,)),
Dataset.FashionMNIST: ((0.2860,), (0.3530,)),
Dataset.EMNISTMerge: ((0.1736,), (0.3317,)),
Dataset.EMNISTBalanced: ((0.1751,), (0.3332,)),
Dataset.CIFAR10: (
(0.49139968, 0.48215841, 0.44653091),
(0.2023, 0.1994, 0.2010),
),
Dataset.CIFAR100: (
(0.50707516, 0.48654887, 0.44091784),
(0.26733429, 0.25643846, 0.27615047),
),
Dataset.CINIC10: (
(0.47889522, 0.47227842, 0.43047404),
(0.24205776, 0.23828046, 0.25874835),
),
}
params[Dataset.RepeatedMNIST] = params[Dataset.MNIST]
return params[self]
@property
def about(self) -> DataDescription:
r"""
Returns information about this dataset including:
* n_class
* width
* height
* channels
Returns:
:class:`DataDescription`: information about this dataset
"""
params = {
Dataset.MNIST: DataDescription(10, 28, 28, 1),
Dataset.RepeatedMNIST: DataDescription(10, 28, 28, 1),
Dataset.FashionMNIST: DataDescription(10, 28, 28, 1),
Dataset.EMNISTBalanced: DataDescription(47, 28, 28, 1),
Dataset.EMNISTMerge: DataDescription(47, 28, 28, 1),
Dataset.CIFAR10: DataDescription(10, 32, 32, 3),
Dataset.CIFAR100: DataDescription(100, 32, 32, 3),
Dataset.CINIC10: DataDescription(10, 32, 32, 3),
}
return params[self]
def get_fixed(
self,
root: Optional[str] = "data",
which: Optional[int] = 0,
raw: Optional[bool] = False,
) -> Tuple[torchdata.Dataset, torchdata.Dataset, torchdata.Dataset]:
r"""
Returns a fixed train, pool, and test datasets. This is only used for experiments.
Args:
root (str, optional): root path where data will be read from or downloaded to.
which (int, optional): there are multiple possible sets of fixed points for a given dataset.
This argument specifies which of the multiple possible ones to choose from.
raw (bool, optional): similar to :meth:`get`, train will not contain any
transform whatsoever. (Test will still have ToTensor and Normalisation.)
Returns:
tuple: A tuple of train, pool, and test datasets.
"""
if self is Dataset.MNIST:
idx_set = _mnist_20
elif self is Dataset.CIFAR10:
idx_set = _cifar10_1
else:
raise NotImplementedError(f"Fixed points for {self} is not available yet.")
train, test = self.get(root, raw=raw)
assert which < len(
idx_set
), f"Only {len(idx_set)} sets are available for {self}."
idxs = idx_set[which]
cidxs = set(range(len(train))) - set(idxs)
pool = torchdata.Subset(train, list(cidxs))
train = torchdata.Subset(train, idxs)
return train, pool, test
@property
def get_augmentation(self):
if self is not Dataset.CIFAR10:
raise NotImplementedError(f"get_augmentation not available for {self}")
data_augmentation = tv.transforms.Compose(
[
tv.transforms.Pad(2, padding_mode="reflect"),
tv.transforms.ColorJitter(
brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1
),
tv.transforms.RandomCrop(32),
tv.transforms.RandomHorizontalFlip(),
]
)
return data_augmentation
@property
def model(self) -> nn.Module:
r"""
Returns a canonical model architecture for a given dataset.
Returns:
torch.nn.Module: a pytorch model
"""
if self in {Dataset.MNIST, Dataset.RepeatedMNIST}:
return MNISTNet()
if self == Dataset.CIFAR10:
return CIFAR10Net()
raise NotImplementedError("No model defined for this dataset yet.")
# 24 sets of 20 points with +- 0.01 accuracy from median: 0.65205
_mnist_20 = [
(
33479,
3162,
19169,
11495,
15914,
20426,
21495,
52396,
16515,
19727,
52720,
12424,
30690,
58679,
44785,
20682,
28513,
38932,
20408,
3801,
),
(
40920,
33826,
59132,
22914,
35401,
46080,
47822,
1925,
28716,
50867,
5126,
29159,
41470,
25508,
53476,
27472,
44436,
44587,
56388,
19129,
),
(
19641,
4578,
29287,
37320,
21474,
31142,
20221,
49296,
35922,
32416,
28365,
38441,
26961,
22063,
23629,
50925,
46201,
37567,
11815,
46561,
),
(
13506,
43404,
12223,
42233,
33552,
58117,
36527,
2797,
29225,
11150,
25582,
49413,
55733,
36569,
6531,
50308,
9405,
34069,
16092,
41826,
),
(
46938,
45343,
10338,
6631,
24919,
32224,
14440,
52834,
21392,
40344,
31691,
43258,
25017,
56908,
41200,
19552,
43623,
57612,
56061,
33019,
),
(
34380,
458,
27650,
18400,
36235,
34469,
31224,
52286,
22674,
49931,
5674,
18622,
2865,
30094,
37540,
1887,
47609,
37123,
17887,
59419,
),
(
55333,
56236,
54764,
31705,
45729,
26454,
15116,
45512,
42629,
35753,
11879,
4471,
42841,
23479,
22760,
1535,
30522,
32030,
6356,
31809,
),
(
654,
6458,
52794,
16987,
38175,
14182,
1679,
44590,
2654,
38630,
27540,
22150,
27289,
36348,
46448,
25692,
2592,
43035,
11195,
19245,
),
(
38608,
28958,
49076,
55452,
43257,
38931,
28884,
52759,
41098,
28558,
46660,
59685,
34048,
51456,
19143,
38580,
3850,
17198,
22749,
39503,
),
(
33674,
28825,
35042,
57790,
18797,
59202,
45838,
44119,
28229,
30357,
59087,
22074,
37914,
43639,
28235,
59731,
2687,
1710,
16031,
37424,
),
(
37041,
32464,
2182,
5105,
25852,
42029,
15667,
53665,
52299,
19278,
29439,
21215,
12811,
20382,
50605,
36311,
3196,
6964,
34140,
58381,
),
(
49580,
32583,
10860,
12029,
27952,
57306,
27114,
51904,
37445,
12358,
39175,
8044,
10086,
18826,
36491,
27013,
53208,
49325,
55150,
50527,
),
(
34791,
43564,
16453,
18335,
19112,
18183,
17212,
473,
58744,
20163,
22546,
58391,
26952,
39042,
12006,
48625,
26815,
49461,
6468,
6936,
),
(
47333,
32600,
7634,
15318,
3236,
43565,
34004,
47891,
52446,
5381,
27198,
56250,
44513,
57343,
6476,
27699,
23440,
14554,
42538,
58241,
),
(
32861,
43028,
23876,
54561,
20624,
22584,
2156,
5675,
25557,
38187,
4675,
5643,
31785,
39365,
55789,
11507,
50565,
14166,
46226,
2144,
),
(
52038,
47011,
35514,
36353,
13205,
26807,
37701,
24186,
22144,
8822,
39192,
30370,
42906,
19378,
9625,
44845,
37137,
13356,
28077,
36932,
),
(
28931,
58414,
34981,
23698,
23096,
24403,
32018,
38366,
54223,
33457,
7647,
22917,
11600,
48807,
39192,
47631,
16900,
15283,
14155,
55377,
),
(
49969,
31620,
56337,
19699,
49342,
12913,
43909,
145,
5575,
41365,
20196,
43690,
39055,
44785,
33422,
2819,
14811,
43261,
45203,
39170,
),
(
52645,
41154,
43574,
26144,
17243,
51196,
21418,
21816,
54635,
13619,
2874,
17124,
16391,
45504,
55157,
13527,
33756,
45948,
21693,
3374,
),
(
36700,
12636,
35933,
9290,
975,
42757,
5197,
41484,
11101,
10798,
19309,
4748,
38047,
34424,
42575,
38653,
43514,
36621,
35862,
28877,
),
(
20398,
27976,
25154,
54951,
18249,
20,
55911,
55500,
186,
38592,
48834,
4119,
11926,
25099,
54824,
48339,
43320,
3754,
24752,
11457,
),
(
46959,
13392,
8626,
55276,
26976,
33992,
16264,
44518,
30741,
39375,
34387,
4537,
12291,
6658,
20542,
18832,
44508,
20867,
30517,
37982,
),
(
26754,
6166,
16478,
1561,
59790,
9000,
43538,
4868,
34394,
21017,
37970,
14324,
46481,
52564,
40462,
50910,
48934,
2070,
3811,
21865,
),
(
13369,
54382,
20231,
14627,
43491,
15178,
2253,
14073,
31816,
1870,
34302,
5359,
36903,
41308,
45210,
50448,
21174,
57606,
22846,
54399,
),
]
_cifar10_1 = [
(
33553,
9427,
199,
12447,
39489,
42724,
10822,
49498,
36958,
43106,
38695,
1414,
18471,
15118,
13466,
26497,
24148,
41514,
30263,
24712,
)
]
class MNISTNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 5)
# 32 24 24
self.dropout1 = nn.Dropout2d()
# maxpool --
# 32 12 12
self.conv2 = nn.Conv2d(32, 64, 5)
# 64 8 8
self.dropout2 = nn.Dropout2d()
# maxpool --
# 64 4 4
self.fc1 = nn.Linear(64 * 4 * 4, 128)
self.dropout3 = nn.Dropout()
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = F.max_pool2d(self.dropout1(F.relu(self.conv1(x))), 2)
x = F.max_pool2d(self.dropout2(F.relu(self.conv2(x))), 2)
x = x.view(-1, 64 * 4 * 4)
x = self.fc2(self.dropout3(F.relu(self.fc1(x))))
return F.log_softmax(x, dim=1)
class CIFAR10Net(nn.Module):
"""
CNN from Mean Teacher paper
# taken from: https://github.com/EricArazo/PseudoLabeling/blob/2fbbbd3ca648cae453e3659e2e2ed44f71be5906/utils_pseudoLab/ssl_networks.py
"""
def __init__(self, num_classes=10, drop_prob=0.5):
super(CIFAR10Net, self).__init__()
self.activation = nn.LeakyReLU(0.1)
self.conv1a = weight_norm(nn.Conv2d(3, 128, 3, padding=1))
self.bn1a = nn.BatchNorm2d(128)
self.conv1b = weight_norm(nn.Conv2d(128, 128, 3, padding=1))
self.bn1b = nn.BatchNorm2d(128)
self.conv1c = weight_norm(nn.Conv2d(128, 128, 3, padding=1))
self.bn1c = nn.BatchNorm2d(128)
self.mp1 = nn.MaxPool2d(2, stride=2, padding=0)
self.drop = nn.Dropout(drop_prob)
self.conv2a = weight_norm(nn.Conv2d(128, 256, 3, padding=1))
self.bn2a = nn.BatchNorm2d(256)
self.conv2b = weight_norm(nn.Conv2d(256, 256, 3, padding=1))
self.bn2b = nn.BatchNorm2d(256)
self.conv2c = weight_norm(nn.Conv2d(256, 256, 3, padding=1))
self.bn2c = nn.BatchNorm2d(256)
self.mp2 = nn.MaxPool2d(2, stride=2, padding=0)
self.conv3a = weight_norm(nn.Conv2d(256, 512, 3, padding=0))
self.bn3a = nn.BatchNorm2d(512)
self.conv3b = weight_norm(nn.Conv2d(512, 256, 1, padding=0))
self.bn3b = nn.BatchNorm2d(256)
self.conv3c = weight_norm(nn.Conv2d(256, 128, 1, padding=0))
self.bn3c = nn.BatchNorm2d(128)
self.ap3 = nn.AvgPool2d(6, stride=2, padding=0)
self.fc1 = weight_norm(nn.Linear(128, num_classes))
def forward(self, x):
x = self.activation(self.bn1a(self.conv1a(x)))
x = self.activation(self.bn1b(self.conv1b(x)))
x = self.activation(self.bn1c(self.conv1c(x)))
x = self.mp1(x)
x = self.drop(x)
x = self.activation(self.bn2a(self.conv2a(x)))
x = self.activation(self.bn2b(self.conv2b(x)))
x = self.activation(self.bn2c(self.conv2c(x)))
x = self.mp2(x)
x = self.drop(x)
x = self.activation(self.bn3a(self.conv3a(x)))
x = self.activation(self.bn3b(self.conv3b(x)))
x = self.activation(self.bn3c(self.conv3c(x)))
x = self.ap3(x)
x = x.view(-1, 128)
return F.log_softmax(self.fc1(x), dim=-1)
```
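A minimal usage sketch for the `Dataset` helper above, following the doctest in the enum's docstring and the fixed 20-point MNIST split used in the experiments:

```python
import torch.utils.data as torchdata

from alr.data.datasets import Dataset

# fixed split 0 of the 24 available 20-point MNIST sets
train, pool, test = Dataset.MNIST.get_fixed(which=0)
train_loader = torchdata.DataLoader(train, batch_size=20, shuffle=True)
test_loader = torchdata.DataLoader(test, batch_size=1024, shuffle=False)

print(Dataset.MNIST.about)   # DataDescription(n_class=10, width=28, height=28, channels=1)
model = Dataset.MNIST.model  # the MNISTNet defined above
```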
#### File: alr/alr/__init__.py
```python
import copy
import math
from abc import ABC, abstractmethod
from typing import Optional, Callable
import torch
from torch import nn
from alr.acquisition import AcquisitionFunction
from alr.modules.dropout import replace_dropout, replace_consistent_dropout
from alr.utils import range_progress_bar, progress_bar
from alr.utils._type_aliases import _DeviceType
__version__ = "0.0.0b8"
class ALRModel(nn.Module, ABC):
def __init__(self):
"""
A :class:`ALRModel` provides generic methods required for common
operations in active learning experiments.
"""
super(ALRModel, self).__init__()
self._snapshot = None
@abstractmethod
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Regular forward pass. Usually reserved for training.
:param x: input tensor
:type x: `torch.Tensor`
:return: output tensor
:rtype: `torch.Tensor`
"""
pass
def predict(self, x: torch.Tensor) -> torch.Tensor:
"""
Sets the model mode to eval and calls forward.
:param x: input tensor
:type x: `torch.Tensor`
:return: output tensor
:rtype: `torch.Tensor`
"""
self.eval()
return self(x)
def reset_weights(self) -> None:
"""
Resets the model's weights to the last saved snapshot.
:return: None
:rtype: NoneType
"""
assert self._snapshot is not None, "Snapshot was never taken"
self.load_state_dict(self._snapshot, strict=True)
def snap(self) -> None:
r"""
Take and store a snapshot of the current state.
Returns:
NoneType: None
"""
# update snapshot
self._snapshot = copy.deepcopy(self.state_dict())
class MCDropout(ALRModel):
def __init__(
self,
model: nn.Module,
forward: Optional[int] = 100,
reduce: Optional[str] = "logsumexp",
inplace: Optional[bool] = True,
output_transform: Optional[Callable[[torch.Tensor], torch.Tensor]] = None,
fast: Optional[bool] = False,
consistent: Optional[bool] = False,
):
r"""
A wrapper that turns a regular PyTorch module into one that implements
`Monte Carlo Dropout <https://arxiv.org/abs/1506.02142>`_ (Gal & Ghahramani, 2016).
Args:
model (`nn.Module`): `torch.nn.Module` object. This model's forward pass
should return (log) probabilities. I.e. the final layer should
be `softmax` or `log_softmax`. Otherwise, `output_transform` can
be used to convert `model`'s output into probabilities.
forward (int, optional): number of stochastic forward passes
reduce (str, optional): either `"logsumexp"` or `"mean"`. This is used to reduce the
n `forward` stochastic passes during evaluation. If `model` or `output_transform`
returns probabilities (i.e. `F.softmax`), this should be `"mean"`;
otherwise it should be "logsumexp" if they return log-probabilities (i.e. `F.log_softmax`).
[default = `"logsumexp"`]
inplace (bool, optional): if `True`, the `model` is modified *in-place* when the dropout layers are
replaced. If `False`, `model` is not modified and a new model is cloned.
output_transform (callable, optional): model's output is given as input and the output of this
callable is expected to return (log) probabilities.
fast (bool, optional): if true, :meth:`stochastic_forward` will stack the batch dimension for faster
MC dropout passes. If false, then forward passes are called in a for-loop. Note,
the former will consume `forward` times more memory.
consistent (bool, optional): if true, the dropout layers will be replaced with consistent variants.
Attributes:
base_model (`nn.Module`): provided base model (a clone if `inplace=True`)
n_forward (int): number of forward passes (`forward`)
"""
super(MCDropout, self).__init__()
if consistent:
self.base_model = replace_consistent_dropout(model, inplace=inplace)
else:
self.base_model = replace_dropout(model, inplace=inplace)
self.n_forward = forward
self._output_transform = (
output_transform if output_transform is not None else lambda x: x
)
self._reduce = reduce.lower()
assert self._reduce in {"logsumexp", "mean"}
self._fast = fast
self.snap()
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""
Forward pass. *Note, this function has a different behaviour in eval mode*.
It returns the (log) mean score of :meth:`stochastic_forward` passes. In other words,
if `self.training` is `False`, the following is returned instead:
.. code:: python
# if reduce = "logsumexp"
torch.logsumexp(self.stochastic_forward(x), dim=0) - log(self.n_forward)
# if reduce = "mean"
torch.mean(self.stochastic_forward(x), dim=0)
Args:
x (`torch.Tensor`): input tensor, any size
Returns:
`torch.Tensor`:
output tensor of size :math:`N \times C` where
:math:`N` is the batch size and :math:`C` is the number of target classes.
Note:
if a single forward pass is required during eval mode, one could use the following
instead: `base_model(x)`
"""
if self.training:
return self._output_transform(self.base_model(x))
if self._reduce == "mean":
return torch.mean(self.stochastic_forward(x), dim=0)
# if self._reduce == "logsumexp"
return torch.logsumexp(self.stochastic_forward(x), dim=0) - math.log(
self.n_forward
)
def stochastic_forward(self, x: torch.Tensor) -> torch.Tensor:
r"""
Returns a :math:`m \times N \times C` `torch.Tensor` where:
1. :math:`m` is equal to `self.n_forward`
2. :math:`N` is the batch size, equal to `x.size(0)`
3. :math:`C` is the number of units in the final layer (e.g. number of classes in a classification model)
Args:
x (`torch.Tensor`): input tensor
Returns:
`torch.Tensor`: output tensor of shape :math:`m \times N \times C`
Raises:
RuntimeError: Occurs when the machine runs out of memory and `fast` was set to true.
"""
if self._fast:
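            # fast mode: tile the batch n_forward times, run one large forward pass,
            # then un-flatten back to (n_forward, N, C); this trades memory for speed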
size = x.size()
x = self._repeat_n(x, self.n_forward)
assert x.size() == (size[0] * self.n_forward, *size[1:])
try:
preds = self._output_transform(self.base_model(x))
preds = preds.view(self.n_forward, -1, *preds.size()[1:])
except RuntimeError as e:
raise RuntimeError(
r"Ran out of memory. Try reducing batch size or"
"reducing the number of MC dropout samples. Alternatively, switch off"
"fast MC dropout."
) from e
else:
preds = torch.stack(
[
self._output_transform(self.base_model(x))
for _ in range(self.n_forward)
]
)
assert preds.size(0) == self.n_forward
return preds
@staticmethod
def _repeat_n(x: torch.Tensor, n: int) -> torch.Tensor:
r"""
Repeat the data in x `n` times along the batch dimension.
Args:
x (torch.Tensor): input tensor, the batch dimension is assumed to be 0.
n (int): number of repeats
Returns:
torch.Tensor: output tensor
Raises:
RuntimeError: Occurs when the machine runs out of memory.
"""
try:
out = x.repeat(n, *([1] * (x.ndim - 1)))
except RuntimeError as e:
raise RuntimeError(
r"Ran out of memory. Try reducing batch size or"
"reducing the number of MC dropout samples. Alternatively, switch off"
"fast MC dropout."
) from e
return out
```
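A small sketch of how `MCDropout` wraps a base model and what its forward passes return (shapes follow the docstrings above):

```python
import torch

from alr import MCDropout
from alr.data.datasets import Dataset

model = MCDropout(Dataset.MNIST.model, forward=20, fast=False)
x = torch.randn(8, 1, 28, 28)          # a batch of 8 MNIST-shaped inputs

model.train()
logp = model(x)                        # one stochastic pass: shape (8, 10)

model.eval()
mean_logp = model.predict(x)           # logsumexp over 20 passes minus log(20): shape (8, 10)
samples = model.stochastic_forward(x)  # all passes stacked: shape (20, 8, 10)
```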
#### File: training/diagnostics/__init__.py
```python
from pathlib import Path
import pickle
from typing import Optional
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
def jitter(x):
return x + np.random.normal(scale=0.13, size=(len(x),))
def feature_scale(arr):
mini, maxi = arr.min(), arr.max()
return (arr - mini) / (maxi - mini)
def confidence_plot(
confidences_E_N, proportions_E_N, axis, cmap: Optional[str] = "viridis"
):
assert confidences_E_N.shape == proportions_E_N.shape
cmap = cm.get_cmap(cmap)
E = confidences_E_N.shape[0]
for idx, (x, y) in enumerate(zip(confidences_E_N, proportions_E_N)):
axis.plot(x, y, label=f"epoch {idx + 1}", color=cmap(idx / E))
axis.set_title("Pseudo-label confidence on pool set")
axis.set_xlabel("Confidence threshold")
axis.set_ylabel("Proportion of predictions that\npass the confidence threshold")
def reliability_plot(
bins_E_M,
accuracies_E_N,
counts_E_N,
axis,
title: Optional[str] = "Reliability plot",
cmap: Optional[str] = "viridis",
):
assert accuracies_E_N.shape == counts_E_N.shape
cmap = cm.get_cmap(cmap)
E = bins_E_M.shape[0]
for idx, (x, y, c) in enumerate(zip(bins_E_M, accuracies_E_N, counts_E_N)):
y[c == 0] = np.nan
axis.scatter(
jitter(list(range(len(x) - 1))),
y,
label=f"epoch {idx + 1}",
color=cmap(idx / E),
)
bins = bins_E_M[0]
axis.set_xticklabels(
[f"({bins[idx]:.1f},{b:.1f}]" for idx, b in enumerate(bins[1:])], rotation=45
)
axis.set_xticks(range(len(bins) - 1))
axis.set_ylim(bottom=-0.05, top=1)
axis.set_ylabel("Accuracy of pseudo-label")
axis.set_xlabel("Confidence")
if title:
axis.set_title(title)
axis.set_yticks(np.arange(0, 1.1, 0.1))
axis.plot(
range(len(bins) - 1),
np.arange(0.1, 1.1, 0.1) - 0.05,
color="grey",
alpha=0.3,
linestyle="-.",
)
def reliability_hist_plot(
bins_E_M,
counts_E_N,
axis,
cmap: Optional[str] = "viridis",
xticklabels=True,
title="Confidence histogram",
bar=False,
):
cmap = cm.get_cmap(cmap)
E = bins_E_M.shape[0]
for idx, (x, y) in enumerate(zip(bins_E_M, counts_E_N)):
if bar:
axis.bar(
list(range(len(x) - 1)),
y / y.sum(),
label=f"epoch {idx + 1}",
color=cmap(idx / E),
)
else:
axis.scatter(
jitter(list(range(len(x) - 1))),
y / y.sum(),
label=f"epoch {idx + 1}",
color=cmap(idx / E),
)
bins = bins_E_M[0]
axis.set_xticklabels(
[f"({bins[idx]:.1f},{b:.1f}]" for idx, b in enumerate(bins[1:])], rotation=45
)
axis.set_ylim(top=1)
axis.set_ylabel("Proportion")
if xticklabels:
axis.set_xticks(range(len(bins) - 1))
axis.set_xlabel("Confidence")
else:
axis.set_xticks(())
axis.set_title(title)
# todo(harry): can accommodate iterations too
def ece_plot(ece_E, axis, label: Optional[str] = None, cmap: Optional[str] = "viridis"):
cmap = cm.get_cmap(cmap)
E = ece_E.shape[0]
if label:
axis.plot(range(1, E + 1), ece_E, label=label)
else:
axis.plot(range(1, E + 1), ece_E)
axis.set_title("Expected Calibration Error (ECE)")
axis.set_ylabel("ECE")
axis.set_xlabel("Epoch")
axis.set_xticks(range(1, E + 1))
axis.set_xticklabels(range(1, E + 1), rotation=45)
def plot_entropy(ent_E_N, num_classes, axis, cmap: Optional[str] = "viridis"):
cmap = cm.get_cmap(cmap)
bplot = axis.boxplot(ent_E_N.T, patch_artist=True, showfliers=False)
E = ent_E_N.shape[0]
max_ent = num_classes * ((-1 / num_classes) * np.log(1 / num_classes))
for e, patch in enumerate(bplot["boxes"]):
patch.set_facecolor(cmap(e / E))
axis.set_xlabel("Epoch")
axis.set_ylabel("Entropy")
axis.set_ylim(bottom=-0.05, top=max_ent)
axis.set_yticks(np.linspace(0, max_ent, 5))
axis.set_title("Entropy")
axis.set_xticklabels(range(1, E + 1), rotation=45)
# todo(harry): can accommodate iterations too
def plot_accuracy(pool_acc_E, val_acc_E, axis, cmap: Optional[str] = "viridis"):
cmap = cm.get_cmap(cmap)
E = pool_acc_E.shape[0]
assert val_acc_E.shape[0] == E
axis.plot(range(1, E + 1), pool_acc_E, label="pool")
axis.plot(range(1, E + 1), val_acc_E, label="val")
axis.set_title("Accuracy")
axis.set_xlabel("Epoch")
axis.set_ylabel("Accuracy")
axis.set_xticks(range(1, E + 1))
axis.set_xticklabels(range(1, E + 1), rotation=45)
axis.legend()
def plot_sample_size(metric: dict, axis):
y = metric["history"]["train_size"]
x = len(y)
axis.plot(range(1, x + 1), y)
axis.set_xticks(range(1, x + 1))
axis.set_title("Training set size")
axis.set_xlabel("Epoch")
axis.set_ylabel("Training set size")
axis.set_xticklabels(range(1, x + 1), rotation=45)
def plot_val_loss(metric: dict, axis):
y = metric["history"]["val_loss"]
x = len(y)
axis.plot(range(1, x + 1), y)
axis.set_xticks(range(1, x + 1))
axis.set_title("Validation Loss")
axis.set_xlabel("Epoch")
axis.set_ylabel("Loss")
axis.set_xticklabels(range(1, x + 1), rotation=45)
def get_val_acc(metric: dict):
return np.array(metric["history"]["val_acc"])
def plot_labelled_classes(metric: dict, axis):
x, y = np.unique(metric["labelled_classes"], return_counts=True)
axis.bar(x, y)
axis.set_xlabel("Class")
axis.set_ylabel("Counts")
axis.set_title("BALD-acquired classes (so far)")
def parse_calib_dir(calib_metrics: str):
def num_sort(fname: Path):
basename = fname.name
return int(basename[: basename.find("_")])
calib_metrics = Path(calib_metrics)
pkls = list(calib_metrics.rglob("*.pkl"))
pkls = sorted(pkls, key=num_sort)
buffer = []
for p in pkls:
with open(p, "rb") as fp:
buffer.append(pickle.load(fp))
confidences, proportions, accuracies = [], [], []
bins, bin_accuracy, counts, ece = [], [], [], []
entropy = []
per_acc = []
for b in buffer:
res = b["conf-thresh"]
confidences.append(res[0])
proportions.append(res[1])
accuracies.append(b["accuracy"])
res = b["ece"]
bins.append(res[0])
bin_accuracy.append(res[1])
counts.append(res[2])
# res[3] = mean confidence
ece.append(res[4])
entropy.append(b["entropy"])
if "per-instance-accuracy" in b:
per_acc.append(b["per-instance-accuracy"])
confidences_E_N = np.stack(confidences, axis=0)
proportions_E_N = np.stack(proportions, axis=0)
accuracies_E = np.stack(accuracies, axis=0)
bins_E_M = np.stack(bins, axis=0)
bin_accuracy_E_N = np.stack(bin_accuracy, axis=0)
counts_E_N = np.stack(counts, axis=0)
ece_E = np.stack(ece, axis=0)
try:
# can only do so if entropy is a non-jagged matrix (non-pool set calib)
entropy_E_N = np.stack(entropy, axis=0)
if per_acc:
per_acc_E_N = np.stack(per_acc, axis=0)
else:
per_acc_E_N = None
    except ValueError:
entropy_E_N = None
per_acc_E_N = None
return (
confidences_E_N,
proportions_E_N,
accuracies_E,
bins_E_M,
bin_accuracy_E_N,
counts_E_N,
ece_E,
entropy_E_N,
per_acc_E_N,
)
def diagnostics(calib_metrics: str, metrics: str):
metrics = Path(metrics)
(
confidences_E_N,
proportions_E_N,
accuracies_E,
bins_E_M,
bin_accuracy_E_N,
counts_E_N,
ece_E,
entropy_E_N,
_,
) = parse_calib_dir(calib_metrics)
with open(metrics, "rb") as fp:
metrics = pickle.load(fp)
fig, axes = plt.subplots(3, 3, figsize=(3 * 5, 3 * 5))
axes = axes.flatten()
confidence_plot(confidences_E_N, proportions_E_N, axes[0])
ece_plot(ece_E, axes[1])
plot_val_loss(metrics, axes[2])
reliability_hist_plot(bins_E_M, counts_E_N, axes[3])
if entropy_E_N is not None:
plot_entropy(entropy_E_N, num_classes=10, axis=axes[4])
plot_labelled_classes(metrics, axis=axes[5])
reliability_plot(bins_E_M, bin_accuracy_E_N, counts_E_N, axes[6])
plot_accuracy(accuracies_E, get_val_acc(metrics), axis=axes[7])
plot_sample_size(metrics, axes[8])
plt.suptitle(f"Pool size = {entropy_E_N.shape[-1]:,}", y=1.0)
for i, ax in enumerate(axes):
if i % 3 == 0:
ax.grid()
fig.tight_layout()
def solo_reliability_plot(calib_metrics, title="Reliability plot", label="Iteration"):
(
confidences_E_N,
proportions_E_N,
accuracies_E,
bins_E_M,
bin_accuracy_E_N,
counts_E_N,
ece_E,
entropy_E_N,
_,
) = parse_calib_dir(calib_metrics)
fig = plt.figure(constrained_layout=True, figsize=(8, 8))
spec = fig.add_gridspec(
ncols=2,
nrows=2,
width_ratios=[29, 1],
height_ratios=[2, 7],
)
axes = [
fig.add_subplot(spec[0, 0]),
fig.add_subplot(spec[1, 0]),
fig.add_subplot(spec[:, -1]),
]
reliability_hist_plot(bins_E_M, counts_E_N, axes[0], xticklabels=False, title=title)
reliability_plot(bins_E_M, bin_accuracy_E_N, counts_E_N, axes[1], title=None)
norm = mpl.colors.Normalize(vmin=1, vmax=accuracies_E.shape[0])
fig.colorbar(
cm.ScalarMappable(norm=norm, cmap=cm.get_cmap("viridis")),
orientation="vertical",
label=label,
cax=axes[2],
)
def entropy_reliability_plot(calib_metrics, num_class=10):
*_, entropy_E_N, per_acc_E_N = parse_calib_dir(calib_metrics)
E = entropy_E_N.shape[0]
max_ent = -np.log(1 / num_class)
space = np.linspace(0, max_ent, 11)
fig = plt.figure(constrained_layout=True, figsize=(8, 8))
if E > 1:
spec = fig.add_gridspec(
ncols=2,
nrows=2,
width_ratios=[29, 1],
height_ratios=[2, 7],
)
axes = [
fig.add_subplot(spec[0, 0]),
fig.add_subplot(spec[1, 0]),
fig.add_subplot(spec[:, -1]),
]
else:
spec = fig.add_gridspec(ncols=1, nrows=2, height_ratios=[2, 7])
axes = [fig.add_subplot(spec[0, 0]), fig.add_subplot(spec[1, 0])]
for ent, acc in zip(entropy_E_N, per_acc_E_N):
y = []
x = []
p = []
for i, upper in enumerate(space[1:]):
lower = space[i]
mask = (ent > lower) & (ent <= upper)
mean_acc = acc[mask].mean()
prop = mask.mean()
y.append(mean_acc)
# (lower, upper]
x.append(f"({lower:.2f}, {upper:.2f}]")
p.append(prop)
if E == 1:
axes[1].bar(range(len(y)), y)
axes[0].bar(range(len(p)), p)
else:
raise NotImplementedError
axes[1].set_xticklabels(x, rotation=45, ha="right")
axes[1].set_xticks(range(len(y)))
axes[0].set_xticks(())
axes[0].set_xticklabels(())
axes[0].set_title("Reliability plot")
axes[0].set_ylabel("Proportion")
axes[1].set_ylabel("Accuracy")
axes[1].set_xlabel("Entropy")
# norm = mpl.colors.Normalize(vmin=1, vmax=accuracies_E.shape[0])
# fig.colorbar(cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('viridis')),
# orientation='vertical', label=label, cax=axes[2])
```
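An illustrative call into the plotting helpers above. The paths are placeholders; the calibration directory is expected to contain the `<epoch>_pl_predictions.pkl` files written by `PLPredictionSaver`, and the module path `alr.training.diagnostics` is assumed from the repository layout:

```python
import matplotlib.pyplot as plt

from alr.training.diagnostics import diagnostics, solo_reliability_plot

diagnostics("pl_metrics/run_1", "metrics/run_1.pkl")   # placeholder paths
solo_reliability_plot("pl_metrics/run_1", title="Reliability plot", label="Epoch")
plt.show()
```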
#### File: alr/training/utils.py
```python
import pickle
import numpy as np
import torch
from torch import nn
from ignite.handlers import EarlyStopping, ModelCheckpoint, global_step_from_engine
import tempfile
from pathlib import Path
from ignite.engine import Engine, Events
from typing import Optional, Callable
from alr.utils.math import entropy
class EarlyStopper:
def __init__(
self,
model: nn.Module,
patience: int,
trainer: Engine,
key: Optional[str] = "acc",
mode: Optional[str] = "max",
):
self._model = model
self._patience = patience
self._trainer = trainer
self._key = key
self._tmpdir = tempfile.TemporaryDirectory()
mode = mode.lower()
assert mode in {"min", "max"}
self._mode = -1 if mode == "min" else 1
assert Path(str(self._tmpdir.name)).is_dir()
assert not list(Path(str(self._tmpdir.name)).rglob("*.pth"))
self._chkpt_handler = None
self._reload_called = False
def attach(self, engine: Engine):
r"""
Attach an early stopper to engine that will terminate the provided trainer
when the predetermined metric does not improve for `patience` epochs.
Args:
engine (ignite.engine.Engine): this is expected to be a validation
evaluator. The `key` metric will be extracted and the best will
be used.
Returns:
NoneType: None
"""
es_handler = EarlyStopping(
patience=self._patience,
score_function=self._score_function,
trainer=self._trainer,
)
self._chkpt_handler = ModelCheckpoint(
str(self._tmpdir.name),
filename_prefix="best",
n_saved=1,
create_dir=False,
score_function=self._score_function,
score_name=f"val_{self._key}",
global_step_transform=global_step_from_engine(self._trainer),
)
engine.add_event_handler(Events.COMPLETED, es_handler)
engine.add_event_handler(
Events.COMPLETED, self._chkpt_handler, {"model": self._model}
)
def _score_function(self, engine):
return engine.state.metrics[self._key] * self._mode
def reload_best(self):
if self._reload_called:
raise RuntimeError("Cannot reload more than once.")
if self._chkpt_handler is None or self._chkpt_handler.last_checkpoint is None:
raise RuntimeError(
"Cannot reload model until it has been trained for at least one epoch."
)
self._model.load_state_dict(
torch.load(str(self._chkpt_handler.last_checkpoint)), strict=True
)
self._tmpdir.cleanup()
self._reload_called = True
class PLPredictionSaver:
def __init__(
self,
log_dir: str,
compact: Optional[bool] = True,
pred_transform: Optional[
Callable[[torch.Tensor], torch.Tensor]
] = lambda x: x.exp(),
onehot_target: Optional[bool] = False,
):
r"""
Args:
            log_dir (): directory where the per-epoch prediction summaries are pickled
            compact (): save only summary statistics (compact) instead of all raw predictions (huge files)
pred_transform (): typically used to exponentiate model's output predictions
onehot_target (): set to True if the target label is a distribution (i.e.
argmax should be called on it to get the class); leave as false if targets are
ints.
"""
self._output_transform = lambda x: x
self._preds = []
self._targets = []
if log_dir:
self._log_dir = Path(log_dir)
self._log_dir.mkdir(parents=True, exist_ok=True)
self._other_engine = None
self._compact = compact
self._pred_transform = pred_transform
self._onehot_target = onehot_target
def attach(
self, engine: Engine, output_transform: Callable[..., tuple] = lambda x: x
):
self._output_transform = output_transform
self._other_engine = engine
engine.add_event_handler(Events.EPOCH_STARTED, self._reset)
engine.add_event_handler(Events.EPOCH_COMPLETED, self._flush)
engine.add_event_handler(Events.ITERATION_COMPLETED, self._parse)
def _parse(self, engine: Engine):
pred, target = self._output_transform(engine.state.output)
self._preds.append(pred.detach().cpu())
if self._onehot_target:
self._targets.append(target.detach().cpu().argmax(dim=-1))
else:
self._targets.append(target.detach().cpu())
def _flush(self, _):
preds_N_C = self._pred_transform(torch.cat(self._preds, dim=0)).numpy()
assert preds_N_C.ndim == 2
targets_N = torch.cat(self._targets, dim=0).numpy()
assert targets_N.ndim == 1 and targets_N.shape[0] == preds_N_C.shape[0]
if self._compact:
acc = _accuracy(preds_N_C, targets_N)
payload = {
"ece": _expected_calibration_error(preds_N_C, targets_N),
"conf-thresh": _confidence_threshold(preds_N_C),
"entropy": _entropy(preds_N_C),
"accuracy": acc.mean(),
"per-instance-accuracy": acc,
}
else:
payload = {
"preds": preds_N_C,
"targets": targets_N,
}
epoch = self._other_engine.state.epoch
fname = self._log_dir / f"{str(epoch)}_pl_predictions.pkl"
assert not fname.exists(), "You've done goofed"
with open(fname, "wb") as fp:
pickle.dump(payload, fp)
def _reset(self, _):
self._preds = []
self._targets = []
def global_step_from_engine(self, engine: Engine):
self._other_engine = engine
def _confidence_threshold(preds_N_C: np.ndarray):
x = np.linspace(0, 1, num=100)
y = np.empty(shape=x.shape[0])
for idx, thresh in enumerate(x):
y[idx] = np.mean(np.max(preds_N_C, axis=-1) >= thresh)
return x, y
def _entropy(preds_N_C: np.ndarray):
ent_N = entropy(torch.from_numpy(preds_N_C), mode="softmax").numpy().sum(axis=-1)
return ent_N
def _accuracy(preds_N_C, targets_N):
return np.equal(preds_N_C.argmax(axis=-1), targets_N)
def _expected_calibration_error(preds_N_C: np.ndarray, targets_N: np.ndarray):
# https://arxiv.org/pdf/1706.04599.pdf
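    # ECE = sum_m (|B_m| / N) * |acc(B_m) - conf(B_m)| over confidence bins B_m of
    # width 0.1; `res` below holds |B_m| * |acc - conf| per bin and the final
    # division by N applies the bin weighting.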
width = 0.1
N = preds_N_C.shape[0]
bins = np.arange(0, 1 + width, width)
acc = np.zeros(shape=(len(bins) - 1))
counts = np.zeros_like(acc)
conf = np.zeros_like(acc)
class_N = preds_N_C.argmax(axis=-1)
probs_N = np.max(preds_N_C, axis=-1)
for idx, b in enumerate(bins[1:]):
low, high = bins[idx], b
mask = (low < probs_N) & (probs_N <= high)
if mask.any():
acc[idx] = np.equal(class_N[mask], targets_N[mask]).mean()
counts[idx] = mask.sum()
# average confidence in bin (low, high]
conf[idx] = np.mean(probs_N[mask])
res = np.abs(acc - conf) * counts
assert res.shape == (len(bins) - 1,)
assert np.isfinite(res).all()
return bins, acc, counts, conf, np.sum(res) / N
class PerformanceTracker:
def __init__(self, model: nn.Module, patience: int):
self.model = model
self.patience = patience
self._original_patience = patience
self.last_acc = None
self._temp_dir = tempfile.TemporaryDirectory()
self._model_filename = (
Path(str(self._temp_dir.name)).absolute() / f"{id(self)}.pt"
)
self._reloaded = False
def reset(self):
self.patience = self._original_patience
def step(self, acc):
if self.last_acc is None or acc > self.last_acc:
self.reset()
if self._model_filename.exists():
# 2 am paranoia: make sure old model weights are overridden
self._model_filename.unlink()
torch.save(self.model.state_dict(), str(self._model_filename))
self.last_acc = acc
else:
self.patience -= 1
@property
def done(self) -> bool:
return self.patience <= 0
@property
def reloaded(self) -> bool:
return self._reloaded
def reload_best(self):
if self.last_acc is None:
raise RuntimeError(
"Cannot reload model until step is called at least once."
)
self.model.load_state_dict(torch.load(self._model_filename), strict=True)
self._temp_dir.cleanup()
self._reloaded = True
```
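A minimal sketch of how `EarlyStopper` is wired into an ignite training loop, mirroring how the trainers in this repository use it (`model`, `optimiser`, `loss_fn`, `train_loader`, and `val_loader` are assumed to exist):

```python
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy

trainer = create_supervised_trainer(model, optimiser, loss_fn)
val_evaluator = create_supervised_evaluator(model, metrics={"acc": Accuracy()})

es = EarlyStopper(model, patience=3, trainer=trainer, key="acc", mode="max")
es.attach(val_evaluator)   # terminates `trainer` and checkpoints on best validation accuracy

@trainer.on(Events.EPOCH_COMPLETED)
def _validate(engine):
    val_evaluator.run(val_loader)

trainer.run(train_loader, max_epochs=100)
es.reload_best()           # restore the best checkpointed weights (callable only once)
```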
#### File: cifar/models/efficient.py
```python
from efficientnet_pytorch import EfficientNet as EN
from torch import nn
from torch.nn import functional as F
class EfficientNet(nn.Module):
def __init__(self, version=3, dropout_rate=0.5, num_classes=10):
super(EfficientNet, self).__init__()
params = dict(
image_size=[32, 32], dropout_rate=dropout_rate, num_classes=num_classes
)
self.module = EN.from_name(f"efficientnet-b{version}", override_params=params)
def forward(self, x):
out = self.module(x)
return F.log_softmax(out, dim=1)
```
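A quick sanity-check sketch for the wrapper above (assumes an `efficientnet_pytorch` version that supports `EN.from_name(..., override_params=...)`):

```python
import torch

model = EfficientNet(version=0, dropout_rate=0.5, num_classes=10)
x = torch.randn(2, 3, 32, 32)        # two CIFAR-10-sized images
log_probs = model(x)                 # log-softmax outputs, shape (2, 10)
print(log_probs.exp().sum(dim=1))    # each row sums to ~1
```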
#### File: mnist/restart/train.py
```python
r"""
From previous experiments, we saw that ephemeral pseudo-labelling helped boost accuracy
despite starting with only 20 points. We could kick-start BALD with 85% accuracy with 24 iterations
but it seems like using 80% accuracy at 10 iterations is a good trade-off. It's harder to gain more
accuracy as the number of iterations increases.
This experiment kick-starts BALD10 acquisition by warming the model to 80% accuracy (with 10 iterations
of ephemeral pseudo-labelling). However, the acquisition loop will NOT run ephemeral P.L. as we've seen
a decrease in performance when doing so. There are two possibilities: (1) warm-starting the model
has caused it to lower its entropy on the pool dataset, hence causing it to actually perform worse.
(2) warm-starting it actually helped! my bet is (unfortunately) on the former, given previous observations
(i.e. ephemeral bald10 performs worse than bald10 -- but i'm hopeful, notwithstanding.).
"""
from collections import defaultdict
from alr.utils import manual_seed, eval_fwd_exp, timeop
from alr.acquisition import BALD
from alr import MCDropout
from alr.data.datasets import Dataset
from alr.training.samplers import RandomFixedLengthSampler
from alr.data import UnlabelledDataset, DataManager
from alr.training import Trainer
from alr.training.repeated_acquisition_utils import (
get_confident_indices,
RelabelledDataset,
)
import torch
import torch.utils.data as torchdata
import pickle
from torch.nn import functional as F
from pathlib import Path
def main(b, threshold, warm_start_iters, log_every):
manual_seed(42)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
kwargs = dict(num_workers=4, pin_memory=True)
# --- constants ---
BATCH_SIZE = 64
EPOCHS = 200
REPS = 6
ITERS = 23
# +1 because of the structure of our loop
warm_start_iters += 1
VAL_SIZE = 5_000
MIN_TRAIN_LEN = 12_500
# --- setup ---
train, pool, test = Dataset.MNIST.get_fixed()
val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))
pool = UnlabelledDataset(pool, debug=True)
model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)
bald = BALD(eval_fwd_exp(model), device=device, batch_size=1024, **kwargs)
dm = DataManager(train, pool, bald)
val_loader = torchdata.DataLoader(
val,
batch_size=1024,
shuffle=False,
**kwargs,
)
test_loader = torchdata.DataLoader(
test,
batch_size=1024,
shuffle=False,
**kwargs,
)
warm_start_accs = []
accs = defaultdict(list)
template = f"wsi={warm_start_iters}_b={b}_thresh={threshold}"
pl_metrics = Path("pl_metrics") / template
metrics = Path("metrics") / template
saved_models = Path("saved_models") / template
metrics.mkdir(parents=True)
saved_models.mkdir(parents=True)
for r in range(1, REPS + 1):
print(f"- Repeat {r} of {REPS} -")
dm.reset()
ws_accs_r = {}
# store temporarily labelled points (will be union-ed with the training dataset)
pseudo_labelled_points = None
for i in range(1, warm_start_iters + 1):
if pseudo_labelled_points is not None:
full_train_dataset = torchdata.ConcatDataset(
(dm.labelled, pseudo_labelled_points)
)
else:
full_train_dataset = dm.labelled
train_length = len(full_train_dataset)
print(
f"=== Warm start iteration {i} of {warm_start_iters} ({i / warm_start_iters:.2%}) ==="
)
print(
f"\ttrain: {train_length}; "
f"pool: {dm.n_unlabelled}; "
f"val: {len(val)}; "
f"test: {len(test)}"
)
model.reset_weights()
# -- stage 1: train --
trainer = Trainer(
model, F.nll_loss, "Adam", patience=3, reload_best=True, device=device
)
train_loader = torchdata.DataLoader(
full_train_dataset,
batch_size=BATCH_SIZE,
sampler=RandomFixedLengthSampler(
full_train_dataset, MIN_TRAIN_LEN, shuffle=True
),
**kwargs,
)
with timeop() as t:
history = trainer.fit(train_loader, val_loader, epochs=EPOCHS)
test_metrics = trainer.evaluate(test_loader)
ws_accs_r[train_length] = test_metrics["acc"]
print(
f"\t[test] loss, acc: ({test_metrics['loss']:.4f}, {test_metrics['acc']:.4f}); time: {t}"
)
with open(
metrics / f"repeat_{r}_dsize_{train_length}_metrics.pkl", "wb"
) as fp:
payload = {
"history": history,
"test_metrics": test_metrics,
}
pickle.dump(payload, fp)
if (i - 1) % log_every == 0:
torch.save(
model.state_dict(),
saved_models / f"repeat_{r}_dsize_{train_length}_weights.pth",
)
# skip if this is the last iteration
if i == warm_start_iters:
accs[dm.n_labelled].append(test_metrics["acc"])
continue
# -- stage 2: acquire more data into the training set --
# -- acquire using pseudo-labels --
dm.unlabelled.debug = True
idxs, plabs = get_confident_indices(
model=model,
dataset=dm.unlabelled,
threshold=threshold,
root=((pl_metrics / f"repeat_{r}") if r == 1 else None),
step=i,
device=device,
**kwargs,
)
if idxs.shape[0]:
truth = torchdata.Subset(dm.unlabelled, idxs)
# replace true labels with pseudo-labels
pseudo_labelled_points = RelabelledDataset(truth, plabs)
assert len(pseudo_labelled_points) == idxs.shape[0]
else:
print(
f"\tSelf-labelling didn't happen because none of the pseudo-labels are confident enough."
)
warm_start_accs.append(ws_accs_r)
dm.unlabelled.debug = False
print(
f"Warm-started with {warm_start_iters} iterations. Beginning AL acquisitions"
)
for i in range(1, ITERS + 1):
dm.acquire(b=b)
print(f"=== Iteration {i} of {ITERS} ({i / ITERS:.2%}) ===")
print(
f"\ttrain: {dm.n_labelled}; val: {len(val)}; "
f"pool: {dm.n_unlabelled}; test: {len(test)}"
)
# model.reset_weights() # leverage p.l. from before, DON'T reset!
trainer = Trainer(
model,
F.nll_loss,
optimiser="Adam",
patience=3,
reload_best=True,
device=device,
)
train_loader = torchdata.DataLoader(
dm.labelled,
batch_size=BATCH_SIZE,
sampler=RandomFixedLengthSampler(
dm.labelled, MIN_TRAIN_LEN, shuffle=True
),
**kwargs,
)
with timeop() as t:
trainer.fit(train_loader, val_loader, epochs=EPOCHS)
test_metric = trainer.evaluate(test_loader)
print(f"\t[test] acc: {test_metric['acc']}, time: {t}")
accs[dm.n_labelled].append(test_metric["acc"])
with open(f"{template}_warm_start_accs.pkl", "wb") as fp:
pickle.dump(warm_start_accs, fp)
with open(f"{template}_accs.pkl", "wb") as fp:
pickle.dump(accs, fp)
if __name__ == "__main__":
main(b=10, threshold=0.9, warm_start_iters=10, log_every=2)
```
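`RandomFixedLengthSampler` (imported from `alr.training.samplers` above) is what lets a 20-point labelled set still drive full-length epochs: it keeps drawing indices from the small dataset until a fixed number of samples has been served. A hedged sketch of the intended usage, not of its implementation:

```python
import torch.utils.data as torchdata

from alr.data.datasets import Dataset
from alr.training.samplers import RandomFixedLengthSampler

train, pool, test = Dataset.MNIST.get_fixed()   # 20 labelled points
loader = torchdata.DataLoader(
    train,
    batch_size=64,
    # oversample the 20 points so each epoch yields ~12,500 examples
    sampler=RandomFixedLengthSampler(train, 12_500, shuffle=True),
)
```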
#### File: legacy/dont_reset_weights/train.py
```python
import pickle
from collections import defaultdict
from pathlib import Path
from typing import Optional, Callable
import numpy as np
import torch
import torch.utils.data as torchdata
from ignite.contrib.handlers import ProgressBar
from ignite.engine import create_supervised_evaluator, Events, Engine
from ignite.metrics import Accuracy, Loss
from torch import nn
from torch.nn import functional as F
from alr import ALRModel
from alr import MCDropout
from alr.acquisition import BALD
from alr.data import DataManager
from alr.data import RelabelDataset, PseudoLabelDataset, UnlabelledDataset
from alr.data.datasets import Dataset
from alr.training import Trainer
from alr.training.samplers import RandomFixedLengthSampler
from alr.training.utils import EarlyStopper, PLPredictionSaver
from alr.utils import eval_fwd_exp, timeop, manual_seed
from alr.utils._type_aliases import _DeviceType, _Loss_fn
class PseudoLabelManager:
def __init__(
self,
pool: UnlabelledDataset,
model: nn.Module,
threshold: float,
log_dir: Optional[str] = None,
device: _DeviceType = None,
**kwargs,
):
bs = kwargs.pop("batch_size", 1024)
shuffle = kwargs.pop("shuffle", False)
assert not shuffle
self._pool = pool
self._loader = torchdata.DataLoader(
pool, batch_size=bs, shuffle=shuffle, **kwargs
)
self._model = model
self._log_dir = log_dir
self._device = device
self._threshold = threshold
self.acquired_sizes = []
def attach(self, engine: Engine):
engine.add_event_handler(Events.STARTED, self._initialise)
# could also be EPOCH_COMPLETED since there's only one iteration in each epoch
engine.add_event_handler(Events.ITERATION_COMPLETED, self._load_labels)
def _load_labels(self, engine: Engine):
evaluator = create_supervised_evaluator(
self._model, metrics=None, device=self._device
)
plc = PseudoLabelCollector(
self._threshold,
log_dir=self._log_dir,
)
plc.attach(evaluator, batch_size=self._loader.batch_size)
plc.global_step_from_engine(engine)
evaluator.run(self._loader)
indices, pseudo_labels = (
evaluator.state.pl_indices.cpu().numpy(),
evaluator.state.pl_plabs.cpu().numpy(),
)
self.acquired_sizes.append(indices.shape[0])
if indices.shape[0]:
confident_points = torchdata.Subset(self._pool, indices)
if self._pool.debug:
# pool returns target labels too
engine.state.pseudo_labelled_dataset = RelabelDataset(
confident_points, pseudo_labels
)
else:
engine.state.pseudo_labelled_dataset = PseudoLabelDataset(
confident_points, pseudo_labels
)
else:
engine.state.pseudo_labelled_dataset = None
@staticmethod
def _initialise(engine: Engine):
engine.state.pseudo_labelled_dataset = None
class PseudoLabelCollector:
def __init__(
self,
threshold: float,
log_dir: Optional[str] = None,
pred_transform: Callable[[torch.Tensor], torch.Tensor] = lambda x: x.exp(),
):
self._indices = []
self._plabs = []
self._pred_transform = pred_transform
self._output_transform = lambda x: x
self._thresh = threshold
self._targets = []
self._preds = []
if log_dir:
self._saver = PLPredictionSaver(log_dir, pred_transform=pred_transform)
else:
self._saver = None
self._batch_size = None
def _parse(self, engine: Engine):
preds, targets = self._output_transform(engine.state.output)
# state.iteration starts with 1
iteration = engine.state.iteration - 1
offset = iteration * self._batch_size
with torch.no_grad():
preds = self._pred_transform(preds)
preds_max, plabs = torch.max(preds, dim=-1)
mask = torch.nonzero(preds_max >= self._thresh).flatten()
if mask.shape[0]:
# plabs = [N,]
self._plabs.append(plabs[mask])
self._indices.append(mask + offset)
def _flush(self, engine: Engine):
if self._indices and self._plabs:
engine.state.pl_indices = torch.cat(self._indices)
engine.state.pl_plabs = torch.cat(self._plabs)
else:
engine.state.pl_indices = torch.Tensor([])
engine.state.pl_plabs = torch.Tensor([])
self._indices = []
self._plabs = []
def attach(self, engine: Engine, batch_size: int, output_transform=lambda x: x):
r"""
Args:
engine (Engine): ignite engine object
batch_size (int): engine's batch size
output_transform (Callable): if engine.state.output is not (preds, target),
then output_transform should return aforementioned tuple.
Returns:
NoneType: None
"""
engine.add_event_handler(Events.ITERATION_COMPLETED, self._parse)
engine.add_event_handler(Events.COMPLETED, self._flush)
self._output_transform = output_transform
self._batch_size = batch_size
if self._saver:
self._saver.attach(engine, output_transform=output_transform)
def global_step_from_engine(self, engine: Engine):
if self._saver:
self._saver.global_step_from_engine(engine)
def _update_dataloader(
loader: torchdata.DataLoader,
dataset: torchdata.Dataset,
sampler: Optional[torchdata.Sampler] = None,
):
# attributes that usually go in dataloader's constructor
attrs = [k for k in loader.__dict__.keys() if not k.startswith("_")]
drop = ["dataset", "sampler", "batch_sampler", "dataset_kind"]
kwargs = {k: getattr(loader, k) for k in attrs if k not in drop}
if not isinstance(
loader.sampler,
(
torchdata.SequentialSampler,
torchdata.RandomSampler,
RandomFixedLengthSampler,
),
):
raise ValueError(
f"Only sequential, random, and random fixed length samplers "
f"are supported in _update_dataloader"
)
kwargs["dataset"] = dataset
# Sequential and Random will be automatically determined if sampler is None (depending on shuffle)
kwargs["sampler"] = sampler
return torchdata.DataLoader(**kwargs)
def create_pseudo_label_trainer(
model: ALRModel,
loss: _Loss_fn,
optimiser: str,
train_loader: torchdata.DataLoader,
val_loader: torchdata.DataLoader,
pseudo_label_manager: PseudoLabelManager,
rfls_len: Optional[int] = None,
patience: Optional[int] = None,
reload_best: Optional[bool] = None,
epochs: Optional[int] = 1,
device: _DeviceType = None,
*args,
**kwargs,
):
def _step(engine: Engine, _):
# update loader accordingly: if pld is not none, concatenate them
new_loader = train_loader
pld = engine.state.pseudo_labelled_dataset
if pld is not None:
# only reset weights if engine.state.epoch != 1
model.reset_weights()
train_ds = torchdata.ConcatDataset((train_loader.dataset, pld))
# update dataloader's dataset attribute
if rfls_len:
new_loader = _update_dataloader(
train_loader,
train_ds,
RandomFixedLengthSampler(train_ds, length=rfls_len, shuffle=True),
)
else:
new_loader = _update_dataloader(train_loader, train_ds)
else:
assert engine.state.epoch == 1
# begin supervised training
trainer = Trainer(
model,
loss,
optimiser,
patience,
reload_best,
device=device,
*args,
**kwargs,
)
history = trainer.fit(
new_loader,
val_loader=val_loader,
epochs=epochs,
)
# if early stopping was applied w/ patience, then the actual train acc and loss should be
# -patience from the final loss/acc UNLESS we reached the maximum number of epochs.
if patience and len(history["train_loss"]) != epochs:
return history["train_loss"][-patience], history["train_acc"][-patience]
return history["train_loss"][-1], history["train_acc"][-1]
e = Engine(_step)
pseudo_label_manager.attach(e)
return e
class EphemeralTrainer:
def __init__(
self,
model: ALRModel,
pool: UnlabelledDataset,
loss: _Loss_fn,
optimiser: str,
threshold: float,
random_fixed_length_sampler_length: Optional[int] = None,
log_dir: Optional[str] = None,
patience: Optional[int] = None,
reload_best: Optional[bool] = False,
device: _DeviceType = None,
pool_loader_kwargs: Optional[dict] = {},
*args,
**kwargs,
):
self._pool = pool
self._model = model
self._loss = loss
self._optimiser = optimiser
self._patience = patience
self._reload_best = reload_best
self._device = device
self._args = args
self._kwargs = kwargs
self._threshold = threshold
self._log_dir = log_dir
self._pool_loader_kwargs = pool_loader_kwargs
self._rfls_len = random_fixed_length_sampler_length
def fit(
self,
train_loader: torchdata.DataLoader,
val_loader: Optional[torchdata.DataLoader] = None,
iterations: Optional[int] = 1,
epochs: Optional[int] = 1,
):
if self._patience and val_loader is None:
raise ValueError(
"If patience is specified, then val_loader must be provided in .fit()."
)
val_evaluator = create_supervised_evaluator(
self._model,
metrics={"acc": Accuracy(), "loss": Loss(self._loss)},
device=self._device,
)
history = defaultdict(list)
pbar = ProgressBar()
def _log_metrics(engine: Engine):
# train_loss and train_acc are moving averages of the last epoch
# in the supervised training loop
train_loss, train_acc = engine.state.output
history[f"train_loss"].append(train_loss)
history[f"train_acc"].append(train_acc)
pbar.log_message(
f"Eph. iteration {engine.state.epoch}/{engine.state.max_epochs}\n"
f"\ttrain acc = {train_acc}, train loss = {train_loss}"
)
if val_loader is None:
return # job done
# val loader - save to history and print metrics. Also, add handlers to
# evaluator (e.g. early stopping, model checkpointing that depend on val_acc)
metrics = val_evaluator.run(val_loader).metrics
history[f"val_acc"].append(metrics["acc"])
history[f"val_loss"].append(metrics["loss"])
pbar.log_message(
f"\tval acc = {metrics['acc']}, val loss = {metrics['loss']}"
)
pseudo_label_manager = PseudoLabelManager(
pool=self._pool,
model=self._model,
threshold=self._threshold,
log_dir=self._log_dir,
device=self._device,
**self._pool_loader_kwargs,
)
trainer = create_pseudo_label_trainer(
model=self._model,
loss=self._loss,
optimiser=self._optimiser,
train_loader=train_loader,
val_loader=val_loader,
pseudo_label_manager=pseudo_label_manager,
rfls_len=self._rfls_len,
patience=self._patience,
reload_best=self._reload_best,
epochs=epochs,
device=self._device,
*self._args,
**self._kwargs,
)
# output of trainer are running averages of train_loss and train_acc (from the
# last epoch of the supervised trainer)
pbar.attach(trainer, output_transform=lambda x: {"loss": x[0], "acc": x[1]})
if val_loader is not None and self._patience:
es = EarlyStopper(
self._model, self._patience, trainer, key="acc", mode="max"
)
es.attach(val_evaluator)
trainer.add_event_handler(Events.EPOCH_COMPLETED, _log_metrics)
trainer.run(
range(iterations),
max_epochs=iterations,
epoch_length=1,
)
if val_loader is not None and self._patience and self._reload_best:
es.reload_best()
history["train_size"] = np.array(pseudo_label_manager.acquired_sizes) + len(
train_loader.dataset
)
return history
def evaluate(self, data_loader: torchdata.DataLoader) -> dict:
evaluator = create_supervised_evaluator(
self._model,
metrics={"acc": Accuracy(), "loss": Loss(self._loss)},
device=self._device,
)
return evaluator.run(data_loader).metrics
def main(threshold: float, b: int):
manual_seed(42)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
kwargs = dict(num_workers=4, pin_memory=True)
BATCH_SIZE = 64
REPS = 3
ITERS = 14
VAL_SIZE = 5_000
MIN_TRAIN_LEN = 12_500
SSL_ITERATIONS = 200
EPOCHS = 200
accs = defaultdict(list)
template = f"thresh_{threshold}_b_{b}"
calib_metrics = Path("calib_metrics") / template
saved_models = Path("saved_models") / template
metrics = Path("metrics") / template
calib_metrics.mkdir(parents=True)
saved_models.mkdir(parents=True)
metrics.mkdir(parents=True)
train, pool, test = Dataset.MNIST.get_fixed()
val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))
pool = UnlabelledDataset(pool)
test_loader = torchdata.DataLoader(test, batch_size=512, shuffle=False, **kwargs)
val_loader = torchdata.DataLoader(val, batch_size=512, shuffle=False, **kwargs)
for r in range(1, REPS + 1):
model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)
bald = BALD(eval_fwd_exp(model), device=device, batch_size=512, **kwargs)
dm = DataManager(train, pool, bald)
dm.reset() # to reset pool
print(f"=== repeat #{r} of {REPS} ===")
for i in range(1, ITERS + 1):
# don't reset weights: let ephemeral trainer take care of it
# since we're collecting calibration metrics,
# make pool return targets too. (i.e. debug mode)
with dm.unlabelled.tmp_debug():
trainer = EphemeralTrainer(
model,
dm.unlabelled,
F.nll_loss,
"Adam",
threshold=threshold,
random_fixed_length_sampler_length=MIN_TRAIN_LEN,
log_dir=(calib_metrics / f"rep_{r}" / f"iter_{i}"),
patience=3,
reload_best=True,
device=device,
pool_loader_kwargs=kwargs,
)
train_loader = torchdata.DataLoader(
dm.labelled,
batch_size=BATCH_SIZE,
sampler=RandomFixedLengthSampler(
dm.labelled, MIN_TRAIN_LEN, shuffle=True
),
**kwargs,
)
with timeop() as t:
history = trainer.fit(
train_loader,
val_loader,
iterations=SSL_ITERATIONS,
epochs=EPOCHS,
)
# eval on test set
test_metrics = trainer.evaluate(test_loader)
accs[dm.n_labelled].append(test_metrics["acc"])
print(f"-- Iteration {i} of {ITERS} --")
print(
f"\ttrain: {dm.n_labelled}; pool: {dm.n_unlabelled}\n"
f"\t[test] acc: {test_metrics['acc']}; time: {t}"
)
# save stuff
with open(metrics / f"rep_{r}_iter_{i}.pkl", "wb") as fp:
payload = {
"history": history,
"test_metrics": test_metrics,
"labelled_classes": dm.unlabelled.labelled_classes,
"labelled_indices": dm.unlabelled.labelled_indices,
}
pickle.dump(payload, fp)
torch.save(model.state_dict(), saved_models / f"rep_{r}_iter_{i}.pth")
# finally, acquire points
dm.acquire(b)
with open(f"{template}_accs.pkl", "wb") as fp:
pickle.dump(accs, fp)
if __name__ == "__main__":
main(threshold=0.95, b=10)
```
#### File: legacy/test_variance_random_acquisition/train.py
```python
from alr.utils import eval_fwd_exp, timeop, manual_seed
from alr import MCDropout
from alr.data.datasets import Dataset
from alr.data import UnlabelledDataset, DataManager
from alr.acquisition import BALD, RandomAcquisition
from alr.training.ephemeral_trainer import EphemeralTrainer
from alr.training.samplers import RandomFixedLengthSampler
import pickle
import numpy as np
from collections import defaultdict
from pathlib import Path
import torch.utils.data as torchdata
import torch
from torch.nn import functional as F
from typing import Union
from alr.data import RelabelDataset, PseudoLabelDataset
def remove_pseudo_labels(
ds: Union[RelabelDataset, PseudoLabelDataset], idxs_to_remove: np.array
):
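    # `idxs_to_remove` shares the index space of `ds._dataset.indices`: points that
    # were just acquired (and are now truly labelled) are dropped from the
    # pseudo-label dataset, and `ds._labels` is masked to stay aligned with it.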
if ds is not None:
original_length = len(ds)
idxs_to_remove = set(idxs_to_remove)
new_indices = []
mask = []
for i, idx in enumerate(ds._dataset.indices):
if idx not in idxs_to_remove:
new_indices.append(idx)
mask.append(i)
ds._dataset.indices = np.array(new_indices)
ds._labels = ds._labels[mask]
assert len(ds._labels) == len(ds._dataset)
print(f"\tOriginal length: {original_length}, new length: {len(ds)}")
def main(threshold: float, b: int):
manual_seed(42)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
kwargs = dict(num_workers=4, pin_memory=True)
BATCH_SIZE = 64
REPS = 20
ITERS = 5
VAL_SIZE = 5_000
MIN_TRAIN_LEN = 12_500
SSL_ITERATIONS = 200
EPOCHS = 200
accs = defaultdict(list)
template = f"thresh_{threshold}_b_{b}"
calib_metrics = Path("calib_metrics") / template
saved_models = Path("saved_models") / template
metrics = Path("metrics") / template
calib_metrics.mkdir(parents=True)
saved_models.mkdir(parents=True)
metrics.mkdir(parents=True)
train, pool, test = Dataset.MNIST.get_fixed()
val, pool = torchdata.random_split(pool, (VAL_SIZE, len(pool) - VAL_SIZE))
pool = UnlabelledDataset(pool)
test_loader = torchdata.DataLoader(test, batch_size=512, shuffle=False, **kwargs)
val_loader = torchdata.DataLoader(val, batch_size=512, shuffle=False, **kwargs)
for r in range(1, REPS + 1):
model = MCDropout(Dataset.MNIST.model, forward=20, fast=True).to(device)
ra = RandomAcquisition()
# bald = BALD(eval_fwd_exp(model), device=device, **kwargs)
dm = DataManager(train, pool, ra)
dm.reset() # to reset pool
print(f"=== repeat #{r} of {REPS} ===")
# last pseudo-labeled dataset
last_pld = None
for i in range(1, ITERS + 1):
model.reset_weights()
# since we're collecting calibration metrics,
# make pool return targets too. (i.e. debug mode)
with dm.unlabelled.tmp_debug():
trainer = EphemeralTrainer(
model,
dm.unlabelled,
F.nll_loss,
"Adam",
threshold=threshold,
min_labelled=0.1,
log_dir=(calib_metrics / f"rep_{r}" / f"iter_{i}"),
patience=3,
reload_best=True,
init_pseudo_label_dataset=last_pld,
device=device,
pool_loader_kwargs=kwargs,
)
train_loader = torchdata.DataLoader(
dm.labelled,
batch_size=BATCH_SIZE,
sampler=RandomFixedLengthSampler(
dm.labelled, MIN_TRAIN_LEN, shuffle=True
),
**kwargs,
)
with timeop() as t:
history = trainer.fit(
train_loader,
val_loader,
iterations=SSL_ITERATIONS,
epochs=EPOCHS,
)
last_pld = trainer.last_pseudo_label_dataset
# eval on test set
test_metrics = trainer.evaluate(test_loader)
accs[dm.n_labelled].append(test_metrics["acc"])
print(f"-- Iteration {i} of {ITERS} --")
print(
f"\ttrain: {dm.n_labelled}; pool: {dm.n_unlabelled}\n"
f"\t[test] acc: {test_metrics['acc']}; time: {t}"
)
# save stuff
with open(metrics / f"rep_{r}_iter_{i}.pkl", "wb") as fp:
payload = {
"history": history,
"test_metrics": test_metrics,
"labelled_classes": dm.unlabelled.labelled_classes,
"labelled_indices": dm.unlabelled.labelled_indices,
}
pickle.dump(payload, fp)
torch.save(model.state_dict(), saved_models / f"rep_{r}_iter_{i}.pth")
# finally, acquire points
idxs = dm.acquire(b)
remove_pseudo_labels(last_pld, idxs)
with open(f"{template}_accs.pkl", "wb") as fp:
pickle.dump(accs, fp)
if __name__ == "__main__":
main(threshold=0.90, b=10)
```
#### File: figures/chapter3/bald_density.py
```python
import pickle
import numpy as np
import torch
from pathlib import Path
import matplotlib.pyplot as plt
from alr.data import UnlabelledDataset
from alr.data.datasets import Dataset
import torch.utils.data as torchdata
import matplotlib.gridspec as grid_spec
from sklearn.neighbors import KernelDensity
from matplotlib import cm
import torchvision as tv
import tqdm
import colorcet as cc
from scipy.stats import pearsonr
from alr.utils import savefig
bald = Path(
"/Users/harry/Documents/workspace/thesis/figures/4/data/ens_bald_metrics/bald_10_96"
)
rep = 1
files = [f for f in bald.glob(f"rep_{rep}*")]
files = sorted(files, key=lambda x: int(x.name.split("_")[-1][:-4]))
bald_scores = []
for f in files:
with open(f, "rb") as fp:
bald_metrics = pickle.load(fp)
if bald_metrics["bald_scores"]:
bald_scores.append(bald_metrics["bald_scores"][-1])
# https://matplotlib.org/matplotblog/posts/create-ridgeplots-in-matplotlib/
def ridge_plot(
distributions: list,
title: str,
xlabel: str,
ylabel: str,
cmap="cet_bmy_r",
figsize=(8, 6),
every=1,
transform=lambda x: x,
):
if every != 1:
distributions = distributions[::every]
x_max = max(map(max, distributions))
gs = grid_spec.GridSpec(len(distributions), 1)
fig = plt.figure(figsize=figsize)
cmap = cm.get_cmap(cmap)
# creating empty list
ax_objs = []
d_i = 0
for i, dist in tqdm.tqdm(enumerate(distributions)):
# creating new axes object and appending to ax_objs
ax_objs.append(fig.add_subplot(gs[i : i + 1, 0:]))
x_d = np.linspace(0, x_max, 1000)
kde = KernelDensity(bandwidth=0.03, kernel="gaussian")
kde.fit(dist[:, None])
logprob = kde.score_samples(x_d[:, None])
# plotting the distribution
ax_objs[-1].plot(x_d, np.exp(logprob), lw=1, color=cmap(i / len(distributions)))
ax_objs[-1].fill_between(
x_d, np.exp(logprob), color=cmap(i / len(distributions)), alpha=0.3
)
# setting uniform x and y lims
ax_objs[-1].set_xlim(0, x_max)
# ax_objs[-1].set_ylim(0,2.2)
rect = ax_objs[-1].patch
rect.set_alpha(0)
# remove borders, axis ticks, and labels
ax_objs[-1].set_yticklabels([])
ax_objs[-1].tick_params(axis="both", which="both", length=0)
if i == len(distributions) - 1:
ax_objs[-1].set_xlabel(xlabel, fontsize=13)
else:
ax_objs[-1].set_xticklabels([])
spines = ["top", "right", "left", "bottom"]
for s in spines:
ax_objs[-1].spines[s].set_visible(False)
ax_objs[-1].text(-0.02, 0, transform(d_i + 1), fontsize=11, ha="right")
d_i += every
ax_objs[len(ax_objs) // 2].text(-0.17, 0, ylabel, fontsize=13, rotation=90)
ax_objs[0].set_title(title)
gs.update(hspace=-0.7)
plt.tight_layout()
ridge_plot(
bald_scores,
title="BALD score density",
xlabel="BALD score",
ylabel="Acquired dataset size",
every=15,
transform=lambda x: 20 + (x - 1) * 10,
)
savefig("/Users/harry/Documents/workspace/thesis/figures/4/BALD_density.pdf")
```
#### File: figures/chapter4/cifar_10_al_vs_ssl.py
```python
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import pickle
import torch.utils.data as torchdata
from pathlib import Path
from torch import nn
from torch.nn import functional as F
from alr.utils import savefig
root = Path("/Users/harry/Documents/workspace/thesis/figures/3/data")
with open(root / "bald_10_combined_ens.pkl", "rb") as fp:
bald10 = pickle.load(fp)
with open(root / "random_100_combined.pkl", "rb") as fp:
rand100 = pickle.load(fp)
def plot(dic, label, dot=False):
x = list(dic.keys())
y = np.array(list(dic.values()))
median = np.median(y, axis=-1)
top = np.percentile(y, 75, axis=-1)
btm = np.percentile(y, 25, axis=-1)
if dot:
(l,) = plt.plot(x, median, label=label, linestyle="dashdot")
else:
(l,) = plt.plot(x, median, label=label)
plt.fill_between(x, btm, top, color=l.get_color(), alpha=0.2)
# plt.xticks([20] + list(range(50, 300, 50)))
# plt.xlim(left=20, right=250)
plot(rand100, "SSL (Random-100)", dot=True)
plot(bald10, "AL (BALD-10)")
plt.xlim(20, 2000)
plt.grid()
plt.title("CIFAR-10 test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Dataset size")
plt.axhline(y=0.94, color="k")
line = mpl.lines.Line2D([0], [0], color="k")
handles, labels = plt.gca().get_legend_handles_labels()
handles.insert(0, line)
labels.insert(0, "Accuracy on full dataset")
plt.legend(handles=handles, labels=labels)
savefig("/Users/harry/Documents/workspace/thesis/figures/3/cifar_10_al_vs_ssl.pdf")
```
#### File: tests/modules/test_modules.py
```python
import pytest
from torch import nn
from torch.nn.modules.dropout import _DropoutNd
from torch.nn import functional as F
from alr.modules.dropout import replace_dropout
class Net1(nn.Module):
def __init__(self):
super(Net1, self).__init__()
self.fc = nn.Linear(10, 10)
self.drop = nn.Dropout()
def forward(self, x):
return self.drop(self.fc(x))
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc = nn.Linear(10, 10)
self.drop = nn.Dropout(p=0.3)
self.nn = Net1()
def forward(self, x):
        return self.drop(self.fc(x))
def _is_persistent(mod):
if isinstance(mod, _DropoutNd):
assert type(mod).__name__.startswith("Persistent")
def _is_not_persistent(mod):
if isinstance(mod, _DropoutNd):
assert not type(mod).__name__.startswith("Persistent")
def test_dropout_replacement_clone():
model = Net()
model2 = replace_dropout(model, inplace=False)
model2.apply(_is_persistent)
model.apply(_is_not_persistent)
def test_dropout_replacement_no_clone():
model = Net()
model2 = replace_dropout(model)
model2.apply(_is_persistent)
model.apply(_is_persistent)
def test_functional_dropout_warn():
class WarnNet(nn.Module):
def forward(self, x):
F.dropout(x, 0.5, True)
class WarnNet2(nn.Module):
def forward(self, x):
F.dropout2d(x, 0.5, True)
with pytest.warns(UserWarning):
replace_dropout(WarnNet())
with pytest.warns(UserWarning):
replace_dropout(WarnNet2())
```
#### File: alr/tests/test_data.py
```python
import numpy as np
import torch
import torch.utils.data as torchdata
import itertools
from alr.data import DataManager, UnlabelledDataset, RelabelDataset, PseudoLabelDataset
from alr.data.datasets import Dataset
from alr.acquisition import AcquisitionFunction
class MockAcquisitionFunction(AcquisitionFunction):
""" return the first b points of X_pool"""
def __call__(self, X_pool: torchdata.Dataset, b: int) -> np.array:
return np.arange(b)
class DummyData(torchdata.Dataset):
def __init__(self, n, target=None):
self.x = torch.arange(n)
self.target = target
if target:
self.y = torch.arange(n)
def __getitem__(self, idx):
if self.target:
return self.x[idx], self.y[idx]
return self.x[idx]
def __len__(self):
return len(self.x)
def dummy_label(transform=lambda x: x):
def _dummy_label(ds: torchdata.Dataset):
features = []
labels = []
for x in ds:
features.append(x)
labels.append(transform(x))
return torchdata.TensorDataset(torch.Tensor(features), torch.Tensor(labels))
return _dummy_label
def test_unlabelled_dataset_label_with_unlabelled():
N = 15
data = DummyData(N)
ud = UnlabelledDataset(data, dummy_label(lambda x: x + 42))
points_to_label = {5, 8, 3}
points_left = set(range(N)) - points_to_label
labelled = ud.label(list(points_to_label))
assert len(ud) == len(points_left)
assert len(labelled) == len(points_to_label)
# in the first label call:
# * * *
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
# in the second label call:
# - * - * * -
# 0 1 2 3 4 5 6 7 8 9 10 11
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
# second call to label
labelled = torchdata.ConcatDataset([labelled, ud.label([1, 3, 6])])
points_to_label = points_to_label.union({1, 4, 9})
points_left = set(range(N)) - points_to_label
assert len(ud) == len(points_left)
assert len(labelled) == len(points_to_label)
# in the third label call:
# - + * - * * - + + +
# 0 1 2 3 4 5 6 7 8
# 0 1 2 3 4 5 6 7 8 9 10 11
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
    # third call to label
labelled = torchdata.ConcatDataset([labelled, ud.label([1, 4, 6, 8])])
points_to_label = points_to_label.union({2, 10, 12, 14})
points_left = set(range(N)) - points_to_label
assert len(ud) == len(points_left)
assert len(labelled) == len(points_to_label)
# test labelled indices
assert [i.item() for i in ud.labelled_indices] == list(points_to_label)
# dummy_label used transform = x + 42
for x, y in labelled:
assert y == (x + 42)
points_to_label.remove(x.item())
assert len(points_to_label) == 0
# check remaining points in ud
for x in ud:
points_left.remove(x.item())
assert len(points_left) == 0
# check reset works
ud.reset()
assert len(ud) == N
assert ud.labelled_indices.size(0) == 0
full_dataset = set(range(N))
for x in ud:
full_dataset.remove(x.item())
assert len(full_dataset) == 0
def test_unlabelled_dataset_label_with_labelled():
N = 15
data = DummyData(N, target=True)
# don't have to provide labeler
ud = UnlabelledDataset(data)
points_to_label = {7, 4, 5, 1, 8, 12}
points_left = set(range(N)) - points_to_label
labelled = ud.label(list(points_to_label))
assert len(ud) == len(points_left)
assert len(labelled) == len(points_to_label)
# in the first label call:
# * * * * * *
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
# in the second label call:
# - * - * * - * * - *
# 0 1 2 3 4 5 6 7 8 9
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
# second call to label
labelled = torchdata.ConcatDataset([labelled, ud.label([0, 2, 3, 4])])
points_to_label = points_to_label.union({0, 3, 6, 9})
points_left = set(range(N)) - points_to_label
assert len(ud) == len(points_left)
assert len(labelled) == len(points_to_label)
# in the third label call:
# - * + - * * - * * - + * +
# 0 1 2 3 4
# 0 1 2 3 4 5 6 7 8 9
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
    # third call to label
labelled = torchdata.ConcatDataset([labelled, ud.label([0, 1, 3])])
points_to_label = points_to_label.union({2, 10, 13})
points_left = set(range(N)) - points_to_label
assert len(ud) == len(points_left)
assert len(labelled) == len(points_to_label)
# test labelled indices
assert [i.item() for i in ud.labelled_indices] == list(points_to_label)
# dummy_label used transform = identity
for x, y in labelled:
assert x == y
points_to_label.remove(x.item())
assert len(points_to_label) == 0
# check remaining points in ud
for x in ud:
points_left.remove(x.item())
assert len(points_left) == 0
# check reset works
ud.reset()
assert len(ud) == N
assert ud.labelled_indices.size(0) == 0
full_dataset = set(range(N))
for x in ud:
full_dataset.remove(x.item())
assert len(full_dataset) == 0
def test_data_manager():
N_LABELLED = 15
N_UNLABELLED = N_LABELLED * 10
train_pool = DummyData(N_LABELLED, target=True)
pool = UnlabelledDataset(DummyData(N_UNLABELLED), dummy_label())
dm = DataManager(train_pool, pool, MockAcquisitionFunction())
acquire = 10
dm.acquire(acquire)
assert dm.n_unlabelled == N_UNLABELLED - acquire
assert dm.n_labelled == N_LABELLED + acquire
# since the implementation currently uses concat
newly_acquired = itertools.islice(reversed(dm.labelled), acquire)
# since the unlabelled dataset has range 0-N_UNLABELLED
# and MockAcquisition returns the first `acquire` points
should_acquire = set(range(acquire))
for x, y in newly_acquired:
x = x.item()
y = y.item()
assert x == y
should_acquire.remove(x)
assert len(should_acquire) == 0
    # second acquire will now take points in the range [acquire, 2*acquire)
dm.acquire(acquire)
assert dm.n_unlabelled == N_UNLABELLED - 2 * acquire
assert dm.n_labelled == N_LABELLED + 2 * acquire
# since the implementation currently uses concat
newly_acquired = itertools.islice(reversed(dm.labelled), acquire)
should_acquire = set(range(acquire, acquire * 2))
for x, y in newly_acquired:
x = x.item()
y = y.item()
assert x == y
should_acquire.remove(x)
assert len(should_acquire) == 0
# test reset
dm.reset()
assert dm.n_labelled == N_LABELLED
assert dm.n_unlabelled == N_UNLABELLED
assert dm.labelled is train_pool
assert dm.unlabelled is pool
def test_relabel_dataset():
_, test = Dataset.MNIST.get()
fake_classes = np.random.randint(0, 100, size=len(test))
relabelled = RelabelDataset(test, fake_classes)
bs = 1024
loader = torchdata.DataLoader(relabelled, batch_size=bs, shuffle=False)
assert all(
[
np.equal(fake_classes[idx * bs : (idx + 1) * bs], y).all()
for idx, (_, y) in enumerate(loader)
]
)
loader = torchdata.DataLoader(relabelled, batch_size=1, shuffle=False)
x, y = next(iter(loader))
assert x[0].shape == test[0][0].shape
assert np.equal(x[0], test[0][0]).all()
def test_pseudolabel_dataset():
_, test = Dataset.MNIST.get()
fake_classes = np.random.randint(0, 100, size=len(test))
pseudo_labelled = PseudoLabelDataset(
UnlabelledDataset(test),
fake_classes,
)
bs = 1024
loader = torchdata.DataLoader(pseudo_labelled, batch_size=bs, shuffle=False)
assert all(
[
np.equal(fake_classes[idx * bs : (idx + 1) * bs], y).all()
for idx, (_, y) in enumerate(loader)
]
)
loader = torchdata.DataLoader(pseudo_labelled, batch_size=1, shuffle=False)
x, y = next(iter(loader))
assert x[0].shape == test[0][0].shape
assert np.equal(x[0], test[0][0]).all()
def test_unlabelled_data_debug():
train, test = Dataset.MNIST.get()
test = UnlabelledDataset(test, debug=False)
assert not test.debug
assert len(test[0]) == 1
with test.tmp_debug():
assert test.debug
assert isinstance(test[0], tuple)
assert len(test[0]) == 2
assert not test.debug
assert len(test[0]) == 1
def test_unlabelled_data_debug_nested():
train, test = Dataset.MNIST.get()
pool = UnlabelledDataset(test, debug=False)
with pool.tmp_debug():
assert pool.debug
with pool.tmp_debug():
assert pool.debug
assert pool.debug
assert not pool.debug
```
#### File: alr/tests/test_utils.py
```python
import time
import inspect
import pytest
from alr.utils import *
def test_timeop_normal():
with timeop() as t:
time.sleep(1)
assert t.seconds >= 0
def test_timeop_exception():
with pytest.raises(ValueError):
with timeop() as t:
try:
raise ValueError
finally:
assert t.seconds is None
class Foo:
def __init__(self, x):
self.x = x
def m(self, n):
"""
test docs
:param n: ok
:type n: ok
:return: ok
:rtype: ok
"""
time.sleep(0.5)
return self.x * n
@staticmethod
def s(n):
return n * 2
@classmethod
def c(cls, n):
return cls(n)
def test_method_time():
f = Foo(2)
assert inspect.ismethod(f.m)
doc = f.m.__doc__
name = f.m.__name__
assert f.m(32) == 32 * 2
with Time(f.m) as t:
assert inspect.ismethod(f.m)
assert f.m.__doc__ == doc and f.m.__name__ == name
f.x = 234
assert f.m(32) == 32 * 234
assert len(t.tape) == 1 and t.tape[0].seconds >= 0
f.x = 123
assert f.m(23) == 23 * 123
assert len(t.tape) == 2 and t.tape[-1].seconds >= 0
assert inspect.ismethod(f.m)
assert f.m(32) == 32 * 123
assert len(t.tape) == 2
t.reset()
assert len(t.tape) == 0
def test_static_time():
t = Time(Foo.s)
f = Foo.s(99)
assert len(t.tape) == 1 and t.tape[0].seconds >= 0
assert f == 99 * 2
# stop tracking
t.deregister()
f = Foo.s(77)
assert f == 77 * 2
assert len(t.tape) == 1
def test_cls_time():
t = Time(Foo.c)
f = Foo.c(99)
assert len(t.tape) == 1 and t.tape[0].seconds >= 0
assert f.m(88) == 99 * 88
# stop tracking
t.deregister()
f = Foo.c(77)
assert f.m(66) == 77 * 66
assert len(t.tape) == 1
def foobar():
return
def test_func_time():
with Time(foobar) as t:
res = foobar()
assert res is None
assert len(t.tape) == 1
```
#### File: tests/training/test_vanilla_pseudo_label_trainer.py
```python
import numpy as np
import torch
from torch.nn import functional as F
import torch.utils.data as torchdata
from alr.training.pseudo_label_trainer import soft_nll_loss, soft_cross_entropy
from alr.data.datasets import Dataset
from alr.utils.math import entropy
from ignite.engine import Engine, Events
def test_soft_nll_loss():
F = torch.nn.functional
target_logits = torch.randn(size=(100, 10))
logits = torch.randn(size=(100, 10))
# calculated expected cross entropy
target_dist = F.softmax(target_logits, dim=-1)
probs = F.softmax(logits, dim=-1)
cross_entropy = -(target_dist * torch.log(probs)).sum(dim=-1).mean()
# now, test nll
res = soft_nll_loss(
preds=F.log_softmax(logits, dim=-1), target=F.log_softmax(target_logits, dim=-1)
)
assert torch.isclose(res, cross_entropy)
def test_soft_nll_loss_trivial():
N = 1000
target_logits = torch.randperm(N) % 10
one_hot = torch.eye(10)[target_logits]
assert torch.eq(torch.argmax(one_hot, dim=-1), target_logits).all()
logits = torch.randn(size=(N, 10))
preds = F.log_softmax(logits, dim=-1)
a = F.nll_loss(preds, target_logits)
b = soft_nll_loss(preds, target=(one_hot + 1e-8).log())
assert torch.isclose(a, b)
def test_soft_cross_entropy():
target_logits = torch.randn(size=(100, 10))
logits = torch.randn(size=(100, 10))
# calculated expected cross entropy
target_dist = F.softmax(target_logits, dim=-1)
probs = F.softmax(logits, dim=-1)
cross_entropy = -(target_dist * torch.log(probs)).sum(dim=-1).mean()
# now, test cross entropy
res = soft_cross_entropy(logits, target=target_logits)
assert torch.isclose(res, cross_entropy)
def test_soft_cross_entropy_trivial():
N = 1000
target_logits = torch.randperm(N) % 10
target_logits_r = torch.randn(size=(N, 10))
# make the argmax value so high that it will cause the softmax dist to be essentially one-hot
target_logits_r[torch.arange(N), target_logits] = 1e10
logits = torch.randn(size=(N, 10))
a = F.cross_entropy(logits, target_logits)
b = soft_cross_entropy(logits, target=target_logits_r)
assert torch.isclose(a, b)
``` |
{
"source": "JiahnChoi/opsdroid.kr",
"score": 3
} |
#### File: opsdroid/cli/config.py
```python
import click
from opsdroid.cli.utils import (
build_config,
edit_config,
list_all_modules,
path_option,
validate_config,
)
from opsdroid.const import EXAMPLE_CONFIG_FILE
@click.group()
@path_option
@click.pass_context
def config(ctx, path):
"""Subcommands related to opsdroid configuration."""
ctx.obj = path
@config.command()
@click.pass_context
def gen(ctx):
"""Print out the example config.
Open the example configuration file and print it into the terminal.
    If the -f flag was used with the config command, then this path will be
    set on `ctx.obj` and passed into this subcommand, and the contents
    of the file at that path will be printed into the terminal instead.
Args:
ctx (:obj:`click.Context`): The current click cli context.
Returns:
int: the exit code. Always returns 0 in this case.
"""
path = ctx.obj or EXAMPLE_CONFIG_FILE
with open(path, "r") as conf:
click.echo(conf.read())
ctx.exit(0)
@config.command()
@click.pass_context
def edit(ctx):
"""Open config file with your favourite editor.
By default this command will open the configuration file with
    vi/vim. If you have a different editor that you would like to use,
    you need to change the `EDITOR` environment variable.
Args:
ctx (:obj:`click.Context`): The current click cli context.
Returns:
int: the exit code. Always returns 0 in this case.
"""
edit_config(ctx, ctx.obj)
@config.command()
@click.pass_context
def lint(ctx):
"""Validate the configuration.
This subcommand allows you to validate your configuration or a configuration
from a file if the -f flag is used. This avoids the need to start the bot just
to have it crash because of a configuration error.
    This command can also be helpful if you need to make changes to the configuration
    but are unsure if everything is set correctly. You could have the new config
file located somewhere and test it before using it to start opsdroid.
Args:
ctx (:obj:`click.Context`): The current click cli context.
Returns:
int: the exit code. Always returns 0 in this case.
"""
validate_config(ctx, ctx.obj)
@config.command()
@click.pass_context
def list_modules(ctx):
"""Print out a list of all active modules.
This function will try to get information from the modules that are active in the
configuration file and print them as a table or will just print a sentence saying that
there are no active modules for that type.
Args:
ctx (:obj:`click.Context`): The current click cli context.
Returns:
int: the exit code. Always returns 0 in this case.
"""
list_all_modules(ctx, ctx.obj)
@config.command()
@click.pass_context
@click.option(
"--verbose",
"verbose",
is_flag=True,
help="Turns logging level to debug to see all logging messages.",
)
def build(ctx, verbose):
"""Load configuration, load modules and install dependencies.
    This function loads the configuration and installs all necessary
    dependencies defined in a `requirements.txt` file inside the module.
    If the flag `--verbose` is passed, the logging level will be set to debug and
    all logs will be shown to the user.
Args:
ctx (:obj:`click.Context`): The current click cli context.
verbose (bool): set the logging level to debug.
Returns:
int: the exit code. Always returns 0 in this case.
"""
build_config(ctx, {"path": ctx.obj, "verbose": verbose})
```
#### File: opsdroid/cli/version.py
```python
import click
from opsdroid import __version__
@click.command()
@click.pass_context
def version(ctx):
"""Print out the version of opsdroid that is installed and exits.
Args:
ctx (:obj:`click.Context`): The current click cli context.
Returns:
int: the exit code. Always returns 0 in this case.
"""
click.echo("opsdroid {version}".format(version=__version__))
ctx.exit(0)
```
#### File: connector/telegram/events.py
```python
from opsdroid import events
class Poll(events.Event):
"""Event class that triggers when a poll is sent."""
def __init__(self, poll, question, options, total_votes, *args, **kwargs):
"""Contain some attributes that you can access.
- ``poll`` - The extracted poll details from the payload
- ``question`` - The question asked in the poll
- ``options`` - An array containing all options in the poll
- ``total_votes`` - Sum of total votes that the poll received
        Telegram allows you to create polls or quizzes; this type of message also
        contains a lot of other details that you can access with the ``poll``
        attribute, such as whether the poll is closed or allows multiple answers.
"""
super().__init__(*args, **kwargs)
self.poll = poll
self.question = question
self.options = options
self.total_votes = total_votes
class Contact(events.Event):
"""Event class that triggers when a contact is sent."""
def __init__(self, contact, phone_number, first_name, *args, **kwargs):
"""Contain some attributes that you can access.
- ``contact`` - The extracted contact details from the payload
        - ``phone_number`` - Extracted phone number from contact
- ``first_name`` - Extracted first name from contact
Your contact event might contain other information such as the
contact last name or a ``vcard`` field, you can use the ``contact``
attribute to access more information if available.
"""
super().__init__(*args, **kwargs)
self.contact = contact
self.phone_number = phone_number
self.first_name = first_name
class Location(events.Event):
"""Event class that triggers when a location message is sent."""
def __init__(self, location, latitude, longitude, *args, **kwargs):
"""Contain some attributes that you can access.
- ``location`` - The extracted location details from the payload
- ``latitude`` - Extracted latitude from the payload
- ``longitude`` - Extracted longitude from the payload
Since Telegram doesn't add any information to the location other than
        the latitude and longitude, you can probably just access these attributes;
        we decided to include the location attribute in case Telegram adds more
        useful things to this message type.
"""
super().__init__(*args, **kwargs)
self.location = location
self.latitude = latitude
self.longitude = longitude
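# A minimal usage sketch (hypothetical skill, not part of this module) showing how
# these event attributes might be read from a catch-all skill; it assumes the
# Telegram connector emits the events defined above:
#
#   from opsdroid.matchers import match_catchall
#
#   @match_catchall()
#   async def telegram_events(opsdroid, config, event):
#       if isinstance(event, Location):
#           print(event.latitude, event.longitude)
#       elif isinstance(event, Poll):
#           print(event.question, len(event.options), event.total_votes)
#       elif isinstance(event, Contact):
#           print(event.first_name, event.phone_number)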
```
#### File: mockmodules/mongo/mongo_database.py
```python
class DatabaseMongoCollectionMock:
"""The mocked database mongo class."""
def __init__(self, config):
"""Start the class."""
self.config = config
self.dummy_doc = {}
self.valid_response = {"_id": 123, "key": "456", "value": "789"}
async def find_one(self, key):
"""Mock method find_one.
Args: key(object) not considered for test
"""
return self.valid_response
async def update_one(self, key, update, **kwargs):
"""Mock method update_one.
Args: key(object) not considered for test
"""
return self.dummy_doc
async def delete_one(self, key):
"""Mock method delete_one.
Args: key(object) not considered for test
"""
return self.dummy_doc
```
#### File: database/tests/test_database.py
```python
import pytest
from opsdroid.database import Database
def test_init():
"""Test that the database is initialised properly."""
config = {"example_item": "test"}
database = Database(config)
assert database.name == ""
assert database.config["example_item"] == "test"
@pytest.mark.asyncio
async def test_connect():
database = Database({})
with pytest.raises(NotImplementedError):
await database.connect()
@pytest.mark.asyncio
async def test_disconnect():
database = Database({})
try:
await database.disconnect()
except NotImplementedError:
pytest.fail("disconnect() raised NotImplementedError unexpectedly!")
@pytest.mark.asyncio
async def test_get():
database = Database({})
with pytest.raises(NotImplementedError):
await database.get("test")
@pytest.mark.asyncio
async def test_put():
database = Database({})
with pytest.raises(NotImplementedError):
await database.put("test", {})
@pytest.mark.asyncio
async def test_delete():
database = Database({})
with pytest.raises(NotImplementedError):
await database.delete("test")
```
#### File: opsdroid/parsers/catchall.py
```python
import logging
from opsdroid import events
_LOGGER = logging.getLogger(__name__)
async def parse_catchall(opsdroid, event):
"""Parse an event against catch-all skills, if found."""
for skill in opsdroid.skills:
for matcher in skill.matchers:
if "catchall" in matcher:
if (
matcher["messages_only"]
and isinstance(event, events.Message)
or not matcher["messages_only"]
):
await opsdroid.run_skill(skill, skill.config, event)
```
#### File: opsdroid/parsers/dialogflow.py
```python
import os
import logging
from voluptuous import Required
from opsdroid.const import DEFAULT_LANGUAGE
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = {Required("project-id"): str, "min-score": float}
async def call_dialogflow(text, opsdroid, config):
"""Call Dialogflow to get intent from text.
Dialogflow will return an object with a few restrictions, you can't
iterate it and the only way to access each element is by using dot
notation.
Args:
text (string): message.text this is the text obtained from the user.
opsdroid (OpsDroid): An instance of opsdroid.core.
config (dict): configuration settings from the file config.yaml.
Return:
A 'google.cloud.dialogflow_v2.types.DetectIntentResponse' object.
Raises:
        Warning: if Google credentials are not found in environment
        variables or 'project-id' is not in config.
"""
try:
import dialogflow
if os.environ.get("GOOGLE_APPLICATION_CREDENTIALS") and config.get(
"project-id"
):
session_client = dialogflow.SessionsClient()
project_id = config.get("project-id")
language = config.get("lang") or opsdroid.config.get(
"lang", DEFAULT_LANGUAGE
)
session = session_client.session_path(project_id, "opsdroid")
text_input = dialogflow.types.TextInput(text=text, language_code=language)
query_input = dialogflow.types.QueryInput(text=text_input)
response = session_client.detect_intent(
session=session, query_input=query_input
)
return response
else:
raise Warning(
_(
"Authentication file not found or 'project-id' not in configuration, dialogflow parser will not be available."
)
)
except ImportError:
_LOGGER.error(
_(
"Unable to find dialogflow dependency. Please install dialogflow with the command pip install dialogflow if you want to use this parser."
)
)
opsdroid.config["parsers"][0]["enabled"] = False
async def parse_dialogflow(opsdroid, skills, message, config):
"""Parse a message against all Dialogflow skills.
This function does a few things, first it will check if the
intent confidence is higher than the minimum score set on config,
then it will try to match an action or an intent to a matcher and
add the proper skills to the skills list.
At the moment a broad exception is being used due to the fact that
dialogflow library doesn't have the best documentation yet and it's
not possible to know what sort of exceptions the library will return.
Args:
opsdroid (OpsDroid): An instance of opsdroid.core.
skills (list): A list containing all skills available.
message(object): An instance of events.message.
config (dict): configuration settings from the
file config.yaml.
Return:
Either empty list or a list containing all matched skills.
"""
try:
result = await call_dialogflow(message.text, opsdroid, config)
matched_skills = []
if (
"min-score" in config
and result.query_result.intent_detection_confidence < config["min-score"]
):
_LOGGER.debug(_("Dialogflow confidence lower than min-score."))
return matched_skills
if result:
for skill in skills:
for matcher in skill.matchers:
if "dialogflow_action" in matcher or "dialogflow_intent" in matcher:
if (
matcher.get("dialogflow_action")
== result.query_result.action
) or (
matcher.get("dialogflow_intent")
== result.query_result.intent.display_name
):
message.dialogflow = result.query_result
_LOGGER.debug(
_("Matched against skill %s"), skill.config["name"]
)
matched_skills.append(
{
"score": result.query_result.intent_detection_confidence,
"skill": skill,
"config": skill.config,
"message": message,
}
)
return matched_skills
except Exception as error:
# TODO: Refactor broad exception
_LOGGER.error(_("There was an error while parsing to dialogflow - %s."), error)
```
#### File: parsers/tests/test_parser_catchall.py
```python
import logging
import asynctest.mock as amock
import pytest
from opsdroid.cli.start import configure_lang
from opsdroid.core import OpsDroid
from opsdroid.events import Message, OpsdroidStarted
from opsdroid.matchers import match_always, match_catchall
from opsdroid.parsers.catchall import parse_catchall
pytestmark = pytest.mark.asyncio
configure_lang({})
async def getMockSkill():
async def mockedskill(config, message):
pass
mockedskill.config = {}
return mockedskill
async def getRaisingMockSkill():
async def mockedskill(config, message):
raise Exception()
mockedskill.config = {}
return mockedskill
async def test_parse_catchall_decorator_parens():
with OpsDroid() as opsdroid:
mock_skill = await getMockSkill()
opsdroid.skills.append(match_catchall()(mock_skill))
opsdroid.run_skill = amock.CoroutineMock()
mock_connector = amock.CoroutineMock()
message = Message(
text="Hello world",
user="user",
target="default",
connector=mock_connector,
)
await parse_catchall(opsdroid, message)
assert opsdroid.run_skill.called
async def test_parse_catchall_decorate_no_parens():
with OpsDroid() as opsdroid:
mock_skill = await getMockSkill()
opsdroid.skills.append(match_catchall(mock_skill))
opsdroid.run_skill = amock.CoroutineMock()
mock_connector = amock.CoroutineMock()
message = Message(
text="Hello world",
user="user",
target="default",
connector=mock_connector,
)
await parse_catchall(opsdroid, message)
assert opsdroid.run_skill.called
async def test_parse_catchall_raises(caplog):
caplog.set_level(logging.ERROR)
with OpsDroid() as opsdroid:
mock_skill = await getRaisingMockSkill()
mock_skill.config = {"name": "greetings"}
opsdroid.skills.append(match_catchall()(mock_skill))
assert len(opsdroid.skills) == 1
mock_connector = amock.MagicMock()
mock_connector.send = amock.CoroutineMock()
message = Message(
text="<NAME>",
user="user",
target="default",
connector=mock_connector,
)
await parse_catchall(opsdroid, message)
assert "ERROR" in caplog.text
async def test_parse_catchall_not_called():
with OpsDroid() as opsdroid:
mock_skill = await getMockSkill()
catchall_skill = amock.CoroutineMock()
opsdroid.skills.append(match_always()(mock_skill))
opsdroid.skills.append(match_catchall()(catchall_skill))
opsdroid.run_skill = amock.CoroutineMock()
mock_connector = amock.CoroutineMock()
message = Message(
text="<NAME>",
user="user",
target="default",
connector=mock_connector,
)
await parse_catchall(opsdroid, message)
assert not catchall_skill.called
async def test_parse_catchall_messages_only_default():
with OpsDroid() as opsdroid:
catchall_skill = await getMockSkill()
event = OpsdroidStarted()
opsdroid.skills.append(match_catchall()(catchall_skill))
opsdroid.run_skill = amock.CoroutineMock()
await parse_catchall(opsdroid, event)
assert opsdroid.run_skill.called
async def test_parse_catchall_messages_only_enabled():
with OpsDroid() as opsdroid:
catchall_skill = await getMockSkill()
event = OpsdroidStarted()
opsdroid.skills.append(match_catchall(messages_only=True)(catchall_skill))
opsdroid.run_skill = amock.CoroutineMock()
mock_connector = amock.CoroutineMock()
message = Message(
text="<NAME>",
user="user",
target="default",
connector=mock_connector,
)
await parse_catchall(opsdroid, event)
assert not opsdroid.run_skill.called
await parse_catchall(opsdroid, message)
assert opsdroid.run_skill.called
```
#### File: opsdroid/testing/fixtures.py
```python
import contextlib
import socket
from typing import Tuple
import pytest
from opsdroid.core import OpsDroid
from opsdroid.connector import Connector
from .external_api import ExternalAPIMockServer
__all__ = ["mock_api_obj", "bound_address", "get_connector", "opsdroid", "mock_api"]
@pytest.fixture(scope="session")
def get_connector():
"""Pytest fixture which is a factory to generate a connector."""
def _get_connector(config={}, opsdroid=None):
return Connector(config, opsdroid=opsdroid)
return _get_connector
@pytest.fixture
def bound_address(request) -> Tuple[str, int]:
"""Block an unused port and return it.
This allows testing ``except OSError`` blocks that check for a port-in-use.
For example, this test ensures that OSError is propagated
when the port is already in use on localhost:
async def test_web_port_in_use(opsdroid, bound_address):
opsdroid.config["web"] = {
"host": bound_address[0], "port": bound_address[1]
}
app = web.Web(opsdroid)
with pytest.raises(OSError):
await app.start()
By default this blocks a port with host 0.0.0.0, but you can use parametrize
to specify an alternate host:
@pytest.mark.parametrize("bound_address", ["localhost"], indirect=True)
async def test_localhost(bound_address):
assert bound_address[0] == "localhost"
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with contextlib.suppress(socket.error):
if hasattr(socket, "SO_EXCLUSIVEADDRUSE"): # only on windows
s.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
if hasattr(socket, "SO_REUSEPORT"): # not on windows
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 0)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0)
host = request.param if hasattr(request, "param") else "0.0.0.0"
s.bind((host, 0)) # an ephemeral port
yield s.getsockname()
s.close()
@pytest.fixture
def opsdroid() -> OpsDroid:
"""Fixture with a plain instance of opsdroid.
Will yield an instance of :class:`opsdroid.core.OpsDroid` which hasn't been loaded.
"""
with OpsDroid(config={}) as opsdroid:
yield opsdroid
@pytest.fixture
def mock_api_obj(request) -> ExternalAPIMockServer:
"""Initialize instance of :class:`opsdroid.testing.ExternalAPIMockServer`.
This fixture returns an instance that hasn't been started, allowing you to
add routes to the server before it is started.
There are a few options of how to start the server once this fixture has
been used:
* Use the ``mock_api`` fixture in the test *after* this fixture has
been called to setup a route.
* Use the
:meth:`opsdroid.testing.ExternalAPIMockServer.running`
context manager.
* Manually start and stop the server.
    An example of the first method is to define a fixture which adds a
response, and then use the ``mock_api`` fixture in the test to start the
server::
@pytest.fixture
def canned_response(mock_api_obj):
mock_api_obj.add_response("/test", "GET")
# Note here mock_api MUST come after canned_response for the route to
# be added before the server is started.
def test_my_endpoint(canned_response, mock_api):
...
Routes can be specified with ``pytest.mark.add_response`` on tests using
this fixture, as described in :func:`~opsdroid.testing.fixtures.mock_api`
"""
mock_api_obj = ExternalAPIMockServer()
markers = [
marker for marker in request.node.own_markers if marker.name == "add_response"
]
for marker in markers:
mock_api_obj.add_response(*marker.args, **marker.kwargs)
return mock_api_obj
@pytest.fixture
async def mock_api(mock_api_obj) -> ExternalAPIMockServer:
"""Fixture for mocking API calls to a web service.
Will yield a running instance of
:class:`opsdroid.testing.ExternalAPIMockServer`, which has been configured
with any routes specified through ``@pytest.mark.add_response()``
decorators, or modification of the
:func:`~opsdroid.testing.fixtures.mock_api_obj` fixture before the test is
called.
All arguments and keyword arguments passed to ``pytest.mark.add_response``
are passed through to `.ExternalAPIMockServer.add_response`.
An example test would look like::
@pytest.mark.add_response("/test", "GET")
@pytest.mark.add_response("/test2", "GET", status=500)
@pytest.mark.asyncio
async def test_hello(mock_api):
async with aiohttp.ClientSession() as session:
async with session.get(f"{mock_api.base_url}/test") as resp:
assert resp.status == 200
assert mock_api.called("/test")
async with session.get(f"{mock_api.base_url}/test2") as resp:
assert resp.status == 500
assert mock_api.called("/test2")
"""
async with mock_api_obj.running() as mock_api:
yield mock_api
```
#### File: opsdroid/tests/test_core.py
```python
import os
import signal
import threading
import asynctest.mock as amock
import pytest
from opsdroid.core import OpsDroid
@pytest.mark.skipif(os.name == "nt", reason="SIGHUP unsupported on windows")
@pytest.mark.isolate_signal_test
def test_signals(event_loop):
pid = os.getpid()
def send_signal(sig):
# print(f"{pid} <== {sig}")
os.kill(pid, sig)
with OpsDroid() as opsdroid:
opsdroid.load = amock.CoroutineMock()
# bypass task creation in start() and just run the task loop
opsdroid.start = amock.CoroutineMock(return_value=opsdroid._run_tasks)
opsdroid.unload = amock.CoroutineMock()
opsdroid.reload = amock.CoroutineMock()
threading.Timer(2, lambda: send_signal(signal.SIGHUP)).start()
threading.Timer(3, lambda: send_signal(signal.SIGINT)).start()
with pytest.raises(SystemExit):
opsdroid.run()
assert opsdroid.reload.called
```
#### File: opsdroid/tests/test_matchers.py
```python
import pytest
import asyncio
import aiohttp.web
from aiohttp.test_utils import make_mocked_request
from opsdroid.cli.start import configure_lang
from opsdroid.web import Web
from opsdroid import matchers
configure_lang({})
async def get_mock_skill():
async def mocked_skill(opsdroid, config, message):
pass
return mocked_skill
async def get_mock_web_skill():
async def mocked_web_skill(opsdroid, config, message):
return aiohttp.web.Response(body=b"custom response", status=200)
return mocked_web_skill
@pytest.mark.asyncio
async def test_match_regex(opsdroid):
regex = r"(.*)"
decorator = matchers.match_regex(regex)
opsdroid.skills.append(decorator(await get_mock_skill()))
assert len(opsdroid.skills) == 1
assert opsdroid.skills[0].matchers[0]["regex"]["expression"] == regex
assert asyncio.iscoroutinefunction(opsdroid.skills[0])
@pytest.mark.asyncio
async def test_match_dialogflow(opsdroid):
action = "myaction"
decorator = matchers.match_dialogflow_action(action)
opsdroid.skills.append(decorator(await get_mock_skill()))
assert len(opsdroid.skills) == 1
assert opsdroid.skills[0].matchers[0]["dialogflow_action"] == action
assert asyncio.iscoroutinefunction(opsdroid.skills[0])
intent = "myIntent"
decorator = matchers.match_dialogflow_intent(intent)
opsdroid.skills.append(decorator(await get_mock_skill()))
assert len(opsdroid.skills) == 2
assert opsdroid.skills[1].matchers[0]["dialogflow_intent"] == intent
assert asyncio.iscoroutinefunction(opsdroid.skills[1])
@pytest.mark.asyncio
async def test_match_luisai(opsdroid):
intent = "myIntent"
decorator = matchers.match_luisai_intent(intent)
opsdroid.skills.append(decorator(await get_mock_skill()))
assert len(opsdroid.skills) == 1
assert opsdroid.skills[0].matchers[0]["luisai_intent"] == intent
assert asyncio.iscoroutinefunction(opsdroid.skills[0]) is True
@pytest.mark.asyncio
async def test_match_watson(opsdroid):
intent = "myIntent"
decorator = matchers.match_watson(intent)
opsdroid.skills.append(decorator(await get_mock_skill()))
assert len(opsdroid.skills) == 1
assert opsdroid.skills[0].matchers[0]["watson_intent"] == intent
assert asyncio.iscoroutinefunction(opsdroid.skills[0]) is True
@pytest.mark.asyncio
async def test_match_witai(opsdroid):
intent = "myIntent"
decorator = matchers.match_witai(intent)
opsdroid.skills.append(decorator(await get_mock_skill()))
assert len(opsdroid.skills) == 1
assert opsdroid.skills[0].matchers[0]["witai_intent"] == intent
assert asyncio.iscoroutinefunction(opsdroid.skills[0]) is True
@pytest.mark.asyncio
async def test_match_rasanu(opsdroid):
intent = "myIntent"
decorator = matchers.match_rasanlu(intent)
opsdroid.skills.append(decorator(await get_mock_skill()))
assert len(opsdroid.skills) == 1
assert opsdroid.skills[0].matchers[0]["rasanlu_intent"] == intent
assert asyncio.iscoroutinefunction(opsdroid.skills[0])
@pytest.mark.asyncio
async def test_match_recastai(opsdroid, caplog):
intent = "myIntent"
decorator = matchers.match_recastai(intent)
opsdroid.skills.append(decorator(await get_mock_skill()))
assert len(opsdroid.skills) == 1
assert opsdroid.skills[0].matchers[0]["sapcai_intent"] == intent
assert asyncio.iscoroutinefunction(opsdroid.skills[0])
@pytest.mark.asyncio
async def test_match_crontab(opsdroid):
crontab = "* * * * *"
decorator = matchers.match_crontab(crontab)
opsdroid.skills.append(decorator(await get_mock_skill()))
assert len(opsdroid.skills) == 1
assert opsdroid.skills[0].matchers[0]["crontab"] == crontab
assert asyncio.iscoroutinefunction(opsdroid.skills[0])
@pytest.mark.asyncio
async def test_match_webhook(opsdroid, mocker):
opsdroid.loader.current_import_config = {"name": "testhook"}
opsdroid.web_server = Web(opsdroid)
opsdroid.web_server.web_app = mocker.MagicMock()
webhook = "test"
decorator = matchers.match_webhook(webhook)
opsdroid.skills.append(decorator(await get_mock_skill()))
opsdroid.skills[0].config = {"name": "mockedskill"}
opsdroid.web_server.setup_webhooks(opsdroid.skills)
assert len(opsdroid.skills) == 1
assert opsdroid.skills[0].matchers[0]["webhook"] == webhook
assert asyncio.iscoroutinefunction(opsdroid.skills[0])
assert opsdroid.web_server.web_app.router.add_post.call_count == 2
@pytest.mark.asyncio
async def test_match_webhook_response(opsdroid, mocker):
opsdroid.loader.current_import_config = {"name": "testhook"}
opsdroid.web_server = Web(opsdroid)
opsdroid.web_server.web_app = mocker.MagicMock()
webhook = "test"
decorator = matchers.match_webhook(webhook)
opsdroid.skills.append(decorator(await get_mock_skill()))
opsdroid.skills[0].config = {"name": "mockedskill"}
opsdroid.web_server.setup_webhooks(opsdroid.skills)
postcalls, _ = opsdroid.web_server.web_app.router.add_post.call_args_list[0]
wrapperfunc = postcalls[1]
webhookresponse = await wrapperfunc(None)
assert isinstance(webhookresponse, aiohttp.web.Response)
@pytest.mark.asyncio
async def test_match_webhook_response_with_authorization_failure(opsdroid, mocker):
opsdroid.loader.current_import_config = {"name": "testhook"}
opsdroid.config["web"] = {"webhook-token": "<KEY>"}
opsdroid.web_server = Web(opsdroid)
opsdroid.web_server.web_app = mocker.MagicMock()
webhook = "test"
decorator = matchers.match_webhook(webhook)
opsdroid.skills.append(decorator(await get_mock_skill()))
opsdroid.skills[0].config = {"name": "mockedskill"}
opsdroid.web_server.setup_webhooks(opsdroid.skills)
postcalls, _ = opsdroid.web_server.web_app.router.add_post.call_args_list[0]
wrapperfunc = postcalls[1]
webhookresponse = await wrapperfunc(
make_mocked_request(
"POST", postcalls[0], headers={"Authorization": "Bearer wwxxyyzz"}
)
)
assert isinstance(webhookresponse, aiohttp.web.Response)
@pytest.mark.asyncio
async def test_match_webhook_custom_response(opsdroid, mocker):
opsdroid.loader.current_import_config = {"name": "testhook"}
opsdroid.web_server = Web(opsdroid)
opsdroid.web_server.web_app = mocker.MagicMock()
webhook = "test"
decorator = matchers.match_webhook(webhook)
opsdroid.skills.append(decorator(await get_mock_web_skill()))
opsdroid.skills[0].config = {"name": "mockedskill"}
opsdroid.web_server.setup_webhooks(opsdroid.skills)
postcalls, _ = opsdroid.web_server.web_app.router.add_post.call_args_list[0]
wrapperfunc = postcalls[1]
webhookresponse = await wrapperfunc(None)
assert isinstance(webhookresponse, aiohttp.web.Response)
assert webhookresponse.body == b"custom response"
```
#### File: opsdroid.kr/tests/test_connector_webexteams.py
```python
import asyncio
import unittest
import asynctest
import asynctest.mock as amock
from opsdroid.core import OpsDroid
from opsdroid.connector.webexteams import ConnectorWebexTeams
from opsdroid.events import Message
from opsdroid.cli.start import configure_lang
class TestConnectorCiscoWebexTeams(unittest.TestCase):
"""Test the opsdroid Webex Teams connector class."""
def setUp(self):
self.loop = asyncio.new_event_loop()
configure_lang({})
def test_init(self):
"""Test that the connector is initialised properly."""
connector = ConnectorWebexTeams({})
self.assertEqual("webexteams", connector.name)
self.assertEqual("opsdroid", connector.bot_name)
def test_webhook_url_is_valid(self):
connector = ConnectorWebexTeams({"webhook-url": "https://example.com"})
assert connector.config.get("webhook-url").startswith("https")
def test_missing_api_key(self):
"""Test that creating without an API without config raises an error."""
with self.assertRaises(TypeError):
ConnectorWebexTeams()
class TestConnectorCiscoSparkAsync(asynctest.TestCase):
"""Test the async methods of the opsdroid webex teams connector class."""
async def setUp(self):
configure_lang({})
async def test_connect(self):
connector = ConnectorWebexTeams({"token": "<PASSWORD>"}, opsdroid=OpsDroid())
opsdroid = amock.CoroutineMock()
opsdroid.eventloop = self.loop
connector.clean_up_webhooks = amock.CoroutineMock()
connector.subscribe_to_rooms = amock.CoroutineMock()
connector.set_own_id = amock.CoroutineMock()
with amock.patch("websockets.connect", new=amock.CoroutineMock()):
await connector.connect()
self.assertTrue(connector.clean_up_webhooks.called)
self.assertTrue(connector.subscribe_to_rooms.called)
self.assertTrue(connector.set_own_id.called)
async def test_message_handler(self):
connector = ConnectorWebexTeams({"token": "abc123"})
connector.opsdroid = OpsDroid()
connector.bot_spark_id = "spark123"
connector.api = amock.CoroutineMock()
request = amock.Mock()
request.json = amock.CoroutineMock()
request.json.return_value = {
"data": {"id": "3vABZrQgDzfcz7LZi", "personId": "21ABZrQgDzfcz7Lsi"}
}
message = amock.Mock()
connector.api.messages.get = amock.Mock()
message.text = "Hello"
message.roomId = "90ABCrWgrzfcz7LZi"
message.roomType = "general"
connector.api.messages.get.return_value = message
connector.get_person = amock.CoroutineMock()
person = amock.CoroutineMock()
person.displayName = "Himanshu"
connector.get_person.return_value = person
response = await connector.webexteams_message_handler(request)
self.assertLogs("_LOGGER", "debug")
self.assertEqual(201, response.status)
self.assertEqual('"Received"', response.text)
self.assertTrue(connector.api.messages.get.called)
self.assertTrue(connector.get_person.called)
connector.opsdroid = amock.CoroutineMock()
connector.opsdroid.parse = amock.CoroutineMock()
connector.opsdroid.parse.side_effect = KeyError
await connector.webexteams_message_handler(request)
self.assertLogs("_LOGGER", "error")
async def test_connect_fail_keyerror(self):
connector = ConnectorWebexTeams({}, opsdroid=OpsDroid())
connector.clean_up_webhooks = amock.CoroutineMock()
connector.subscribe_to_rooms = amock.CoroutineMock()
connector.set_own_id = amock.CoroutineMock()
await connector.connect()
self.assertLogs("_LOGGER", "error")
async def test_listen(self):
"""Test the listen method.
        The Webex Teams connector listens using an API endpoint and so the listen
method should just pass and do nothing. We just need to test that it
does not block.
"""
connector = ConnectorWebexTeams({}, opsdroid=OpsDroid())
self.assertEqual(await connector.listen(), None)
async def test_respond(self):
connector = ConnectorWebexTeams({"token": "abc123"})
connector.api = amock.CoroutineMock()
connector.api.messages.create = amock.CoroutineMock()
message = Message(
text="Hello",
user="opsdroid",
target={"id": "3vABZrQgDzfcz7LZi"},
connector=None,
)
await connector.send(message)
self.assertTrue(connector.api.messages.create.called)
async def test_get_person(self):
connector = ConnectorWebexTeams({"token": "abc123"})
connector.api = amock.CoroutineMock()
connector.api.messages.create = amock.CoroutineMock()
connector.api.people.get = amock.CoroutineMock()
connector.api.people.get.return_value = "Himanshu"
self.assertEqual(len(connector.people), 0)
await connector.get_person("3vABZrQgDzfcz7LZi")
self.assertEqual(len(connector.people), 1)
async def test_subscribe_to_rooms(self):
connector = ConnectorWebexTeams(
{"token": "abc123", "webhook-url": "http://127.0.0.1"}
)
connector.api = amock.CoroutineMock()
connector.opsdroid = amock.CoroutineMock()
connector.opsdroid.web_server.web_app.router.add_post = amock.CoroutineMock()
connector.api.webhooks.create = amock.CoroutineMock()
await connector.subscribe_to_rooms()
self.assertTrue(connector.api.webhooks.create.called)
self.assertTrue(connector.opsdroid.web_server.web_app.router.add_post.called)
async def test_clean_up_webhooks(self):
connector = ConnectorWebexTeams({"token": "abc123"})
connector.api = amock.CoroutineMock()
x = amock.CoroutineMock()
x.id = amock.CoroutineMock()
connector.api.webhooks.list = amock.Mock()
connector.api.webhooks.list.return_value = [x, x]
connector.api.webhooks.delete = amock.Mock()
await connector.clean_up_webhooks()
self.assertTrue(connector.api.webhooks.list.called)
self.assertTrue(connector.api.webhooks.delete.called)
async def test_set_own_id(self):
connector = ConnectorWebexTeams({"token": "<PASSWORD>"})
connector.api = amock.CoroutineMock()
connector.api.people.me().id = "3vABZrQgDzfcz7LZi"
await connector.set_own_id()
self.assertTrue(connector.bot_webex_id, "3vABZrQgDzfcz7LZi")
``` |
{
"source": "JiahongChen/FRAN",
"score": 3
} |
#### File: JiahongChen/FRAN/correlationMatrix.py
```python
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from os import listdir
from os.path import isfile, join
def get_df(algo):
datasets = ['DE007', 'DE014', 'DE021', 'FE007', 'FE014', 'FE021', 'DE', 'FE']
df = pd.DataFrame(columns = datasets)
mypath = './record/'+algo
files = [f for f in listdir(mypath) if isfile(join(mypath, f))]
for source in datasets:
source_to_target_array = []
for target in datasets:
name = source+'-'+target
for f in files:
if '_'+name+'_' in f:
splited_file_name = f.split('_')
value = splited_file_name[2]
source_to_target_array.append(float(value)*100)
break
df_row = pd.DataFrame([source_to_target_array], columns = datasets, index = [source])
df = pd.concat([df, df_row])
return df
def plot_corMatrix(df):
print (df)
print(np.mean(np.mean(df)))
plt.rcParams['font.size'] = 14
fig, (ax) = plt.subplots(1,1,figsize=(7.5,6))
hm = sns.heatmap(df,
ax = ax,
cmap = 'coolwarm',
square=True,
annot=True,
fmt='.1f',
annot_kws={'size':14},
linewidths=0.05,
robust=True,
vmin = 50,
vmax = 100
)
fig.subplots_adjust(top=0.93)
plt.tight_layout()
plt.savefig('FRAN_ave.pdf')
num_trials = 10
df = pd.DataFrame()
for i in range(1, num_trials+1):
if i ==1:
df = get_df('FRAN' +str(i))
else:
df += get_df('FRAN' +str(i))
df/=num_trials
plot_corMatrix(df)
```
#### File: JiahongChen/FRAN/main.py
```python
import os
import argparse
import tqdm
import os
import argparse
import numpy as np
import tqdm
from itertools import chain
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader
import time
from utils import weights_init, print_args
from model import *
import scipy.io
import random
import time
parser = argparse.ArgumentParser()
parser.add_argument("--data_root", default='./CWRU_dataset/')
parser.add_argument("--source", default='DE')
parser.add_argument("--target", default='FE')
parser.add_argument("--batch_size", default=64, type=int)
parser.add_argument("--shuffle", default=True, type=bool)
parser.add_argument("--num_workers", default=0)
parser.add_argument("--epoch", default=100, type=int)
parser.add_argument("--snapshot", default="")
parser.add_argument("--lr", default=0.0001, type=float)
parser.add_argument("--class_num", default=3)
parser.add_argument("--extract", default=True)
parser.add_argument("--weight_L2norm", default=0.05)
parser.add_argument("--weight_entropy", default=0.1, type=float)
parser.add_argument("--dropout_p", default=0.1, type=float)
parser.add_argument("--task", default='None', type=str)
parser.add_argument("--post", default='-1', type=str)
parser.add_argument("--repeat", default='-1', type=str)
parser.add_argument("--result", default='record')
parser.add_argument("--save", default=False, type=bool)
parser.add_argument("--lambda_val", default=1.0, type=float)
parser.add_argument("--entropy_thres", default=0.00000001, type=float)
parser.add_argument('--thres_rec', type=float, default=0.0001, help='coefficient for reconstruction loss')
parser.add_argument("--optimizer", default='Adam', type=str)
parser.add_argument('--GPU', type=bool, default=True,
help='enable train on GPU or not, default is False')
def guassian_kernel(source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):
n_samples = int(source.size()[0])+int(target.size()[0])
# resize for CWRU dataset
source = source.reshape(int(source.size(0)), int(source.size(1))* int(source.size(2)))
target = target.reshape(int(target.size(0)), int(target.size(1))* int(target.size(2)))
total = torch.cat([source, target], dim=0)
total0 = total.unsqueeze(0).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))
total1 = total.unsqueeze(1).expand(int(total.size(0)), int(total.size(0)), int(total.size(1)))
L2_distance = ((total0-total1)**2).sum(2)
if fix_sigma:
bandwidth = fix_sigma
else:
bandwidth = torch.sum(L2_distance.data) / (n_samples**2-n_samples)
bandwidth /= kernel_mul ** (kernel_num // 2)
bandwidth_list = [bandwidth * (kernel_mul**i) for i in range(int(kernel_num))]
kernel_val = [torch.exp(-L2_distance / bandwidth_temp) for bandwidth_temp in bandwidth_list]
return sum(kernel_val)
def MMDLoss(source, target):
kernel_num = 2.0
kernel_mul = 5
fix_sigma = None
batch_size = int(source.size()[0])
kernels = guassian_kernel(source, target, kernel_mul=kernel_mul, kernel_num=kernel_num, fix_sigma=fix_sigma)
XX = kernels[:batch_size, :batch_size]
YY = kernels[batch_size:, batch_size:]
XY = kernels[:batch_size, batch_size:]
YX = kernels[batch_size:, :batch_size]
loss = torch.mean(XX + YY - XY -YX)
return loss
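# Hedged sanity-check sketch (not part of the original FRAN training script); the tensor shapes
# below are assumptions chosen only to mimic the (batch, channels, length) layout fed to MMDLoss
# in the training loop. It is defined as a function so it never runs during normal training.
def _mmd_sanity_check():
    src = torch.randn(8, 2, 64)
    shifted = torch.randn(8, 2, 64) + 5.0
    # Identical batches should give (near) zero MMD, while a shifted batch should give a
    # noticeably larger value.
    return MMDLoss(src, src.clone()), MMDLoss(src, shifted)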
def minmax_norm(data):
min_v = np.min(data)
range_v = np.max(data) - min_v
data = (data - min_v) / range_v
return data
# classification loss
def get_cls_loss(pred, gt):
cls_loss = F.nll_loss(F.log_softmax(pred), gt)
return cls_loss
# compute entropy loss
def get_entropy_loss(p_softmax):
mask = p_softmax.ge(args.entropy_thres)
mask_out = torch.masked_select(p_softmax, mask)
entropy = -(torch.sum(mask_out * torch.log(mask_out)))
return args.weight_entropy * (entropy / float(p_softmax.size(0)))
# compute entropy
def HLoss(x):
b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)
b = -1.0 * b.sum()
return b
def load_data(domain):
input_domain = np.load(args.data_root+'CWRU_'+domain+'.npy', allow_pickle=True)
input_domain = input_domain.item()
input_N = input_domain['Normal']
input_OR = input_domain['OR']
input_IR = input_domain['IR']
# print (np.shape(input_IR), np.shape(input_OR), np.shape(input_N))
input_label_N = np.zeros([np.size(input_N,0),1])
input_label_OR = np.ones([np.size(input_OR,0),1])
input_label_IR = np.ones([np.size(input_IR,0),1])+1
data = np.concatenate((input_N, input_OR, input_IR) , axis=0)
print(np.shape(data))
label = np.concatenate((input_label_N, input_label_OR, input_label_IR), axis=0)
print(np.shape(label))
# shuffle inputs
nums = [x for x in range(np.size(data, axis = 0))]
random.shuffle(nums)
data = data[nums, :]
label = label[nums, :]
data = np.transpose(data, (0, 2, 1))
label = np.squeeze(label)
return data, label
if __name__ == "__main__":
args = parser.parse_args()
print_args(args)
t = time.time()
# load source data
source_data, source_label = load_data(args.source)
# load target data
target_data, target_label = load_data(args.target)
    # feed data to dataloader
source_data = Variable(torch.from_numpy(source_data).float(), requires_grad=False)
source_label= Variable(torch.from_numpy(source_label).long(), requires_grad=False)
target_data = Variable(torch.from_numpy(target_data).float(), requires_grad=False)
target_label= Variable(torch.from_numpy(target_label).long(), requires_grad=False)
source_dataset = TensorDataset(source_data, source_label)
target_dataset = TensorDataset(target_data, target_label)
source_loader = DataLoader(source_dataset,batch_size=args.batch_size)
target_loader = DataLoader(target_dataset,batch_size=args.batch_size)
source_loader_iter = iter(source_loader)
target_loader_iter = iter(target_loader)
# initialize model
netG = Generator(source='CWRU_'+args.source, target='CWRU_'+args.target)
netF = Classifier(source='CWRU_'+args.source, target='CWRU_'+args.target)
if args.GPU:
netG.cuda()
netF.cuda()
netG.apply(weights_init)
netF.apply(weights_init)
print ('Training using Adam')
opt_g = optim.Adam(netG.parameters(), lr=args.lr, weight_decay=0.0005)
opt_f = optim.Adam(netF.parameters(), lr=args.lr, weight_decay=0.0005)
max_correct = -1.0
correct_array = []
# start training
for epoch in range(1, args.epoch+1):
source_loader_iter = iter(source_loader)
target_loader_iter = iter(target_loader)
print(">>training " + args.task + " epoch : " + str(epoch))
netG.train()
netF.train()
tic = time.time()
for i, (t_imgs, _) in tqdm.tqdm(enumerate(target_loader_iter)):
try:
s_imgs, s_labels = source_loader_iter.next()
except:
source_loader_iter = iter(source_loader)
s_imgs, s_labels = source_loader_iter.next()
if s_imgs.size(0) != args.batch_size or t_imgs.size(0) != args.batch_size:
continue
if args.GPU:
s_imgs = Variable(s_imgs.cuda())
s_labels = Variable(s_labels.cuda())
t_imgs = Variable(t_imgs.cuda())
opt_g.zero_grad()
opt_f.zero_grad()
# apply feature extractor to input images
s_bottleneck = netG(s_imgs)
t_bottleneck = netG(t_imgs)
# get classification results
s_logit = netF(s_bottleneck)
t_logit = netF(t_bottleneck)
t_logit_entropy = HLoss(t_bottleneck)
s_logit_entropy = HLoss(s_bottleneck)
# get source domain classification error
s_cls_loss = get_cls_loss(s_logit, s_labels)
# compute entropy loss
t_prob = F.softmax(t_logit)
t_entropy_loss = get_entropy_loss(t_prob)
# MMFD loss
MMD = MMDLoss(s_bottleneck, t_bottleneck)
# Full loss function
loss = s_cls_loss + t_entropy_loss + args.lambda_val*MMD - args.thres_rec*(t_logit_entropy +s_logit_entropy)
loss.backward()
if (i+1) % 50 == 0:
print ("cls_loss: %.4f, MMD: %.4f, t_HLoss: %.4f, s_HLoss: %.4f" % (s_cls_loss.item(), args.lambda_val*MMD.item(), args.thres_rec*t_logit_entropy.item(), args.thres_rec*s_logit_entropy.item()))
opt_g.step()
opt_f.step()
print('Training time:', time.time()-tic)
# evaluate model
tic = time.time()
netG.eval()
netF.eval()
correct = 0
t_loader = DataLoader(target_dataset, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=args.num_workers)
for (t_imgs, t_labels) in t_loader:
if args.GPU:
t_imgs = Variable(t_imgs.cuda())
t_bottleneck = netG(t_imgs)
t_logit = netF(t_bottleneck)
pred = F.softmax(t_logit)
pred = pred.data.cpu().numpy()
pred = pred.argmax(axis=1)
t_labels = t_labels.numpy()
correct += np.equal(t_labels, pred).sum()
t_imgs = []
t_bottleneck = []
t_logit = []
pred = []
t_labels = []
# compute classification accuracy for target domain
correct = correct * 1.0 / len(target_dataset)
correct_array.append(correct)
if correct >= max_correct:
max_correct = correct
print('Test time:', time.time()-tic)
print ("Epoch {0} accuray: {1}; max acc: {2}".format(epoch, correct, max_correct))
# save results
print("max acc: ", max_correct)
max_correct = float("{0:.3f}".format(max_correct))
result = open(os.path.join(args.result, "FRAN_" + args.task + "_" + str(max_correct) +"_lr_"+str(args.lr)+'_lambda_' + str(args.lambda_val) + '_recons_' + str(args.thres_rec)+"_weight_entropy_"+str(args.weight_entropy)+".txt"), "a")
for c in correct_array:
result.write(str(c) + "\n")
result.write("Max: "+ str(max_correct) + "\n")
elapsed = time.time() - t
print("elapsed: ", elapsed)
result.write(str(elapsed) + "\n")
result.close()
``` |
{
"source": "JiahongHe/Personal-Facial-Identification-System",
"score": 3
} |
#### File: backendServer/userRegistration/tests.py
```python
from django.test import TestCase, Client
from django.urls import reverse, resolve
from .forms import registrationForm
class TestRegistration(TestCase):
# test the registration of the project
def setUp(self):
self.client = Client()
def test_view_registrationPage(self):
response_success = self.client.get(reverse('registrationPage'))
response_failure1 = self.client.get('invalidURL')
response_failure2 = self.client.get(reverse('register'))
self.assertEqual(resolve(reverse('registrationPage')).view_name, 'registrationPage')
self.assertEqual(response_success.status_code, 200)
self.assertNotEqual(response_failure1.status_code, 200)
self.assertNotEqual(response_failure2.status_code, 200)
self.assertIsInstance(response_success.context['form'], registrationForm)
def test_view_register(self):
        # The register view is only for receiving the form, so it should not accept any GET request,
        # and it only accepts POST requests from the exact page served by the registrationPage view, so it should also
        # deny any POST request made outside the registrationPage view.
response_failure1 = self.client.get(reverse('register'))
response_failure2 = self.client.post(registrationForm())
self.assertNotAlmostEqual(response_failure1.status_code, 200)
self.assertNotAlmostEqual(response_failure2.status_code, 200)
``` |
{
"source": "Jiahong-Nvidia/amazon-sagemaker-nvidia-ngc-examples",
"score": 3
} |
#### File: amazon-sagemaker-nvidia-ngc-examples/PyTorch_BYOC_BERT_Finetuning/helper_funcs.py
```python
import collections
from types import SimpleNamespace
RawResult = collections.namedtuple("RawResult", ["start_logits", "end_logits"])
from model_utils.tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize)
import math
import torch
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = <NAME>
# orig_text = <NAME>
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "<NAME>".
#
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
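# Hedged illustration (not part of the original helper module): with made-up strings, the
# alignment above projects a prediction produced on normalized WordPiece text back onto the
# original casing and punctuation.
def _get_final_text_example():
    """Toy example; the inputs are assumptions chosen only to show the intended behavior."""
    # The prediction "steve smith" came from lower-cased, punctuation-split text; the span we
    # want to surface is the original-cased "Steve Smith" (not the over-long "Steve Smith's").
    return get_final_text("steve smith", "Steve Smith's cat", do_lower_case=True)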
def preprocess_tokenized_text(doc_tokens, query_tokens, tokenizer,
max_seq_length, max_query_length):
""" converts an example into a feature """
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# truncate if too long
length = len(all_doc_tokens)
length = min(length, max_tokens_for_doc)
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(length):
token_to_orig_map[len(tokens)] = tok_to_orig_index[i]
token_is_max_context[len(tokens)] = True
tokens.append(all_doc_tokens[i])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
tensors_for_inference = {
'input_ids': input_ids,
'input_mask': input_mask,
'segment_ids': segment_ids
}
tensors_for_inference = SimpleNamespace(**tensors_for_inference)
tokens_for_postprocessing = {
'tokens': tokens,
'token_to_orig_map': token_to_orig_map,
'token_is_max_context': token_is_max_context
}
tokens_for_postprocessing = SimpleNamespace(**tokens_for_postprocessing)
return tensors_for_inference, tokens_for_postprocessing
def get_predictions(doc_tokens, tokens_for_postprocessing,
start_logits, end_logits, n_best_size,
max_answer_length, do_lower_case,
can_give_negative_answer, null_score_diff_threshold):
""" Write final predictions to the json file and log-odds of null if needed. """
result = RawResult(start_logits=start_logits, end_logits=end_logits)
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["start_index", "end_index", "start_logit", "end_logit"])
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
start_indices = _get_indices_of_largest_logits(result.start_logits)
end_indices = _get_indices_of_largest_logits(result.end_logits)
# if we could have irrelevant answers, get the min score of irrelevant
if can_give_negative_answer:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indices:
for end_index in end_indices:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(tokens_for_postprocessing.tokens):
continue
if end_index >= len(tokens_for_postprocessing.tokens):
continue
if start_index not in tokens_for_postprocessing.token_to_orig_map:
continue
if end_index not in tokens_for_postprocessing.token_to_orig_map:
continue
if not tokens_for_postprocessing.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]
)
)
if can_give_negative_answer:
prelim_predictions.append(
_PrelimPrediction(
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit
)
)
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True
)
_NbestPrediction = collections.namedtuple("NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = tokens_for_postprocessing.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = tokens_for_postprocessing.token_to_orig_map[pred.start_index]
orig_doc_end = tokens_for_postprocessing.token_to_orig_map[pred.end_index]
orig_tokens = doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# de-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
# get final text
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
# mark it
seen_predictions[final_text] = True
else: # this is a null prediction
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit
)
)
# if we didn't include the empty option in the n-best, include it
if can_give_negative_answer:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="",
start_logit=null_start_logit,
end_logit=null_end_logit
)
)
# In very rare edge cases we could only have single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest) == 1:
nbest.insert(0, _NbestPrediction(text="", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(_NbestPrediction(text="", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
# scoring
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
# get probabilities
probs = _compute_softmax(total_scores)
# nbest predictions into json format
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if can_give_negative_answer:
# predict "unknown" iff ((score_null - score_of_best_non-null_entry) > threshold)
score = best_non_null_entry.start_logit + best_non_null_entry.end_logit
score_diff = score_null - score
if score_diff > null_score_diff_threshold:
nbest_json[0]['text'] = "unknown"
# best_non_null_entry.text = "unknown"
#
return nbest_json
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
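# Hedged sanity check (not part of the original module): _compute_softmax is the usual
# numerically stable softmax, so its outputs are non-negative and sum to 1.
def _softmax_sanity_check():
    probs = _compute_softmax([1.0, 2.0, 3.0])
    assert abs(sum(probs) - 1.0) < 1e-9
    assert _compute_softmax([0.0, 0.0]) == [0.5, 0.5]
    return probs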
def _get_indices_of_largest_logits(logits):
""" sort logits and return the indices of the sorted array """
indices_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
indices = map(lambda x: x[0], indices_and_score)
indices = list(indices)
return indices
def preprocess_text_input(context='Danielle is a girl who really loves her cat, Steve.',
question='What cat does Danielle love?',
vocab_file='DeepLearningExamples/PyTorch/LanguageModeling/BERT/vocab/vocab',
max_seq_length=384, max_query_length=64, n_best_size=1, max_answer_length=30,
null_score_diff_threshold=-11.0):
tokenizer = BertTokenizer(vocab_file, do_lower_case=True, max_len=512)
doc_tokens = context.split()
query_tokens = tokenizer.tokenize(question)
feature = preprocess_tokenized_text(doc_tokens,
query_tokens,
tokenizer,
max_seq_length=max_seq_length,
max_query_length=max_query_length)
tensors_for_inference, tokens_for_postprocessing = feature
input_ids = torch.tensor(tensors_for_inference.input_ids, dtype=torch.long).unsqueeze(0)
segment_ids = torch.tensor(tensors_for_inference.segment_ids, dtype=torch.long).unsqueeze(0)
input_mask = torch.tensor(tensors_for_inference.input_mask, dtype=torch.long).unsqueeze(0)
    return (input_ids, segment_ids, input_mask)
``` |
{
"source": "JiaHongZ/DRNet",
"score": 3
} |
#### File: DRNet/utils/test.py
```python
import os
import cv2
import matplotlib.pyplot as plt
import random
def saltpepper_noise(img, proportion=0.05):
noise_img = img
height,width,c =noise_img.shape
    num = int(height*width*proportion)  # number of pixels to corrupt with salt-and-pepper noise
for i in range(num):
w = random.randint(0,width-1)
h = random.randint(0,height-1)
if random.randint(0,1) ==0:
noise_img[h,w,:] =0
else:
noise_img[h,w,:] = 255
return noise_img
image = img = cv2.imread('01.png', 1)  # flag 1 = cv2.IMREAD_COLOR (use 0 / cv2.IMREAD_GRAYSCALE for grayscale)
print(image.shape)
image = saltpepper_noise(image)
cv2.imwrite('1.png',image)
``` |
{
"source": "JIAHONGZHANG/pythonlearn",
"score": 3
} |
#### File: Assignment1/Ass1_Q2/Ass1_Q2.py
```python
import sys
import datetime
from collections import defaultdict
def row_exchange(a):
temp_a = []
temp_a.append(a[7])
temp_a.append(a[6])
temp_a.append(a[5])
temp_a.append(a[4])
temp_a.append(a[3])
temp_a.append(a[2])
temp_a.append(a[1])
temp_a.append(a[0])
return temp_a
def right_circular_shift(a):
temp_a = []
temp_a.append(a[3])
temp_a.append(a[0])
temp_a.append(a[1])
temp_a.append(a[2])
temp_a.append(a[5])
temp_a.append(a[6])
temp_a.append(a[7])
temp_a.append(a[4])
return temp_a
def middle_clockwise_rotation(a):
temp_a = []
temp_a.append(a[0])
temp_a.append(a[6])
temp_a.append(a[1])
temp_a.append(a[3])
temp_a.append(a[4])
temp_a.append(a[2])
temp_a.append(a[5])
temp_a.append(a[7])
return temp_a
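# Hedged illustration (not used by the search below): the effect of each move on the solved
# configuration [1, 2, 3, 4, 5, 6, 7, 8], derived directly from the index shuffles above.
def _example_moves():
    start = [1, 2, 3, 4, 5, 6, 7, 8]
    assert row_exchange(start) == [8, 7, 6, 5, 4, 3, 2, 1]
    assert right_circular_shift(start) == [4, 1, 2, 3, 6, 7, 8, 5]
    assert middle_clockwise_rotation(start) == [1, 7, 2, 4, 5, 3, 6, 8]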
def check_list(initial_list, need_list):
    for i in range(8):
        if initial_list[i] != need_list[i]:
            return False
    return True
library = defaultdict(list)
main_library = set()
main_library.add(str([1,2,3,4,5,6,7,8]))
str_list = input("Input final configuration: ")
str_list = str_list.replace(' ','')
try:
int_list = []
for i in str_list:
int_list.append(int(i))
except ValueError:
print('Incorrect configuration, giving up...')
#end_time = datetime.datetime.now()
#print('program is running in '+str(end_time - start_time))
sys.exit()
if len(int_list) !=8:
print('Incorrect configuration, giving up...')
#end_time = datetime.datetime.now()
#print('program is running in '+str(end_time - start_time))
sys.exit()
if (1 in int_list and 2 in int_list and 3 in int_list and 4 in int_list\
and 5 in int_list and 6 in int_list\
and 7 in int_list and 8 in int_list) == False:
print('Incorrect configuration, giving up...')
#end_time = datetime.datetime.now()
#print('program is running in '+str(end_time - start_time))
sys.exit()
initial_int_list =[1,2,3,4,5,6,7,8]
library[0] = [1,2,3,4,5,6,7,8]
count_step = 0
start_time = datetime.datetime.now()
if str(int_list) in main_library:
print("0 steps is needed to reach the final configuration.")
end_time = datetime.datetime.now()
print('program is running in '+str(end_time - start_time))
sys.exit()
library[count_step +1].append(row_exchange(initial_int_list))
main_library.add(str(row_exchange(initial_int_list)))
library[count_step +1].append(right_circular_shift(initial_int_list))
main_library.add(str(right_circular_shift(initial_int_list)))
library[count_step +1].append(middle_clockwise_rotation(initial_int_list))
main_library.add(str(middle_clockwise_rotation(initial_int_list)))
count_step = count_step +1
if str(int_list) in main_library:
print("1 steps is needed to reach the final configuration.")
end_time = datetime.datetime.now()
print('program is running in '+str(end_time - start_time))
sys.exit()
while 1:
if str(int_list) in main_library:
print(f'{count_step} steps are needed to reach the final configuration.')
end_time = datetime.datetime.now()
print('program is running in '+str(end_time - start_time))
break
for item in library[count_step]:
if str(row_exchange(item)) not in main_library:
library[count_step +1].append(row_exchange(item))
main_library.add(str(row_exchange(item)))
if str(right_circular_shift(item)) not in main_library:
library[count_step +1].append(right_circular_shift(item))
main_library.add(str(right_circular_shift(item)))
if str(middle_clockwise_rotation(item)) not in main_library:
library[count_step +1].append(middle_clockwise_rotation(item))
main_library.add(str(middle_clockwise_rotation(item)))
count_step = count_step +1
```
#### File: Quizzes/quiz_8/quiz_8_solution.py
```python
import sys
from random import seed, randrange
from stack_adt import *
def display_grid():
for i in range(len(grid)):
print(' ', ' '.join(str(grid[i][j]) for j in range(len(grid[0]))))
def explore_depth_first(x, y, target):
directions = {'N': (-1, 0),'S': (1, 0), 'E': (0, 1), 'W': (0, -1)}
next_directions = {'': ('W', 'S', 'E', 'N'),
'N': ('W', 'E', 'N'),
'S': ('E', 'W', 'S'),
'E': ('N', 'S', 'E'),
'W': ('S', 'N', 'W')
}
states = Stack()
states.push(([(x, y)], [grid[x][y]], ''))
while not states.is_empty():
path, sums, previous_direction = states.pop()
if sums[-1] == target:
return path
x, y = path[-1]
for next_direction in next_directions[previous_direction]:
next_x, next_y = x + directions[next_direction][0], y + directions[next_direction][1]
if next_x not in range(10) or next_y not in range(10):
continue
if (next_x, next_y) in path:
continue
next_sum = sums[-1] + grid[next_x][next_y]
if next_sum > target:
continue
path_copy = list(path)
path_copy.append((next_x, next_y))
sums_copy = list(sums)
sums_copy.append(next_sum)
states.push((path_copy, sums_copy, next_direction))
try:
for_seed, bound, x, y, target = [int(x) for x in input('Enter five integers: ').split()]
if bound < 1 or x not in range(10) or y not in range(10) or target < 0:
raise ValueError
except ValueError:
print('Incorrect input, giving up.')
sys.exit()
seed(for_seed)
grid = [[randrange(bound) for _ in range(10)] for _ in range(10)]
print('Here is the grid that has been generated:')
display_grid()
path = explore_depth_first(x, y, target)
if not path:
print(f'There is no way to get a sum of {target} starting from ({x}, {y})')
else:
print('With North as initial direction, and exploring the space clockwise,')
print(f'the path yielding a sum of {target} starting from ({x}, {y}) is:')
print(path)
```
#### File: Quizzes/quiz_8/stack_adt.py
```python
class EmptyStackError(Exception):
def __init__(self, message):
self.message = message
class Stack:
def __init__(self):
self._data = []
def __len__(self):
return len(self._data)
def is_empty(self):
return len(self._data) == 0
def peek(self):
'''
>>> stack = Stack()
>>> stack.peek()
Traceback (most recent call last):
...
EmptyStackError: Cannot peek at top of empty stack
'''
if self.is_empty():
raise EmptyStackError('Cannot peek at top of empty stack')
return self._data[-1]
def push(self, datum):
self._data.append(datum)
def pop(self):
'''
>>> stack = Stack()
        >>> stack.pop()
Traceback (most recent call last):
...
EmptyStackError: Cannot pop from top of empty stack
'''
if self.is_empty():
raise EmptyStackError('Cannot pop from top of empty stack')
return self._data.pop()
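# Hedged usage sketch (not part of the original ADT): the stack is LIFO, which is what the
# depth-first search in quiz_8_solution.py relies on when it pushes successor states.
def _stack_demo():
    stack = Stack()
    stack.push(1)
    stack.push(2)
    assert stack.peek() == 2  # the most recently pushed item is on top
    assert stack.pop() == 2   # and it is the first one removed
    assert len(stack) == 1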
``` |
{
"source": "JiaHoungChou/2020-Data-Mining_HW2_2",
"score": 3
} |
#### File: JiaHoungChou/2020-Data-Mining_HW2_2/Data MIning 2020 HW2 (Boston Housing Price).py
```python
import numpy as np
import pandas as pd
import random
random.seed(123)
np.random.seed(123)
path= "/Users/ericchou/Desktop/PyCharm/Data Mining HW PROGRAM/Boston_Housing.csv"
with open(path, "r") as file:
database= pd.read_csv(file)
def shuffle(x):
list_= list(np.arange(0, len(x)).astype(int))
random.shuffle(list_)
x= x.reindex(list_)
return x
def one_hot_encoding(Dataframe_type, index_name):
n = len(Dataframe_type[index_name].value_counts().index)
category_index = np.array(Dataframe_type[index_name].value_counts().index)
variables = []
for i in range(0, len(category_index)):
word = category_index[i]
variables.append(index_name+ "_"+ str(word))
variables = np.array(variables)
one_hot_encoding = []
for i in range(0, len(Dataframe_type)):
category_data = Dataframe_type[index_name][i]
one_hot_encoding_row = []
for j in range(0, n):
if category_data == category_index[j]:
one_hot_encoding_row.append(1)
else:
one_hot_encoding_row.append(0)
one_hot_encoding.append(one_hot_encoding_row)
one_hot_encoding = np.array(one_hot_encoding)
new_dataframe = pd.DataFrame(one_hot_encoding, columns= variables)
return new_dataframe
def replace_one_hot_variable(dataframe_type, index_name, new_dataframe):
index = dataframe_type.columns.tolist()
n = len(new_dataframe.columns)
for i in range(0, n):
index.insert(index.index(index_name) + i, new_dataframe.columns[i])
dataframe_type = dataframe_type.reindex(columns=index)
dataframe_type = dataframe_type.drop([index_name], axis=1)
for j in range(0, n):
dataframe_type[new_dataframe.columns[j]] = new_dataframe[new_dataframe.columns[j]]
return dataframe_type
database= shuffle(database)
CHAS_dataframe= one_hot_encoding(Dataframe_type= database, index_name= "CHAS")
database= replace_one_hot_variable(dataframe_type= database, index_name= "CHAS", new_dataframe= CHAS_dataframe)
def normalization(X):
X_std= np.array(pd.DataFrame(X.std(axis= 0)).replace(0.0, 1)).ravel()
Z= (X- X.mean(axis= 0))/ X_std
return Z, X.mean(axis= 0), X_std
X= np.array(database.iloc[:, : -1])
y= np.array(database.iloc[:, -1: ])
Training_Length= 354
X_train, _, _= normalization(X[: Training_Length, :])
y_train, y_train_mean, y_train_std= normalization(y[: Training_Length])
X_test, _, _= normalization(X[Training_Length:, :])
Target= y[Training_Length: ].ravel()
def sigmoid(X_matrix_sigmoid):
X_matrix_sigmoid= np.array(X_matrix_sigmoid)
for i in range(X_matrix_sigmoid.shape[0]):
for j in range(X_matrix_sigmoid.shape[1]):
X_matrix_sigmoid[i][j]= 1/ (1+ np.exp(-X_matrix_sigmoid[i][j]))
return np.mat(X_matrix_sigmoid)
def cost_function(X_matrix):
X_matrix= np.array(X_matrix)
for i in range(X_matrix.shape[0]):
for j in range(X_matrix.shape[1]):
X_matrix[i][j]= np.array(X_matrix[i][j]* (1- X_matrix[i][j]))
return np.array(X_matrix)
def MSE(X_matrix, y_matrix):
X_matrix= np.array(X_matrix).ravel()
y_matrix= np.array(y_matrix).ravel()
return np.sum((y_matrix- X_matrix)**2)/ len(y_matrix)
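# Hedged illustration (not part of the original training script): MSE above is the mean of the
# squared element-wise differences, e.g. MSE(np.array([1.0, 2.0]), np.array([1.0, 4.0])) == 2.0.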
def forword_Backword_computation(X, target, eta, hidden_size, iteration):
target= np.mat(target).ravel()
X= np.hstack((np.ones((len(X), 1)), np.mat(X)))
m, n= X.shape
W_xh= np.mat(np.random.random(size= (hidden_size, n)))
B_xh= np.mat(np.random.random(size=(1, hidden_size)))
W_hy= np.mat(np.random.random(size= (target.shape[0], hidden_size)))
B_hy= np.mat(np.random.random(size= (1, target.shape[0])))
for i in range(1, iteration+ 1):
net_h= X* W_xh.T- B_xh
H_h= sigmoid(net_h)
net_j= H_h* W_hy.T- B_hy
deltas_j= (1/ H_h.shape[0])* (np.array(target.T)- np.array(net_j))
deltas_h= (1/ net_j.shape[0])* (np.array(target.T)- np.array(H_h))* (np.array(H_h)* (1- np.array(H_h)))
W_hy+= eta* deltas_j.T* H_h
B_hy+= -eta* np.sum(deltas_j, axis= 0)
W_xh+= eta* deltas_h.T* X
B_xh+= -eta* np.sum(deltas_h, axis= 0)
if i % 500== 0:
print("Iteration= %5d, Mean Square Error %4.2f"%(i, MSE(net_j, target)))
return W_xh, np.array(B_xh), W_hy, np.array(B_hy)
W_xh, B_xh, W_hy, B_hy= forword_Backword_computation(X= X_train, target= y_train, eta= 0.01, hidden_size= 10, iteration= 10000)
### Training the performance of model for training dataset
X_train= np.hstack((np.ones((len(X_train), 1)), np.mat(X_train)))
y_hat_train= np.array(sigmoid(X_train* W_xh.T- B_xh)* W_hy.T- B_hy).ravel()
y_hat_train= (y_hat_train* y_train_std)+ y_train_mean
y_true_train= y[: Training_Length].ravel()
print("Mean Square Error : ", round(MSE(y_hat_train, y_true_train), 4), " ( Training Set )")
### Test the performance of model for test dataset
X_test= np.hstack((np.ones((len(X_test), 1)), np.mat(X_test)))
y_hat= np.array(sigmoid(X_test* W_xh.T- B_xh)* W_hy.T- B_hy).ravel()
y_hat= (y_hat* y_train_std)+ y_train_mean
y_true= Target.ravel()
print("Mean Square Error : ", round(MSE(y_hat, y_true), 4), " ( Test Set )")
```
#### File: JiaHoungChou/2020-Data-Mining_HW2_2/Data Mining 2020 HW2 (XOR Problem).py
```python
import numpy as np
import pandas as pd
np.random.seed(123)
database= pd.DataFrame({"X_1": [0, 0, 1, 1], "X_2": [0, 1, 0, 1], "Label": [0, 1, 1, 0]})
def Step_Function_Matrix(X_matrix):
X_matrix= np.array(X_matrix)
for i in range(X_matrix.shape[0]):
for j in range(X_matrix.shape[1]):
if X_matrix[i][j]>= 0.50:
X_matrix[i][j]= int(1)
else:
X_matrix[i][j]= int(0)
return np.mat(X_matrix)
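# Hedged illustration (not part of the original script): entries are thresholded at 0.5, so
# Step_Function_Matrix(np.array([[0.2, 0.7]])) yields matrix([[0., 1.]]).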
def sigmoid(X_matrix):
X_matrix= np.array(X_matrix)
for i in range(X_matrix.shape[0]):
for j in range(X_matrix.shape[1]):
X_matrix[i][j]= 1/ (1+ np.exp(-X_matrix[i][j]))
return np.mat(X_matrix)
def cost_function(X_matrix):
X_matrix= np.array(X_matrix)
for i in range(X_matrix.shape[0]):
for j in range(X_matrix.shape[1]):
X_matrix[i][j]= np.array(X_matrix[i][j]* (1- X_matrix[i][j]))
return np.array(X_matrix)
def accuracy_(prediction_, target_):
correct= 0
for i in range(0, len(prediction_)):
if np.sum(np.absolute((np.array(prediction_[i])- np.array(target_[i]))))== 0:
correct+= 1
else:
correct+= 0
    accuracy= (correct/len(prediction_))* 100
    return accuracy
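# Hedged illustration (not part of the original script): a row counts as correct only when it
# matches the target exactly, e.g.
# accuracy_(prediction_=[[0, 1], [1, 0]], target_=[[0, 1], [1, 1]]) == 50.0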
def forword_Backword_computation(X, label, eta, hidden_size, iteration):
label= np.mat(label)
X= np.hstack((np.ones((len(X), 1)), np.mat(X)))
m, n= X.shape
W_xh= np.mat(np.random.random(size= (hidden_size, n)))
B_xh= np.mat(np.random.random(size=(m, hidden_size)))
W_hy= np.mat(np.random.random(size= (label.shape[0], hidden_size)))
B_hy= np.mat(np.random.random(size=(m, label.shape[0])))
for i in range(1, iteration+ 1):
net_h= X* W_xh.T- B_xh
H_h= sigmoid(net_h)
net_j= H_h* W_hy.T- B_hy
y_j= sigmoid(net_j)
deltas_j= np.mat(cost_function(X_matrix= y_j)* np.array(label.T- y_j))
deltas_h= (((np.array(cost_function(X_matrix= H_h)))* np.array(np.sum(W_hy, axis= 0)))* np.array(np.sum(deltas_j, axis= 1)))
W_hy+= eta* deltas_j.T* H_h
B_hy+= -1* eta* deltas_j
W_xh+= eta* deltas_h.T* X
B_xh+= (-1* eta)* deltas_h
class_matrix= Step_Function_Matrix(y_j)
loss= accuracy_(prediction_= class_matrix, target_= label.T)
if i % 100== 0:
print("Iteration= %5d Training Accuracy= %4.2f"%(i, loss))
return W_xh, B_xh, W_hy, B_hy
X= np.array(database[["X_1", "X_2"]])
y= np.array(database["Label"])
W_xh_, B_xh_, W_hy_, B_hy_= forword_Backword_computation(X= X, label= y, eta= 0.1, hidden_size= 4, iteration= 1000)
X_XOR= X
y_XOR= y
XOR_test_x_matrix= np.hstack((np.ones((len(X_XOR), 1)), np.mat(X_XOR)))
XOR_test_label= Step_Function_Matrix(sigmoid(sigmoid(XOR_test_x_matrix* W_xh_.T- B_xh_)* W_hy_.T- B_hy_))
print()
print("========== XOR Problem ===========")
print("*Test_label (Target= 0, 1, 1, 0):\n", np.array(XOR_test_label).ravel(), "\n")
print("BPN can solve the XOR problem, when test the model.")
``` |
{
"source": "JiaHsin/ftf-re-api",
"score": 2
} |
#### File: transform_layer/services/data_service.py
```python
from pandas.core.frame import DataFrame
import dateutil.parser as parser
import pandas as pd
from django.db import connections
from transform_layer.services.utils import date_str_to_int, get_control_query
import copy
import time
SCOPE_HIERARCHY = "hierarchy"
SCOPE_GEOGRAPHY = "geography"
#1 instance of DataService for 1 scope
class DataService:
def __init__(self, scope):
self._fact_services = None
self._service_types = None
self._family_services = None
self._new_familiy_services = None
#[monthly, weekly, daily] skeletons
self._date_skeletons = None
self.scope_type = scope["scope_type"]
self.control_query = get_control_query(scope["control_type_name"])
self.scope_field = scope["scope_field"]
self.scope_value = scope["scope_field_value"]
self.start_date = date_str_to_int(scope["startDate"])
self.end_date = date_str_to_int(scope["endDate"])
## returns DataFrame for a specific data definition
def get_data_for_definition(self, id):
if id>=1 and id <= 22:
if(self._fact_services) is None:
self._fact_services = self.__get_fact_services()
return self._fact_services
elif id <= 25:
if(self._service_types) is None:
self._service_types = self.__get_service_types()
return self._service_types
elif id <= 31:
if(self._family_services) is None:
self._family_services = self.__get_family_services()
return self._family_services
elif id <= 56:
#used same base data for new families(32-46) and geographies(47-56)
if(self._new_familiy_services) is None:
self._new_familiy_services = self.__get_new_family_services()
return self._new_familiy_services
elif id <= 68:
if(self._new_familiy_services) is None:
self._new_familiy_services = self.__get_new_family_services()
if(self._date_skeletons) is None:
self._date_skeletons = self.__get_date_skeletons()
#list[0] = services
#list[1] = families
#list[2] = members
#list[3] = monthly_date_skeleton
#list[4] = weekly_date_skeleton
#list[5] = daily_date_skeleton
return self._new_familiy_services + self._date_skeletons
## retrieves fact_services
def __get_fact_services(self):
conn = connections['source_db']
table1 = ""
left1 = right1 = ""
if self.scope_type == "hierarchy":
table1 = "dim_hierarchies"
left1 = right1 = "hierarchy_id"
elif self.scope_type == "geography":
table1 = "dim_geos"
left1 = "dimgeo_id"
right1 = "id"
query = f"""
SELECT
fs.research_service_key,
fs.{left1},
fs.service_status,
fs.service_id,
fs.research_family_key,
fs.served_children,
fs.served_adults,
fs.served_seniors,
fs.served_total,
fsm.research_member_key
FROM
fact_services AS fs
INNER JOIN dim_service_types ON fs.service_id = dim_service_types.id
LEFT JOIN {table1} AS t1 ON fs.{left1} = t1.{right1}
LEFT JOIN dim_service_statuses ON fs.service_status = dim_service_statuses.status
LEFT JOIN fact_service_members AS fsm ON fs.research_service_key = fsm.research_service_key
WHERE
fs.service_status = 17
{self.control_query}
AND t1.{self.scope_field} = {self.scope_value}
AND fs.date >= {self.start_date} AND fs.date <= {self.end_date}
"""
start_time = time.time()
result = pd.read_sql(query, conn)
print(str(time.time() - start_time), ' seconds to download fact services')
mem_usage = result.memory_usage(deep=True).sum()
print(str(mem_usage), 'bytes for fact services')
return result
def __get_service_types(self):
conn = connections['source_db']
extra_join = ""
if self.scope_type == "hierarchy":
table1 = "dim_hierarchies"
left1 = right1 = "hierarchy_id"
elif self.scope_type == "geography":
table1 = "dim_geos"
left1 = "dimgeo_id"
right1 = "id"
extra_join = """INNER JOIN dim_hierarchies ON fact_services.hierarchy_id = dim_hierarchies.loc_id"""
query = f"""
SELECT
fact_services.research_service_key,
fact_services.research_family_key,
fact_services.service_id,
dim_service_types.name AS service_name,
dim_service_types.service_category_code,
dim_service_types.service_category_name,
fact_services.served_total,
dim_hierarchies.loc_id
FROM
fact_services
INNER JOIN dim_service_types ON fact_services.service_id = dim_service_types.id
INNER JOIN {table1} ON fact_services.{left1} = {table1}.{right1}
{extra_join if self.scope_type == "geography" else ""}
WHERE
fact_services.service_status = 17
{self.control_query}
AND fact_services.date >= {self.start_date} AND fact_services.date <= {self.end_date}
AND {table1}.{self.scope_field} = {self.scope_value}
"""
start_time = time.time()
result = pd.read_sql(query, conn)
print(str(time.time() - start_time), ' seconds to download service types')
mem_usage = result.memory_usage(deep=True).sum()
print(str(mem_usage), 'bytes for service types')
return result
def __get_family_services(self):
conn = connections['source_db']
table1 = ""
left1 = right1 = ""
if self.scope_type == "hierarchy":
table1 = "dim_hierarchies"
left1 = right1 = "hierarchy_id"
elif self.scope_type == "geography":
table1 = "dim_geos"
left1 = "dimgeo_id"
right1 = "id"
query = f"""
SELECT
fact_services.research_family_key,
COUNT(fact_services.research_service_key) AS num_services,
AVG(fact_services.served_total) AS avg_fam_size,
SUM(fact_services.is_first_service_date) as timeframe_has_first_service_date,
AVG(fact_services.days_since_first_service) AS avg_days_since_first_service,
MAX(fact_services.days_since_first_service) AS max_days_since_first_service,
dim_family_compositions.family_composition_type
FROM
fact_services
INNER JOIN dim_families ON fact_services.research_family_key = dim_families.research_family_key
INNER JOIN dim_family_compositions ON dim_families.family_composition_type = dim_family_compositions.id
INNER JOIN dim_service_types ON fact_services.service_id = dim_service_types.id
INNER JOIN {table1} ON fact_services.{left1} = {table1}.{right1}
WHERE
fact_services.service_status = 17
{self.control_query}
AND fact_services.date >= {self.start_date} AND fact_services.date <= {self.end_date}
AND {table1}.{self.scope_field} = {self.scope_value}
GROUP BY
fact_services.research_family_key,
dim_family_compositions.family_composition_type
"""
start_time = time.time()
result = pd.read_sql(query, conn)
print(str(time.time() - start_time), ' seconds to download family services')
mem_usage = result.memory_usage(deep=True).sum()
print(str(mem_usage), 'bytes for family services')
return result
def __get_new_family_services(self):
conn = connections['source_db']
if self.scope_type == "hierarchy":
table1 = "dim_hierarchies"
left1 = right1 = "hierarchy_id"
elif self.scope_type == "geography":
table1 = "dim_geos"
left1 = "dimgeo_id"
right1 = "id"
services_query = f"""
SELECT
fs.research_service_key,
fs.research_family_key,
fs.service_id,
fs.hierarchy_id,
dim_hierarchies.event_id,
dim_hierarchies.loc_id,
dim_geos_event.fips_cnty AS fips_cnty_event,
dim_service_types.name as service_name,
dim_service_types.service_category_code,
dim_service_types.service_category_name,
fs.served_total,
fs.is_first_service_date,
fs.served_children,
fs.served_adults,
fs.served_seniors,
fs.family_composition_type,
dim_geos.lattitude AS latitude_fs,
dim_geos.longitude AS longitude_fs,
dim_geos.fips_cnty AS fips_cnty_fs,
fs.dummy_trip,
fs.distance_miles,
fs.direction,
fs.date,
dim_dates.calendaryearmonth AS calendaryearmonth,
dim_dates.sunyearweek AS sunyearweek,
dim_dates.dayofweek AS dayofweek,
dim_hierarchy_events.name AS event_name
FROM
fact_services AS fs
INNER JOIN dim_service_types ON fs.service_id = dim_service_types.id
INNER JOIN dim_hierarchies ON fs.hierarchy_id = dim_hierarchies.hierarchy_id
INNER JOIN dim_dates ON fs.date = dim_dates.date_key
INNER JOIN dim_hierarchy_events ON dim_hierarchies.event_id = dim_hierarchy_events.id
LEFT JOIN dim_geos ON fs.dimgeo_id = dim_geos.id
LEFT JOIN dim_geos AS dim_geos_event ON dim_hierarchy_events.dimgeo_id = dim_geos_event.id
WHERE
fs.service_status = 17
{self.control_query}
AND fs.date >= {self.start_date} AND fs.date <= {self.end_date}
AND {table1}.{self.scope_field} = {self.scope_value}
"""
families_query = f"""
SELECT
fs.research_family_key,
COUNT( fs.research_service_key ) AS num_services,
AVG( fs.served_total ) AS avg_fam_size,
SUM( fs.is_first_service_date ) AS timeframe_has_first_service_date,
AVG( fs.days_since_first_service ) AS avg_days_since_first_service,
MAX( fs.days_since_first_service ) AS max_days_since_first_service,
dim_family_compositions.family_composition_type,
dim_families.datekey_first_service,
dim_families.dummy_use_geo,
dim_families.latitude_5,
dim_families.longitude_5,
dim_families.dimgeo_id,
dim_geos.fips_state,
dim_geos.fips_cnty,
dim_geos.fips_zcta
FROM
fact_services AS fs
INNER JOIN dim_families ON fs.research_family_key = dim_families.research_family_key
INNER JOIN dim_family_compositions ON dim_families.family_composition_type = dim_family_compositions.id
INNER JOIN dim_service_types ON fs.service_id = dim_service_types.id
INNER JOIN dim_dates ON fs.date = dim_dates.date_key
INNER JOIN {table1} AS t1 ON fs.{left1} = t1.{right1}
LEFT JOIN dim_geos ON dim_families.dimgeo_id = dim_geos.id
WHERE
fs.service_status = 17
{self.control_query}
AND fs.date >= {self.start_date} AND fs.date <= {self.end_date}
AND t1.{self.scope_field} = {self.scope_value}
GROUP BY
fs.research_family_key,
dim_family_compositions.family_composition_type,
dim_families.datekey_first_service,
dim_families.dummy_use_geo,
dim_families.latitude_5,
dim_families.longitude_5,
dim_families.dimgeo_id,
dim_geos.fips_state,
dim_geos.fips_cnty,
dim_geos.fips_zcta
"""
members_query = f"""
SELECT
fs_mem.research_member_key,
dim_members.research_family_key,
COUNT( fs.research_service_key ) AS num_services,
SUM( fs_mem.is_first_service_date ) AS timeframe_has_first_service_date,
AVG( fs_mem.days_since_first_service ) AS avg_days_since_first_service,
MAX( fs_mem.days_since_first_service ) AS max_days_since_first_service,
dim_members.datekey_first_served,
dim_members.gender,
dim_members.current_age,
dim_members.race_id,
dim_members.ethnic_id,
dim_members.immigrant_id,
dim_members.language_id,
dim_members.disability_id,
dim_members.military_id,
dim_members.healthcare_id,
dim_members.education_id,
dim_members.employment_id,
dim_families.datekey_first_service AS dim_families_datekey_first_service,
SUM( fs.is_first_service_date ) AS dim_families_timeframe_has_first_service_date,
dim_geos.fips_state,
dim_geos.fips_cnty,
dim_geos.fips_zcta
FROM
fact_services AS fs
INNER JOIN dim_service_types ON fs.service_id = dim_service_types.id
INNER JOIN {table1} AS t1 ON fs.{left1} = t1.{right1}
INNER JOIN dim_dates ON fs.date = dim_dates.date_key
INNER JOIN fact_service_members AS fs_mem ON fs.research_service_key = fs_mem.research_service_key
INNER JOIN dim_members ON fs_mem.research_member_key = dim_members.research_member_key
INNER JOIN dim_families ON dim_members.research_family_key = dim_families.research_family_key
LEFT JOIN dim_geos ON dim_families.dimgeo_id = dim_geos.id
WHERE
fs.service_status = 17
{self.control_query}
AND t1.{self.scope_field} = {self.scope_value}
AND fs.date >= {self.start_date} AND fs.date <= {self.end_date}
GROUP BY
fs_mem.research_member_key
"""
print("Services Query:")
print(services_query)
print("Families Query:")
print(families_query)
print("Members Query")
print(members_query)
start_time = time.time()
services = pd.read_sql(services_query, conn)
families = pd.read_sql(families_query, conn)
members = pd.read_sql(members_query, conn)
print(str(time.time() - start_time), ' seconds to download new family services')
mem_usage = services.memory_usage(deep=True).sum() + families.memory_usage(deep=True).sum() + members.memory_usage(deep=True).sum()
print(str(mem_usage), 'bytes for new family services')
return [services, families, members]
def __get_monthly_date_skeleton(self):
conn = connections['source_db']
query_skeleton_month = f"""
SELECT
dim_dates.CalendarYearMonth as calendaryearmonth,
MIN(dim_dates.FullDate) as calendaryearmonth_start,
CONCAT(dim_dates.MonthName, ' - ', dim_dates.CalendarYear) as calendaryearmonth_name
FROM
dim_dates
WHERE
dim_dates.date_key >= {self.start_date} AND dim_dates.date_key <= {self.end_date}
GROUP BY dim_dates.CalendarYearMonth
"""
start_time = time.time()
skeleton = pd.read_sql(query_skeleton_month, conn)
print(str(time.time() - start_time), ' seconds to download monthly date skeleton')
mem_usage = skeleton.memory_usage(deep=True).sum()
print(str(mem_usage), 'bytes for monthly date skeleton')
return skeleton
def __get_weekly_date_skeleton(self):
conn = connections['source_db']
query_skeleton_week = f"""
SELECT
dim_dates.SunYearWeek AS sunyearweek,
MIN(dim_dates.date_key) as sunyearweek_start
FROM
dim_dates
WHERE
dim_dates.date_key >= {self.start_date}
AND dim_dates.date_key <= {self.end_date}
GROUP BY
dim_dates.SunYearWeek
"""
start_time = time.time()
skeleton = pd.read_sql(query_skeleton_week, conn)
print(str(time.time() - start_time), ' seconds to download weekly date skeleton')
mem_usage = skeleton.memory_usage(deep=True).sum()
print(str(mem_usage), 'bytes for weekly date skeleton')
return skeleton
def __get_daily_date_skeleton(self):
conn = connections['source_db']
query_skeleton_day = f"""
SELECT
dim_dates.date_key as date,
dim_dates.FullDate as date_label
FROM dim_dates
WHERE
dim_dates.date_key >= {self.start_date}
AND dim_dates.date_key <= {self.end_date}
"""
start_time = time.time()
skeleton = pd.read_sql(query_skeleton_day, conn)
print(str(time.time() - start_time), ' seconds to download daily date skeleton')
mem_usage = skeleton.memory_usage(deep=True).sum()
print(str(mem_usage), 'bytes for daily date skeleton')
return skeleton
def __get_date_skeletons(self):
monthly = self.__get_monthly_date_skeleton()
weekly = self.__get_weekly_date_skeleton()
daily = self.__get_daily_date_skeleton()
return [monthly, weekly, daily]
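# Hedged usage sketch (not part of the original service layer). The scope dictionary below is
# an assumption pieced together from __init__ above; in the real pipeline these keys arrive in
# the report request, and "control_type_name" is simply forwarded to get_control_query().
def _example_usage():
    scope = {
        "scope_type": SCOPE_HIERARCHY,
        "control_type_name": "example_control",  # hypothetical value
        "scope_field": "hierarchy_id",            # hypothetical value
        "scope_field_value": 1,                   # hypothetical value
        "startDate": "2020-01-01",                # whatever format date_str_to_int expects
        "endDate": "2020-12-31",
    }
    service = DataService(scope)
    # Definition ids 1-22 share one cached fact_services frame; ids 32-56 share the
    # [services, families, members] list, so repeated calls avoid re-querying the source DB.
    return service.get_data_for_definition(1)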
``` |
{
"source": "jiahsuanlo/artificial-intelligence",
"score": 3
} |
#### File: Exercises/4_Game Class/search.py
```python
from minimax_iterative_deepening import minimax_decision
def get_action(gameState, depth_limit):
# TODO: Implement a function that calls minimax_decision
# for each depth from 1...depth_limit (inclusive of both endpoints)
for d in range(1,depth_limit+1):
best_action= minimax_decision(gameState, d)
return best_action
```
#### File: Projects/3_Adversarial Search/my_custom_player.py
```python
from sample_players import DataPlayer
# board array dimensions and bitboard size
_WIDTH = 11
_HEIGHT = 9
_SIZE = (_WIDTH + 2) * _HEIGHT - 2
class CustomPlayer(DataPlayer):
""" Implement your own agent to play knight's Isolation
The get_action() method is the only required method for this project.
You can modify the interface for get_action by adding named parameters
with default values, but the function MUST remain compatible with the
default interface.
**********************************************************************
NOTES:
- The test cases will NOT be run on a machine with GPU access, nor be
suitable for using any other machine learning techniques.
- You can pass state forward to your agent on the next turn by assigning
any pickleable object to the self.context attribute.
**********************************************************************
"""
def get_action(self, state):
""" Employ an adversarial search technique to choose an action
        available in the current state.

        This method must call self.queue.put(ACTION) at least once, and may
call it as many times as you want; the caller will be responsible
for cutting off the function after the search time limit has expired.
See RandomPlayer and GreedyPlayer in sample_players for more examples.
**********************************************************************
NOTE:
- The caller is responsible for cutting off search, so calling
get_action() from your own code will create an infinite loop!
Refer to (and use!) the Isolation.play() function to run games.
**********************************************************************
"""
# TODO: Replace the example implementation below with your own search
# method by combining techniques from lecture
#
# EXAMPLE: choose a random move without any search--this function MUST
# call self.queue.put(ACTION) at least once before time expires
# (the timer is automatically managed for you)
import random
#self.queue.put(random.choice(state.actions()))
# self.score= self.score_moves
# self.score= self.score_center
# self.score= self.score_moves2
self.score= self.score_progression
depth_limit= 5
if state.ply_count < 2:
self.queue.put(random.choice(state.actions()))
else:
self.queue.put(self.minimax_decision(state, depth_limit))
def score_moves(self,gameState):
""" Default evaluation function:
number of current player liberties - number of opposing player liberties
"""
own_loc = gameState.locs[self.player_id]
opp_loc = gameState.locs[1 - self.player_id]
own_liberties = gameState.liberties(own_loc)
opp_liberties = gameState.liberties(opp_loc)
return len(own_liberties) - len(opp_liberties)
def score_moves2(self,gameState):
""" Extended liberty differences evaluation function
This evaluation function is defined as the difference in the number of
extended liberties between current and opposing players. The extended
liberties are defined as the allowable moves for the current location as
well as all of next possible locations
"""
own_loc = gameState.locs[self.player_id]
opp_loc = gameState.locs[1 - self.player_id]
own_liberties = gameState.liberties(own_loc)
opp_liberties = gameState.liberties(opp_loc)
# Calculate the liberties for each liberty location of the current player
own_set= set(own_liberties)
for o in own_liberties:
own_set.update(gameState.liberties(o))
# Calculate the liberties for each liberty location of the opposing player
opp_set= set(opp_liberties)
for o in opp_liberties:
opp_set.update(gameState.liberties(o))
return len(own_set) - len(opp_set)
def score_center(self,gameState):
""" Evaluation function for occupying center location
This evaluation function rewards the current player to be as close
to center location as possible, and rewards the opposing player to
be as far away from center as possible
"""
own_loc = gameState.locs[self.player_id]
opp_loc = gameState.locs[1 - self.player_id]
center_loc= _SIZE//2
own_x= own_loc%(_WIDTH+2)
own_y= own_loc//(_WIDTH+2)
opp_x= opp_loc%(_WIDTH+2)
opp_y= opp_loc//(_WIDTH+2)
center_x= center_loc%(_WIDTH+2)
center_y= center_loc//(_WIDTH+2)
# own player needs to be close to center and opp needs to be far
# away from center
own_center_dist = abs(own_x - center_x) + abs(own_y - center_y)
opp_center_dist = abs(opp_x - center_x) + abs(opp_y - center_y)
return -own_center_dist + opp_center_dist
def score_progression(self, gameState):
# count the open locations
n_open_spots= bin(gameState.board).count('1')
if n_open_spots >80:
final_score= self.score_center(gameState)
elif n_open_spots >60:
final_score= self.score_moves(gameState)
else:
final_score= self.score_moves2(gameState)
return final_score
# TODO: modify the function signature to accept an alpha and beta parameter
def min_value(self, gameState, alpha, beta, depth=1):
""" Return the value for a win (+1) if the game is over,
otherwise return the minimum value over all legal child
nodes.
"""
if gameState.terminal_test():
return gameState.utility(self.player_id)
# New conditional depth limit cutoff
if depth <= 0: # "==" could be used, but "<=" is safer
return self.score(gameState)
v = float("inf")
for a in gameState.actions():
# TODO: modify the call to max_value()
v = min(v, self.max_value(gameState.result(a),alpha,beta,depth-1))
# TODO: update the value bound
if v<= alpha: return v
beta= min(v, beta)
return v
# TODO: modify the function signature to accept an alpha and beta parameter
def max_value(self, gameState, alpha, beta, depth= 1):
""" Return the value for a loss (-1) if the game is over,
otherwise return the maximum value over all legal child
nodes.
"""
if gameState.terminal_test():
return gameState.utility(self.player_id)
# New conditional depth limit cutoff
if depth <= 0: # "==" could be used, but "<=" is safer
return self.score(gameState)
v = float("-inf")
for a in gameState.actions():
# TODO: modify the call to min_value()
v = max(v, self.min_value(gameState.result(a), alpha, beta, depth-1))
# TODO: update the value bound
if v>=beta: return v
alpha= max(v,alpha)
return v
def minimax_decision(self,gameState, depth):
""" Return the move along a branch of the game tree that
has the best possible value. A move is a pair of coordinates
in (column, row) order corresponding to a legal move for
the searching player.
You can ignore the special case of calling this function
from a terminal state.
"""
alpha= float("-inf")
beta= float("inf")
best_score = float("-inf")
best_move = gameState.actions()[0]
for a in gameState.actions():
# call has been updated with a depth limit
v = self.min_value(gameState.result(a), alpha, beta, depth-1)
alpha= max(v,alpha)
if v > best_score:
best_score = v
best_move = a
return best_move
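# --- Illustrative note (added; not part of the original submission) ---------
# Alpha-beta bookkeeping used by min_value/max_value above:
#   alpha = best value the maximizing player can already guarantee on this path
#   beta  = best value the minimizing player can already guarantee on this path
# In min_value, once v <= alpha the node's final value can only get smaller,
# so the maximizer above would never pick this branch and the remaining
# children are skipped. Example: with alpha = 2, a min node that finds a child
# worth 1 can stop immediately. max_value mirrors this with the v >= beta test.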
``` |
{
"source": "jiahuanglin/pytorch-transformers",
"score": 2
} |
#### File: examples/lm_finetuning/finetune_on_pregenerated.py
```python
from argparse import ArgumentParser
from pathlib import Path
import os
import time
import glob
import torch
import logging
import json
import random
import numpy as np
import pandas as pd
from contextlib import contextmanager
from collections import namedtuple, defaultdict
from tempfile import TemporaryDirectory
import numba.cuda as profile_cuda
from tqdm import tqdm
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_transformers.modeling_bert import BertForPreTraining
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.optimizers import FP16_Optimizer
from apex.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
from utils import Timers
InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids is_next")
log_format = '%(asctime)-10s: %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)
def check_files(checkpoint_path, prefix, max_files=10):
"""
check_files
Check the number of checkpoints; if it exceeds max_files, delete the oldest ones
and return the latest checkpoint file path.
checkpoint_path: str, path to checkpoints
prefix: str, filename prefix of the checkpoint files to match
max_files: int, maximum number of checkpoints to retain
"""
try:
pattern = os.path.join(checkpoint_path, prefix + "*.tar")
checkpoint_files = glob.glob(pattern)
checkpoint_files.sort(key=lambda x: os.path.getmtime(x))
except FileNotFoundError:
return None
try:
latest_checkpoint = checkpoint_files[-1]
except IndexError:
# No checkpoint files, list is empty!
latest_checkpoint = None
print("CURRENTLY %d CHECKPOINTS" % len(checkpoint_files))
if len(checkpoint_files) > max_files:
logging.info("DELETE EXCESS CHECKPOINTS")
for idx, checkpoint_file in enumerate(checkpoint_files[:-max_files]):
if os.path.basename(checkpoint_file) == 'training_checkpoint_most_recent.tar': continue
logging.info("DELETE %s" % checkpoint_file)
os.remove(checkpoint_file)
return latest_checkpoint
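# Illustrative usage (added), mirroring how check_files is called in main():
#   latest = check_files(args.output_dir, args.bert_model, max_files=10)
# trims the oldest "<prefix>*.tar" checkpoints in the directory down to
# max_files and returns the newest one, or None if nothing matches.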
def save_checkpoint(model, optimizer, epoch, global_step, checkpoint_path, filename):
"""
save_checkpoint
Save the model and optimizer state in a dictionary
model: [class], torch model instance
optimizer: [class], torch optimizer instance
epoch: int, current epoch
global_step: int, current global step
checkpoint_path: string, path
filename: string, name of the checkpoint file
"""
logging.info("** ** * Saving fine-tuned model ** ** * ")
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path, exist_ok=True)
torch.save({"epoch": epoch,
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"global_step": global_step}, filename)
logging.info("** ** * Model saved! ** ** * ")
def restore_checkpoint(model, optimizer, checkpoint_file, device):
"""
Restores model and optimizer from a checkpoint file and returns checkpoint information.
Has side effect of loading the state_dict for model and optimizer (i.e. modifies the instances).
:param model: [class], torch model instance
:param optimizer: [class], torch optimizer instance
:param checkpoint_file: string, full file path
:param device: [class], torch device instance
:return: Tuple of the checkpoint values
"""
assert checkpoint_file
logging.info("** ** * Restore from checkpoint: %s" % checkpoint_file)
checkpoint_state = torch.load(checkpoint_file, map_location=device)
model.load_state_dict(checkpoint_state["model_state_dict"])
optimizer.load_state_dict(checkpoint_state["optimizer_state_dict"])
last_epoch = checkpoint_state["epoch"]
global_step = checkpoint_state["global_step"]
logging.info(" RESTORED AT epoch:%d-%s, global_step:%d" % (last_epoch, global_step))
logging.info("** ** * Model restored! ** ** * ")
# model.train() # Do this in calling code for now, maybe want model.eval() there instead
return last_epoch, global_step
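# Illustrative resume flow (added), matching the calls later in main():
#   latest = check_files(args.output_dir, args.bert_model)
#   last_epoch, global_step = restore_checkpoint(model, optimizer, latest, device)
#   model.train()  # restore_checkpoint leaves the train/eval mode unchanged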
def convert_example_to_features(example, tokenizer, max_seq_length):
tokens = example["tokens"]
segment_ids = example["segment_ids"]
is_random_next = example["is_random_next"]
masked_lm_positions = example["masked_lm_positions"]
masked_lm_labels = example["masked_lm_labels"]
assert len(tokens) == len(segment_ids) <= max_seq_length # The preprocessed data should be already truncated
input_ids = tokenizer.convert_tokens_to_ids(tokens)
masked_label_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels)
input_array = np.zeros(max_seq_length, dtype=np.int)
input_array[:len(input_ids)] = input_ids
mask_array = np.zeros(max_seq_length, dtype=np.bool)
mask_array[:len(input_ids)] = 1
segment_array = np.zeros(max_seq_length, dtype=np.bool)
segment_array[:len(segment_ids)] = segment_ids
lm_label_array = np.full(max_seq_length, dtype=np.int, fill_value=-1)
lm_label_array[masked_lm_positions] = masked_label_ids
features = InputFeatures(input_ids=input_array,
input_mask=mask_array,
segment_ids=segment_array,
lm_label_ids=lm_label_array,
is_next=is_random_next)
return features
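# Note (added): positions that are not masked keep the fill value -1 in
# lm_label_array; the masked-LM loss treats that value as an ignore index,
# so only the masked positions contribute to the loss.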
class PregeneratedDataset(Dataset):
def __init__(self, training_path, epoch, chunk, tokenizer, num_data_epochs, reduce_memory=False):
self.vocab = tokenizer.vocab
self.tokenizer = tokenizer
self.epoch = epoch
self.data_epoch = epoch % num_data_epochs
data_file = training_path / f"epoch_{self.data_epoch}-{chunk}.json"
data_zip = training_path / f"epoch_{self.data_epoch}-{chunk}.zip"
if not os.path.isfile(data_file):
# If file not there, then there should be a zip file that extracts to it
extract_zip(data_zip)
assert os.path.isfile(data_file)
logging.info('Training on: {}'.format(data_file))
metrics_file = training_path / f"metrics_epoch_{self.data_epoch}-{chunk}.json"
assert data_file.is_file() and metrics_file.is_file()
metrics = json.loads(metrics_file.read_text())
num_samples = metrics['num_training_examples']
seq_len = metrics['max_seq_len']
self.temp_dir = None
self.working_dir = None
if reduce_memory:
self.temp_dir = TemporaryDirectory()
self.working_dir = Path(self.temp_dir.name)
input_ids = np.memmap(filename=self.working_dir/'input_ids.memmap',
mode='w+', dtype=np.int32, shape=(num_samples, seq_len))
input_masks = np.memmap(filename=self.working_dir/'input_masks.memmap',
shape=(num_samples, seq_len), mode='w+', dtype=np.bool)
segment_ids = np.memmap(filename=self.working_dir/'segment_ids.memmap',
shape=(num_samples, seq_len), mode='w+', dtype=np.bool)
lm_label_ids = np.memmap(filename=self.working_dir/'lm_label_ids.memmap',
shape=(num_samples, seq_len), mode='w+', dtype=np.int32)
lm_label_ids[:] = -1
is_nexts = np.memmap(filename=self.working_dir/'is_nexts.memmap',
shape=(num_samples,), mode='w+', dtype=np.bool)
else:
input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32)
input_masks = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)
segment_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)
lm_label_ids = np.full(shape=(num_samples, seq_len), dtype=np.int32, fill_value=-1)
is_nexts = np.zeros(shape=(num_samples,), dtype=np.bool)
logging.info(f"Loading training examples for epoch {epoch}")
with data_file.open() as f:
for i, line in enumerate(tqdm(f, total=num_samples, desc="Training examples")):
line = line.strip()
example = json.loads(line)
features = convert_example_to_features(example, tokenizer, seq_len)
input_ids[i] = features.input_ids
segment_ids[i] = features.segment_ids
input_masks[i] = features.input_mask
lm_label_ids[i] = features.lm_label_ids
is_nexts[i] = features.is_next
assert i == num_samples - 1 # Assert that the sample count metric was true
logging.info("Loading complete!")
self.num_samples = num_samples
self.seq_len = seq_len
self.input_ids = input_ids
self.input_masks = input_masks
self.segment_ids = segment_ids
self.lm_label_ids = lm_label_ids
self.is_nexts = is_nexts
def __len__(self):
return self.num_samples
def __getitem__(self, item):
return (torch.tensor(self.input_ids[item].astype(np.int64)),
torch.tensor(self.input_masks[item].astype(np.int64)),
torch.tensor(self.segment_ids[item].astype(np.int64)),
torch.tensor(self.lm_label_ids[item].astype(np.int64)),
torch.tensor(self.is_nexts[item].astype(np.int64)))
def get_chunks(dir_path, epoch):
"""
Look in the specified directory for files of the form epoch_0-000, epoch_0-001, ...etc.
and return a list of the chunks e.g. ['000', '001', '002', ...]
There could be a mix of .json and .zip files so sometimes we could get duplicates.
"""
if isinstance(dir_path, Path):
dir_path = str(dir_path)
chunks = [x.split('-')[-1].strip('.json').strip('.zip') for x in glob.glob("{}/epoch_{}-*".format(dir_path, epoch))]
chunks = list(set(chunks))
return sorted(chunks)
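# Example (added, hypothetical listing) of what get_chunks returns:
#   epoch_0-000.json, epoch_0-001.json, epoch_0-001.zip  ->  ['000', '001']
# the set() above collapses the .json/.zip duplicates before sorting.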
def get_args():
parser = ArgumentParser()
parser.add_argument('--pregenerated_data', type=Path, required=True)
parser.add_argument('--output_dir', type=Path, required=True)
parser.add_argument('--restore_dir', type=Path, help="Restore from a checkpoint file and continue training")
parser.add_argument("--bert_model", type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--do_lower_case", action="store_true")
parser.add_argument("--reduce_memory", action="store_true",
help="Store training data as on-disc memmaps to massively reduce memory usage")
parser.add_argument("--epochs", type=int,
default=3, help="Number of epochs to train for")
parser.add_argument("--no_cuda", action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--num_workers", type=int,
default=0, help="Number of workers to load data")
# training config
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--batch_size", default=12, type=int,
help="Total batch size for training.")
parser.add_argument("--seq_length", default=128, type=int,
help="Seq length of each sample.")
parser.add_argument('--train_iters', type=int, default=2000,
help='number of iterations per epoch')
# distributed training config
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus. Passed from distributed launcher")
# AMP config
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale', type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
# optimization
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
# nvprof args
parser.add_argument('--nvprof', action='store_true',
help='profile this program')
parser.add_argument('--profile_start', type=int, default=200,
help="""Start iteration of nvidia profiler""")
parser.add_argument('--profile_stop', type=int, default=201,
help="""Stop iteration of nvidia profiler""")
parser.add_argument('--warmup_iter', type=int, default=200,
help="""Start iteration of nvidia profiler""")
# benchmarking args
parser.add_argument('--benchmark', action='store_true',
help='benchmark this program')
parser.add_argument('--benchmark_dir', type=str, default="benchmark_output",
help="""Dir to save benchmark output stats""")
parser.add_argument('--benchmark_start', type=int, default=1000,
help="""Start iteration of nvidia profiler""")
parser.add_argument('--benchmark_stop', type=int, default=2000,
help="""Stop iteration of nvidia profiler""")
parser.add_argument('--benchmark_partition', type=str, default="t4",
help="""Partition of gpus""")
parser.add_argument('--log_interval', type=int, default=100,
help='report interval')
args = parser.parse_args()
assert args.pregenerated_data.is_dir(), \
"--pregenerated_data should point to the folder of files made by pregenerate_training_data.py!"
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv("WORLD_SIZE", '1'))
return args
def main():
args = get_args()
total_train_examples = 0
for i in range(args.epochs):
chunks = get_chunks(args.pregenerated_data, i)
if i == 0 and len(chunks) == 0:
exit("No training data was found!")
elif len(chunks) == 0:
print(f"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).")
print("This script will loop over the available data, but training diversity may be negatively impacted.")
num_data_epochs = i
break
for chunk in chunks:
epoch_file = args.pregenerated_data / f"epoch_{i}-{chunk}.json"
epoch_zip = args.pregenerated_data / f"epoch_{i}-{chunk}.zip"
metrics_file = args.pregenerated_data / f"metrics_epoch_{i}-{chunk}.json"
if (epoch_file.is_file() or epoch_zip.is_file()) and metrics_file.is_file():
metrics = json.loads(metrics_file.read_text())
total_train_examples += metrics['num_training_examples']
else:
num_data_epochs = args.epochs
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
init_method = 'tcp://'
master_ip = os.getenv('MASTER_ADDR', 'localhost')
master_port = os.getenv('MASTER_PORT', '6000')
init_method += master_ip + ':' + master_port
torch.distributed.init_process_group(
backend='nccl',
world_size=args.world_size,
rank=args.rank,
init_method=init_method)
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
# torch.distributed.init_process_group(backend='nccl')
logging.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
# args.batch_size = args.batch_size // args.gradient_accumulation_steps
print("CUDA device count: {}".format(torch.cuda.device_count()))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
args.output_dir.mkdir(parents=True, exist_ok=True)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
num_train_optimization_steps = int(
total_train_examples / args.batch_size)
if args.local_rank != -1:
num_train_optimization_steps = num_train_optimization_steps // args.world_size
model = BertForPreTraining.from_pretrained(args.bert_model)
if args.fp16:
model.half()
model.to(device)
if args.local_rank != -1:
model = DDP(model)
elif n_gpu > 1:
model = torch.nn.DataParallel(model)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
# scheduler not compatible with APEX::FP16_optimizer
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=num_train_optimization_steps)
if args.output_dir:
last_checkpoint = check_files(args.output_dir, args.bert_model)
last_epoch, global_step = restore_checkpoint(model, optimizer, last_checkpoint, device)
else:
last_epoch, global_step = 0, 0
logging.info("***** Running training *****")
logging.info(f" Num examples = {total_train_examples}")
logging.info(" Batch size = %d", args.batch_size)
logging.info(" Num steps = %d", num_train_optimization_steps)
iteration = 0
benchmark_stats = defaultdict(lambda: [])
grad_stats = defaultdict(lambda: [])
summary_writer = SummaryWriter() if args.rank == 0 else None
model.train()
for epoch in range(last_epoch, args.epochs):
shuffled_chunks = get_chunks(args.pregenerated_data, epoch)
random.shuffle(shuffled_chunks)
logging.info('New shuffled chunks: {}'.format(shuffled_chunks))
for chunk in shuffled_chunks:
epoch_dataset = PregeneratedDataset(epoch=epoch, chunk=chunk, training_path=args.pregenerated_data, tokenizer=tokenizer,
num_data_epochs=num_data_epochs, reduce_memory=args.reduce_memory)
if args.local_rank == -1:
train_sampler = RandomSampler(epoch_dataset)
else:
train_sampler = DistributedSampler(epoch_dataset)
train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.batch_size, num_workers=args.num_workers)
data_iterator = iter(train_dataloader)
timers = Timers()
timers('interval time').start()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for batch in data_iterator:
# while iteration < args.train_iters:
if args.nvprof:
if iteration == args.profile_start:
profile_cuda.profile_start()
print("CUDA profiling starts!")
if iteration == args.profile_stop:
profile_cuda.profile_stop()
print("CUDA profiling stops!")
iteration += 1
# benchmark dataloading time
# batch = next(data_iterator)
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
outputs = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)
loss = outputs[0]
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
# loss = loss / args.gradient_accumulation_steps
if args.local_rank != -1:
if iteration % args.gradient_accumulation_steps == 0:
# we are using APEX DDP => enable_allreduce / disable_allreduce
# print("iteration {}, all reduce enabled!".format(iteration))
model.enable_allreduce()
else:
# print("iteration {}, all reduce disabled!".format(iteration))
model.disable_allreduce()
# note that loss.backward accumulates the gradient => gradient will be accumulated until we call zero_grad
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
# mean_loss = tr_loss * args.gradient_accumulation_steps / nb_tr_steps
mean_loss = tr_loss / nb_tr_steps
if iteration % args.gradient_accumulation_steps == 0:
start = time.time()
scheduler.step() # Update learning rate schedule (note: the lr scheduler is not compatible with APEX FP16_Optimizer)
optimizer.step()
optimizer.zero_grad()
benchmark_stats['weight_update_time'].append(time.time() - start) # unit in s
global_step += 1
if iteration % args.log_interval == 0:
elapsed_time = timers('interval time').elapsed()
log_string = ' epoch{:2d} |'.format(epoch)
log_string += ' iteration {:8d} |'.format(iteration)
log_string += ' elapsed time per iteration (ms): {:.1f} |'.format(elapsed_time * 1000.0 / args.log_interval)
log_string += ' mean loss {:.3E} |'.format(mean_loss)
if args.rank == 0:
summary_writer.add_scalar('mean_loss', mean_loss, iteration)
# args.rank == 0 => this is master process
if args.benchmark and args.rank == 0:
if args.benchmark_start < iteration <= args.benchmark_stop:
benchmark_stats['iteration'].append(iteration)
benchmark_stats['seq_length'].append(args.seq_length)
benchmark_stats['batch_size'].append(args.batch_size * args.world_size)
benchmark_stats['num_tokens'].append(args.seq_length * args.batch_size * args.world_size)
benchmark_stats['elapsed_time'].append(elapsed_time)
benchmark_stats['log_interval'].append(args.log_interval)
print(log_string, flush=True)
# Save a trained model
if n_gpu > 1 and torch.distributed.get_rank() == 0 or n_gpu <=1:
logging.info("** ** * Saving fine-tuned model ** ** * ")
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
save_checkpoint(
model,
optimizer,
epoch,
global_step,
args.output_dir,
os.path.join(args.output_dir, "{}_{}.tar".format(args.bert_model, global_step))
)
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Save a trained model
if n_gpu > 1 and torch.distributed.get_rank() == 0 or n_gpu <=1:
logging.info("** ** * Saving fine-tuned model ** ** * ")
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
save_checkpoint(
model,
optimizer,
epoch,
global_step,
args.output_dir,
os.path.join(args.output_dir, "{}_{}.tar".format(args.bert_model, global_step))
)
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
if args.rank == 0:
summary_writer.close()
if args.benchmark and args.rank == 0:
benchmark_csv = {
k: [np.mean(l)] for k,l in benchmark_stats.items()
}
benchmark_csv['weight_update_time'] = args.log_interval * np.array(benchmark_csv['weight_update_time'])
benchmark_csv['token_throughput'] = np.array(benchmark_csv['num_tokens']) * np.array(benchmark_csv['log_interval'])\
/ np.array(benchmark_csv['elapsed_time'])
benchmark_csv['precision'] = [ 'fp16' if args.fp16 else 'fp32' ]
save_dir = os.path.join(
args.benchmark_dir,
"{gpus}_gpus_{partition}_trials".format(
gpus=args.world_size,
partition=args.benchmark_partition
)
)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
df = pd.DataFrame.from_dict(benchmark_csv)
df.to_csv(os.path.join(
save_dir,
"huggingface_benchmark_{partition}_batch_size_{batch_size}_seq_len_{seq_len}.csv".format(
partition=args.benchmark_partition,
batch_size=args.batch_size,
seq_len=args.seq_length
)
))
if __name__ == '__main__':
main()
``` |
{
"source": "jiahuanluo/attention-is-all-you-need-pytorch",
"score": 3
} |
#### File: jiahuanluo/attention-is-all-you-need-pytorch/translate.py
```python
import torch
import argparse
import dill as pickle
from tqdm import tqdm
import transformer.Constants as Constants
from torchtext.data import Dataset
from transformer.Models import Transformer
from transformer.Translator import Translator
from torch.utils.data import DataLoader
import utils
def prepare_mydataloaders(opt, device):
data = pickle.load(open(opt.data_pkl, 'rb'))
opt.max_token_seq_len = 140
opt.src_pad_idx = data['dict']['src'].labelToIdx[Constants.PAD_WORD]
opt.trg_pad_idx = data['dict']['tgt'].labelToIdx[Constants.PAD_WORD]
opt.trg_bos_idx = data['dict']['tgt'].labelToIdx[Constants.BOS_WORD]
opt.trg_eos_idx = data['dict']['tgt'].labelToIdx[Constants.EOS_WORD]
opt.unk_idx = 1
opt.src_vocab_size = len(data['dict']['src'].labelToIdx)
opt.trg_vocab_size = len(data['dict']['tgt'].labelToIdx)
# ========= Preparing Model =========#
# if opt.embs_share_weight:
# assert data['dict']['src'].labelToIdx == data['dict']['tgt'].labelToIdx, \
# 'To sharing word embedding the src/trg word2idx table shall be the same.'
testset = utils.BiDataset(data['test'])
testloader = torch.utils.data.DataLoader(dataset=testset,
batch_size=1,
shuffle=False,
collate_fn=utils.padding)
return data['dict']['tgt'], testloader
def load_model(opt, device):
checkpoint = torch.load(opt.model, map_location=device)
model_opt = checkpoint['settings']
model = Transformer(
model_opt.src_vocab_size,
model_opt.trg_vocab_size,
model_opt.src_pad_idx,
model_opt.trg_pad_idx,
trg_emb_prj_weight_sharing=model_opt.proj_share_weight,
emb_src_trg_weight_sharing=model_opt.embs_share_weight,
d_k=model_opt.d_k,
d_v=model_opt.d_v,
d_model=model_opt.d_model,
d_word_vec=model_opt.d_word_vec,
d_inner=model_opt.d_inner_hid,
n_layers=model_opt.n_layers,
n_head=model_opt.n_head,
dropout=model_opt.dropout).to(device)
model.load_state_dict(checkpoint['model'])
print('[Info] Trained model state loaded.')
return model
def main():
'''Main Function'''
parser = argparse.ArgumentParser(description='translate.py')
parser.add_argument('-model', required=True,
help='Path to model weight file')
parser.add_argument('-data_pkl', required=True,
help='Pickle file with both instances and vocabulary.')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
be the decoded sequence)""")
parser.add_argument('-beam_size', type=int, default=5)
parser.add_argument('-max_seq_len', type=int, default=100)
parser.add_argument('-no_cuda', action='store_true')
# TODO: Translate bpe encoded files
# parser.add_argument('-src', required=True,
# help='Source sequence to decode (one line per sequence)')
# parser.add_argument('-vocab', required=True,
# help='Source sequence to decode (one line per sequence)')
# TODO: Batch translation
# parser.add_argument('-batch_size', type=int, default=30,
# help='Batch size')
# parser.add_argument('-n_best', type=int, default=1,
# help="""If verbose is set, will output the n_best
# decoded sentences""")
opt = parser.parse_args()
opt.cuda = not opt.no_cuda
# data = pickle.load(open(opt.data_pkl, 'rb'))
# SRC, TRG = data['vocab']['src'], data['vocab']['trg']
# opt.src_pad_idx = SRC.vocab.stoi[Constants.PAD_WORD]
# opt.trg_pad_idx = TRG.vocab.stoi[Constants.PAD_WORD]
# opt.trg_bos_idx = TRG.vocab.stoi[Constants.BOS_WORD]
# opt.trg_eos_idx = TRG.vocab.stoi[Constants.EOS_WORD]
# test_loader = Dataset(examples=data['test'], fields={'src': SRC, 'trg': TRG})
device = torch.device('cuda' if opt.cuda else 'cpu')
TRG, test_loader = prepare_mydataloaders(opt, device)
translator = Translator(
model=load_model(opt, device),
beam_size=opt.beam_size,
max_seq_len=opt.max_seq_len,
src_pad_idx=opt.src_pad_idx,
trg_pad_idx=opt.trg_pad_idx,
trg_bos_idx=opt.trg_bos_idx,
trg_eos_idx=opt.trg_eos_idx).to(device)
with open(opt.output, 'w+', encoding='utf-8') as f:
for example in tqdm(test_loader, mininterval=2, desc=' - (Test)', leave=False):
sec_seq = example[0].view(1, -1)
pred_seq = translator.translate_sentence(sec_seq.to(device))
pred_line = ' '.join(TRG.idxToLabel[idx] for idx in pred_seq)
pred_line = pred_line.replace(Constants.BOS_WORD, '').replace(Constants.EOS_WORD, '\n')
f.write(pred_line)
print('[Info] Finished.')
if __name__ == "__main__":
'''
Usage: python translate.py -model trained.chkpt -data_pkl multi30k.pt -no_cuda
'''
main()
``` |
{
"source": "jiahuanluo/label-inference-attacks",
"score": 3
} |
#### File: Code/datasets/cinic10.py
```python
import torchvision
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from datasets.dataset_setup import DatasetSetup
from my_utils.utils import train_val_split, image_format_2_rgb
class CINIC10L(Dataset):
def __init__(self, root, split='train', transform=None):
super().__init__()
image_folder = torchvision.datasets.ImageFolder(root=root + '/' + split)
self.targets = image_folder.targets
self.image_paths = image_folder.imgs
self.transform = transform
def __getitem__(self, index):
file_path, label = self.image_paths[index]
img = self.read_image(file_path)
return img, label
def __len__(self):
return len(self.image_paths)
def read_image(self, path):
img = Image.open(path)
return self.transform(img) if self.transform else img
class Cinic10LSetup(DatasetSetup):
def __init__(self):
super().__init__()
self.num_classes = 10
self.size_bottom_out = 10
def set_datasets_for_ssl(self, file_path, n_labeled, party_num):
transforms_ = self.get_transforms()
base_dataset = CINIC10L(file_path)
train_labeled_idxs, train_unlabeled_idxs = train_val_split(base_dataset.targets,
int(n_labeled / self.num_classes),
self.num_classes)
train_labeled_dataset = CINIC10LLabeled(file_path, train_labeled_idxs, split='train', transform=transforms_)
train_unlabeled_dataset = CINIC10LUnlabeled(file_path, train_unlabeled_idxs, split='train',
transform=transforms_)
train_complete_dataset = CINIC10LLabeled(file_path, None, split='train', transform=transforms_)
test_dataset = CINIC10LLabeled(file_path, split='test', transform=transforms_)
print("#Labeled:", len(train_labeled_idxs), "#Unlabeled:", len(train_unlabeled_idxs))
return train_labeled_dataset, train_unlabeled_dataset, test_dataset, train_complete_dataset
def get_normalize_transform(self):
normalize_cinic = transforms.Normalize(mean=[0.47889522, 0.47227842, 0.43047404],
std=[0.24205776, 0.23828046, 0.25874835])
return normalize_cinic
def get_transforms(self):
normalize = self.get_normalize_transform()
transforms_ = transforms.Compose([
transforms.Lambda(image_format_2_rgb),
transforms.Resize((32, 32)),
transforms.ToTensor(),
normalize
])
return transforms_
def get_transformed_dataset(self, file_path, party_num=None, train=True):
if train:
split = 'train'
else:
split = 'test'
transforms_ = self.get_transforms()
_cinic10_dataset = CINIC10L(file_path, split, transform=transforms_)
return _cinic10_dataset
def clip_one_party_data(self, x, half):
x = x[:, :, :, :half]
return x
class CINIC10LLabeled(CINIC10L):
def __init__(self, root, indexs=None, split='train',
transform=None):
super(CINIC10LLabeled, self).__init__(root, split=split,
transform=transform
)
if indexs is not None:
temp_image_paths = []
for id in indexs:
temp_image_paths.append(self.image_paths[id])
self.image_paths = temp_image_paths
class CINIC10LUnlabeled(CINIC10LLabeled):
def __init__(self, root, indexs, split='train',
transform=None):
super(CINIC10LUnlabeled, self).__init__(root, indexs, split=split,
transform=transform
)
temp_image_paths = []
for image_path, label in self.image_paths:
temp_image_paths.append((image_path, -1))
self.image_paths = temp_image_paths
if __name__ == '__main__':
dataset = CINIC10L(root='D:/Datasets/CINIC10L')
print("s")
```
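`Cinic10LSetup` wires the `CINIC10L` folder dataset to the standard CINIC-10 normalization; a minimal usage sketch follows. It assumes the repo's `Code/` directory is on the import path (so `datasets.cinic10` resolves) and uses a hypothetical local dataset path containing `train/` and `test/` sub-folders.
```python
from torch.utils.data import DataLoader
from datasets.cinic10 import Cinic10LSetup  # assumes Code/ is on PYTHONPATH

setup = Cinic10LSetup()
# '/data/CINIC10L' is a placeholder for a local CINIC-10 copy with train/ and test/ splits
test_set = setup.get_transformed_dataset('/data/CINIC10L', train=False)
loader = DataLoader(test_set, batch_size=64, shuffle=False)

images, labels = next(iter(loader))
print(images.shape)   # expected: torch.Size([64, 3, 32, 32]) after Resize((32, 32))
print(labels[:8])
```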
#### File: Code/datasets/tiny_image_net.py
```python
import os
import glob
import torch
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
from datasets.dataset_setup import DatasetSetup
from my_utils.utils import train_val_split, image_format_2_rgb
EXTENSION = 'JPEG'
NUM_IMAGES_PER_CLASS = 500
CLASS_LIST_FILE = 'wnids.txt'
VAL_ANNOTATION_FILE = 'val_annotations.txt'
class TinyImageNet(Dataset):
"""Tiny ImageNet data set available from `http://cs231n.stanford.edu/tiny-imagenet-200.zip`.
Parameters
----------
root: string
Root directory including `train`, `test` and `val` subdirectories.
split: string
Indicating which split to return as a data set.
Valid option: [`train`, `test`, `val`]
transform: torchvision.transforms
A (series) of valid transformation(s).
in_memory: bool
Set to True if there is enough memory (about 5G) and you want to minimize disk IO overhead.
"""
def __init__(self, root, split='train', transform=None, in_memory=False):
self.root = os.path.expanduser(root)
self.split = split
self.transform = transform
self.in_memory = in_memory
self.split_dir = os.path.join(root, self.split)
self.image_paths = sorted(glob.iglob(os.path.join(self.split_dir, '**', '*.%s' % EXTENSION), recursive=True))
self.labels = {} # fname - label number mapping
self.images = [] # used for in-memory processing
# build class label - number mapping
with open(os.path.join(self.root, CLASS_LIST_FILE), 'r') as fp:
self.label_texts = sorted([text.strip() for text in fp.readlines()])
self.label_text_to_number = {text: i for i, text in enumerate(self.label_texts)}
if self.split == 'train':
for label_text, i in self.label_text_to_number.items():
for cnt in range(NUM_IMAGES_PER_CLASS):
self.labels['%s_%d.%s' % (label_text, cnt, EXTENSION)] = i
elif self.split == 'val':
with open(os.path.join(self.split_dir, VAL_ANNOTATION_FILE), 'r') as fp:
for line in fp.readlines():
terms = line.split('\t')
file_name, label_text = terms[0], terms[1]
self.labels[file_name] = self.label_text_to_number[label_text]
# read all images into torch tensor in memory to minimize disk IO overhead
if self.in_memory:
self.images = [self.read_image(path) for path in self.image_paths]
def __len__(self):
return len(self.image_paths)
def __getitem__(self, index):
file_path = self.image_paths[index]
if self.in_memory:
img = self.images[index]
else:
img = self.read_image(file_path)
if self.split == 'test':
return img
else:
# file_name = file_path.split('/')[-1]
return img, self.labels[os.path.basename(file_path)]
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = self.split
fmt_str += ' Split: {}\n'.format(tmp)
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
def read_image(self, path):
img = Image.open(path)
return self.transform(img) if self.transform else img
class TinyImageNetSetup(DatasetSetup):
def __init__(self):
super().__init__()
self.num_classes = 200
self.size_bottom_out = 200
def set_datasets_for_ssl(self, file_path, n_labeled, party_num=None):
transforms_ = self.get_transforms()
base_dataset = TinyImageNet(file_path)
train_labeled_idxs, train_unlabeled_idxs = train_val_split(list(base_dataset.labels.values()),
int(n_labeled / self.num_classes),
self.num_classes)
train_labeled_dataset = TinyImageNetLabeled(file_path, train_labeled_idxs, split='train',
transform=transforms_)
train_unlabeled_dataset = TinyImageNetUnlabeled(file_path, train_unlabeled_idxs, split='train',
transform=transforms_)
train_complete_dataset = TinyImageNetLabeled(file_path, None, split='train', transform=transforms_)
test_dataset = TinyImageNetLabeled(file_path, split='val', transform=transforms_)
print("#Labeled:", len(train_labeled_idxs), "#Unlabeled:", len(train_unlabeled_idxs))
return train_labeled_dataset, train_unlabeled_dataset, test_dataset, train_complete_dataset
def get_normalize_transform(self):
normalize_ = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
return normalize_
def get_transforms(self):
normalize = self.get_normalize_transform()
transforms_ = transforms.Compose([
transforms.Lambda(image_format_2_rgb),
transforms.Resize((64, 64)),
transforms.ToTensor(),
normalize
])
return transforms_
def get_transformed_dataset(self, file_path, party_num=None, train=True):
if train:
split = 'train'
else:
split = 'val'
transforms_ = self.get_transforms()
_tiny_imagenet_dataset = TinyImageNet(file_path, split, transform=transforms_)
return _tiny_imagenet_dataset
def clip_one_party_data(self, x, half):
x = x[:, :, :, :half]
return x
class TinyImageNetLabeled(TinyImageNet):
def __init__(self, root, indexs=None, split='train', transform=None):
super(TinyImageNetLabeled, self).__init__(root, split=split,
transform=transform)
if indexs is not None:
temp_image_paths = []
for id in indexs:
temp_image_paths.append(self.image_paths[id])
self.image_paths = temp_image_paths
class TinyImageNetUnlabeled(TinyImageNetLabeled):
def __init__(self, root, indexs, split='train',
transform=None):
super(TinyImageNetUnlabeled, self).__init__(root, indexs, split=split,
transform=transform)
for key in self.labels.keys():
self.labels[key] = -1
if __name__ == '__main__':
normalize_imagenet = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
augmentation = transforms.RandomApply([
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(10),
transforms.RandomResizedCrop(64)], p=.8)
training_transform = transforms.Compose([
transforms.Lambda(image_format_2_rgb),
augmentation,
transforms.ToTensor(),
normalize_imagenet])
test_transform = transforms.Compose([
transforms.Lambda(image_format_2_rgb),
transforms.ToTensor(),
normalize_imagenet])
dataset_train = TinyImageNet(root='D:\\Datasets\\tiny-imagenet-200',
split='train')
dataset_test = TinyImageNet(root='D:\\Datasets\\tiny-imagenet-200',
split='val')
train_loader = torch.utils.data.DataLoader(
dataset=dataset_train,
batch_size=1, shuffle=True,
num_workers=4, pin_memory=True
)
test_loader = torch.utils.data.DataLoader(
dataset=dataset_test,
batch_size=1, shuffle=False,
num_workers=4, pin_memory=True
)
print("len train loader:", len(train_loader))
for batch_id, (data, target) in enumerate(train_loader):
print("batch_id:", batch_id)
# print("batch datasets:", data)
print("batch datasets shape:", data.shape)
print("batch target:", target)
break
print("\n\n test-->")
print("len test loader:", len(test_loader))
for batch_id, (data, target) in enumerate(test_loader):
print("batch_id:", batch_id)
# print("batch datasets:", data)
print("batch datasets shape:", data.shape)
print("batch target:", target)
break
for data, target in test_loader:
# print("batch datasets:", data)
print("batch datasets shape:", data.shape)
print("batch target:", target)
break
```
#### File: Code/datasets/yahoo.py
```python
from datasets.dataset_setup import DatasetSetup
from models import read_data_text
class YahooSetup(DatasetSetup):
def __init__(self):
super().__init__()
self.num_classes = 10
self.size_bottom_out = 10
def set_datasets_for_ssl(self, file_path, n_labeled, party_num):
train_labeled_dataset, train_unlabeled_dataset, val_dataset, test_dataset, n_labels = \
read_data_text.get_data(file_path, int(n_labeled / 10))
train_complete_labeled_dataset, _, _, _, _ = \
read_data_text.get_data(file_path, 5000)
print("#Labeled:", len(train_labeled_dataset), "#Unlabeled:", len(train_unlabeled_dataset))
return train_labeled_dataset, train_unlabeled_dataset, test_dataset, train_complete_labeled_dataset
def get_transformed_dataset(self, file_path, party_num=None, train=True):
if train:
train_complete_labeled_dataset, _, _, _, _ = \
read_data_text.get_data(file_path, 5000)
return train_complete_labeled_dataset
else:
_, _, _, test_dataset, _ = \
read_data_text.get_data(file_path, 10)
return test_dataset
if __name__ == '__main__':
dataset_setup = YahooSetup()
train_dataset = dataset_setup.get_transformed_dataset(file_path='D:/Datasets/yahoo_answers_csv/',train=True)
print("s")
```
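As with the image datasets, `YahooSetup` is consumed through `set_datasets_for_ssl`; the sketch below mirrors how `model_completion.py` later obtains the labeled/unlabeled splits. It assumes `Code/` is on the import path and that the CSV directory from the `__main__` block above exists locally.
```python
from datasets.yahoo import YahooSetup  # assumes Code/ is on PYTHONPATH

setup = YahooSetup()
labeled, unlabeled, test, complete = setup.set_datasets_for_ssl(
    file_path='D:/Datasets/yahoo_answers_csv/', n_labeled=100, party_num=2)
print(len(labeled), len(unlabeled), len(test))
```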
#### File: label-inference-attacks/Code/inferred_label_exploition.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils import data
import models.bottom_model_plus as models
import copy
import dill
from vfl_framework import VflFramework
import pandas as pd
from sklearn.preprocessing import StandardScaler
import numpy as np
def create_model(ema=False, size_bottom_out=10, num_classes=10):
model = models.BottomModelPlus(size_bottom_out, num_classes)
model = model.cuda()
if ema:
for param in model.parameters():
param.detach_()
return model
def correct_counter(output, target):
_, pred = output.topk(1, 1, True, True)
correct_1 = torch.eq(pred, target.view(-1, 1)).sum().float().item()
return correct_1
def train_and_validata_model(train_loader, test_loader, model, epochs):
loss_func = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(params=model.parameters(), lr=2e-3, momentum=0.9)
for epoch in range(epochs):
# train
model.train()
loss_epoch = 0
count_batch = 0
for data, labels in train_loader:
data = data.float()
output = model(data)
loss = loss_func(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_epoch += loss
count_batch += 1
# validate
model.eval()
count_correct = 0
for data, labels in test_loader:
data = data.float()
output = model(data)
count_correct += correct_counter(output, labels)
if epoch == epochs - 1:
print(f'epoch {epoch}')
print(f'train loss:{loss_epoch / count_batch}')
print(f'accuracy on test set:{count_correct / len(test_loader.dataset)}')
class MyModel(nn.Module):
def __init__(self, in_dim=14):
super(MyModel, self).__init__()
self.fc1 = nn.Linear(in_dim, 20)
self.fc2 = nn.Linear(20, 20)
self.fc3 = nn.Linear(20, 2)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
class MyDataset(data.Dataset):
def __init__(self, df, label_area_mean, use_diagnosis_as_feature=False, train=True, fraction_of_known=0.1):
known_samples_num = int(fraction_of_known * len(df))
label_area_mean = np.array(label_area_mean, dtype=np.longlong)
if use_diagnosis_as_feature:
feat_num = 15
else:
feat_num = 14
df = df.drop('diagnosis', axis=1)
self.df = df
self.train_data = np.array(df.iloc[:known_samples_num, :feat_num])
self.train_labels = label_area_mean[:known_samples_num]
self.test_data = np.array(df.iloc[known_samples_num:, :feat_num])
self.test_labels = label_area_mean[known_samples_num:]
self.train = train
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def __getitem__(self, index):
if self.train:
data, label = self.train_data[index], self.train_labels[index]
else:
data, label = self.test_data[index], self.test_labels[index]
return data, label
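# Note (added): MyDataset splits rows by position, so the first
# fraction_of_known of the dataframe is the "known" training part and the
# remainder is used for evaluation; labels come from the 2-bin qcut of
# area_mean built below.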
model = create_model(ema=False, size_bottom_out=2, num_classes=2)
checkpoint = torch.load('D:/MyCodes/label_inference_attacks_against_vfl/saved_experiment_results/saved_models/BCW_saved_models/BCW_saved_framework_lr=0.01_normal_half=14.pth', pickle_module=dill)
model.bottom_model = copy.deepcopy(checkpoint.malicious_bottom_model_a)
path = 'D:/MyCodes/label_inference_attacks_against_vfl/saved_experiment_results/saved_models/BCW_saved_models/BCW_mc_best.pth'
load_dict = torch.load(path)
model.load_state_dict(load_dict['state_dict'])
model = model.float()
path = 'D:/Datasets/BreastCancerWisconsin/wisconsin.csv'
df = pd.read_csv(path)
df = df.drop('Unnamed: 32', axis=1)
df = df.drop('id', axis=1)
# sequence adjustment
radius_mean = df['radius_mean']
df = df.drop('radius_mean', axis=1)
df['radius_mean'] = radius_mean
perimeter_mean = df['perimeter_mean']
df = df.drop('perimeter_mean', axis=1)
df['perimeter_mean'] = perimeter_mean
sc = StandardScaler()
df[df.columns[1:]] = sc.fit_transform(df[df.columns[1:]])
feature_area_mean = df['area_mean']
df = df.drop('area_mean', axis=1)
feature_area_mean = pd.qcut(feature_area_mean, q=2, labels=range(2))
batch_size = 16
# cover the label column 'diagnosis'
x = np.array(df.iloc[:, 1:1+14])
x = torch.tensor(x)
x = x.float().cuda()
y_predict = model(x)
y_score = torch.nn.functional.softmax(y_predict, dim=1)[:, :1]
y_score = y_score.reshape(-1).cpu().detach().numpy()
df['diagnosis'] = y_score
df[['diagnosis']] = sc.fit_transform(df[['diagnosis']])
# eval on dataset without the inferred labels as extra feature
print('eval on dataset WITHOUT the inferred labels as extra feature')
train_set_without_diagnosis = MyDataset(df=df, label_area_mean=feature_area_mean, use_diagnosis_as_feature=False, train=True)
train_dataloader_without_diagnosis = data.DataLoader(train_set_without_diagnosis, batch_size=16, shuffle=True)
test_set_without_diagnosis = MyDataset(df=df, label_area_mean=feature_area_mean, use_diagnosis_as_feature=False, train=False)
test_dataloader_without_diagnosis = data.DataLoader(test_set_without_diagnosis, batch_size=16, shuffle=True)
train_and_validata_model(train_loader=train_dataloader_without_diagnosis,
test_loader=test_dataloader_without_diagnosis,
model=MyModel(in_dim=14),
epochs=100)
print('\n\n')
# eval on dataset with the inferred labels as extra feature
print('eval on dataset WITH the inferred labels as extra feature')
train_set_with_diagnosis = MyDataset(df=df, label_area_mean=feature_area_mean, use_diagnosis_as_feature=True, train=True)
train_dataloader_with_diagnosis = data.DataLoader(train_set_with_diagnosis, batch_size=16, shuffle=True)
test_set_with_diagnosis = MyDataset(df=df, label_area_mean=feature_area_mean, use_diagnosis_as_feature=True, train=False)
test_dataloader_with_diagnosis = data.DataLoader(test_set_with_diagnosis, batch_size=16, shuffle=True)
train_and_validata_model(train_loader=train_dataloader_with_diagnosis,
test_loader=test_dataloader_with_diagnosis,
model=MyModel(in_dim=15),
epochs=100)
```
#### File: label-inference-attacks/Code/model_completion.py
```python
from __future__ import print_function
import argparse
import ast
import os
import shutil
import sys
import time
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torch.nn.functional as F
import models.bottom_model_plus as models
from my_utils import AverageMeter, accuracy, mkdir_p, precision_recall
import datasets.get_dataset as get_dataset
import dill
import copy
from vfl_framework import VflFramework
from vfl_framework_for_idc import IdcVflFramework
parser = argparse.ArgumentParser(description='Model Completion')
# dataset paras
parser.add_argument('--dataset-name', default="Criteo", type=str,
choices=['CIFAR10', 'CIFAR100', 'TinyImageNet', 'CINIC10L', 'BC_IDC', 'Criteo', 'BCW'])
parser.add_argument('--dataset-path', default='D:/Datasets/Criteo/criteo.csv', type=str)
'''
'D:/Datasets/CIFAR10'
'D:/Datasets/CIFAR100'
'D:/Datasets/TinyImageNet'
'D:/Datasets/CINIC10L'
'D:/Datasets/BC_IDC'
'D:/Datasets/Criteo/criteo.csv'
'D:/Datasets/BreastCancerWisconsin/wisconsin.csv'
'''
# attacker's knowledge paras
parser.add_argument('--n-labeled', type=int, default=100,
help='Number of labeled data') # cifar-40, liver-10, TinyImageNet-600
# inference head paras
parser.add_argument('--num-layer', type=int, default=1,
help='number of layers of the inference head')
parser.add_argument('--use-bn', type=ast.literal_eval, default=True,
help='Inference head use batchnorm or not')
parser.add_argument('--activation_func_type', type=str, default='ReLU',
help='Activation function type of the inference head',
choices=['ReLU', 'Sigmoid', 'None'])
# vfl paras
parser.add_argument('--party-num', help='party-num',
type=int, default=2)
parser.add_argument('--half', help='number of the adversary\'s features. For image datasets except IDC, this para '
'represents the number of pixels. For IDC, it represents the number of images (in '
'this case it must be lower than party-num). For numeric datasets, it represents '
'the number of numeric features. '
'You can change this para (lower than party_num) to evaluate the sensitivity of our '
'attack.',
type=int, default=4096) # choices=[16, 14, 32, 1->party_num]. CIFAR10-16, Liver-14, TinyImageNet-32
# checkpoints paras (used for trained bottom model in our attack)
parser.add_argument('--resume-dir',
default='D:/MyCodes/label_inference_attacks_against_vfl/saved_experiment_results'
'/saved_models/',
type=str, metavar='PATH',
help='path to latest checkpoint', )
parser.add_argument('--resume-name',
default='Criteo_saved_framework_lr=0.05_normal_half=4096.pth',
type=str, metavar='NAME',
help='file name of the latest checkpoint', )
parser.add_argument('--out', default=None, # 'result'
help='Directory to output the best checkpoint')
# evaluation paras
parser.add_argument('--k', help='top k accuracy',
type=int, default=2)
# training paras
parser.add_argument('--batch-size', default=16, type=int, metavar='N',
help='train batchsize') # CIFAR10/ImageNet-32
parser.add_argument('--epochs', default=10, type=int, metavar='N',
help='number of total epochs to run') # cifar-5, BC_IDC-1, liver-5
parser.add_argument('--lr', '--learning-rate', default=2e-3, type=float,
metavar='LR', help='initial learning rate') # CIFAR10 2e-3
parser.add_argument('--gpu', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--manualSeed', type=int, default=0, help='manual seed')
parser.add_argument('--val-iteration', type=int, default=1024,#1024
help='Number of labeled data')
parser.add_argument('--alpha', default=0.75, type=float)
parser.add_argument('--lambda-u', default=50, type=float)
parser.add_argument('--T', default=0.8, type=float)
parser.add_argument('--ema-decay', default=0.999, type=float)
# print paras
parser.add_argument('--print-to-txt', default=0, type=int, choices=[0, 1], help='save all outputs to txt or not')
args = parser.parse_args()
args.resume = args.resume_dir + f'{args.dataset_name}_saved_models/' + args.resume_name
args.out = args.resume_dir + f'{args.dataset_name}_saved_models/'
state = {k: v for k, v in args._get_kwargs()}
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
use_cuda = torch.cuda.is_available()
# Random seed
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
np.random.seed(args.manualSeed)
best_acc = 0 # best test accuracy
def main():
if args.batch_size > args.n_labeled:
raise Exception("args.batch_size must be smaller than args.n_labeled")
global best_acc
if args.out and not os.path.isdir(args.out):
mkdir_p(args.out)
print(args)
# datasets settings
print('==> Preparing {}'.format(args.dataset_name))
dataset_setup = get_dataset.get_dataset_setup_by_name(args.dataset_name)
size_bottom_out = dataset_setup.size_bottom_out
num_classes = dataset_setup.num_classes
clip_function = dataset_setup.clip_one_party_data
zip_ = get_dataset.get_datasets_for_ssl(dataset_name=args.dataset_name, file_path=args.dataset_path,
n_labeled=args.n_labeled, party_num=args.party_num)
train_labeled_set, train_unlabeled_set, test_set, train_complete_dataset = zip_
# Don't use these codes if you wanna speed.
# if args.dataset_name == 'Criteo':
# args.val_iteration = len(train_unlabeled_set)
# else:
# args.val_iteration = int(len(train_unlabeled_set) / args.batch_size)
# print(f"args.val_iteration:{args.val_iteration}")
if num_classes == 4:
import matplotlib
from sklearn.manifold import TSNE
import pandas as pd
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
labeled_train_data_a_tsne = TSNE()
labeled_train_data_a_tsne.fit_transform(train_labeled_set.train_data)
df_train_data_a_tsne = pd.DataFrame(labeled_train_data_a_tsne.embedding_, index=train_labeled_set.train_labels)
# plot the TSNE result
# 0 1 2 3 4 5 6 7 8 9
colors = ['blue', 'red', 'yellow', 'green', 'cyan', 'silver', 'purple', 'saddlebrown', 'orange', 'pink']
for i in range(4):
plt.scatter(df_train_data_a_tsne.loc[i][0], df_train_data_a_tsne.loc[i][1], color=colors[i], marker='.')
plt.title('labeled_train_data_a_tsne')
# plt.show()
if args.dataset_name == 'Criteo':
labeled_trainloader, unlabeled_trainloader, test_loader, train_complete_trainloader = zip_
else:
labeled_trainloader = data.DataLoader(train_labeled_set, batch_size=args.batch_size, shuffle=True,
num_workers=0,
drop_last=True)
unlabeled_trainloader = data.DataLoader(train_unlabeled_set, batch_size=args.batch_size, shuffle=True,
num_workers=0, drop_last=True)
dataset_bs = args.batch_size * 10
test_loader = data.DataLoader(test_set, batch_size=dataset_bs, shuffle=False, num_workers=0)
train_complete_trainloader = data.DataLoader(train_complete_dataset, batch_size=dataset_bs, shuffle=True,
num_workers=0, drop_last=True)
# Model
print("==> creating bottom model plus")
def create_model(ema=False, size_bottom_out=10, num_classes=10):
model = models.BottomModelPlus(size_bottom_out, num_classes,
num_layer=args.num_layer,
activation_func_type=args.activation_func_type,
use_bn=args.use_bn)
model = model.cuda()
if ema:
for param in model.parameters():
param.detach_()
return model
model = create_model(ema=False, size_bottom_out=size_bottom_out, num_classes=num_classes)
ema_model = create_model(ema=True, size_bottom_out=size_bottom_out, num_classes=num_classes)
cudnn.benchmark = True
# print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
train_criterion = SemiLoss()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
ema_optimizer = WeightEMA(model, ema_model, alpha=args.ema_decay)
# Resume
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
if args.out:
args.out = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume, pickle_module=dill)
if args.dataset_name == "BC_IDC" or args.dataset_name == "covid":
model.bottom_model = copy.deepcopy(checkpoint.bottom_models[0])
ema_model.bottom_model = copy.deepcopy(checkpoint.bottom_models[0])
else:
print("checkpoint:", checkpoint.malicious_bottom_model_a)
model.bottom_model = copy.deepcopy(checkpoint.malicious_bottom_model_a)
ema_model.bottom_model = copy.deepcopy(checkpoint.malicious_bottom_model_a)
# model.fc.apply(weights_init_ones)
# ema_model.fc.apply(weights_init_ones)
if args.dataset_name == 'Criteo':
for param in model.bottom_model.parameters():
param.requires_grad = False
test_accs = []
print("---Label inference on complete training dataset:")
# _, train_acc = validate(train_complete_trainloader, ema_model, criterion, epoch=0,
# use_cuda=torch.cuda.is_available(), mode='Train Stats',
# num_classes=num_classes, clip_function=clip_function)
# Train and test
for epoch in range(args.epochs):
print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
train_loss, train_loss_x, train_loss_u = train(labeled_trainloader, unlabeled_trainloader, model, optimizer,
ema_optimizer, train_criterion, epoch, use_cuda,
clip_function, num_classes)
print("---Label inference on complete training dataset:")
_, train_acc = validate(train_complete_trainloader, ema_model, criterion, epoch, use_cuda, mode='Train Stats',
num_classes=num_classes, clip_function=clip_function)
print("\n---Label inference on testing dataset:")
test_loss, test_acc = validate(test_loader, ema_model, criterion, epoch, use_cuda, mode='Test Stats',
num_classes=num_classes, clip_function=clip_function)
# save model
is_best = train_acc > best_acc
best_acc = max(train_acc, best_acc)
if args.out:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'ema_state_dict': ema_model.state_dict(),
'train_acc': train_acc,
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
}, is_best)
test_accs.append(test_acc)
print('Best top 1 accuracy:')
print(best_acc)
def train(labeled_trainloader, unlabeled_trainloader, model, optimizer, ema_optimizer, criterion, epoch, use_cuda,
clip_function, num_classes):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
losses_x = AverageMeter()
losses_u = AverageMeter()
ws = AverageMeter()
end = time.time()
labeled_train_iter = iter(labeled_trainloader)
unlabeled_train_iter = iter(unlabeled_trainloader)
model.train()
for batch_idx in range(args.val_iteration):
if args.dataset_name == 'Criteo':
if batch_idx == 1000 - args.n_labeled:
break
inputs_x, targets_x = labeled_trainloader[batch_idx % len(labeled_trainloader)]
inputs_u, _ = unlabeled_trainloader[batch_idx % len(unlabeled_trainloader)]
else:
            try:
                inputs_x, targets_x = next(labeled_train_iter)
                # inputs_x, targets_x = labeled_trainloader.dataset[batch_idx]
            except StopIteration:
                labeled_train_iter = iter(labeled_trainloader)
                inputs_x, targets_x = next(labeled_train_iter)
            try:
                inputs_u, _ = next(unlabeled_train_iter)
            except StopIteration:
                unlabeled_train_iter = iter(unlabeled_trainloader)
                inputs_u, _ = next(unlabeled_train_iter)
# measure data loading time
data_time.update(time.time() - end)
        # In the vertical federated learning scenario, the attacker (party A) only has part of the features, i.e. half of the image.
inputs_x = clip_function(inputs_x, args.half)
inputs_u = clip_function(inputs_u, args.half)
inputs_x = inputs_x.type(torch.float)
inputs_u = inputs_u.type(torch.float)
batch_size = inputs_x.size(0)
# Transform label to one-hot
targets_x = targets_x.view(-1, 1).type(torch.long)
targets_x = torch.zeros(batch_size, num_classes).scatter_(1, targets_x, 1)
if use_cuda:
inputs_x, targets_x = inputs_x.cuda(), targets_x.cuda(non_blocking=True)
inputs_u = inputs_u.cuda()
with torch.no_grad():
            # compute guessed labels of unlabeled samples
outputs_u = model(inputs_u)
p = torch.softmax(outputs_u, dim=1)
pt = p ** (1 / args.T)
targets_u = pt / pt.sum(dim=1, keepdim=True)
targets_u = targets_u.detach()
# mixup
all_inputs = torch.cat([inputs_x, inputs_u], dim=0)
all_targets = torch.cat([targets_x, targets_u], dim=0)
l = np.random.beta(args.alpha, args.alpha)
l = max(l, 1 - l)
idx = torch.randperm(all_inputs.size(0))
input_a, input_b = all_inputs, all_inputs[idx]
target_a, target_b = all_targets, all_targets[idx]
mixed_input = l * input_a + (1 - l) * input_b
mixed_target = l * target_a + (1 - l) * target_b
# interleave labeled and unlabeled samples between batches to get correct batch norm calculation
mixed_input = list(torch.split(mixed_input, batch_size))
mixed_input = interleave(mixed_input, batch_size)
logits = [model(mixed_input[0])]
for input in mixed_input[1:]:
logits.append(model(input))
# put interleaved samples back
logits = interleave(logits, batch_size)
logits_x = logits[0]
logits_u = torch.cat(logits[1:], dim=0)
Lx, Lu, w = criterion(logits_x, mixed_target[:batch_size], logits_u, mixed_target[batch_size:],
epoch + batch_idx / args.val_iteration)
loss = Lx + w * Lu
# record loss
losses.update(loss.item(), inputs_x.size(0))
losses_x.update(Lx.item(), inputs_x.size(0))
losses_u.update(Lu.item(), inputs_x.size(0))
ws.update(w, inputs_x.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
ema_optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print('one batch training done')
if batch_idx % 250 == 0:
print("batch_idx:", batch_idx, " loss:", losses.avg)
return losses.avg, losses_x.avg, losses_u.avg
def validate(valloader, model, criterion, epoch, use_cuda, mode, num_classes, clip_function):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
topk = AverageMeter()
precision = AverageMeter()
recall = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valloader):
            # In the vertical federated learning scenario, the attacker (party A) only has part of the features, i.e. half of the image
inputs = clip_function(inputs, args.half)
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
# compute output
inputs = inputs.type(torch.float)
outputs = model(inputs)
targets = targets.type(torch.long)
loss = criterion(outputs, targets)
# measure accuracy and record loss
prec1, preck = accuracy(outputs, targets, topk=(1, args.k))
if num_classes == 2:
prec, rec = precision_recall(outputs, targets)
precision.update(prec, inputs.size(0))
recall.update(rec, inputs.size(0))
# print("batch_id", batch_idx, end='')
# print(" precision", precision.avg, end='')
# print(", recall", recall.avg, end='')
# print(" F1", 2 * (precision.avg * recall.avg) / (precision.avg + recall.avg))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1.item(), inputs.size(0))
topk.update(preck.item(), inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print('one batch done')
print("Dataset Overall Statistics:")
if num_classes == 2:
print(" precision", precision.avg, end='')
print(" recall", recall.avg, end='')
if (precision.avg + recall.avg) != 0:
print(" F1", 2 * (precision.avg * recall.avg) / (precision.avg + recall.avg), end='')
else:
print(f"F1:0")
print("top 1 accuracy:{}, top {} accuracy:{}".format(top1.avg, args.k, topk.avg))
return losses.avg, top1.avg
def save_checkpoint(state, is_best, checkpoint=args.out, filename=f'{args.dataset_name}_mc_checkpoint.pth'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, f'{args.dataset_name}_mc_best.pth'))
def linear_rampup(current, rampup_length=args.epochs):
if rampup_length == 0:
return 1.0
else:
current = np.clip(current / rampup_length, 0.0, 1.0)
return float(current)
class SemiLoss(object):
def __call__(self, outputs_x, targets_x, outputs_u, targets_u, epoch):
probs_u = torch.softmax(outputs_u, dim=1)
Lx = -torch.mean(torch.sum(F.log_softmax(outputs_x, dim=1) * targets_x, dim=1))
Lu = torch.mean((probs_u - targets_u) ** 2)
return Lx, Lu, args.lambda_u * linear_rampup(epoch)
class WeightEMA(object):
def __init__(self, model, ema_model, alpha=0.999):
self.model = model
self.ema_model = ema_model
self.alpha = alpha
self.params = list(model.state_dict().values())
self.ema_params = list(ema_model.state_dict().values())
self.wd = 0.02 * args.lr
for param, ema_param in zip(self.params, self.ema_params):
param.data.copy_(ema_param.data)
def step(self):
one_minus_alpha = 1.0 - self.alpha
for param, ema_param in zip(self.params, self.ema_params):
ema_param = ema_param.type(torch.float)
ema_param.mul_(self.alpha)
ema_param.add_(param * one_minus_alpha)
# customized weight decay
param = param.type(torch.float)
param.mul_(1 - self.wd)
def interleave_offsets(batch, nu):
groups = [batch // (nu + 1)] * (nu + 1)
for x in range(batch - sum(groups)):
groups[-x - 1] += 1
offsets = [0]
for g in groups:
offsets.append(offsets[-1] + g)
assert offsets[-1] == batch
return offsets
def interleave(xy, batch):
nu = len(xy) - 1
offsets = interleave_offsets(batch, nu)
xy = [[v[offsets[p]:offsets[p + 1]] for p in range(nu + 1)] for v in xy]
for i in range(1, nu + 1):
xy[0][i], xy[i][i] = xy[i][i], xy[0][i]
return [torch.cat(v, dim=0) for v in xy]
if __name__ == '__main__':
if args.print_to_txt == 0:
main()
else:
inference_head_setting_str = f'_layer={args.num_layer}' \
f'_func={args.activation_func_type}' \
f'_bn={args.use_bn}' \
f'_nlabeled={args.n_labeled}'
txt_name = 'model_completion_' + args.resume_name + inference_head_setting_str + '.txt'
savedStdout = sys.stdout
with open(args.resume_dir + f'{args.dataset_name}_saved_models/' + txt_name, 'w+') as file:
sys.stdout = file
main()
sys.stdout = savedStdout
print('Results saved to txt!')
```
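The `SemiLoss`/`linear_rampup` pair above implements a MixMatch-style semi-supervised objective: soft-target cross-entropy on the labeled part of the mixed batch plus an L2 consistency term on the unlabeled part, with the unlabeled weight ramped up linearly over training. A minimal, self-contained sketch of that computation (tensor shapes and the `lambda_u` value are illustrative, not taken from the script above):
```python
import torch
import torch.nn.functional as F

def mixmatch_style_loss(logits_x, targets_x, logits_u, targets_u, ramp, lambda_u=50.0):
    # Supervised term: cross-entropy against the soft (mixed-up) labeled targets.
    loss_x = -torch.mean(torch.sum(F.log_softmax(logits_x, dim=1) * targets_x, dim=1))
    # Unsupervised term: squared distance between predictions and guessed labels.
    loss_u = torch.mean((torch.softmax(logits_u, dim=1) - targets_u) ** 2)
    # ramp in [0, 1] linearly scales the unlabeled weight, as linear_rampup does above.
    return loss_x, loss_u, lambda_u * ramp

# Toy call with random tensors standing in for model outputs and mixed targets.
logits_x, logits_u = torch.randn(8, 10), torch.randn(8, 10)
targets_x = torch.softmax(torch.randn(8, 10), dim=1)
targets_u = torch.softmax(torch.randn(8, 10), dim=1)
lx, lu, w = mixmatch_style_loss(logits_x, targets_x, logits_u, targets_u, ramp=0.5)
total_loss = lx + w * lu
```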
#### File: Code/my_utils/eval.py
```python
from __future__ import print_function, absolute_import
import numpy as np
__all__ = ['accuracy', 'precision_recall']
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def precision_recall(output, target):
right_samples_num = 0
TP_samples_num = 0
TN_samples_num = 0
FP_samples_num = 0
FN_samples_num = 0
wrong_samples_num = 0
_, pred = output.topk(1, 1, True, True)
pred = pred.t()
y_true = np.array(target.clone().detach().cpu())
y_pred = np.array(pred.clone().detach().cpu()[0])
if sum(y_pred) == 0:
y_pred = np.ones_like(y_pred)
# print("y_true:", y_true)
# print("y_pred:", y_pred)
for i in range(len(y_true)):
if y_true[i] == y_pred[i]:
if y_true[i] == 1.:
TP_samples_num += 1
else:
TN_samples_num += 1
right_samples_num += 1
else:
if y_pred[i] == 1.:
FP_samples_num += 1
else:
FN_samples_num += 1
wrong_samples_num += 1
if (TP_samples_num + FP_samples_num) != 0:
precision = TP_samples_num / (TP_samples_num + FP_samples_num)
else:
precision = 0
if (TP_samples_num + FN_samples_num) != 0:
recall = TP_samples_num / (TP_samples_num + FN_samples_num)
else:
recall = 0
return precision, recall
```
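A quick usage sketch for the two metrics above; the import path is assumed from the file layout shown here, and the tensors are random placeholders:
```python
import torch
from my_utils.eval import accuracy, precision_recall  # path assumed from the layout above

logits = torch.randn(32, 5)            # 32 samples, 5 classes
labels = torch.randint(0, 5, (32,))    # ground-truth class indices

top1, top2 = accuracy(logits, labels, topk=(1, 2))
print(top1.item(), top2.item())        # accuracies in percent

# precision_recall assumes binary labels (0/1) and uses the top-1 prediction.
binary_logits = torch.randn(32, 2)
binary_labels = torch.randint(0, 2, (32,))
prec, rec = precision_recall(binary_logits, binary_labels)
print(prec, rec)
```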
#### File: label-inference-attacks/Code/upper_bound_testing.py
```python
import argparse
import ast
import os
import time
import dill
from time import time
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import pandas as pd
import datasets.get_dataset as get_dataset
from my_utils import utils
from models import model_sets, bottom_model_plus
import my_optimizers
import possible_defenses
import torch.nn.functional as F
plt.switch_backend('agg')
D_ = 2 ** 13
BATCH_SIZE = 1000
def split_data_xa(data):
if args.dataset_name == 'Liver':
x_a = data[:, 0:args.half]
elif args.dataset_name in ['CIFAR10', 'CIFAR100', 'CINIC10L']:
x_a = data[:, :, :, 0:args.half]
elif args.dataset_name == 'TinyImageNet':
x_a = data[:, :, :, 0:args.half]
elif args.dataset_name == 'Criteo':
x_a = data[:, 0:args.half]
else:
raise Exception('Unknown dataset name!')
return x_a
def create_model(size_bottom_out=10, num_classes=10):
model = bottom_model_plus.BottomModelPlus(size_bottom_out, num_classes)
model = model.cuda()
return model
def correct_counter(output, target, topk=(1, 5)):
correct_counts = []
for k in topk:
_, pred = output.topk(k, 1, True, True)
correct_k = torch.eq(pred, target.view(-1, 1)).sum().float().item()
correct_counts.append(correct_k)
return correct_counts
def train_model(train_loader, model, optimizer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data = split_data_xa(data)
data = data.float().cuda()
target = target.long().cuda()
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
interval = int(0.1 * len(train_loader))
interval_num = int(batch_idx / interval)
interval_left = batch_idx % interval
if interval_left == 0:
print(f"{interval_num}0% completed, loss is {loss}")
def test_per_epoch(test_loader, model, k=5):
test_loss = 0
correct_top1 = 0
correct_topk = 0
count = 0
with torch.no_grad():
for data, target in test_loader:
data = data.float().cuda()
target = target.long().cuda()
# set all sub-models to eval mode.
model.eval()
# run forward process of the whole framework
x_a = split_data_xa(data)
output = model(x_a)
correct_top1_batch, correct_topk_batch = correct_counter(output, target, (1, k))
# sum up batch loss
test_loss += F.cross_entropy(output, target).data.item()
correct_top1 += correct_top1_batch
correct_topk += correct_topk_batch
# print("one batch done")
count += 1
if count % int(0.1 * len(test_loader)) == 0 and count // int(0.1 * len(test_loader)) > 0:
print(f'{count // int(0.1 * len(test_loader))}0 % completed...')
if args.dataset_name == 'Criteo' and count == test_loader.train_batches_num:
break
if args.dataset_name == 'Criteo':
num_samples = len(test_loader) * BATCH_SIZE
else:
num_samples = len(test_loader.dataset)
test_loss /= num_samples
print('Loss: {:.4f}, Top 1 Accuracy: {}/{} ({:.2f}%), Top {} Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss,
correct_top1, num_samples, 100.00 * float(correct_top1) / num_samples,
k,
correct_topk, num_samples, 100.00 * float(correct_topk) / num_samples
))
def set_loaders():
dataset_setup = get_dataset.get_dataset_setup_by_name(args.dataset_name)
train_dataset = dataset_setup.get_transformed_dataset(args.path_dataset, None, True)
test_dataset = dataset_setup.get_transformed_dataset(args.path_dataset, None, False)
if args.dataset_name == 'Criteo':
train_loader = train_dataset
test_loader = test_dataset
else:
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, shuffle=True,
batch_size=args.batch_size,
# num_workers=args.workers
)
test_loader = torch.utils.data.DataLoader(
dataset=test_dataset,
batch_size=args.batch_size,
# num_workers=args.workers
)
return train_loader, test_loader
def main():
# write experiment setting into file name
setting_str = ""
setting_str += "_"
setting_str += "lr="
setting_str += str(args.lr)
setting_str += "_upperbound"
setting_str += "_"
setting_str += "half="
setting_str += str(args.half)
print("settings:", setting_str)
print('==> Preparing {}'.format(args.dataset_name))
dataset_setup = get_dataset.get_dataset_setup_by_name(args.dataset_name)
size_bottom_out = dataset_setup.size_bottom_out
num_classes = dataset_setup.num_classes
model = create_model(size_bottom_out=size_bottom_out, num_classes=num_classes)
model.bottom_model = model_sets.BottomModel(args.dataset_name).get_model(args.half, True)
model = model.cuda()
cudnn.benchmark = True
stone1 = args.stone1 # 50 int(args.epochs * 0.5)
stone2 = args.stone2 # 85 int(args.epochs * 0.8)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[stone1, stone2], gamma=args.step_gamma)
train_loader, val_loader = set_loaders()
dir_save_model = args.save_dir + f"/saved_models/{args.dataset_name}_saved_models"
if not os.path.exists(dir_save_model):
os.makedirs(dir_save_model)
# start training. do evaluation every epoch.
print('Test the initialized model:')
print('Evaluation on the training dataset:')
# test_per_epoch(test_loader=train_loader, model=model, k=args.k)
print('Evaluation on the testing dataset:')
# test_per_epoch(test_loader=val_loader, model=model, k=args.k)
for epoch in range(args.epochs):
print('optimizer current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
train_model(train_loader, model, optimizer)
lr_scheduler.step()
if epoch == args.epochs - 1:
txt_name = f"{args.dataset_name}_saved_framework{setting_str}"
savedStdout = sys.stdout
with open(dir_save_model + '/' + txt_name + '.txt', 'w+') as file:
sys.stdout = file
print('Evaluation on the training dataset:')
test_per_epoch(test_loader=train_loader, model=model, k=args.k)
print('Evaluation on the testing dataset:')
test_per_epoch(test_loader=val_loader, model=model, k=args.k)
sys.stdout = savedStdout
print('Last epoch evaluation saved to txt!')
print('Evaluation on the training dataset:')
test_per_epoch(test_loader=train_loader, model=model, k=args.k)
print('Evaluation on the testing dataset:')
test_per_epoch(test_loader=val_loader, model=model, k=args.k)
# save model
torch.save(model, os.path.join(dir_save_model, f"{args.dataset_name}_saved_framework{setting_str}.pth"),
pickle_module=dill)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='vfl framework training')
# dataset paras
parser.add_argument('-d', '--dataset-name', default='Criteo', type=str,
help='name of dataset',
choices=['CIFAR10', 'CIFAR100', 'TinyImageNet', 'CINIC10L', 'Liver', 'Criteo'])
parser.add_argument('--path-dataset', help='path_dataset',
type=str, default='D:/Datasets/Criteo/criteo.csv')
'''
'D:/Datasets/CIFAR10'
'D:/Datasets/CIFAR100'
'D:/Datasets/TinyImageNet'
'D:/Datasets/CINIC10L'
'D:/Datasets/BC_IDC'
'D:/Datasets/Criteo/criteo1e?.csv'
'''
# framework paras
    parser.add_argument('--half', help='half number of features, generally seen as the adversary\'s feature num. '
                                       'You can change this parameter (lower than party_num) to evaluate the '
                                       'sensitivity of our attack -- please make sure that the model to be resumed '
                                       'is correspondingly trained.',
type=int,
default=16) # choices=[16, 14, 32, 1->party_num]. CIFAR10-16, Liver-14, TinyImageNet-32
# evaluation & visualization paras
parser.add_argument('--k', help='top k accuracy',
type=int, default=5)
# saving path paras
parser.add_argument('--save-dir', dest='save_dir',
help='The directory used to save the trained models and csv files',
default='D:/MyCodes/label_inference_attacks_against_vfl/saved_experiment_results', type=str)
# training paras
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batch-size', default=32, type=int,
                        metavar='N', help='mini-batch size (default: 32)')
parser.add_argument('--lr', '--learning-rate', default=1e-1, type=float,
metavar='LR', help='initial learning rate') # TinyImageNet=5e-2
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
metavar='W', help='weight decay (default: 5e-4)')
parser.add_argument('--step-gamma', default=0.1, type=float, metavar='S',
help='gamma for step scheduler')
parser.add_argument('--stone1', default=50, type=int, metavar='s1',
help='stone1 for step scheduler')
parser.add_argument('--stone2', default=85, type=int, metavar='s2',
help='stone2 for step scheduler')
args = parser.parse_args()
main()
``` |
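The `split_data_xa` helper above encodes the vertical-federated-learning assumption that the adversary (party A) holds only the first `half` feature columns of each sample. A toy illustration for CIFAR-style image data (shapes are illustrative):
```python
import torch

half = 16
batch = torch.randn(4, 3, 32, 32)   # NCHW batch of fake images
x_a = batch[:, :, :, 0:half]        # party A keeps the left half of every image
print(x_a.shape)                    # torch.Size([4, 3, 32, 16])
```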
{
"source": "JiahuaWU/fastai",
"score": 3
} |
#### File: fastai/examples/train_imagenet.py
```python
from fastai.script import *
from fastai.vision import *
from fastai.callbacks import *
from fastai.distributed import *
from fastai.callbacks.tracker import *
torch.backends.cudnn.benchmark = True
import time
def get_data(path, size, bs, workers):
tfms = ([
flip_lr(p=0.5),
brightness(change=(0.4,0.6)),
contrast(scale=(0.7,1.3))
], [])
train = ImageList.from_csv(path, 'train.csv')
valid = ImageList.from_csv(path, 'valid.csv')
lls = ItemLists(path, train, valid).label_from_df().transform(
tfms, size=size).presize(size, scale=(0.25, 1.0))
return lls.databunch(bs=bs, num_workers=workers).normalize(imagenet_stats)
@call_parse
def main( gpu:Param("GPU to run on", str)=None ):
"""Distributed training of Imagenet. Fastest speed is if you run with: python -m fastai.launch"""
path = Path('/mnt/fe2_disk/')
tot_epochs,size,bs,lr = 60,224,256,3e-1
dirname = 'imagenet'
gpu = setup_distrib(gpu)
if gpu is None: bs *= torch.cuda.device_count()
n_gpus = num_distrib() or 1
workers = min(12, num_cpus()//n_gpus)
data = get_data(path/dirname, size, bs, workers)
b_its = len(data.train_dl)//n_gpus
# Using bs 256 on single GPU as baseline, scale the LR linearly
tot_bs = bs*n_gpus
bs_rat = tot_bs/256
lr *= bs_rat
ph1 = (TrainingPhase(tot_epochs*0.10*b_its)
.schedule_hp('lr', (lr/10,lr), anneal=annealing_cos))
ph2 = (TrainingPhase(tot_epochs*0.90*b_its)
.schedule_hp('lr', (lr,lr/1e5), anneal=annealing_cos))
opt_func = partial(optim.Adam, eps=0.1, betas=(0.9,0.99))
learn = Learner(data, models.xresnet50(), metrics=[accuracy,top_k_accuracy], wd=1e-3,
opt_func=opt_func, bn_wd=False, true_wd=True,
loss_func = LabelSmoothingCrossEntropy()).mixup(alpha=0.2)
learn.callback_fns += [
partial(GeneralScheduler, phases=(ph1,ph2)),
partial(SaveModelCallback, every='epoch', name='model')
]
learn.split(lambda m: (children(m)[-2],))
if gpu is None: learn.model = nn.DataParallel(learn.model)
else: learn.to_distributed(gpu)
learn.to_fp16(dynamic=True)
learn.fit(tot_epochs, 1)
if rank_distrib(): time.sleep(1)
learn.save('done')
```
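The learning-rate handling above follows the common linear scaling rule: with a single-GPU batch size of 256 as the reference, the base rate is multiplied by total_batch/256 once the effective batch grows with more GPUs. A small sketch of that arithmetic with illustrative numbers:
```python
base_lr, ref_bs = 3e-1, 256      # reference setup: bs 256 on one GPU
bs_per_gpu, n_gpus = 256, 8      # illustrative multi-GPU run
tot_bs = bs_per_gpu * n_gpus
scaled_lr = base_lr * tot_bs / ref_bs
print(tot_bs, scaled_lr)         # 2048 2.4
```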
#### File: fastai/callbacks/general_sched.py
```python
from ..core import *
from ..callback import *
from ..basic_train import Learner, LearnerCallback
__all__ = ['GeneralScheduler', 'TrainingPhase']
@dataclass
class TrainingPhase():
"Schedule hyper-parameters for a phase of `length` iterations."
length:int
def __post_init__(self): self.scheds = dict()
def schedule_hp(self, name, vals, anneal=None):
"Adds a schedule for `name` between `vals` using `anneal`."
self.scheds[name] = Scheduler(vals, self.length, anneal)
return self
class GeneralScheduler(LearnerCallback):
"Schedule multiple `TrainingPhase` for a `Learner`."
def __init__(self, learn:Learner, phases:Collection[TrainingPhase], start_epoch:int=None):
super().__init__(learn)
self.phases,self.start_epoch = phases,start_epoch
def on_train_begin(self, epoch:int, **kwargs:Any)->None:
"Initialize the schedulers for training."
res = {'epoch':self.start_epoch} if self.start_epoch is not None else None
self.start_epoch = ifnone(self.start_epoch, epoch)
self.scheds = [p.scheds for p in self.phases]
self.opt = self.learn.opt
for k,v in self.scheds[0].items():
v.restart()
self.opt.set_stat(k, v.start)
self.idx_s = 0
return res
def jump_to_epoch(self, epoch:int)->None:
for _ in range(len(self.learn.data.train_dl) * epoch):
self.on_batch_end(True)
def on_batch_end(self, train, **kwargs:Any)->None:
"Take a step in lr,mom sched, start next stepper when the current one is complete."
if train:
if self.idx_s >= len(self.scheds): return {'stop_training': True, 'stop_epoch': True}
sched = self.scheds[self.idx_s]
for k,v in sched.items(): self.opt.set_stat(k, v.step())
if list(sched.values())[0].is_done: self.idx_s += 1
```
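A hedged usage sketch of the two classes above, mirroring the warm-up-then-cosine-decay schedule used in train_imagenet.py earlier in this collection; it assumes a fastai v1 environment and an existing `learn` object, so the Learner-specific lines are left commented:
```python
from functools import partial
from fastai.vision import *  # provides annealing_cos in fastai v1
from fastai.callbacks.general_sched import TrainingPhase, GeneralScheduler

lr, b_its, epochs = 1e-2, 100, 10   # illustrative values: base lr, batches per epoch, epochs
ph1 = TrainingPhase(0.1 * epochs * b_its).schedule_hp('lr', (lr / 10, lr), anneal=annealing_cos)
ph2 = TrainingPhase(0.9 * epochs * b_its).schedule_hp('lr', (lr, lr / 1e5), anneal=annealing_cos)

# learn.callback_fns.append(partial(GeneralScheduler, phases=(ph1, ph2)))
# learn.fit(epochs, lr)
```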
#### File: fastai/callbacks/oversampling.py
```python
from ..torch_core import *
from ..basic_data import DataBunch
from ..callback import *
from ..basic_train import Learner,LearnerCallback
from torch.utils.data.sampler import WeightedRandomSampler
__all__ = ['OverSamplingCallback']
class OverSamplingCallback(LearnerCallback):
def __init__(self,learn:Learner,weights:torch.Tensor=None):
super().__init__(learn)
self.weights = weights
def on_train_begin(self, **kwargs):
self.old_dl = self.data.train_dl
self.labels = self.data.train_dl.y.items
assert np.issubdtype(self.labels.dtype, np.integer), "Can only oversample integer values"
_,self.label_counts = np.unique(self.labels,return_counts=True)
if self.weights is None: self.weights = torch.DoubleTensor((1/self.label_counts)[self.labels])
self.total_len_oversample = int(self.data.c*np.max(self.label_counts))
sampler = WeightedRandomSampler(self.weights, self.total_len_oversample)
self.data.train_dl = self.data.train_dl.new(shuffle=False, sampler=sampler)
def on_train_end(self, **kwargs):
"Reset dataloader to its original state"
self.data.train_dl = self.old_dl
```
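The weighting scheme inside `OverSamplingCallback` can be reproduced standalone: each sample is drawn with probability inversely proportional to its class frequency. A small sketch with a toy, imbalanced label array:
```python
import numpy as np
import torch
from torch.utils.data.sampler import WeightedRandomSampler

labels = np.array([0, 0, 0, 0, 0, 0, 1, 1, 2])        # imbalanced toy labels
_, counts = np.unique(labels, return_counts=True)
weights = torch.DoubleTensor((1.0 / counts)[labels])  # per-sample inverse-frequency weights
sampler = WeightedRandomSampler(weights, num_samples=3 * len(labels))
print(list(sampler))  # minority-class indices now appear about as often as majority ones
```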
#### File: fastai/utils/pynvml_gate.py
```python
import platform
from ..script import *
#
# BEGIN: Temporary workaround for nvml.dll load issue in Win10
#
# Remove once nicolargo/nvidia-ml-py3#2 and a new version of the module is released
# (OR fbcotter/py3nvml#10 but will require extra work to rename things)
# Refer https://forums.fast.ai/t/nvml-dll-loading-issue-in-nvidia-ml-py3-7-352-0-py-0/39684/8
import threading
from ctypes import *
nvmlLib = None
libLoadLock = threading.Lock()
def _LoadNvmlLibrary():
'''
Load the library if it isn't loaded already
'''
global nvmlLib
if (nvmlLib == None):
libLoadLock.acquire()
try:
if (nvmlLib == None):
try:
if (sys.platform[:3] == "win"):
searchPaths = [
os.path.join(os.getenv("ProgramFiles", r"C:\Program Files"), r"NVIDIA Corporation\NVSMI\nvml.dll"),
os.path.join(os.getenv("WinDir", r"C:\Windows"), r"System32\nvml.dll"),
]
nvmlPath = next((x for x in searchPaths if os.path.isfile(x)), None)
if (nvmlPath == None):
nvmlLib = None
else:
nvmlLib = CDLL(nvmlPath)
else:
nvmlLib = None
except OSError as ose:
nvmlLib = None
finally:
libLoadLock.release()
#
# END: Temporary workaround for nvml.dll load issue in Win10
#
def load_pynvml_env():
import pynvml # nvidia-ml-py3
#
# BEGIN: Temporary workaround for nvml.dll load issue in Win10 (continued)
_LoadNvmlLibrary()
pynvml.nvmlLib = nvmlLib
#
# END: Temporary workaround for nvml.dll load issue in Win10
#
if platform.system() == "Darwin":
try:
from pynvx import pynvml
except:
print("please install pynvx on OSX: pip install pynvx")
sys.exit(1)
pynvml.nvmlInit()
return pynvml
pynvml.nvmlInit()
return pynvml
```
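A hedged usage sketch of `load_pynvml_env`, assuming an NVIDIA GPU and the nvidia-ml-py3 package are available; the memory-query calls are standard pynvml API:
```python
from fastai.utils.pynvml_gate import load_pynvml_env

pynvml = load_pynvml_env()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)
info = pynvml.nvmlDeviceGetMemoryInfo(handle)
print(f"GPU0: {info.free / 2**20:.0f} MiB free of {info.total / 2**20:.0f} MiB")
```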
#### File: fastai/widgets/image_downloader.py
```python
from ..core import *
from ..vision.data import *
from ipywidgets import widgets, Layout, Output, HBox, VBox, Text, BoundedIntText, Button, Dropdown, Box
from IPython.display import clear_output, display
from urllib.parse import quote
from bs4 import BeautifulSoup
import time
__all__ = ['ImageDownloader', 'download_google_images']
_img_sizes = {'>400*300':'isz:lt,islt:qsvga','>640*480':'isz:lt,islt:vga','>800*600':'isz:lt,islt:svga',
              '>1024*768':'isz:lt,islt:xga', '>2MP':'isz:lt,islt:2mp','>4MP':'isz:lt,islt:4mp','>6MP':'isz:lt,islt:6mp',
'>8MP':'isz:lt,islt:8mp', '>10MP':'isz:lt,islt:10mp','>12MP':'isz:lt,islt:12mp','>15MP':'isz:lt,islt:15mp',
'>20MP':'isz:lt,islt:20mp','>40MP':'isz:lt,islt:40mp','>70MP':'isz:lt,islt:70mp'}
class ImageDownloader():
"""
Displays a widget that allows searching and downloading images from google images search
in a Jupyter Notebook or Lab.
"""
def __init__(self, path:Union[Path,str]='data'):
"Setup path to save images to, init the UI, and render the widgets."
self._path = Path(path)
self._ui = self._init_ui()
self.render()
def _init_ui(self) -> VBox:
"Initialize the widget UI and return the UI."
self._search_input = Text(placeholder="What images to search for?")
self._count_input = BoundedIntText(placeholder="How many pics?", value=10, min=1, max=5000, step=1,
layout=Layout(width='60px'))
self._size_input = Dropdown(options= _img_sizes.keys(), value='>400*300', layout=Layout(width='120px'))
self._download_button = Button(description="Search & Download", icon="download", layout=Layout(width='200px'))
self._download_button.on_click(self.on_download_button_click)
self._output = Output()
self._controls_pane = HBox([self._search_input, self._count_input, self._size_input, self._download_button],
layout=Layout(width='auto', height='40px'))
self._heading = ""
self._download_complete_heading = "<h3>Download complete. Here are a few images</h3>"
self._preview_header = widgets.HTML(self._heading, layout=Layout(height='60px'))
self._img_pane = Box(layout=Layout(display='inline'))
return VBox([self._controls_pane, self._preview_header, self._img_pane])
def render(self) -> None:
clear_output()
display(self._ui)
def clear_imgs(self) -> None:
"Clear the widget's images preview pane."
self._preview_header.value = self._heading
self._img_pane.children = tuple()
def validate_search_input(self) -> bool:
"Check if input value is empty."
input = self._search_input
if input.value == str(): input.layout = Layout(border="solid 2px red", height='auto')
else: self._search_input.layout = Layout()
return input.value != str()
def on_download_button_click(self, btn) -> None:
"Download button click handler: validate search term and download images."
term = self._search_input.value
limit = int(self._count_input.value)
size = self._size_input.value
if not self.validate_search_input(): return
self.clear_imgs()
downloaded_images = download_google_images(self._path, term, n_images=limit, size=size)
self.display_images_widgets(downloaded_images[:min(limit, 12)])
self._preview_header.value = self._download_complete_heading
self.render()
def display_images_widgets(self, fnames:list) -> None:
"Display a few preview images in the notebook"
imgs = [widgets.Image(value=open(f, 'rb').read(), width='200px') for f in fnames]
self._img_pane.children = tuple(imgs)
def download_google_images(path:PathOrStr, search_term:str, size:str='>400*300', n_images:int=10, format:str='jpg',
max_workers:int=defaults.cpus, timeout:int=4) -> FilePathList:
"""
Search for `n_images` images on Google, matching `search_term` and `size` requirements,
download them into `path`/`search_term` and verify them, using `max_workers` threads.
"""
label_path = Path(path)/search_term
search_url = _search_url(search_term, size=size, format=format)
if n_images <= 100: img_tuples = _fetch_img_tuples(search_url, format=format, n_images=n_images)
else: img_tuples = _fetch_img_tuples_webdriver(search_url, format=format, n_images=n_images)
downloaded_images = _download_images(label_path, img_tuples, max_workers=max_workers, timeout=timeout)
if len(downloaded_images) == 0: raise RuntimeError(f"Couldn't download any images.")
verify_images(label_path, max_workers=max_workers)
return get_image_files(label_path)
def _url_params(size:str='>400*300', format:str='jpg') -> str:
"Build Google Images Search Url params and return them as a string."
_fmts = {'jpg':'ift:jpg','gif':'ift:gif','png':'ift:png','bmp':'ift:bmp', 'svg':'ift:svg','webp':'webp','ico':'ift:ico'}
if size not in _img_sizes:
raise RuntimeError(f"""Unexpected size argument value: {size}.
See `widgets.image_downloader._img_sizes` for supported sizes.""")
if format not in _fmts:
raise RuntimeError(f"Unexpected image file format: {format}. Use jpg, gif, png, bmp, svg, webp, or ico.")
return "&tbs=" + _img_sizes[size] + "," + _fmts[format]
def _search_url(search_term:str, size:str='>400*300', format:str='jpg') -> str:
"Return a Google Images Search URL for a given search term."
return ('https://www.google.com/search?q=' + quote(search_term) +
'&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' +
_url_params(size, format) + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg')
def _img_fname(img_url:str) -> str:
"Return image file name including the extension given its url."
return img_url.split('/')[-1]
def _fetch_img_tuples(url:str, format:str='jpg', n_images:int=10) -> list:
"Parse the Google Images Search for urls and return the image metadata as tuples (fname, url)."
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}
html = requests.get(url, headers=headers).text
return _html_to_img_tuples(html, format=format, n_images=n_images)
def _html_to_img_tuples(html:str, format:str='jpg', n_images:int=10) -> list:
"Parse the google images html to img tuples containining `(fname, url)`"
bs = BeautifulSoup(html, 'html.parser')
img_tags = bs.find_all('div', {'class': 'rg_meta'})
metadata_dicts = (json.loads(e.text) for e in img_tags)
img_tuples = ((_img_fname(d['ou']), d['ou']) for d in metadata_dicts if d['ity'] == format)
return list(itertools.islice(img_tuples, n_images))
def _fetch_img_tuples_webdriver(url:str, format:str='jpg', n_images:int=150) -> list:
"""
Parse the Google Images Search for urls and return the image metadata as tuples (fname, url).
Use this for downloads of >100 images. Requires `selenium`.
"""
try:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
except:
print("""Looks like you're trying to download > 100 images and `selenium`
is not installed. Try running `pip install selenium` to fix this.
You'll also need chrome and `chromedriver` installed.""")
options = webdriver.ChromeOptions()
options.add_argument("--headless")
try: driver = webdriver.Chrome(chrome_options=options)
except: print("""Error initializing chromedriver.
Check if it's in your path by running `which chromedriver`""")
driver.set_window_size(1440, 900)
driver.get(url)
for i in range(n_images // 100 + 1):
driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
time.sleep(0.5 + random.random()/2.0)
n_available = len(driver.find_elements_by_css_selector("div.rg_meta"))
if n_available < n_images:
raise ValueError(f"Requested {n_images} images, but only found {n_available}.")
html = driver.page_source
driver.close()
return _html_to_img_tuples(html, format=format, n_images=n_images)
def _download_images(label_path:PathOrStr, img_tuples:list, max_workers:int=defaults.cpus, timeout:int=4) -> FilePathList:
"""
Downloads images in `img_tuples` to `label_path`.
If the directory doesn't exist, it'll be created automatically.
Uses `parallel` to speed things up in `max_workers` when the system has enough CPU cores.
If something doesn't work, try setting up `max_workers=0` to debug.
"""
os.makedirs(Path(label_path), exist_ok=True)
parallel( partial(_download_single_image, label_path, timeout=timeout), img_tuples, max_workers=max_workers)
return get_image_files(label_path)
def _download_single_image(label_path:Path, img_tuple:tuple, i:int, timeout:int=4) -> None:
"""
Downloads a single image from Google Search results to `label_path`
given an `img_tuple` that contains `(fname, url)` of an image to download.
`i` is just an iteration number `int`.
"""
suffix = re.findall(r'\.\w+?(?=(?:\?|$))', img_tuple[1])
suffix = suffix[0].lower() if len(suffix)>0 else '.jpg'
fname = f"{i:08d}{suffix}"
download_url(img_tuple[1], label_path/fname, timeout=timeout)
```
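A hedged usage sketch of the downloader above. The widget is meant for notebooks, and the programmatic call needs network access plus a Google Images HTML layout the parser still understands, so the example lines are left commented:
```python
from fastai.widgets.image_downloader import ImageDownloader, download_google_images

# In a Jupyter notebook, render the interactive widget rooted at ./data:
# ImageDownloader('data')

# Or download programmatically; files land in data/<search term>/ and are verified.
# files = download_google_images('data', 'grizzly bear', size='>400*300', n_images=20)
# print(len(files), files[:3])
```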
#### File: fastai/tests/test_utils_fastai.py
```python
import pytest, fastai
from fastai.gen_doc.doctest import this_tests
def test_has_version():
this_tests('na')
assert fastai.__version__
```
#### File: fastai/tests/test_vision_models_unet.py
```python
import pytest
from fastai.gen_doc.doctest import this_tests
from fastai.vision.models import *
from fastai.vision.learner import create_body
import torch.nn as nn
import torch
@pytest.fixture
def model():
body = create_body(resnet18, pretrained=False)
for param in body.parameters():
param.requires_grad = False
return DynamicUnet(body, 10)
@pytest.fixture
def image():
return torch.randn([4,3,32,32]) # create fake image
def add_hooks(m, fn):
hooks = []
def add_hook(m):
if isinstance(m, UnetBlock):
hooks.append(m.register_forward_hook(fn))
m.apply(add_hook)
return hooks
def remove_hooks(hooks): [h.remove() for h in hooks]
def run_with_capture(m, image):
activation_shapes = []
def capture_hook(self, input, output):
activation_shapes.append(output.shape)
hooks = add_hooks(m, capture_hook)
m(image)
remove_hooks(hooks)
return activation_shapes
def test_dynamic_unet_shape(model, image):
this_tests(DynamicUnet)
pred = model(image)
assert list(pred.shape[-2:]) == [32,32] # image HxW should remain the same
assert pred.shape[1] == 10 # number of output classes
def test_unet_block_shapes(model, image):
this_tests(DynamicUnet)
expected_shapes = [[4,512,2,2],[4,384,4,4],[4,256,8,8],[4,96,16,16]]
activation_shapes = run_with_capture(model, image)
for act, exp in zip(activation_shapes, expected_shapes):
assert list(act) == exp
```
#### File: tests/utils/text.py
```python
import sys, re
from io import StringIO
# When any function contains print() calls that get overwritten, like progress bars,
# a special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# it can handle a single string or a multiline buffer
def apply_print_resets(buf):
return re.sub(r'^.*\r', '', buf, 0, re.M)
def assert_screenout(out, what):
out_pr = apply_print_resets(out).lower()
match_str = out_pr.find(what.lower())
assert match_str != -1, f"expecting to find {what} in output: f{out_pr}"
class CaptureStd():
""" Context manager to capture:
stdout, clean it up and make it available via obj.out
stderr, and make it available via obj.err
init arguments:
- out - capture stdout: True/False, default True
    - err - capture stderr: True/False, default True
Examples:
    with CaptureStd() as cs:
print("Secret message")
print(f"captured: {cs.out}")
import sys
    with CaptureStd() as cs:
print("Warning: ", file=sys.stderr)
print(f"captured: {cs.err}")
# to capture just one of the streams, but not the other
    with CaptureStd(err=False) as cs:
print("Secret message")
print(f"captured: {cs.out}")
# but best use the stream-specific subclasses
"""
def __init__(self, out=True, err=True):
if out:
self.out_buf = StringIO()
self.out = 'error: CaptureStd context is unfinished yet, called too early'
else:
self.out_buf = None
self.out = 'not capturing stdout'
if err:
self.err_buf = StringIO()
self.err = 'error: CaptureStd context is unfinished yet, called too early'
else:
self.err_buf = None
self.err = 'not capturing stderr'
def __enter__(self):
if self.out_buf:
self.out_old = sys.stdout
sys.stdout = self.out_buf
if self.err_buf:
self.err_old = sys.stderr
sys.stderr = self.err_buf
return self
def __exit__(self, *exc):
if self.out_buf:
sys.stdout = self.out_old
self.out = apply_print_resets(self.out_buf.getvalue())
if self.err_buf:
sys.stderr = self.err_old
self.err = self.err_buf.getvalue()
def __repr__(self):
msg = ''
if self.out_buf: msg += f"stdout: {self.out}\n"
if self.err_buf: msg += f"stderr: {self.err}\n"
return msg
# in tests it's the best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.
class CaptureStdout(CaptureStd):
""" Same as CaptureStd but captures only stdout """
def __init__(self):
super().__init__(err=False)
class CaptureStderr(CaptureStd):
""" Same as CaptureStd but captures only stderr """
def __init__(self):
super().__init__(out=False)
``` |
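An example pytest-style test using the helpers above; the import path is assumed from the tests/utils layout shown here:
```python
from utils.text import CaptureStdout, assert_screenout  # import path assumed

def test_progress_output_is_cleaned():
    with CaptureStdout() as cs:
        print("step 1\r", end="")   # transient progress output, later overwritten
        print("done")
    # apply_print_resets drops everything up to the \r, so only the final message remains
    assert_screenout(cs.out, "done")
```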
{
"source": "JiahuaWU/fundus-imaging",
"score": 3
} |
#### File: zeiss_umbrella/fundus/adversarial.py
```python
import torch
import numpy as np
from torchvision import transforms
# Adapted from zeiss_umbrella.adversarial
# FGSM attack code from https://pytorch.org/tutorials/beginner/fgsm_tutorial.html
def fgsm_attack(image, epsilon, data_grad):
# Collect the element-wise sign of the data gradient
sign_data_grad = data_grad.sign()
# Create the perturbed image by adjusting each pixel of the input image
perturbed_image = image + epsilon * sign_data_grad
# Adding clipping to maintain [0,1] range
perturbed_image = torch.clamp(perturbed_image, 0, 1)
# Return the perturbed image
return perturbed_image
def fgsm_k_image(data, target, model, criterion, device,
epsilon=1.0 / 255.0, alpha=None, steps=None, return_path=False, rand=False):
"""
Generate adversarial examples using bim(rand=False) or Madry-PGD(rand=True).
:param data: a set of input images from which we generate the adversarial examples
:param target: the corresponding target labels of the data
:param epsilon: maximum pixelwise amplitude of perturbation
:param model: model to be attacked
:param criterion: loss for the generation of the adversarial examples
:param device: cpu or cuda
:param alpha: step size of each step
:param steps: number of steps
    :param return_path: if True, also return the list of intermediate perturbed images (the optimization path)
:param rand: starting from a random point within the linf box or not. Yes for Madry-PGD, no for BIM
:return: a set of adversarial examples.
"""
# from https://arxiv.org/pdf/1611.01236.pdf adapted for range 0 1 instead of 0 255
if steps is None:
steps = int(np.round(min(epsilon + 4. / 255, 1.25 * epsilon) * 255))
# Alpha is set to be 2.5 * epsilon / steps as in http://arxiv.org/abs/1706.06083
if alpha is None:
alpha = 2.5 * epsilon / steps
# Send the data and label to the device
data, target = data.to(device), target.to(device)
with torch.no_grad():
if rand:
perturbed_image = data + (-2 * epsilon) * torch.rand_like(data) + epsilon
else:
perturbed_image = data
# Set requires_grad attribute of tensor. Important for Attack
perturbed_image.requires_grad = True
path = [perturbed_image]
for _ in range(steps):
# print("step",k)
# Forward pass the data through the model
output = model(perturbed_image)
# Calculate the loss
loss = criterion(output, target)
# Zero all existing gradients
model.zero_grad()
# Calculate gradients of model in backward pass
loss.backward()
with torch.no_grad():
# Collect datagrad
data_grad = perturbed_image.grad.data
# Collect the element-wise sign of the data gradient
sign_data_grad = data_grad.sign()
# Create the perturbed image by adjusting each pixel of the input image
perturbed_image = perturbed_image.detach() + alpha * sign_data_grad
# Projected the image on the l_inf circle
perturbed_image = torch.min(torch.max(perturbed_image, data - epsilon), data + epsilon)
# Adding clipping to maintain [0,1] range
perturbed_image = torch.clamp(perturbed_image, 0, 1)
if return_path:
path.append(perturbed_image.detach())
perturbed_image.requires_grad = True
# Return the perturbed image
if return_path:
return perturbed_image.detach(), path
else:
return perturbed_image.detach()
def pgd(data, target, model, criterion, device,
epsilon=1.0 / 255.0, alpha=None, steps=None, return_path=False):
return fgsm_k_image(data, target, model, criterion, device,
epsilon=epsilon, alpha=alpha, steps=steps, return_path=return_path, rand=True)
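# --- Illustrative usage sketch (not part of the library) --------------------------------
# Assuming a trained classifier `model`, a loss `criterion = nn.CrossEntropyLoss()` and a
# batch (data, target) with pixel values in [0, 1], the attacks above are typically called as:
#
#   adv_bim  = fgsm_k_image(data, target, model, criterion, device, epsilon=8 / 255)       # BIM
#   adv_pgd  = pgd(data, target, model, criterion, device, epsilon=8 / 255, steps=40)      # Madry PGD
#   adv_fgsm = fgsm_image(data, target, model, criterion, device, epsilon=8 / 255)         # one-step FGSM
#
# The epsilon values are illustrative; every perturbation stays inside the l_inf ball of
# radius epsilon around the clean images and is clipped back to the valid [0, 1] pixel range.
# -----------------------------------------------------------------------------------------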
def fgsm_image(data, target, model, criterion, device, epsilon, skip_wrong=False, **kwargs):
# Send the data and label to the device
data, target = data.to(device), target.to(device)
# Set requires_grad attribute of tensor. Important for Attack
data.requires_grad = True
# Forward pass the data through the model
output = model(data)
init_pred = torch.max(output, 1)[1] # get the index of the max log-probability
# If the initial prediction is wrong, dont bother attacking, just move on
if skip_wrong and init_pred.item() != target.item():
return None
# Calculate the loss
loss = criterion(output, target)
# Zero all existing gradients
model.zero_grad()
# Calculate gradients of model in backward pass
loss.backward()
# Collect datagrad
data_grad = data.grad.data
# Call FGSM Attack
perturbed_data = fgsm_attack(data, epsilon, data_grad)
# so we don't collect unnecessary grads if we reuse this data...
data.requires_grad = False
if "return_path" in kwargs:
return perturbed_data, None
else:
return perturbed_data
# Boundary attack
def orthogonal_perturbation(deltas, prev_samples, target_samples, device):
"""
Calculate the orthogonal move
:param device: cpu or cuda
:param deltas: amplitudes of the move of size (batch_size)
:param prev_samples: previous sample of size (batch_size, c, h, w)
:param target_samples: target sample of size (batch_size, c, h, w)
:return: the perturbation of size (batch_size, c, h, w)
"""
prev_samples, target_samples = prev_samples.to(device), target_samples.to(device)
# Generate perturbation
perturb = torch.randn_like(prev_samples) / 255 # (batch_size, c, h, w)
# Normalize and times delta * d(o, o^{k-1})
perturb *= 1. / get_diff(perturb, torch.zeros_like(perturb), device).unsqueeze(-1).unsqueeze(-1)
perturb *= (deltas * torch.mean(get_diff(target_samples, prev_samples, device))).unsqueeze(-1).unsqueeze(
-1).unsqueeze(-1)
# Calculate unit vector pointing to target samples.
diff = (target_samples - prev_samples).type(torch.float32) # (batch_size, c, h, w)
diff *= 1. / get_diff(target_samples, prev_samples, device).unsqueeze(-1).unsqueeze(-1)
# Projection onto the equidistant disc
# perturb -= torch.matmul(perturb, diff) * diff
# Calculate the inner product corresponding to frobenius norm: tr(sqrt(A.t().matmul(B)))
inner_prods = torch.einsum('...ii->...i', perturb.transpose(2, 3).matmul(diff)).sum(dim=2)
# Projection onto diff
proj = inner_prods.unsqueeze(-1).unsqueeze(-1) * diff
perturb -= proj
t = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
ones_normalized = t(torch.ones_like(perturb)[0]).repeat(perturb.shape[0], 1, 1, 1)
zeros_normalized = t(torch.zeros_like(perturb)[0]).repeat(perturb.shape[0], 1, 1, 1)
overflow = (prev_samples + perturb) - ones_normalized
perturb -= overflow * (overflow > 0).type(torch.float32)
underflow = (prev_samples + perturb) - zeros_normalized
perturb -= underflow * (underflow < 0).type(torch.float32)
return perturb.to(device)
def forward_perturbation(epsilons, prev_samples, target_samples, device):
"""
Calculate the perturbation needed towards target sample
:param device: cpu or cuda
:param epsilons: collection of epsilon of each entry in the batch size = (batch)
:param prev_samples: previous samples
:param target_samples: target samples
:return: the perturbation of size (batch_size, c, h, w)
"""
prev_samples, target_samples = prev_samples.to(device), target_samples.to(device)
perturb = (target_samples - prev_samples).type(torch.float32)
perturb *= 1. / get_diff(target_samples, prev_samples, device).unsqueeze(-1).unsqueeze(-1)
perturb *= epsilons.unsqueeze(-1).unsqueeze(-1)
return perturb.to(device)
def get_diff(samples_1, samples_2, device):
"""
Get the frobenius norm of difference between sample_1 and sample_2
:param device: cpu or cuda
:param samples_1: (batch_size, c, h, w) or (batch_size, h, w, c)
:param samples_2: (batch_size, c, h, w) or (batch_size, h, w, c)
:return: (batch_size, 3) dimension tensor of difference at each dimension
"""
samples_1, samples_2 = samples_1.to(device), samples_2.to(device)
if samples_1.shape[1] != 3:
samples_1 = samples_1.clone().permute(0, 3, 1, 2).to(device)
if samples_2.shape[1] != 3:
samples_2 = samples_2.clone().permute(0, 3, 1, 2).to(device)
batch_size = samples_1.shape[0]
num_channel = samples_1.shape[1]
diff = samples_1 - samples_2
return torch.norm(diff.view(batch_size, num_channel, -1), dim=2).to(device)
def generate_target_samples(data, labels, fundus_dataset=None, target_indices=(4, 5, 300, 6), device='cuda'):
"""
Generate target samples for decision boundary attack from the given data. Basically, for each input label, we take
a sample of different label in the data as a target sample. If all the labels are the same, we take a distinct label
from the target_indices which contains indices of the fundus dataset where labels are 0 - 4 and use the selected label
as well as the corresponding image to construnct a target image batch.
:param device:
:param data: input images
:param labels: target labels of data
:param fundus_dataset: fundus dataset object
:param target_indices: 5 positions in the fundus dataset where the labels are respectively 0 - 4
:return: target samples along with their labels used for decision boundary attack
"""
# If all the labels are the same
batch_size = data.shape[0]
all_zero = (labels != labels[0]).bitwise_not().all()
zero_and_the_other = len(torch.unique(labels)) == 2 and 0 in torch.unique(labels)
if all_zero or zero_and_the_other:
data_all = torch.Tensor()
labels_all = []
for index in target_indices:
data_all = torch.cat((data_all, fundus_dataset[index][0].unsqueeze(0)))
labels_all.append(torch.tensor(fundus_dataset[index][1]))
labels_all = torch.stack(labels_all).to(device)
if all_zero:
result_indices = torch.where((labels_all != labels[0].to(device)))
elif zero_and_the_other:
result_indices = torch.where((labels_all != torch.unique(labels)[1].to(device)))
result_indices = result_indices[torch.randperm(len(result_indices))]
target_labels = labels_all[result_indices][0].repeat(batch_size, 1)
target_samples = data_all[result_indices][0].repeat(batch_size, 1, 1, 1)
return target_samples, target_labels.view(batch_size)
else:
result_indices = []
for label in labels:
distinct_indices = torch.where((labels != label) * (labels != 0))
result_indices.append(distinct_indices[torch.randperm(len(distinct_indices))][0])
result_indices = torch.stack(result_indices)
target_labels = labels[result_indices].clone()
target_samples = data[result_indices].clone()
return target_samples, target_labels
def generate_initial_samples(data, labels, model, device, max_iter=100, epsilon=3.0 / 255.0):
data, labels = data.to(device), labels.to(device)
init_samples = data.detach().clone()
n_iter = 0
correct = torch.max(model(init_samples), 1)[1] == labels
while correct.any() and n_iter < max_iter:
init_samples = torch.rand_like(init_samples)
correct = torch.max(model(init_samples), 1)[1] == labels
n_iter += 1
print("generate {} initial samples".format(correct.bitwise_not().type(torch.int).sum()))
return init_samples[correct.bitwise_not()], correct.bitwise_not()
def move_to_boundary(model, epsilons, adversarial_samples, target_samples, init_preds, d_step_max, n_calls, device):
"""
Move first step to the boundary: first coincide with the target sample and gradually reduce step size
wrong/correct_indices is used for navigating in the global tensor (tensor with size of qualified candidates)
wrong/correct is used for navigating in the wrongly classified images that need to be treated (trial samples)
"""
d_step_1 = 0
while True:
# Initialize trial indices
if d_step_1 == 0:
trial_indices = torch.arange(len(adversarial_samples)).to(device)
step_size = epsilons[trial_indices].unsqueeze(-1) * get_diff(adversarial_samples[trial_indices],
target_samples[trial_indices], device)
trial_samples = adversarial_samples[trial_indices] + forward_perturbation(step_size, adversarial_samples[
trial_indices], target_samples[trial_indices], device)
trial_outputs = model(trial_samples)
n_calls += 1
d_step_1 += 1
correct = torch.max(trial_outputs, 1)[1] == init_preds[trial_indices]
wrong = correct.bitwise_not()
# Calculate corresponding indices in the whole adversarial batch
correct_indices = trial_indices[correct]
wrong_indices = trial_indices[wrong]
# Update adversarial examples and step sizes
adversarial_samples[correct_indices] = trial_samples[correct]
epsilons[wrong_indices] *= 0.8
# Update trial indices
trial_indices = trial_indices[wrong]
if correct.all() or d_step_1 > d_step_max:
return epsilons, adversarial_samples, n_calls
def move_and_tuning(model, adversarial_samples, target_samples, init_preds, n_calls, device, parameter, move_type,
num_trial, step_max, reduce_threshold=0.2, increase_threshold=0.7,
increase=0.9, decrease=0.9):
"""
    Make a move and adjust the step sizes (parameter) according to statistics of num_trial trials.
    :param num_trial: number of trials
    :param step_max: maximum number of steps
    :param reduce_threshold: decrease the step size if the ratio of valid samples is smaller than this value
    :param increase_threshold: increase the step size if the ratio of valid samples is greater than this value
:param increase: increase the step size by 1 / increase times
:param decrease: decrease the step size by decrease times
"""
if move_type == 'forward':
print("\tForward step...")
if move_type == 'orthogonal':
print("\tOrthogonal step...")
step = 0
while True:
step += 1
print("\t#{}".format(step))
# Stop updating correct samples
if step == 1:
trial_indices = torch.arange(len(adversarial_samples)).to(device)
trial_samples = adversarial_samples[trial_indices].repeat(num_trial, 1, 1, 1).to(device)
trial_target_samples = target_samples[trial_indices].repeat(num_trial, 1, 1, 1).to(device)
trial_parameter = parameter[trial_indices].repeat(num_trial).to(device)
if move_type == 'orthogonal':
trial_samples += orthogonal_perturbation(trial_parameter, trial_samples, trial_target_samples, device)
if move_type == 'forward':
step_sizes = trial_parameter.unsqueeze(-1) * get_diff(trial_samples, trial_target_samples, device)
trial_samples += forward_perturbation(step_sizes, trial_samples, trial_target_samples, device)
trial_outputs = model(trial_samples)
n_calls += num_trial * len(trial_indices)
# predictions of size (batch * num_trial)
trial_preds = torch.max(trial_outputs, 1)[1]
# print("trial predictions:{}".format(trial_preds))
# print("initial predictions:{}".format(init_preds))
d_scores = torch.mean((trial_preds.view(num_trial, -1) == init_preds[trial_indices]).type(torch.float32), dim=0)
# print("d_scores: {}".format(d_scores))
non_zero = d_scores > 0.0
case1 = non_zero * (d_scores < reduce_threshold)
case2 = d_scores > increase_threshold
zero = non_zero.bitwise_not()
# Calculate corresponding indices in the whole adversarial example batch
case1_indices = trial_indices[case1]
case2_indices = trial_indices[case2]
non_zero_indices = trial_indices[non_zero]
zero_indices = trial_indices[zero]
# Update step sizes
parameter[case1_indices] *= decrease
parameter[case2_indices] /= increase
parameter[zero_indices] *= decrease
# print("Parameter: {}".format(parameter))
# Take one of the valid orthogonal perturbation
non_zero_row_indices = []
# Take out non zero elements
correct_pred_positions = torch.where(
trial_preds.view(num_trial, -1)[:, non_zero] == init_preds[non_zero_indices])
# Loop over non zero elements and take one valid sample
for index in range(non_zero.type(torch.int).sum()):
first_col_to_be_index = torch.where(index == correct_pred_positions[1])[0][0]
non_zero_row_indices.append(correct_pred_positions[0][first_col_to_be_index])
# Update adversarial samples
if len(non_zero_row_indices) != 0:
non_zero_row_indices = torch.stack(non_zero_row_indices)
adversarial_samples[non_zero_indices] = torch.stack(trial_samples.chunk(num_trial, dim=0))[
(non_zero_row_indices, torch.where(non_zero)[0])]
# Update trial indices
trial_indices = trial_indices[zero]
# Break the loop if all samples are within the correct region.
if non_zero.all() or step > step_max:
return parameter, adversarial_samples, n_calls
def boundary_attack_image(model, device,
data, labels, untarget=False, skip_zero=False, fundus_dataset=None,
target_indices=(4, 5, 300, 6),
epsilon=1., delta=0.1, seed=None,
n_step_max=250, e_step_max=20, diff_tol=10, d_step_max=20, unqualified_sample_ratio_tol=0.2):
"""
    Batch implementation of the decision boundary attack, which produces adversarial examples from input data.
    For an input batch, we shuffle it to construct a target batch and optimize the input images towards it. Images that
    the model cannot classify correctly are returned directly. Adversarial examples whose maximum per-channel
    difference from the target examples exceeds diff_tol are considered "bad samples" and are discarded at return.
Based on https://arxiv.org/pdf/1712.04248.pdf
:param unqualified_sample_ratio_tol: return if the ratio of the "bad adv samples" is smaller than this threshold
:param d_step_max: maximum number of delta steps
:param target_indices: Indices of data of labels 1 - 5
    :param n_step_max: maximum number of outer optimization steps (orthogonal + forward move rounds)
    :param labels: ground-truth labels of the input data
:param data: input images
:param fundus_dataset: Fundus_Dataset object used when all the labels are the same
:param diff_tol: return if difference between target sample and adversarial sample smaller than diff_tol
:param e_step_max: maximum number of epsilon steps
:param delta: size of delta step (orthogonal move)
:param epsilon: size of epsilon step (step towards target sample)
:param model: model to be evaluated
:param device: cpu or cuda
:return: adversarial examples along with the corresponding target labels
"""
if seed:
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
# Load the data, labels to device
data, labels = data.to(device), labels.to(device)
if untarget:
init_samples, success = generate_initial_samples(data.detach(), labels.detach(), model, device)
target_samples, target_labels = data.detach()[success], labels.detach()[success]
# Forward pass the data through the model
init_outputs = model(data[success])
init_preds = torch.max(init_outputs, 1)[1] # get the index of the max log-probability
correctly_classified = init_preds == labels[success]
else:
init_samples = data
# Generate target samples from data
target_samples, target_labels = generate_target_samples(data.detach(), labels.detach(),
fundus_dataset=fundus_dataset,
target_indices=target_indices, device=device)
# Forward pass the data through the model
init_outputs = model(data)
init_preds = torch.max(init_outputs, 1)[1] # get the index of the max log-probability
correctly_classified = init_preds == labels
# Load target_samples, target_labels to device
target_samples, target_labels = target_samples.to(device), target_labels.to(device)
# Generate epsilons of size batch_size
batch_size = data.detach().shape[0]
with torch.no_grad():
        # If the classifier cannot correctly classify the initial data,
        # there is no need to generate adversarial examples for them
qualified_candidates = correctly_classified
# If skip zero, we skip images with label 0
if skip_zero:
qualified_candidates *= labels != 0
num_qualified_candidates = qualified_candidates.type(torch.int).sum()
target_samples = target_samples[qualified_candidates]
target_labels = target_labels[qualified_candidates]
adversarial_samples = init_samples[qualified_candidates].clone().to(device)
init_preds = init_preds[qualified_candidates]
epsilons = (torch.ones(num_qualified_candidates) * epsilon).to(device)
deltas = (torch.ones(num_qualified_candidates) * delta).to(device)
print("Initial Diff :")
print(get_diff(adversarial_samples, target_samples, device))
if adversarial_samples.shape[0] == 0:
return data[correctly_classified.bitwise_not()].clone().to(device), \
labels[correctly_classified.bitwise_not()].clone().to(device)
n_steps = 0
n_calls = 0
epsilons, adversarial_samples, n_calls = move_to_boundary(model, epsilons, adversarial_samples, target_samples,
init_preds,
d_step_max, n_calls, device)
print("After first move:")
print(get_diff(adversarial_samples, target_samples, device))
while True:
print("Step #{}...".format(n_steps))
deltas, adversarial_samples, n_calls = move_and_tuning(model, adversarial_samples, target_samples,
init_preds, n_calls, device,
move_type='orthogonal', parameter=deltas,
step_max=d_step_max, num_trial=20,
reduce_threshold=0.2, increase_threshold=0.8,
increase=0.9, decrease=0.9)
print("After orthgonal move:")
# print("deltas: {}".format(deltas))
print(get_diff(adversarial_samples, target_samples, device))
epsilons, adversarial_samples, n_calls = move_and_tuning(model, adversarial_samples, target_samples,
init_preds, n_calls, device,
move_type='forward', parameter=epsilons,
step_max=e_step_max, num_trial=10,
reduce_threshold=0.2, increase_threshold=0.8,
increase=0.5, decrease=0.5)
print("After forward move:")
print(get_diff(adversarial_samples, target_samples, device))
n_steps += 1
diff = get_diff(adversarial_samples, target_samples, device)
print(diff)
print("{} steps".format(n_steps))
print("Mean Squared Error: {}".format(torch.mean(diff).item()))
unqualified_samples_num = (torch.max(diff, dim=1).values > diff_tol).type(torch.int).sum()
if diff.mean().item() <= diff_tol or n_steps > n_step_max \
or unqualified_samples_num < unqualified_sample_ratio_tol * num_qualified_candidates:
break
# We only return the valid samples
adversarial_samples = adversarial_samples[(torch.max(diff, dim=1).values < diff_tol)]
target_labels = target_labels[(torch.max(diff, dim=1).values < diff_tol)]
# append wrongly classified samples for further training
adversarial_samples = torch.cat(
(adversarial_samples, data[correctly_classified.bitwise_not()].clone().to(device)))
target_labels = torch.cat((target_labels, labels[correctly_classified.bitwise_not()].clone().to(device)))
print("Generate {} adversarial samples".format(len(adversarial_samples)))
print("Total number of calls: {}".format(n_calls))
return adversarial_samples, target_labels
```
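The attack above is easiest to see end-to-end with a small driver. The sketch below is an illustration only: `model` and `loader` are assumed to exist (a trained classifier and a standard `(images, labels)` DataLoader), and only the `boundary_attack_image` call signature comes from the file above.
```python
# Hedged usage sketch: `model` and `loader` are assumptions (a trained classifier and a
# DataLoader yielding (images, labels)); only boundary_attack_image is taken from above.
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device).eval()

adv_batches, adv_label_batches = [], []
for data, labels in loader:
    adv, targets = boundary_attack_image(model, device, data, labels,
                                         epsilon=1., delta=0.1,
                                         n_step_max=50, diff_tol=10)
    adv_batches.append(adv.cpu())
    adv_label_batches.append(targets.cpu())

adversarial_set = torch.cat(adv_batches)
adversarial_targets = torch.cat(adv_label_batches)
```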
#### File: zeiss_umbrella/integrated_gradient/test.py
```python
import torch
from zeiss_umbrella.resnet import resnet18
from zeiss_umbrella.integrated_gradient.integrated_gradients import integrated_gradients
from zeiss_umbrella.integrated_gradient.utils import calculate_outputs_and_gradients
import numpy as np
import torch.nn.functional as F
def testIntegratedGradients():
model = resnet18(pretrained=True)
cuda = torch.cuda.is_available()
if cuda:
device = 'cuda'
else:
device = 'cpu'
seed = torch.randint(high=10000000, size=(1, 1), device=device).item()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
model = model.to(device)
model.eval()
x_baseline = torch.randn((1, 3, 256, 256))
x_input = torch.randn((1, 3, 256, 256))
output = model(x_input.to(device))
output_baseline = model(x_baseline.to(device))
output_index = torch.argmax(output, dim=1).item()
output_baseline_index = torch.argmax(output_baseline, dim=1).item()
    y_input = F.softmax(output, dim=1)[0][output_index]
    y_baseline = F.softmax(output_baseline, dim=1)[0][output_baseline_index]
expected_val = y_input.item() - y_baseline.item()
integrated_grad, _, _ = integrated_gradients(x_input, model, None, calculate_outputs_and_gradients,
steps=1000, cuda=cuda, baseline=x_baseline, path=None)
print(integrated_grad.sum())
print(expected_val)
diff = abs(integrated_grad.sum() - expected_val) / abs(expected_val)
if diff < 1e-4:
print('Integrated Gradients Test past')
else:
print('Integrated Gradients not passed, error: {}'.format(diff))
if __name__ == '__main__':
testIntegratedGradients()
```
#### File: zeiss_umbrella/integrated_gradient/utils.py
```python
import torch
import torch.nn.functional as F
# import cv2
import numpy as np
def grad_to_inp(input, model, target_label_idx, device=None):
if device:
input = input.to(device)
else:
input = input.cpu().detach()
input.requires_grad = True
output = model(input)
output = F.softmax(output, dim=1)
if target_label_idx is None:
target_label_idx = torch.argmax(output, 1).item()
index = np.ones((output.size()[0], 1)) * target_label_idx
index = torch.tensor(index, dtype=torch.int64)
if device:
index = index.to(device)
output = output.gather(1, index)
# clear grad
model.zero_grad()
output.backward()
gradient = input.grad.detach().cpu().numpy()[0]
return gradient, target_label_idx
def calculate_outputs_and_gradients(inputs, model, target_label_idx, device=None):
# do the pre-processing
predict_idx = None
gradients = []
preds = []
for input in inputs:
gradient, pred = grad_to_inp(input, model, target_label_idx, device=device)
gradients.append(gradient)
preds.append(pred)
gradients = np.array(gradients)
return gradients, preds
def pre_processing(obs, device):
# print(np.max(np.max(np.max(obs))))
mean = np.array([0.485, 0.456, 0.406]).reshape([1, 1, 3])
std = np.array([0.229, 0.224, 0.225]).reshape([1, 1, 3])
obs = obs / 255
obs = (obs - mean) / std
obs = np.transpose(obs, (2, 0, 1))
obs = np.expand_dims(obs, 0)
obs = np.array(obs)
obs_tensor = torch.tensor(obs, dtype=torch.float32, device=device, requires_grad=True)
# print(torch.min(obs_tensor),torch.max(obs_tensor))
return obs_tensor
# generate the entire images
def generate_entrie_images(img_origin, img_grad, img_grad_overlay, img_integrad, img_integrad_overlay):
blank = np.ones((img_grad.shape[0], 10, 3), dtype=np.uint8) * 255
blank_hor = np.ones((10, 20 + img_grad.shape[0] * 3, 3), dtype=np.uint8) * 255
upper = np.concatenate([img_origin[:, :, (2, 1, 0)], blank, img_grad_overlay, blank, img_grad], 1)
down = np.concatenate([img_origin[:, :, (2, 1, 0)], blank, img_integrad_overlay, blank, img_integrad], 1)
total = np.concatenate([upper, blank_hor, down], 0)
# total = cv2.resize(total, (550, 364))
return total
```
#### File: zeiss_umbrella/integrated_gradient/visualization.py
```python
import numpy as np
import matplotlib.pyplot as plt
import torch
G = [0, 1.0, 0]
R = [1.0, 0, 0]
def convert_to_gray_scale(attributions):
return np.average(attributions, axis=2)
def linear_transform(attributions, clip_above_percentile=99.9, clip_below_percentile=70.0, low=0.2,
plot_distribution=False):
m = compute_threshold_by_top_percentage(attributions, percentage=100 - clip_above_percentile,
plot_distribution=plot_distribution)
e = compute_threshold_by_top_percentage(attributions, percentage=100 - clip_below_percentile,
plot_distribution=plot_distribution)
transformed = (1 - low) * (np.abs(attributions) - e) / (m - e) + low
transformed *= np.sign(attributions)
transformed *= (np.abs(transformed) >= low)
transformed = np.clip(transformed, 0.0, 1.0)
return transformed
def compute_threshold_by_top_percentage(attributions, percentage=60, plot_distribution=True):
if percentage < 0 or percentage > 100:
raise ValueError('percentage must be in [0, 100]')
if percentage == 100:
return np.min(attributions)
flat_attributions = attributions.flatten()
attribution_sum = np.abs(np.sum(flat_attributions))
sorted_attributions = np.sort(np.abs(flat_attributions))[::-1]
cum_sum = 100.0 * np.cumsum(sorted_attributions) / attribution_sum
threshold_idx = np.where(cum_sum >= percentage)[0][0]
threshold = sorted_attributions[threshold_idx]
if plot_distribution:
# Generate a plot of sorted intgrad scores.
values_to_plot = np.where(cum_sum >= 95)[0][0]
values_to_plot = max(values_to_plot, threshold_idx)
plt.plot(np.arange(values_to_plot), sorted_attributions[:values_to_plot])
plt.axvline(x=threshold_idx)
plt.show()
return threshold
def polarity_function(attributions, polarity):
if polarity == 'positive':
return np.clip(attributions, 0, 1)
elif polarity == 'negative':
return np.clip(attributions, -1, 0)
else:
pos = np.clip(attributions, 0, 1)
neg = np.clip(attributions, -1, 0)
neg = np.abs(neg)
return pos, neg
def overlay_function(attributions, image):
# sanity check
if isinstance(image, torch.Tensor):
image = image.numpy()
return np.clip(0.7 * image + 0.5 * attributions, 0, 1)
def visualize(attributions, image, positive_channel=G, negative_channel=R, polarity='positive',
clip_above_percentile=99.9, clip_below_percentile=0, morphological_cleanup=False,
structure=np.ones((3, 3)), outlines=False, outlines_component_percentage=90, overlay=True,
mask_mode=False, plot_distribution=False, channels_first=True):
if channels_first:
image = image.permute(0, 2, 3, 1).squeeze(0)
if len(attributions.shape) == 4:
attributions = attributions.squeeze(0)
if polarity == 'both':
pos_attr, neg_attr = polarity_function(attributions, polarity=polarity)
attributions = np.zeros_like(pos_attr)
attributions_mask = np.zeros_like(pos_attr)
for attr, chan in zip([pos_attr, neg_attr], [positive_channel, negative_channel]):
# convert the attributions to the gray scale
attr = convert_to_gray_scale(attr)
attr = linear_transform(attr, clip_above_percentile, clip_below_percentile, 0,
plot_distribution=plot_distribution)
amask = attr.copy()
if morphological_cleanup:
raise NotImplementedError
if outlines:
raise NotImplementedError
attr = np.expand_dims(attr, 2) * chan
attributions += attr
attributions_mask += np.expand_dims(amask, 2)
if overlay:
if not mask_mode:
attributions = overlay_function(attributions, image)
else:
# attributions =attributions_mask
imgd = image.detach().numpy()
attributions = np.clip(attributions + imgd * 0.7, 0, 1)
# attributions = attributions[:, :, (2, 1, 0)]
else:
if polarity == 'positive':
attributions = polarity_function(attributions, polarity=polarity)
channel = positive_channel
elif polarity == 'negative':
attributions = polarity_function(attributions, polarity=polarity)
channel = negative_channel
# convert the attributions to the gray scale
attributions = convert_to_gray_scale(attributions)
attributions = linear_transform(attributions, clip_above_percentile, clip_below_percentile, 0.0,
plot_distribution=plot_distribution)
attributions_mask = attributions.copy()
if morphological_cleanup:
raise NotImplementedError
if outlines:
raise NotImplementedError
attributions = np.expand_dims(attributions, 2) * channel
if overlay:
if not mask_mode:
attributions = overlay_function(attributions, image)
else:
imgd = image.detach().numpy()
attributions = np.expand_dims(attributions_mask, 2)
attributions = np.clip(attributions * imgd, 0, 1)
# attributions = attributions[:, :, (2, 1, 0)]
return attributions
``` |
{
"source": "JiahuaZhao/HPC-Python-CFD",
"score": 3
} |
#### File: HPC-Python-CFD/numpy/jacobi_numpy.py
```python
import numpy as np
import sys
def jacobistep(psi, m, n):
"""
    Performs one Jacobi iteration step over the whole interior grid
"""
return 0.25 * (psi[0:m, 1:n+1]+psi[2:m+2, 1:n+1]+psi[1:m+1, 0:n] + psi[1:m+1, 2:n+2])
def jacobistepvort(zet, psi, m, n, re):
#print(np.sum(zet), np.sum(psi))
psinew = 0.25 * (psi[0:m, 1:n+1]+psi[2:m+2, 1:n+1]+psi[1:m+1, 0:n] + psi[1:m+1, 2:n+2] - zet[1:m+1, 1:n+1])
zetnew = - re/16.0 * ((psi[1:m+1, 2:n+2]-psi[1:m+1, 0:n])*(zet[2:m+2, 1:n+1]-zet[0:m, 1:n+1]) - (psi[2:m+2, 1:n+1]-psi[0:m, 1:n+1])*(zet[1:m+1, 2:n+2]-zet[1:m+1, 0:n])) + (0.25*(zet[0:m, 1:n+1]+zet[2:m+2, 1:n+1]+zet[1:m+1, 0:n]+zet[1:m+1, 2:n+2]))
return psinew, zetnew
def deltasq(psi_os_zet_temp, oldarr, m, n):
dsq = np.sum(np.power(psi_os_zet_temp - oldarr[1: m+1, 1:n+1], 2))
return float(dsq)
``` |
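A small driver for the Jacobi routines above may help: it sweeps `jacobistep` over an (m+2) x (n+2) array with halo cells and stops when `deltasq` drops below a tolerance. The grid size, boundary condition, tolerance, and import path are illustrative assumptions.
```python
import numpy as np
from jacobi_numpy import jacobistep, deltasq  # assumed: run from the numpy/ directory

m, n = 32, 32                    # assumed grid size (interior points)
psi = np.zeros((m + 2, n + 2))
psi[0, :] = 1.0                  # assumed Dirichlet boundary value on one edge
tol = 1.0e-6                     # assumed convergence tolerance

for it in range(10000):
    psi_new = jacobistep(psi, m, n)        # updated interior, shape (m, n)
    dsq = deltasq(psi_new, psi, m, n)      # squared change relative to the old interior
    psi[1:m + 1, 1:n + 1] = psi_new        # write the update back into the interior cells
    if dsq < tol:
        print('converged after {} iterations, residual {:.3e}'.format(it + 1, dsq))
        break
```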
{
"source": "jiahuei/cisip-FIRe",
"score": 2
} |
#### File: cisip-FIRe/analysis/dist_stats.py
```python
import numpy as np
import torch
from functions.hashing import get_distance_func
from functions.ternarization import tnt
def compute_distance(a, b, distance_mode='cosine'):
return get_distance_func(distance_mode)(a, b)
def get_dist_stat(codes,
labels,
bs=1000,
ternary=False,
binary=True,
get_center=True,
get_hist=False,
hist_bin_size=64,
code_balance_stat=False,
quan_stat=False,
distance_mode='hamming',
minibatch_intraclass=False):
if distance_mode != 'hamming':
assert not get_hist, 'get_hist=True only for distance_mode=hamming'
intracenter_avg = 0
intracenter_std = 0
intercenter_avg = 0
intercenter_std = 0
intraclass_avg = 0
intraclass_std = 0
interclass_avg = 0
interclass_std = 0
intra_count = 0
intra_cent_count = 0
inter_count = 0
inter_cent_count = 0
nbit = codes.size(1)
nclass = labels.size(1)
code_balance_avg = 0
code_balance_std = 0
quan_error_cs_avg = 0
quan_error_cs_std = 0
quan_error_l2_avg = 0
quan_error_l2_std = 0
intra_freq = torch.zeros(hist_bin_size + 1).to(codes.device) # +1 to include last number, e.g. [0, ..., 64]
inter_freq = torch.zeros(hist_bin_size + 1).to(codes.device)
if code_balance_stat:
code_balance = (codes.sign() == 1).float().mean(0)
code_balance_avg = code_balance.mean().item()
code_balance_std = code_balance.std().item()
if quan_stat:
quan_error_cs = torch.cosine_similarity(codes, codes.sign(), dim=1) # .mean()
quan_error_cs_avg = quan_error_cs.mean()
quan_error_cs_std = quan_error_cs.std()
quan_error_l2 = torch.norm(codes - codes.sign(), p=2, dim=1) # .mean()
quan_error_l2_avg = quan_error_l2.mean()
quan_error_l2_std = quan_error_l2.std()
if ternary:
codes = tnt(codes.clone())
elif binary:
codes = codes.sign()
center_codes = torch.zeros(nclass, nbit).to(codes.device)
for c in range(nclass):
print(f'processing class {c}', end='\r')
# intramask = (labels.argmax(dim=1) == c)
intramask = (labels[:, c] == 1)
intracodes = codes[intramask] # .sign()
if ternary:
center_codes[c] = tnt(intracodes.mean(dim=0, keepdim=True)).view(nbit)
elif binary:
center_codes[c] = intracodes.mean(dim=0).sign()
else:
center_codes[c] = intracodes.mean(dim=0)
# intermask = (labels.argmax(dim=1) != c)
intermask = ~intramask
intercodes = codes[intermask] # .sign()
        # intradist should fit in memory
triu_mask = torch.ones(intracodes.size(0), intracodes.size(0)).bool()
triu_mask = torch.triu(triu_mask, 1).to(intracodes.device)
# intradist = (0.5 * (nbit - intracodes @ intracodes.t())) * triu_mask
if not minibatch_intraclass:
intradist = compute_distance(intracodes, intracodes, distance_mode) * triu_mask
if get_hist:
h = torch.histc(intradist[triu_mask.bool()], hist_bin_size + 1, 0, nbit)
intra_freq += h
# intradist = intradist.sum() / triu_mask.sum()
intraclass_avg += intradist.sum().item()
intraclass_std += (intradist ** 2).sum().item()
intra_count += triu_mask.sum().item()
else:
intradist = 0
intradist_std = 0
triu_mask_numel = 0
for bidx in range(intracodes.size(0) // bs + 1):
print(f'processing class {c} [{bidx}/{intracodes.size(0) // bs + 1}]', end='\r')
currbidx = bidx * bs
nextbidx = (bidx + 1) * bs
nextbidx = min(nextbidx, intracodes.size(0))
if currbidx >= intracodes.size(0): # already out of index
break
batch_intra = intracodes[currbidx:nextbidx]
intradist_ = compute_distance(batch_intra, intracodes, distance_mode) * triu_mask[currbidx:nextbidx].float()
intradist += intradist_.sum()
intradist_std += (intradist_**2).sum()
triu_mask_numel += triu_mask[currbidx:nextbidx].sum()
if get_hist:
h = torch.histc(intradist_[triu_mask[currbidx:nextbidx].bool()], hist_bin_size + 1, 0, nbit)
intra_freq += h
intradist = intradist # / triu_mask_numel
intraclass_avg += intradist.item()
intraclass_std += intradist_std.item()
intra_count += triu_mask_numel.item()
# dist_to_cent = (0.5 * (nbit - center_codes[c].view(1, -1) @ intracodes.t())) # (1, N)
dist_to_cent = compute_distance(center_codes[c].view(1, -1), intracodes, distance_mode) # (1, N)
intracenter_avg += dist_to_cent.view(-1).sum().item()
intracenter_std += (dist_to_cent ** 2).view(-1).sum().item()
intra_cent_count += intracodes.size(0)
for bidx in range(intercodes.size(0) // bs + 1):
print(f'processing class {c} [{bidx}/{intercodes.size(0) // bs + 1}]', end='\r')
currbidx = bidx * bs
nextbidx = (bidx + 1) * bs
nextbidx = min(nextbidx, intercodes.size(0))
if currbidx >= intercodes.size(0): # already out of index
break
batch_inter = intercodes[currbidx:nextbidx]
# interdist = (0.5 * (nbit - intracodes @ batch_inter.t()))
interdist = compute_distance(intracodes, batch_inter, distance_mode)
if get_hist:
h = torch.histc(interdist, hist_bin_size + 1, 0, nbit)
inter_freq += h
inter_count += torch.numel(interdist)
interclass_avg += interdist.sum().item()
interclass_std += (interdist ** 2).sum().item()
# dist_to_cent = (0.5 * (nbit - center_codes[c].view(1, -1) @ batch_inter.t())) # (1, Nb)
dist_to_cent = compute_distance(center_codes[c].view(1, -1), batch_inter, distance_mode) # (1, Nb)
intercenter_avg += dist_to_cent.view(-1).sum().item()
intercenter_std += (dist_to_cent ** 2).view(-1).sum().item()
inter_cent_count += batch_inter.size(0)
interclass_avg /= inter_count
interclass_std /= inter_count
interclass_std -= (interclass_avg ** 2)
intercenter_avg /= inter_cent_count
intercenter_std /= inter_cent_count
intercenter_std -= (intercenter_avg ** 2)
intraclass_avg /= intra_count
intraclass_std /= intra_count
intraclass_std -= (intraclass_avg ** 2)
intracenter_avg /= intra_cent_count
intracenter_std /= intra_cent_count
intracenter_std -= (intracenter_avg ** 2)
print()
ret = {
'intraclass_avg': intraclass_avg,
'intraclass_std': intraclass_std,
'interclass_avg': interclass_avg,
'interclass_std': interclass_std,
'intracenter_avg': intracenter_avg,
'intracenter_std': intracenter_std,
'intercenter_avg': intercenter_avg,
'intercenter_std': intercenter_std
}
if get_center:
ret['center_codes'] = center_codes
if code_balance_stat:
ret['code_balance_avg'] = code_balance_avg
ret['code_balance_std'] = code_balance_std
if quan_stat:
ret['quan_error_cs'] = quan_error_cs_avg
ret['quan_error_cs_std'] = quan_error_cs_std
ret['quan_error_l2'] = quan_error_l2_avg
ret['quan_error_l2_std'] = quan_error_l2_std
if get_hist:
ret['intra_freq'] = intra_freq
ret['inter_freq'] = inter_freq
return ret
def get_dist_stat_embed(codes,
ids,
ground_truth,
index,
bs=1000,
ternary=False,
binary=True,
get_center=True,
get_hist=False,
hist_bin_size=64,
code_balance_stat=False,
quan_stat=False,
distance_mode='hamming'):
if distance_mode != 'hamming':
assert not get_hist, 'get_hist=True only for distance_mode=hamming'
intracenter_avg = 0
intercenter_avg = 0
intraclass_avg = 0
interclass_avg = 0
intra_count = 0
inter_count = 0
inter_cent_count = 0
nbit = codes.size(1)
nquery = len(ground_truth)
code_balance_avg = 0
code_balance_std = 0
quan_error_cs_avg = 0
quan_error_cs_std = 0
quan_error_l2_avg = 0
quan_error_l2_std = 0
intra_freq = torch.zeros(hist_bin_size + 1).to(codes.device) # +1 to include last number, e.g. [0, ..., 64]
inter_freq = torch.zeros(hist_bin_size + 1).to(codes.device)
if code_balance_stat:
code_balance = (codes.sign() == 1).float().mean(0)
code_balance_avg = code_balance.mean().item()
code_balance_std = code_balance.std().item()
if quan_stat:
quan_error_cs = torch.cosine_similarity(codes, codes.sign(), dim=1) # .mean()
quan_error_cs_avg = quan_error_cs.mean()
quan_error_cs_std = quan_error_cs.std()
quan_error_l2 = torch.norm(codes - codes.sign(), p=2, dim=1) # .mean()
quan_error_l2_avg = quan_error_l2.mean()
quan_error_l2_std = quan_error_l2.std()
if ternary:
codes = tnt(codes.clone())
elif binary:
codes = codes.sign()
center_codes = torch.zeros(nquery, nbit).to(codes.device)
for c in range(nquery):
print(f'processing class {c}', end='\r')
intramask = np.isin(ids, ground_truth.iloc[c]['images'].split())
intracodes = codes[intramask] # .sign()
if ternary:
center_codes[c] = tnt(intracodes.mean(dim=0, keepdim=True)).view(nbit)
elif binary:
center_codes[c] = intracodes.mean(dim=0).sign()
else:
center_codes[c] = intracodes.mean(dim=0)
# intermask = (labels.argmax(dim=1) != c)
intermask = ~intramask[index]
intercodes = codes[index][intermask] # .sign()
        # intradist should fit in memory
triu_mask = torch.ones(intracodes.size(0), intracodes.size(0))
triu_mask = torch.triu(triu_mask, 1).to(intracodes.device)
# intradist = (0.5 * (nbit - intracodes @ intracodes.t())) * triu_mask
intradist = compute_distance(intracodes, intracodes, distance_mode) * triu_mask
if get_hist:
h = torch.histc(intradist[triu_mask.bool()], hist_bin_size + 1, 0, nbit)
intra_freq += h
intradist = intradist.sum() / triu_mask.sum()
if triu_mask.sum() != 0: # skip when only one code in intracodes, where no distance can be calculated
intraclass_avg += intradist.item()
intra_count += 1
# dist_to_cent = (0.5 * (nbit - center_codes[c].view(1, -1) @ intracodes.t())) # (1, N)
dist_to_cent = compute_distance(center_codes[c].view(1, -1), intracodes, distance_mode) # (1, N)
intracenter_avg += dist_to_cent.mean().item()
for bidx in range(intercodes.size(0) // bs + 1):
print(f'processing class {c} [{bidx}/{intercodes.size(0) // bs + 1}]', end='\r')
currbidx = bidx * bs
nextbidx = (bidx + 1) * bs
nextbidx = min(nextbidx, intercodes.size(0))
if currbidx >= intercodes.size(0): # already out of index
break
batch_inter = intercodes[currbidx:nextbidx]
# interdist = (0.5 * (nbit - intracodes @ batch_inter.t()))
interdist = compute_distance(intracodes, batch_inter, distance_mode)
if get_hist:
h = torch.histc(interdist, hist_bin_size + 1, 0, nbit)
inter_freq += h
inter_count += torch.numel(interdist)
interclass_avg += interdist.sum().item()
# dist_to_cent = (0.5 * (nbit - center_codes[c].view(1, -1) @ batch_inter.t())) # (1, Nb)
dist_to_cent = compute_distance(center_codes[c].view(1, -1), batch_inter, distance_mode) # (1, Nb)
intercenter_avg += dist_to_cent.sum().item()
inter_cent_count += batch_inter.size(0)
interclass_avg /= inter_count
intercenter_avg /= inter_cent_count
intraclass_avg /= intra_count
intracenter_avg /= intra_count
print()
ret = {
'intraclass_avg': intraclass_avg,
'interclass_avg': interclass_avg,
'intracenter_avg': intracenter_avg,
'intercenter_avg': intercenter_avg,
}
if get_center:
ret['center_codes'] = center_codes
if code_balance_stat:
ret['code_balance_avg'] = code_balance_avg
ret['code_balance_std'] = code_balance_std
if quan_stat:
ret['quan_error_cs'] = quan_error_cs_avg
ret['quan_error_cs_std'] = quan_error_cs_std
ret['quan_error_l2'] = quan_error_l2_avg
ret['quan_error_l2_std'] = quan_error_l2_std
if get_hist:
ret['intra_freq'] = intra_freq
ret['inter_freq'] = inter_freq
return ret
```
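A sketch of how `get_dist_stat` above might be called on a code matrix with one-hot labels; the bit length, class count, and random inputs are stand-ins, and the import path assumes the repository root.
```python
import torch
from analysis.dist_stats import get_dist_stat  # assumed import path (repository root)

nbit, nclass, n = 64, 10, 2000
codes = torch.randn(n, nbit)                                  # stand-in continuous codes
labels = torch.eye(nclass)[torch.randint(0, nclass, (n,))]    # one-hot labels, shape (n, nclass)

stats = get_dist_stat(codes, labels,
                      binary=True,                 # sign-binarize codes before measuring
                      get_hist=True, hist_bin_size=nbit,
                      code_balance_stat=True, quan_stat=True,
                      distance_mode='hamming')
print('intra-class avg:', stats['intraclass_avg'], 'inter-class avg:', stats['interclass_avg'])
```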
#### File: jiahuei/cisip-FIRe/configs.py
```python
import logging
import os
import random
import numpy as np
import torch
import torchvision
from torch.optim import lr_scheduler
from torch.optim.adam import Adam
from torch.optim.rmsprop import RMSprop
from torch.optim.sgd import SGD
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import models
from utils import datasets
from utils.augmentations import get_train_transform
if torch.cuda.device_count() != 0:
default_workers = os.cpu_count() // torch.cuda.device_count() # follow PyTorch recommendation
else:
default_workers = os.cpu_count()
# huge in term of number of classes
non_onehot_dataset = ['landmark', 'gldv2delgembed', 'sop_instance',
'sop_instance_alexnet', 'sop_instance_vgg16', 'sop_instance_resnet18']
dataset_evaluated_by_id = ['landmark', 'gldv2delgembed']
embedding_datasets = ['gldv2delgembed', 'roxford5kdelgembed', 'rparis6kdelgembed']
pin_memory = False
disable_tqdm = False
def in_features(dfolder, dataset):
if dataset == 'gldv2delgembed':
return 2048
elif dataset == 'descriptor':
if dfolder == '':
return 0
if '128' in dfolder:
return 128
elif 'alexnet' in dfolder or 'vgg' in dfolder:
return 4096
else:
return 512
def imagesize(config):
if not isinstance(config, dict):
dsname = config
else:
dsname = config['dataset']
r = {
'imagenet100': 256,
'nuswide': 256,
'coco': 256,
'cifar10': 224,
'cifar10_2': 224,
'cifar10_II': 224,
'cars': 224,
'landmark': 512,
'roxford5k': 224,
'rparis6k': 224,
'gldv2delgembed': 0,
'roxford5kdelgembed': 0,
'mirflickr': 256,
'sop': 256,
'sop_instance': 256,
'food101': 256
}[dsname]
return r
def cropsize(config):
if not isinstance(config, dict):
dsname = config
else:
dsname = config['dataset']
r = {
'imagenet100': 224,
'nuswide': 224,
'coco': 224,
'cifar10': 224,
'cifar10_2': 224,
'cifar10_II': 224,
'cars': 224,
'landmark': 512,
'roxford5k': 224,
'rparis6k': 224,
'gldv2delgembed': 0,
'roxford5kdelgembed': 0,
'rparis6kdelgembed': 0,
'mirflickr': 224,
'sop': 224,
'sop_instance': 224,
'food101': 224
}[dsname]
return r
def nclass(config):
if not isinstance(config, dict):
dsname = config
else:
dsname = config['dataset']
r = {
'imagenet100': 100,
'cifar10': 10,
'cifar10_2': 10,
'cifar10_II': 10,
'nuswide': 21,
'coco': 80,
'cars': 196,
'landmark': 81313,
'gldv2delgembed': 81313, # same as landmark
'roxford5kdelgembed': 0, # not applicable
'rparis6kdelgembed': 0,
'mirflickr': 24,
'sop': 12,
'sop_instance': 22634,
'food101': 101
}[dsname]
return r
def R(config):
r = {
'imagenet100': 1000,
'cifar10': 59000,
'cifar10_2': 50000,
'cifar10_II': 50000,
'nuswide': 5000,
'coco': 5000,
'cars': 100,
'landmark': 100,
'roxford5k': 0, # not using
'rparis6k': 0, # not using
'gldv2delgembed': 100, # same as landmark
'roxford5kdelgembed': 0, # not using
'rparis6kdelgembed': 0,
'mirflickr': 1000,
'sop': 1000,
'sop_instance': 100,
'food101': 1000
}[config['dataset'] + {2: '_2'}.get(config['dataset_kwargs']['evaluation_protocol'], '')]
return r
def arch(config, **kwargs):
if config['arch'] in models.network_names:
net = models.network_names[config['arch']](config, **kwargs)
else:
raise ValueError(f'Invalid Arch: {config["arch"]}')
return net
def optimizer(config, params):
o_type = config['optim']
kwargs = config['optim_kwargs']
if o_type == 'sgd':
o = SGD(params,
lr=kwargs['lr'],
momentum=kwargs.get('momentum', 0.9),
weight_decay=kwargs.get('weight_decay', 0.0005),
nesterov=kwargs.get('nesterov', False))
elif o_type == 'rmsprop':
o = RMSprop(params,
lr=kwargs['lr'],
alpha=kwargs.get('alpha', 0.99),
weight_decay=kwargs.get('weight_decay', 0.0005),
momentum=kwargs.get('momentum', 0))
elif o_type == 'adam': # adam
o = Adam(params,
lr=kwargs['lr'],
betas=kwargs.get('betas', (0.9, 0.999)),
weight_decay=kwargs.get('weight_decay', 0))
else:
raise ValueError(f'Optimizer specified {o_type} is not defined.')
return o
def scheduler(config, optimizer):
s_type = config['scheduler']
kwargs = config['scheduler_kwargs']
if s_type == 'step':
return lr_scheduler.StepLR(optimizer,
kwargs['step_size'],
kwargs['gamma'])
elif s_type == 'mstep':
return lr_scheduler.MultiStepLR(optimizer,
[int(float(m) * int(config['epochs'])) for m in
kwargs['milestones'].split(',')],
kwargs['gamma'])
elif s_type == 'linear':
def function(e):
init_lr = kwargs['linear_init_lr']
last_lr = kwargs['linear_last_lr']
epochs = config['epochs']
return ((last_lr - init_lr) / (epochs - 1) * e + init_lr) / init_lr # y = mx + c
return lr_scheduler.LambdaLR(optimizer, function)
else:
raise Exception('Scheduler not supported yet: ' + s_type)
def get_meanstd(norm):
mean, std = {
0: [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]],
1: [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
2: [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]]
}[norm]
return mean, std
def compose_transform(mode='train', resize=0, crop=0, norm=0,
augmentations=None):
"""
:param mode: train/test
:param resize: if 0, will not add Resize
:param crop: if 0, will not add CenterCrop (only for test mode)
    :param norm: if 0, will not add Normalize (1 scales to [-1, 1]; 2 applies ImageNet standardization)
:param augmentations: augmentation Compose (only for train mode)
:return: Compose list [(Resize), (train:augmentations), (test:CenterCrop), ToTensor, (Normalize)]
# () based on options
"""
# norm = 0, 0 to 1
# norm = 1, -1 to 1
# norm = 2, standardization
mean, std = get_meanstd(norm)
compose = []
if resize != 0:
compose.append(transforms.Resize((resize, resize)))
if mode == 'train' and augmentations is not None:
compose += augmentations
if mode == 'test' and crop != 0 and resize != crop:
compose.append(transforms.CenterCrop(crop))
compose.append(transforms.ToTensor())
if norm != 0:
compose.append(transforms.Normalize(mean, std))
return transforms.Compose(compose)
def dataset(config, filename, transform_mode,
return_id=False, gpu_transform=False, gpu_mean_transform=False,
skip_preprocess=False):
dataset_name = config['dataset']
use_db_as_train = config['dataset_kwargs'].get('use_db_as_train', False)
data_ratio = 1
if filename == 'train.txt' and use_db_as_train:
filename = 'database.txt'
data_ratio = config['dataset_kwargs'].get('train_ratio', 1)
nclass = config['arch_kwargs']['nclass']
resize = config['dataset_kwargs'].get('resize', 0)
crop = config['dataset_kwargs'].get('crop', 0)
norm = config['dataset_kwargs'].get('norm', 1)
use_rand_aug = config['dataset_kwargs']['use_random_augmentation']
reset = config['dataset_kwargs'].get('reset', False)
remove_train_from_db = config['dataset_kwargs'].get('remove_train_from_db', False)
separate_multiclass = config['dataset_kwargs'].get('separate_multiclass', False)
extra_dataset = config['dataset_kwargs'].get('extra_dataset', 0)
if dataset_name in ['imagenet100', 'nuswide', 'coco', 'cars', 'landmark',
'roxford5k', 'rparis6k', 'mirflickr', 'sop', 'sop_instance', 'food101']:
norm = 2 if not gpu_mean_transform else 0 # 0 = turn off Normalize
if skip_preprocess: # will not resize and crop, and no augmentation
transform = compose_transform('test', 0, 0, norm)
else:
if transform_mode == 'train':
transform = compose_transform('train', 0, crop, norm,
get_train_transform(dataset_name, resize, crop, use_rand_aug))
else:
transform = compose_transform('test', resize, crop, norm)
datafunc = {
'imagenet100': datasets.imagenet100,
'nuswide': datasets.nuswide,
'coco': datasets.coco,
'cars': datasets.cars,
'landmark': datasets.landmark,
'roxford5k': datasets.roxford5k,
'rparis6k': datasets.rparis6k,
'mirflickr': datasets.mirflickr,
'sop': datasets.sop,
'sop_instance': datasets.sop_instance,
'food101': datasets.food101
}[dataset_name]
d = datafunc(transform=transform,
filename=filename,
separate_multiclass=separate_multiclass,
return_id=return_id,
dataset_name_suffix=config['dataset_kwargs'].get('dataset_name_suffix', ''),
ratio=data_ratio)
logging.info(f'Augmentation for {transform_mode}: {transform.transforms}')
elif dataset_name in ['cifar10', 'cifar100', 'cifar10_II']: # cifar10/ cifar100
resizec = 0 if resize == 32 else resize
cropc = 0 if crop == 32 else crop
norm = 2 if not gpu_mean_transform else 0 # 0 = turn off Normalize
if skip_preprocess: # cifar10 will always resize first
transform = compose_transform('test', resizec, 0, norm)
else:
if transform_mode == 'train':
transform = compose_transform('train', resizec, 0, norm, [
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.05, contrast=0.05),
])
else:
transform = compose_transform('test', resizec, cropc, norm)
ep = config['dataset_kwargs'].get('evaluation_protocol', 1)
if dataset_name == 'cifar10_II':
ep = 3
if dataset_name in ['cifar10', 'cifar10_II', 'cifar100']:
d = datasets.cifar(nclass, transform=transform, filename=filename, evaluation_protocol=ep, reset=reset,
remove_train_from_db=remove_train_from_db, extra_dataset=extra_dataset)
logging.info(f'Number of data: {len(d.data)}')
logging.info(f'Augmentation for {transform_mode}: {transform.transforms}')
else:
raise NotImplementedError(f"Not implementation for {dataset_name}")
elif dataset_name in ['gldv2delgembed', 'roxford5kdelgembed', 'rparis6kdelgembed']:
datafunc = {
'gldv2delgembed': datasets.gldv2delgembed,
'roxford5kdelgembed': datasets.roxford5kdelgembed,
'rparis6kdelgembed': datasets.rparis6kdelgembed
}[dataset_name]
d = datafunc(filename=filename)
elif dataset_name == 'descriptor': # descriptor
data_folder = config['dataset_kwargs']['data_folder']
d = datasets.descriptor(data_folder=data_folder,
filename=filename,
ratio=data_ratio)
else:
raise NotImplementedError(f"No implementation for {dataset_name}")
return d
def dataloader(d, bs=256, shuffle=True, workers=-1, drop_last=True, collate_fn=None, seed=-1):
"""
    :param d: dataset to wrap
    :param bs: batch size
    :param shuffle: whether to reshuffle the data at every epoch
    :param workers: number of worker processes (-1 uses the default worker count)
    :param drop_last: whether to drop the last incomplete batch
    :param collate_fn: optional custom collate function
    :param seed: random seed for deterministic data loading
    :return: a torch DataLoader over d
"""
if workers < 0:
workers = default_workers
if seed != -1:
g = torch.Generator()
g.manual_seed(seed)
else:
g = None
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
l = DataLoader(d,
bs,
shuffle,
drop_last=drop_last,
num_workers=workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
worker_init_fn=seed_worker,
generator=g)
return l
def seeding(seed):
seed = int(seed)
if seed != -1:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
def tensor_to_dataset(tensor, transform=None):
class TransformTensorDataset(Dataset):
def __init__(self, tensor, ts=None):
super(TransformTensorDataset, self).__init__()
self.tensor = tensor
self.ts = ts
def __getitem__(self, index):
if self.ts is not None:
return self.ts(self.tensor[index])
return self.tensor[index]
def __len__(self):
return len(self.tensor)
ttd = TransformTensorDataset(tensor, transform)
return ttd
def tensors_to_dataset(tensors_with_transform):
"""
:param tensors_with_transform:
[
{
'tensor': torch.Tensor, # required
'transform': callable, # optional
}, ...
]
:return:
"""
class TransformTensorDataset(Dataset):
def __init__(self, tensors_with_ts):
super(TransformTensorDataset, self).__init__()
self.tensors_with_ts = tensors_with_ts
def __getitem__(self, index):
rets = []
for tensor_dict in self.tensors_with_ts:
tensor = tensor_dict['tensor']
ts = tensor_dict.get('transform')
if ts is not None:
rets.append(ts(tensor[index]))
else:
rets.append(tensor[index])
return rets
def __len__(self):
return len(self.tensors_with_ts[0]['tensor'])
ttd = TransformTensorDataset(tensors_with_transform)
return ttd
def use_accimage_backend():
torchvision.set_image_backend('accimage')
```
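The helpers in configs.py compose naturally: `compose_transform` builds the preprocessing pipeline, `tensor_to_dataset` wraps an in-memory tensor, and `dataloader` batches it. The sketch below assumes it is run from the repository root; sizes and settings are illustrative.
```python
import torch
from torchvision import transforms
import configs  # assumed: executed from the repository root so configs.py is importable

# Test-time pipeline: Resize(256) -> CenterCrop(224) -> ToTensor -> Normalize (norm=2)
test_tf = configs.compose_transform(mode='test', resize=256, crop=224, norm=2)
# Train-time pipeline: Resize(256) -> augmentations -> ToTensor -> Normalize
train_tf = configs.compose_transform(mode='train', resize=256, crop=0, norm=2,
                                     augmentations=[transforms.RandomHorizontalFlip()])
print(test_tf, train_tf)

# Wrap an in-memory tensor and iterate it in batches
codes = torch.randn(1000, 64)
loader = configs.dataloader(configs.tensor_to_dataset(codes),
                            bs=128, shuffle=False, workers=0, drop_last=False)
for batch in loader:
    print(batch.shape)  # torch.Size([128, 64])
    break
```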
#### File: cisip-FIRe/functions/hashing.py
```python
import random
import numpy as np
import torch
from scipy.linalg import hadamard
def get_hamm_dist(codes, centroids, margin=0, normalize=False):
with torch.no_grad():
nbit = centroids.size(1)
dist = 0.5 * (nbit - torch.matmul(codes.sign(), centroids.sign().t()))
if normalize:
dist = dist / nbit
if margin == 0:
return dist
else:
codes_clone = codes.clone()
codes_clone[codes_clone.abs() < margin] = 0
dist_margin = 0.5 * (nbit - torch.matmul(codes_clone.sign(), centroids.sign().t()))
if normalize:
dist_margin = dist_margin / nbit
return dist_margin
def get_codes_and_labels(model, loader):
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
vs = []
ts = []
for e, (d, t) in enumerate(loader):
print(f'[{e + 1}/{len(loader)}]', end='\r')
with torch.no_grad():
# model forward
d, t = d.to(device), t.to(device)
v = model(d)
if isinstance(v, tuple):
v = v[0]
vs.append(v)
ts.append(t)
print()
vs = torch.cat(vs)
ts = torch.cat(ts)
return vs, ts
def jmlh_dist(a, b):
# a_m1 = a - 1
# b_m1 = b - 1
# c1 = torch.matmul(a, b_m1.T)
# c2 = torch.matmul(a_m1, b.T)
# return torch.abs(c1 + c2)
# a & b is sigmoid input
a = torch.sign(a - 0.5)
b = torch.sign(b - 0.5)
return hamming(a, b)
def hamming(a, b):
nbit = a.size(1)
return 0.5 * (nbit - torch.matmul(a, b.t())) # (Na, nbit) * (nbit, Nb)
def euclidean(a, b):
# dist = (a.unsqueeze(1) - b.unsqueeze(0)) ** 2
# dist = dist.sum(dim=-1)
# dist = dist ** 0.5
# return dist
return torch.cdist(a, b, p=2) # (Na, Nb)
def cosine(a, b):
a = a / (torch.norm(a, p=2, dim=1, keepdim=True) + 1e-7)
b = b / (torch.norm(b, p=2, dim=1, keepdim=True) + 1e-7)
return (1 - torch.matmul(a, b.t())) / 2
def get_distance_func(distance_func):
if distance_func == 'hamming':
return hamming
elif distance_func == 'euclidean':
return euclidean
elif distance_func == 'cosine':
return cosine
elif distance_func == 'jmlh-dist':
return jmlh_dist
else:
raise ValueError(f'Distance function `{distance_func}` not implemented.')
def inverse_sigmoid(y):
y = y.clamp(0.0000001, 0.9999999) # avoid nan
return torch.log(y / (1 - y))
def sign_dist(inputs, centroids, margin=0):
n, b1 = inputs.size()
nclass, b2 = centroids.size()
assert b1 == b2, 'inputs and centroids must have same number of bit'
# sl = relu(margin - x*y)
out = inputs.view(n, 1, b1) * centroids.sign().view(1, nclass, b1)
out = torch.relu(margin - out) # (n, nclass, nbit)
return out
def calculate_similarity_matrix(centroids):
nclass = centroids.size(0)
sim = torch.zeros(nclass, nclass, device=centroids.device)
for rc in range(nclass):
for cc in range(nclass):
sim[rc, cc] = (centroids[rc] == centroids[cc]).float().mean()
return sim
def get_sim(label_a, label_b, onehot=True):
"""
label_a: (N, 1 or C)
label_b: (M, 1 or C)
return: boolean similarity (N, M)
"""
if onehot:
sim = torch.matmul(label_a.float(), label_b.float().t())
return sim >= 1
else:
        n = label_a.size(0)
        m = label_b.size(0)
label_a = label_a.view(n, 1)
label_b = label_b.view(1, m)
sim = label_a == label_b
return sim
def log_trick(dot_product):
"""
loss = log(1 + e^(dt)) - s * dt
"""
return torch.log(1 + torch.exp(-torch.abs(dot_product))) + dot_product.clamp(min=0)
def get_hadamard(nclass, nbit, fast=True):
H_K = hadamard(nbit)
H_2K = np.concatenate((H_K, -H_K), 0)
hash_targets = torch.from_numpy(H_2K[:nclass]).float()
if H_2K.shape[0] < nclass:
hash_targets.resize_(nclass, nbit)
for k in range(20):
for index in range(H_2K.shape[0], nclass):
ones = torch.ones(nbit)
# Bernouli distribution
sa = random.sample(list(range(nbit)), nbit // 2)
ones[sa] = -1
hash_targets[index] = ones
if fast:
return hash_targets
# to find average/min pairwise distance
c = []
# print()
# print(n_class)
TF = (hash_targets.view(1, -1, nbit) != hash_targets.view(-1, 1, nbit)).sum(dim=2).float()
TF_mask = torch.triu(torch.ones_like(TF), 1).bool()
c = TF[TF_mask]
# choose min(c) in the range of K/4 to K/3
# see in https://github.com/yuanli2333/Hadamard-Matrix-for-hashing/issues/1
# but it is hard when bit is small
if c.min() > nbit / 4 and c.mean() >= nbit / 2:
print(c.min(), c.mean())
break
return hash_targets
```
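A quick sanity-check sketch for two helpers above: on {-1, +1} codes, `hamming` equals the count of differing bits, and `log_trick` is a numerically stable rewrite of log(1 + e^x). The sizes and import path are assumptions.
```python
import torch
from functions.hashing import hamming, log_trick  # assumed import path (repository root)

torch.manual_seed(0)
a = torch.randn(4, 64).sign()
b = torch.randn(7, 64).sign()

d = hamming(a, b)                                    # (4, 7) Hamming distances
brute = (a.unsqueeze(1) != b.unsqueeze(0)).sum(-1)   # brute-force count of differing bits
assert torch.equal(d, brute.float())

x = torch.linspace(-20, 20, steps=9)
assert torch.allclose(log_trick(x), torch.log1p(torch.exp(x)))  # stable log(1 + e^x)
print(d)
```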
#### File: functions/loss/base_uns.py
```python
import torch.nn as nn
class BaseUnsupervisedLoss(nn.Module):
def __init__(self):
super(BaseUnsupervisedLoss, self).__init__()
self.losses = {}
def forward(self, x, h, b, labels):
raise NotImplementedError
class BaseUnsupervisedReconstructionLoss(nn.Module):
def __init__(self):
super(BaseUnsupervisedReconstructionLoss, self).__init__()
self.losses = {}
def forward(self, x, h, y):
"""
:param x: feature
:param h: h = hash(feature) e.g. 4096 -> 64
:param y: reconstructed feature
:return:
"""
raise NotImplementedError
```
#### File: functions/loss/bihalf.py
```python
import torch.nn as nn
import torch.nn.functional as F
class BiHalfLoss(nn.Module):
def __init__(self, **kwargs):
super(BiHalfLoss, self).__init__()
self.losses = {}
def forward(self, x, h, b, labels, index, **kwargs):
"""
x: features before hash layer
h: output from hash FC
b: binary code
labels: not using (only use to obtain size)
"""
# case if batch data not even (normally last batch)
if x.size(0) % 2 != 0:
labels = labels[:-1]
b = b[:-1]
x = x[:-1]
target_b = F.cosine_similarity(b[:x.size(0) // 2], b[x.size(0) // 2:])
target_x = F.cosine_similarity(x[:x.size(0) // 2], x[x.size(0) // 2:]).detach()
loss = F.mse_loss(target_b, target_x)
self.losses['mse'] = loss
return loss
```
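A forward-only sketch of `BiHalfLoss` above with random stand-ins. In the actual training loop the binary codes `b` come from a straight-through binarization of the hash layer so gradients can flow; here we only inspect the loss value, and the import path is an assumption.
```python
import torch
from functions.loss.bihalf import BiHalfLoss  # assumed import path (repository root)

criterion = BiHalfLoss()
x = torch.randn(16, 512)          # stand-in backbone features
b = torch.randn(16, 64).sign()    # stand-in binary codes
h = b                             # hash-layer output; unused by this loss
labels = torch.zeros(16, 10)      # only used for batch bookkeeping
loss = criterion(x, h, b, labels, index=None)
print(loss.item(), criterion.losses['mse'].item())
```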
#### File: functions/loss/dfh.py
```python
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
class DFHLoss(nn.Module):
"""https://github.com/swuxyj/DeepHash-pytorch/blob/master/DFH.py
"""
def __init__(self, train_size, nbit, nclass, mu=1, vul=1, m=1, nta=1, eta=0.5, multiclass=False, **kwargs):
super(DFHLoss, self).__init__()
self.multiclass = multiclass
self.mu = mu
self.vul = vul
self.m = m
self.nta = nta
self.eta = eta
self.U = torch.zeros(nbit, train_size).float()
self.Y = torch.zeros(nclass, train_size).float()
# Relax_center
self.V = torch.zeros(nbit, nclass)
# Center
self.C = self.V.sign()
T = 2 * torch.eye(self.Y.size(0)) - torch.ones(self.Y.size(0))
TK = self.V.size(0) * T
self.TK = torch.FloatTensor(torch.autograd.Variable(TK, requires_grad=False))
self.losses = {}
self.centroids = None
def forward(self, u, y, ind, onehot=True):
"""
u: codes from backbone
y: label (onehot)
ind: index
"""
        assert len(y.shape) == 2, 'Only one-hot labels are supported for now.'
assert ind is not None, 'ind cannot be None'
y = y.float()
if self.U.get_device() != u.get_device():
self.U = self.U.to(u.get_device())
self.Y = self.Y.to(u.get_device())
self.C = self.C.to(u.get_device())
self.TK = self.TK.to(u.get_device())
self.V = self.V.to(u.get_device())
self.U[:, ind] = u.t().data
self.Y[:, ind] = y.t()
b = (self.mu * self.C @ y.t() + u.t()).sign()
# self.center_gradient(torch.autograd.Variable(self.V, requires_grad=True),
# torch.autograd.Variable(y, requires_grad=False),
# torch.autograd.Variable(b, requires_grad=False))
self.discrete_center(torch.autograd.Variable(self.C.t(), requires_grad=True),
torch.autograd.Variable(y.t(), requires_grad=False),
torch.autograd.Variable(b, requires_grad=False))
s = (y @ self.Y > 0).float()
inner_product = u @ self.U * 0.5
inner_product = inner_product.clamp(min=-100, max=50)
metric_loss = ((1 - s) * torch.log(1 + torch.exp(self.m + inner_product))
+ s * torch.log(1 + torch.exp(self.m - inner_product))).mean()
# metric_loss = (torch.log(1 + torch.exp(inner_product)) - s * inner_product).mean() # Without Margin
quantization_loss = (b - u.t()).pow(2).mean()
self.losses['metric'] = metric_loss
self.losses['quant'] = quantization_loss
loss = metric_loss + self.eta * quantization_loss
return loss
def center_gradient(self, V, batchy, batchb):
alpha = 0.03
for i in range(200):
intra_loss = (V @ batchy.t() - batchb).pow(2).mean()
inter_loss = (V.t() @ V - self.TK).pow(2).mean()
quantization_loss = (V - V.sign()).pow(2).mean()
loss = intra_loss + self.vul * inter_loss + self.nta * quantization_loss
self.losses['intra'] = intra_loss
self.losses['inter'] = inter_loss
self.losses['quant_center'] = quantization_loss
loss.backward()
if i in (149, 179):
alpha = alpha * 0.1
V.data = V.data - alpha * V.grad.data
V.grad.data.zero_()
self.V = V
self.C = self.V.sign()
def discrete_center(self, C, Y, B):
"""
Solve DCC(Discrete Cyclic Coordinate Descent) problem.
"""
ones_vector = torch.ones([C.size(0) - 1]).to(C.get_device())
for i in range(C.shape[0]):
Q = Y @ B.t()
q = Q[i, :]
v = Y[i, :]
Y_prime = torch.cat((Y[:i, :], Y[i+1:, :]))
C_prime = torch.cat((C[:i, :], C[i+1:, :]))
with torch.no_grad():
C[i, :] = (q - C_prime.t() @ Y_prime @ v - self.vul * C_prime.t()@ones_vector).sign()
self.C = C.t()
```
#### File: functions/loss/imh.py
```python
import logging
import faiss
import torch
import torch.nn as nn
from sklearn.manifold import TSNE
def manifold_learning(x, nbit):
tsne = TSNE(nbit, init='pca', method='exact')
y = tsne.fit_transform(x)
return y
class IMHLoss(nn.Module):
def __init__(self, nbit, kmeans_iters=200, m=400, k=5, bandwidth=512., **kwargs):
super(IMHLoss, self).__init__()
self.built = False
self.nbit = nbit
self.kmeans_iters = kmeans_iters
self.m = m # base set size
self.k = k # knn size
self.bandwidth = bandwidth
self.kmeans = None
self.knn_index = None
self.base_set = None
self.losses = {}
def compute_embeddings(self, query):
"""
:param query: (n, d)
:param centroids: (m, d)
:return:
"""
try:
query = query.cpu().numpy()
except:
pass
distances, neighbors = self.kmeans.index.search(query, self.k)
gaussianw = torch.exp(- torch.from_numpy(distances) / self.bandwidth)
gaussianw = gaussianw / gaussianw.sum(dim=1, keepdim=True) # (qn, k)
base_neighbors = self.base_set[neighbors] # (qn, k, nbit)
y = (gaussianw.unsqueeze(2) * base_neighbors).sum(dim=1) # (qn, k, nbit) -> (qn, nbit)
return y
def forward(self, x):
"""
:param x: should be full dataset
:return:
"""
if self.training:
assert not self.built, 'please switch to eval mode'
device = x.device
logging.info('Kmeans Learning')
dim = x.size(1)
self.kmeans = faiss.Kmeans(d=dim, k=self.m, niter=self.kmeans_iters)
self.kmeans.train(x.cpu().numpy())
logging.info('Manifold Learning')
self.base_set = manifold_learning(self.kmeans.centroids, self.nbit)
logging.info('Computing Embedding')
v = self.compute_embeddings(x.cpu().numpy())
v = v.to(device)
quan_error = (1 - torch.cosine_similarity(v, v.sign())).mean()
self.losses['quan'] = quan_error
self.built = True
return v, quan_error
else:
assert self.built, 'please perform training'
return self.compute_embeddings(x.cpu().numpy())
def state_dict(self, destination=None, prefix='', keep_vars=False):
""" Overrides state_dict() to save also theta value"""
original_dict = super().state_dict(destination, prefix, keep_vars)
original_dict['centroids'] = self.kmeans.centroids
original_dict['base_set'] = self.base_set
original_dict['built'] = self.built
original_dict['bandwidth'] = self.bandwidth
return original_dict
def load_state_dict(self, state_dict, strict=True):
""" Overrides state_dict() to load also theta value"""
centroids = state_dict.pop('centroids')
base_set = state_dict.pop('base_set')
built = state_dict.pop('built')
bandwidth = state_dict.pop('bandwidth')
dim = centroids.shape[1]
self.kmeans = faiss.Kmeans(d=dim, k=self.m, niter=self.kmeans_iters)
self.kmeans.centroids = centroids
self.kmeans.index = faiss.IndexFlatL2(dim)
self.kmeans.index.reset()
self.kmeans.index.add(centroids)
self.built = built
self.base_set = base_set
self.bandwidth = bandwidth
super().load_state_dict(state_dict, strict)
```
#### File: cisip-FIRe/functions/metrics.py
```python
import gc
import logging
import os
from collections import defaultdict
from typing import Sequence
import numpy as np
import torch
from tqdm import tqdm
import configs
from functions.evaluate_roxf import DATASETS, configdataset, compute_map
from functions.hashing import inverse_sigmoid, get_distance_func
from functions.ternarization import tnt
from utils.misc import Timer
def calculate_mAP(db_codes, db_labels,
test_codes, test_labels,
Rs,
ternarization=None,
distance_func='hamming',
shuffle_database=False,
device=torch.device('cuda'),
onehot=True, using_id=False, ground_truth=None, db_id=None, test_id=None,
old_eval=False, avoid_clone=False, return_dist=False, zero_mean=False,
PRs: Sequence[int] = list()):
if using_id:
assert ground_truth is not None and db_id is not None and test_id is not None
if not avoid_clone:
# clone in case changing value of the original codes
db_codes = db_codes.clone()
test_codes = test_codes.clone()
##### start distance #####
total_timer = Timer()
total_timer.tick()
logging.info('Start Preprocess')
db_codes, test_codes = preprocess_for_calculate_mAP(db_codes, test_codes, ternarization, distance_func, zero_mean)
logging.info('Start Distance')
dist = compute_distances(db_codes, test_codes, distance_func, device)
db_labels = db_labels.clone().cpu().numpy()
test_labels = test_labels.clone().cpu().numpy()
if Rs == -1:
Rs = [len(db_codes)]
logging.info(f'Computing mAP@All, R = {Rs[0]}')
elif isinstance(Rs, int):
Rs = [Rs]
Rs = [len(db_codes) if x == -1 else x for x in Rs] # make sure -1 in list also read as full
logging.info(f'Computing mAP for R = {Rs}')
if shuffle_database:
logging.info('Shuffle Database Enabled.')
randperm = torch.randperm(dist.size(1)).to(device)
dist = dist[:, randperm]
db_labels = db_labels[randperm.cpu().numpy()]
# db_labels = db_labels[randperm]
if using_id:
db_id = db_id[randperm.cpu().numpy()]
    if dist.shape[0] * dist.shape[1] > 134217728:  # at 4 bytes per element (float32), this is 512MB
logging.info("Using CPU for dist, due to memory limitation")
dist = dist.cpu() # move to cpu first to avoid oom in gpu
timer = Timer()
mAPs, DistRs = [], []
logging.info(f'Start Sorting')
timer.tick()
maxR = max(max(Rs), max(PRs) if PRs else 0)
topk_ids = torch.topk(dist, maxR, dim=1, largest=False)[1].cpu() # top k, k = largest R
timer.toc()
logging.info(f'Sort ({timer.total:.2f}s)')
gc.collect()
torch.cuda.empty_cache()
# calculate mAP
if using_id:
output = compute_mAP_score_for_id_multi_R(dist, topk_ids,
db_id, test_id, ground_truth,
Rs, return_dist, PRs=PRs)
else:
output = compute_mAP_score_multi_R(dist, topk_ids,
db_labels, test_labels, Rs, onehot, old_eval, return_dist, PRs=PRs)
total_timer.toc()
logging.info(f'Total time usage for calculating mAP: {total_timer.total:.2f}s')
return output
def preprocess_for_calculate_mAP(db_codes, test_codes,
ternarization=None, distance_func='hamming', zero_mean=False):
##### ternarize #####
if ternarization is not None:
# db_codes, test_codes = ternarize(db_codes, db_labels, test_codes, test_labels, **ternarization)[:2]
mode = ternarization['mode']
if distance_func == 'jmlh-dist':
# we inverse jmlh sigmoid output back to normal input
db_codes = inverse_sigmoid(db_codes)
test_codes = inverse_sigmoid(test_codes)
distance_func = 'hamming' # we switch back to hamming distance because we are using normal input
if mode == 'tnt':
db_codes = tnt(db_codes)
test_codes = tnt(test_codes)
elif mode == 'threshold':
threshold = ternarization['threshold']
if threshold != 0:
# if value within margin, set to 0
db_codes[db_codes.abs() < threshold] = 0
test_codes[test_codes.abs() < threshold] = 0
##### zero mean for code balance #####
if zero_mean:
logging.info('Zero mean enabled.')
db_codes_mean = db_codes.mean(dim=0, keepdim=True)
db_codes = db_codes - db_codes_mean
test_codes = test_codes - db_codes_mean
##### binarize #####
if distance_func == 'hamming': # jhml output is {0, 1}, we can skip this step
# binarized
db_codes = torch.sign(db_codes) # (ndb, nbit)
test_codes = torch.sign(test_codes) # (nq, nbit)
return db_codes, test_codes
def compute_mAP_score_multi_R(dist,
topk_ids,
db_labels,
test_labels,
Rs,
onehot=True,
old_eval=False,
return_dist=False,
PRs: Sequence[int] = list()):
Dists = []
APx = defaultdict(list)
recalls = defaultdict(list)
precisions = defaultdict(list)
maxR = max(max(Rs), max(PRs) if PRs else 0)
pbar = tqdm(range(dist.size(0)), desc='Query', ascii=True, bar_format='{l_bar}{bar:10}{r_bar}',
disable=configs.disable_tqdm)
for i in pbar:
if onehot:
label = test_labels[i, :] # [0,1,0,0] one hot label
label[label == 0] = -1
idx = topk_ids[i, :]
# idx = idx[np.argsort(dist[i, :][idx])]
# imatch = (db_labels[idx[:R]] @ label) > 0 # (R, C) dot (C, 1) -> (R,)
imatch = np.sum(np.equal(db_labels[idx[:maxR], :], label), 1) > 0
else:
label = test_labels[i]
idx = topk_ids[i, :]
imatch = (db_labels[idx[0: maxR]] == label) > 0
Lx = np.cumsum(imatch)
Px = Lx.astype(float) / np.arange(1, maxR + 1, 1) # ap += num_correct / (i + 1)
for R in Rs:
rel = np.sum(imatch[:R])
if rel != 0:
APx[R].append(np.sum(Px[:R] * imatch[:R]) / rel)
elif not old_eval:
APx[R].append(0)
if PRs:
Lx[Lx > 1] = 1
for Ri, R in enumerate(PRs):
rel = np.sum(imatch[:R])
recalls[R].append(Lx[R - 1])
precisions[R].append(rel / R)
if return_dist:
Dists.append(dist[i, idx])
if PRs:
recalls = [np.mean(np.array(recalls[R])) for R in PRs]
precisions = [np.mean(np.array(precisions[R])) for R in PRs]
APx = {R: np.mean(np.array(APx[R])) for R in APx}
mAPs = [APx[R] for R in Rs]
if len(mAPs) == 1:
mAPs = mAPs[0]
if return_dist and PRs:
return mAPs, recalls, precisions, [torch.stack(Dists).cpu().numpy()[:, :R] for R in Rs]
elif return_dist and not PRs:
return mAPs, [torch.stack(Dists).cpu().numpy()[:, :R] for R in Rs]
elif PRs:
return mAPs, recalls, precisions
else:
return mAPs
def compute_mAP_score_for_id_multi_R(dist,
topk_ids,
db_id,
test_id,
ground_truth,
Rs,
return_dist=False,
PRs: Sequence[int] = list()):
Dists = []
APx = defaultdict(list)
recalls = defaultdict(list)
precisions = defaultdict(list)
maxR = max(max(Rs), max(PRs) if PRs else 0)
pbar = tqdm(range(dist.size(0)), desc='Query', ascii=True, bar_format='{l_bar}{bar:10}{r_bar}',
disable=configs.disable_tqdm)
for i in pbar:
already_predicted = set()
test_img_id = test_id[i]
all_id_should_retrieve = set(ground_truth[ground_truth['id'] == test_img_id]['images'].item().split(" "))
idx = topk_ids[i, :]
imatch = np.array([])
for img_id in db_id[idx[0: maxR]]:
correct = img_id in all_id_should_retrieve and img_id not in already_predicted
imatch = np.append(imatch, correct)
already_predicted.add(img_id)
# imatch = np.array([db_id in all_id_should_retrieve for db_id in db_id[idx[0: R]]])
rel = np.sum(imatch)
Lx = np.cumsum(imatch)
Px = Lx.astype(float) / np.arange(1, maxR + 1, 1) # ap += num_correct / (i + 1)
# Px = Lx.float() / torch.arange(1, R + 1, 1).to(device)
# https://github.com/tensorflow/models/blob/7f0ee4cb1f10d4ada340cc5bfe2b99d0d690b219/research/delf/delf/python/datasets/google_landmarks_dataset/metrics.py#L160
for R in Rs:
rel = np.sum(imatch[:R])
APx[R].append(np.sum(Px[:R] * imatch[:R]) / len(all_id_should_retrieve))
if PRs:
Lx[Lx > 1] = 1
for Ri, R in enumerate(PRs):
rel = np.sum(imatch[:R])
recalls[R].append(Lx[R - 1])
precisions[R].append(rel / R)
if return_dist:
Dists.append(dist[i, idx])
if PRs:
recalls = [np.mean(np.array(recalls[R])) for R in PRs]
precisions = [np.mean(np.array(precisions[R])) for R in PRs]
APx = {R: np.mean(np.array(APx[R])) for R in APx}
mAPs = [APx[R] for R in Rs]
if len(mAPs) == 1:
mAPs = mAPs[0]
if return_dist and PRs:
return mAPs, recalls, precisions, [torch.stack(Dists).cpu().numpy()[:, :R] for R in Rs]
elif return_dist and not PRs:
return mAPs, [torch.stack(Dists).cpu().numpy()[:, :R] for R in Rs]
elif PRs:
return mAPs, recalls, precisions
else:
return mAPs
def compute_distances(db_codes, test_codes, distance_func, device):
dist = []
dist_f = get_distance_func(distance_func)
with torch.no_grad():
db_codes = db_codes.to(device)
test_codes = test_codes.to(device)
db_codes_ttd = configs.tensor_to_dataset(db_codes)
db_codes_loader = configs.dataloader(db_codes_ttd, 32, False, 0, False)
# calculate hamming distance
pbar = tqdm(db_codes_loader, desc='Distance', ascii=True, bar_format='{l_bar}{bar:10}{r_bar}',
disable=configs.disable_tqdm)
for i, db_code in enumerate(pbar):
dist.append(dist_f(test_codes, db_code).cpu()) # move to gpu avoid oom
dist = torch.cat(dist, 1) # .numpy()
return dist
def calculate_mAP_roxf(db_codes, test_codes, test_dataset,
ternarization=None,
distance_func='hamming',
device=torch.device('cuda')):
assert test_dataset in DATASETS
# evaluate ranks
ks = [1, 5, 10]
# Set test dataset: roxford5k | rparis6k
logging.info('>> {}: Evaluating test dataset...'.format(test_dataset))
# config file for the dataset
# separates query image list from database image list, when revisited protocol used
cfg = configdataset(test_dataset, os.path.join('data'))
# clone in case changing value of the original codes
db_codes = db_codes.clone()
test_codes = test_codes.clone()
logging.info('Start Preprocess')
if ternarization is not None:
# db_codes, test_codes = ternarize(db_codes, db_labels, test_codes, test_labels, **ternarization)[:2]
mode = ternarization['mode']
if mode == 'tnt':
db_codes = tnt(db_codes)
test_codes = tnt(test_codes)
elif mode == 'threshold':
threshold = ternarization['threshold']
if threshold != 0:
# if value within margin, set to 0
db_codes[db_codes.abs() < threshold] = 0
test_codes[test_codes.abs() < threshold] = 0
if distance_func == 'hamming': # jhml output is {0, 1}, we can skip this step
# binarized
db_codes = torch.sign(db_codes) # (ndb, nbit)
test_codes = torch.sign(test_codes) # (nq, nbit)
dist_f = get_distance_func(distance_func)
dist = []
timer = Timer()
total_timer = Timer()
total_timer.tick()
logging.info('Start Distance')
with torch.no_grad():
db_codes = db_codes.to(device)
test_codes = test_codes.to(device)
db_codes_ttd = configs.tensor_to_dataset(db_codes)
db_codes_loader = configs.dataloader(db_codes_ttd, 32, False, 0, False)
# calculate hamming distance
pbar = tqdm(db_codes_loader, desc='Distance', ascii=True, bar_format='{l_bar}{bar:10}{r_bar}',
disable=configs.disable_tqdm)
for i, db_code in enumerate(pbar):
dist.append(dist_f(test_codes, db_code).cpu())  # move result back to cpu to avoid gpu oom
dist = torch.cat(dist, 1) # .numpy()
logging.info('Start Sorting')
# fast sort
timer.tick()
if dist.shape[0] * dist.shape[1] > 134217728: # consider 4 bytes a tensor (32bit), for 512MB
logging.info("Using CPU for dist, due to memory limitation")
dist = dist.cpu() # move to cpu first to avoid oom in gpu
# ranks = torch.topk(dist, min(max(ks)*1000, dist.shape[0]), dim=1, largest=False)[1].cpu()
ranks = torch.argsort(dist, dim=1).t()
timer.toc()
logging.info(f'Sort ({timer.total:.2f}s)')
# revisited evaluation
gnd = cfg['gnd']
# search for easy
gnd_t = []
for i in range(len(gnd)):
g = {'ok': np.concatenate([gnd[i]['easy']]),
'junk': np.concatenate([gnd[i]['junk'], gnd[i]['hard']])}
gnd_t.append(g)
mapE, apsE, mprE, prsE = compute_map(ranks, gnd_t, ks)
# search for easy & hard
gnd_t = []
for i in range(len(gnd)):
g = {'ok': np.concatenate([gnd[i]['easy'], gnd[i]['hard']]),
'junk': np.concatenate([gnd[i]['junk']])}
gnd_t.append(g)
mapM, apsM, mprM, prsM = compute_map(ranks, gnd_t, ks)
# search for hard
gnd_t = []
for i in range(len(gnd)):
g = {'ok': np.concatenate([gnd[i]['hard']]),
'junk': np.concatenate([gnd[i]['junk'], gnd[i]['easy']])}
gnd_t.append(g)
mapH, apsH, mprH, prsH = compute_map(ranks, gnd_t, ks)
logging.info('>> {}: mAP E: {}, M: {}, H: {}'.format(test_dataset, np.around(mapE * 100, decimals=2),
np.around(mapM * 100, decimals=2),
np.around(mapH * 100, decimals=2)))
logging.info(
'>> {}: mP@k{} E: {}, M: {}, H: {}'.format(test_dataset, np.array(ks), np.around(mprE * 100, decimals=2),
np.around(mprM * 100, decimals=2),
np.around(mprH * 100, decimals=2)))
total_timer.toc()
logging.info(f'Total time usage for calculating mAP: {total_timer.total:.2f}s')
return mapE, mapM, mapH, apsE, apsM, apsH, mprE, mprM, mprH
```
#### File: cisip-FIRe/functions/multiclass.py
```python
import torch
def get_imbalance_mask(sigmoid_logits, labels, nclass, threshold=0.7, imbalance_scale=-1):
if imbalance_scale == -1:
imbalance_scale = 1 / nclass
mask = torch.ones_like(sigmoid_logits) * imbalance_scale
# wan to activate the output
mask[labels == 1] = 1
# if predicted wrong, and not the same as labels, minimize it
correct = (sigmoid_logits >= threshold) == (labels == 1)
mask[~correct] = 1
multiclass_acc = correct.float().mean()
# the rest maintain "imbalance_scale"
return mask, multiclass_acc
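# Example (assumed usage, not part of the original file): the mask is presumably
# multiplied element-wise into a multi-label BCE loss so that the many negative
# entries are down-weighted by `imbalance_scale`:
#   loss = F.binary_cross_entropy(sigmoid_logits, labels, reduction='none')
#   loss = (loss * mask).sum(dim=1).mean()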
```
#### File: cisip-FIRe/inference/web.py
```python
import base64
import io
import os
import time
from zipfile import ZipFile
import numpy as np
from flask import Flask, render_template, request, flash, make_response, send_from_directory
from inference.indexer import Indexer
from utils.misc import pil_loader
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'tiff', 'webp', 'tif', 'jfif'}
def allowed_format(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def process_query(f, indexer: Indexer):
assert indexer, 'Indexer not initialized'
start_time = time.time()
img_ = pil_loader(f)
dist, ind, query_code = indexer.query_with_image(img_)
img_paths = indexer.get_img_path(ind)
end_time = time.time()
flash("Upload successfully.")
data = io.BytesIO()
img_.save(data, "JPEG")
encoded_img_data = base64.b64encode(data.getvalue()) # convert to base64 in byte
encoded_img_data = encoded_img_data.decode('utf-8') # convert to base64 in utf-8
time_string = f'Time taken: {(end_time - start_time):.3f}s\n'
code_string = "".join(str(np.unpackbits(query_code)).split())[1:-2]
code_string = '\n'.join(code_string[i:i + 8] for i in range(0, len(code_string), 8))
return dist, img_paths, code_string, encoded_img_data, img_, time_string
def get_web_app(log_path, device='cpu', top_k=10):
indexer = Indexer(log_path, device=device, top_k=top_k)
app = Flask(__name__, template_folder='templates', static_folder='static')
app.secret_key = 'my_secret_key'
@app.route('/', methods=['GET'])
def index():
return render_template('main.html')
@app.route('/', methods=['POST'])
def predict():
f = request.files['file']
if f.filename == '':
flash("Please select a file")
return index()
elif not allowed_format(f.filename):
flash("Invalid file type")
return index()
dist, img_paths, code_string, encoded_img_data, img_, time_string = process_query(f, indexer)
return render_template('main.html', dists=dist[0], paths=img_paths[0],
code=code_string,
query_img=encoded_img_data,
query_img_full=img_,
time_string=time_string,
extra_data=indexer.get_info())
@app.route('/zip', methods=['POST'])
def generate_zip():
f = request.files['file']
try:
img_ = pil_loader(f)
except:
flash("Invalid file type")
return index()
dists, ind, query_code = indexer.query_with_image(img_)
img_paths = indexer.get_img_path(ind)
dists = dists[0]
data = io.BytesIO()
img = io.BytesIO()
img_.save(img, "JPEG")
with ZipFile(data, 'w') as zip:
for i, path in enumerate(img_paths[0]):
zip.write(os.path.abspath(path),
f'retr_rank{i}_{dists[i]}_{path.split("/")[-1].split("_")[0]}.{path.split(".")[-1]}')
zip.writestr(f'query.jpg', img.getvalue())
data.seek(0)
response = make_response(data.read())
response.headers.set('Content-Type', 'zip')
response.headers.set('Content-Disposition', 'attachment', filename='%s.zip' % 'out')
return response
@app.route('/img/<path:filename>')
def get_image(filename):
path, file = os.path.split(filename)
print(os.path.abspath(filename))
return send_from_directory(os.path.abspath(path), file)
return app
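# Example (assumed usage): serve the retrieval demo locally. The log directory
# below is a hypothetical path and must contain the artifacts Indexer expects.
#   app = get_web_app('logs/my_run', device='cuda:0', top_k=10)
#   app.run(host='0.0.0.0', port=5000)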
```
#### File: models/architectures/arch_ce.py
```python
import torch.nn as nn
from models import register_network, BaseArch
from models.architectures.helper import get_hash_fc_with_normalizations, get_backbone
@register_network('ce')
class ArchCE(BaseArch):
"""Arch CE"""
def __init__(self, config, **kwargs):
super(ArchCE, self).__init__(config, **kwargs)
hash_layer = config['loss_param'].get('hash_layer', 'identity')
hash_kwargs = config['loss_param']
self.backbone = get_backbone(backbone=self.backbone_name,
nbit=self.nbit,
nclass=self.nclass,
pretrained=self.pretrained,
freeze_weight=self.freeze_weight, **kwargs)
self.ce_fc = nn.Linear(self.nbit, self.nclass)
self.hash_fc = get_hash_fc_with_normalizations(in_features=self.backbone.in_features,
nbit=self.nbit,
bias=self.bias,
kwargs=hash_kwargs)
def get_features_params(self):
return self.backbone.get_features_params()
def get_hash_params(self):
return list(self.ce_fc.parameters()) + list(self.hash_fc.parameters())
def forward(self, x):
x = self.backbone(x)
v = self.hash_fc(x)
u = self.ce_fc(v)
return u, v
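# Shape note (added): for a batch of N images, `v` (hash logits) has shape
# (N, nbit) and `u` (classification logits) has shape (N, nclass); binary codes
# are typically taken as torch.sign(v) at retrieval time.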
```
#### File: models/architectures/arch_tbh.py
```python
import torch
import torch.nn as nn
from models import register_network, BaseArch
from models.architectures.helper import get_backbone, get_hash_activation
def build_adjacency_hamming(tensor_in):
"""
Hamming-distance-based graph. It is self-connected.
:param tensor_in: [N D]
:return:
"""
code_length = tensor_in.size(1)
m1 = tensor_in - 1.
c1 = torch.matmul(tensor_in, m1.t()) # (N, N)
c2 = torch.matmul(m1, tensor_in.t()) # (N, N)
normalized_dist = torch.abs(c1 + c2) / code_length
return torch.pow(1 - normalized_dist, 1.4) # why 1.4?
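# Note (added): for 0/1 codes b_i, b_j of length D, c1 + c2 equals the negative
# Hamming distance, so `normalized_dist` is Hamming(b_i, b_j) / D and the output
# is a similarity in [0, 1] raised to the (empirical) power 1.4. Quick check:
#   b = torch.tensor([[1., 0., 1., 1.], [1., 1., 0., 1.]])
#   build_adjacency_hamming(b)  # diagonal = 1, off-diagonal = 0.5 ** 1.4 ~ 0.38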
class TwinBottleneck(nn.Module):
def __init__(self, bbn_dim, cbn_dim, **kwargs):
super().__init__()
self.bbn_dim = bbn_dim
self.cbn_dim = cbn_dim
self.gcn = GCNLayer(cbn_dim, cbn_dim)
def forward(self, bbn, cbn):
adj = build_adjacency_hamming(bbn)
return torch.sigmoid(self.gcn(cbn, adj))
class GCNLayer(nn.Module):
# https://github.com/ymcidence/TBH/blob/778dd1cfb5c631d109493e0cee858ab6fa675416/layer/gcn.py#L8
def __init__(self, in_dim=512, out_dim=512, **kwargs):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.fc = nn.Linear(in_dim, out_dim)
def forward(self, values, adjacency, **kwargs):
"""
:param values:
:param adjacency:
:param kwargs:
:return:
"""
return self.spectrum_conv(values, adjacency)
def spectrum_conv(self, values, adjacency):
"""
Convolution on a graph with graph Laplacian
:param values: [N D]
:param adjacency: [N N] must be self-connected
:return:
"""
fc_sc = self.fc(values) # (N, D)
conv_sc = self.graph_laplacian(adjacency) @ fc_sc # (N, D)
return conv_sc
def graph_laplacian(self, adjacency):
"""
:param adjacency: must be self-connected
:return:
"""
graph_size = adjacency.size(0) # (BS, BS)
d = adjacency @ torch.ones(size=[graph_size, 1]).to(adjacency.device) # (BS, 1)
d_inv_sqrt = torch.pow(d + 1e-8, -0.5) # (BS, 1)
d_inv_sqrt = torch.eye(graph_size).to(adjacency.device) * d_inv_sqrt # (BS, BS)
laplacian = d_inv_sqrt @ adjacency @ d_inv_sqrt # (BS, BS)
return laplacian
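# Note (added): despite its name, this returns the symmetrically normalized
# adjacency D^{-1/2} A D^{-1/2} (the usual GCN propagation matrix), not the
# Laplacian I - D^{-1/2} A D^{-1/2}.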
class Encoder(nn.Module):
def __init__(self, input_dim=4096, middle_dim=1024, bbn_dim=64, cbn_dim=512):
"""
:param middle_dim: hidden units
:param bbn_dim: binary bottleneck size
:param cbn_dim: continuous bottleneck size
"""
super(Encoder, self).__init__()
self.code_length = bbn_dim
self.fc_1 = nn.Sequential(
nn.Linear(input_dim, middle_dim),
nn.ReLU()
)
self.fc_2_1 = nn.Sequential(
nn.Linear(middle_dim, bbn_dim)
)
self.fc_2_2 = nn.Sequential(
nn.Linear(middle_dim, cbn_dim),
# nn.Sigmoid()
) # paper is Identity
self.hash_layer = get_hash_activation('stochasticbin')
def forward(self, x):
fc_1 = self.fc_1(x)
bbn = self.fc_2_1(fc_1)
if self.training:
bbn, _ = self.hash_layer(bbn, torch.ones_like(bbn) * 0.5)
else: # eval mode, just output sigmoid probability
bbn = torch.sigmoid(bbn)
cbn = self.fc_2_2(fc_1)
return bbn, cbn
class Decoder(nn.Module):
def __init__(self, in_dim, middle_dim, feat_dim):
"""
:param middle_dim: hidden units
:param feat_dim: data dim
"""
super(Decoder, self).__init__()
self.fc_1 = nn.Sequential(
nn.Linear(in_dim, middle_dim),
nn.ReLU()
)
self.fc_2 = nn.Sequential(
nn.Linear(middle_dim, feat_dim)
) # original implementation is with ReLU, but paper is Identity
def forward(self, x):
fc_1 = self.fc_1(x)
return self.fc_2(fc_1)
@register_network('tbh')
class ArchTBH(BaseArch):
"""Arch TBH"""
def __init__(self, config, **kwargs):
super(ArchTBH, self).__init__(config, **kwargs)
self.backbone = get_backbone(backbone=self.backbone_name, nbit=self.nbit, nclass=self.nclass,
pretrained=self.pretrained, freeze_weight=self.freeze_weight, **kwargs)
self.encoder = Encoder(self.backbone.in_features, 1024, self.nbit, 512)
self.decoder = Decoder(512, 1024, self.backbone.in_features)
self.tbh = TwinBottleneck(self.nbit, 512)
self.discriminator_binary = nn.Sequential(
# nn.Linear(nbit, 1024),
# nn.ReLU(),
nn.Linear(self.nbit, 1),
nn.Sigmoid()
) # original implementation is one FC-sigmoid layer. paper is one FC-ReLU layer + one FC-sigmoid layer
self.discriminator_continuous = nn.Sequential(
# nn.Linear(512, 1024),
# nn.ReLU(),
nn.Linear(512, 1),
nn.Sigmoid()
) # original implementation is one FC-sigmoid layer. paper is one FC-ReLU layer + one FC-sigmoid layer
def get_features_params(self):
return self.backbone.get_features_params()
def get_hash_params(self):
return (list(self.encoder.parameters()) +
list(self.decoder.parameters()) +
list(self.tbh.parameters()))
def get_discriminator_params(self):
return (list(self.discriminator_binary.parameters()) +
list(self.discriminator_continuous.parameters()))
def forward(self, x):
x = self.backbone(x)
bbn, cbn = self.encoder(x)
gcn_cbn = self.tbh(bbn, cbn)
rec_x = self.decoder(gcn_cbn)
dis1_real = self.discriminator_binary(bbn)
dis1_fake = self.discriminator_binary(torch.bernoulli(torch.ones_like(bbn) * 0.5))
dis2_real = self.discriminator_continuous(gcn_cbn)
dis2_fake = self.discriminator_continuous(torch.rand_like(gcn_cbn))
return x, bbn, rec_x, [(dis1_real, dis1_fake), (dis2_real, dis2_fake)]
if __name__ == '__main__':
torch.manual_seed(1234)
net = ArchTBH(64, 10, False, False, 'alexnet')
print(net.training)
net.train()
data = torch.randn(1, 3, 224, 224)
x, bbn, rec_x, discs = net(data)
print(x)
print(bbn)
print(rec_x)
print(discs)
from functions.loss.tbh import TBHLoss
criterion = TBHLoss()
opt_A = torch.optim.Adam([{'params': net.get_hash_params()},
{'params': net.get_features_params()}],
0.0001)
hashparams = net.get_hash_params()
opt_C = torch.optim.Adam([{'params': net.get_discriminator_params()}],
0.0001)
disparams = net.get_discriminator_params()
loss = criterion(x, bbn, rec_x, discs)
print(loss)
actor_loss = criterion.losses['actor']
critic_loss = criterion.losses['critic']
#
params = [p for param in opt_A.param_groups for p in param['params']]
actor_loss.backward(retain_graph=True, inputs=params)
opt_A.step() # step for main hashing flow
params = [p for param in opt_C.param_groups for p in param['params']]
critic_loss.backward(inputs=params)
opt_C.step()
```
#### File: models/backbone/resnet.py
```python
import torch
import torch.nn as nn
from torchvision import models
from models.backbone.base_backbone import BaseBackbone
class ResNetBackbone(BaseBackbone):
def __init__(self, nbit, nclass, pretrained=False, freeze_weight=False, resnet_size='18', **kwargs):
super(ResNetBackbone, self).__init__()
resnet_models = {
'18': models.resnet18,
'34': models.resnet34,
'50': models.resnet50,
'101': models.resnet101,
'152': models.resnet152
}
model = resnet_models[resnet_size](pretrained)
self.conv1 = model.conv1
self.bn1 = model.bn1
self.relu = model.relu
self.maxpool = model.maxpool
self.layer1 = model.layer1
self.layer2 = model.layer2
self.layer3 = model.layer3
self.layer4 = model.layer4
self.avgpool = model.avgpool
self.classifier = model.fc
resnet_fc_sizes = {
'18': 512,
'34': 512,
'50': 2048,
'101': 2048,
'152': 2048
}
self.in_features = resnet_fc_sizes[resnet_size]
self.nbit = nbit
self.nclass = nclass
self.params = [self.conv1, self.bn1, self.layer1, self.layer2, self.layer3, self.layer4, self.classifier]
self.params = [list(m.parameters()) for m in self.params]
self.params = sum(self.params, []) # join all lists
if freeze_weight:
for m in self.modules():
if hasattr(m, 'weight') and m.weight is not None:
m.weight.requires_grad_(False)
if hasattr(m, 'bias') and m.bias is not None:
m.bias.requires_grad_(False)
def train(self, mode=True):
super(ResNetBackbone, self).train(mode)
for mod in self.modules():
if isinstance(mod, nn.BatchNorm2d):
mod.eval()
def get_features_params(self):
return self.params
def get_hash_params(self):
raise NotImplementedError('no hash layer in backbone')
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
return x
```
#### File: models/backbone/vgg.py
```python
import torch
import torch.nn as nn
from torchvision.models import vgg16, vgg16_bn
from models.backbone.base_backbone import BaseBackbone
def _initialize_weights(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
class VGGBackbone(BaseBackbone):
def __init__(self, nbit, nclass, pretrained=False, freeze_weight=False, vgg_size='vgg16', **kwargs):
super(VGGBackbone, self).__init__()
vgg_sizes = {
'vgg16': vgg16,
'vgg16bn': vgg16_bn
}
model = vgg_sizes[vgg_size](pretrained)
self.features = model.features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.fc = model.classifier[:-1]
self.classifier = model.classifier[-1]
self.in_features = 4096
self.nbit = nbit
self.nclass = nclass
if not pretrained:
_initialize_weights(model)
if freeze_weight:
for m in list(self.features) + list(self.fc):
if hasattr(m, 'weight') and m.weight is not None:
m.weight.requires_grad_(False)
if hasattr(m, 'bias') and m.bias is not None:
m.bias.requires_grad_(False)
def get_features_params(self):
return list(self.features.parameters()) + list(self.fc.parameters()) + list(self.classifier.parameters())
def get_hash_params(self):
raise NotImplementedError('no hash layer in backbone')
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
```
#### File: models/backbone/vit.py
```python
from typing import List
import timm
import torch
from models.backbone.base_backbone import BaseBackbone
class ViTBackbone(BaseBackbone):
def __init__(self, nbit, nclass, vit_name, pretrained=False, freeze_weight=False, **kwargs):
super(ViTBackbone, self).__init__()
model = timm.create_model(vit_name, pretrained=pretrained)
self.patch_embed = model.patch_embed
self.cls_token = model.cls_token
self.pos_embed = model.pos_embed
self.pos_drop = model.pos_drop
self.blocks = model.blocks
self.norm = model.norm
self.pre_logits = model.pre_logits
self.head = model.head # no need train as features_params because not using
self.in_features = model.num_features
self.nbit = nbit
self.nclass = nclass
assert freeze_weight is False, \
'freeze_weight in backbone deprecated. Use --backbone-lr-scale=0 to freeze backbone'
def get_features_params(self) -> List:
return list(self.parameters())
def get_hash_params(self) -> List:
raise NotImplementedError('no hash layer in backbone')
def forward(self, x):
x = self.patch_embed(x)
cls_token = self.cls_token.expand(x.shape[0], -1, -1)
x = torch.cat((cls_token, x), dim=1)
x = self.pos_drop(x + self.pos_embed)
x = self.blocks(x)
x = self.norm(x)
return self.pre_logits(x[:, 0])
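# Note (added): the forward pass mirrors timm's ViT feature extraction and
# returns the pre-logits CLS-token embedding of size `self.in_features`;
# the classification head `self.head` is kept but unused here.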
```
#### File: cisip-FIRe/scripts/train_pairwise.py
```python
import logging
import torch
def update_params_pairwise(loss_param, train_loader, nbit, nclass):
# update pairwise loss parameters
loss_param['loss_param'].update({
'keep_train_size': loss_param['loss_param'].get('keep_train_size', False),
'train_size': len(train_loader.dataset),
'nbit': nbit,
'nclass': nclass
})
if loss_param['loss_param']['keep_train_size']:
logging.info('Keep train size!')
def get_output_and_loss_pairwise(model, criterion, data, labels, index, onehot, loss_name, loss_cfg, stage='train',
no_loss=False):
logits, code_logits = model(data)
if no_loss:
loss = torch.tensor(0.)
elif stage == 'train':
if loss_cfg.get('train_size', 0) != 0 and loss_cfg.get('keep_train_size', False):
ind = index
else:
ind = None
loss = criterion(code_logits, labels, ind)
elif stage == 'test':
ind = None # no need to put ind into criterion during testing
if loss_name in ['dfh']:
loss = torch.tensor(0.)
else:
loss = criterion(code_logits, labels, ind)
else:
raise ValueError('only train and test can be set as stage')
return {
'logits': logits,
'code_logits': code_logits
}, loss
def update_meters_pairwise(model, meters, out, labels, onehot, criterion, loss_name, loss_cfg):
acc = 0.0
for key in criterion.losses:
val = criterion.losses[key]
if hasattr(val, 'item'):
val = val.item()
meters[key].update(val)
meters['acc'].update(acc)
```
#### File: cisip-FIRe/scripts/train_unsupervised.py
```python
import torch
def get_output_and_loss_unsupervised(model,
criterion,
data,
labels,
index,
onehot,
loss_name,
loss_cfg,
stage,
no_loss):
x, code_logits, b = model(data)[:3]
if not no_loss:
if loss_name in ['ssdh']: # test stage ssdh
if stage == 'train':
loss = criterion(x, code_logits, b, labels, index)
else:
loss = torch.tensor(0.)
elif loss_name in ['uhbdnn']:
loss = criterion(x, code_logits, b, index)
else:
try:
loss = criterion(x, code_logits, b, labels, index)
except:
raise NotImplementedError(f'Loss name: {loss_name}; Stage: {stage}')
else:
loss = torch.tensor(0.)
return {
'x': x,
'code_logits': code_logits,
'b': b
}, loss
def update_meters_unsupervised(model, meters, out, labels, onehot, criterion, loss_name, loss_cfg):
# acc = 0.0
for key in criterion.losses:
val = criterion.losses[key]
if hasattr(val, 'item'):
val = val.item()
meters[key].update(val)
# meters['acc'].update(acc)
```
#### File: jiahuei/cisip-FIRe/test.py
```python
import argparse
import json
from scripts import test_hashing
def get_ternarization_config(args):
return {
'mode': args.tmode,
'threshold': args.threshold
}
parser = argparse.ArgumentParser()
parser.add_argument('--logdir', required=True)
parser.add_argument('--db-path', default=None)
parser.add_argument('--test-path', default=None)
parser.add_argument('--old-eval', default=False, action='store_true', help='whether to use old eval method')
parser.add_argument('--tmode', default='off', choices=['tnt', 'threshold', 'off'], help='ternarization mode')
parser.add_argument('--threshold', default=0., type=float, help='threshold for ternary')
parser.add_argument('--dist', default='hamming', choices=['hamming', 'euclidean', 'cosine', 'jmlh-dist'])
parser.add_argument('--shuffle', default=False, action='store_true', help='whether to shuffle database')
parser.add_argument('--tag', default=None)
parser.add_argument('--device', default='cuda:0', type=str, help='cuda:x')
parser.add_argument('--R', default=0, type=int, help='0 = default, -1 = all')
parser.add_argument('--zero-mean-eval', default=False, action='store_true')
args = parser.parse_args()
logdir = args.logdir
config = json.load(open(logdir + '/config.json'))
config.update({
'ternarization': get_ternarization_config(args),
'distance_func': args.dist,
'shuffle_database': args.shuffle,
'db_path': logdir + '/' + str(args.db_path),
'test_path': logdir + '/' + str(args.test_path),
'load_model': args.db_path is None,
'tag': args.tag,
'old_eval': args.old_eval,
'device': args.device,
'zero_mean_eval': args.zero_mean_eval
})
if args.R != 0 and config['R'] != args.R:
config['R'] = args.R
config['dataset_kwargs']['remove_train_from_db'] = False
test_hashing.main(config)
```
#### File: cisip-FIRe/utils/datasets.py
```python
import logging
import os
from abc import ABC
from typing import Tuple, Any
import numpy as np
import torch
import torchvision
from pandas import read_csv
from torch.utils.data import Dataset, DataLoader
from torchvision.datasets import CIFAR10, CIFAR100
from torchvision.datasets.folder import pil_loader, accimage_loader
from torchvision.transforms import transforms
from tqdm import tqdm
import configs
from functions.evaluate_roxf import configdataset, DATASETS
from functions.mining import SimpleMemoryBank
from utils.augmentations import GaussianBlurOpenCV
class BaseDataset(Dataset, ABC):
def get_img_paths(self):
raise NotImplementedError
class HashingDataset(BaseDataset):
def __init__(self, root,
transform=None,
target_transform=None,
filename='train',
separate_multiclass=False,
ratio=1):
if torchvision.get_image_backend() == 'PIL':
self.loader = pil_loader
else:
self.loader = accimage_loader
self.separate_multiclass = separate_multiclass
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.filename = filename
self.train_data = []
self.train_labels = []
self.ratio = ratio
filename = os.path.join(self.root, self.filename)
is_pkl = False
with open(filename, 'r') as f:
while True:
lines = f.readline()
if not lines:
break
path_tmp = lines.split()[0]
label_tmp = lines.split()[1:]
self.is_onehot = len(label_tmp) != 1
if not self.is_onehot:
label_tmp = lines.split()[1]
if self.separate_multiclass:
assert self.is_onehot, 'if multiclass, please use onehot'
nonzero_index = np.nonzero(np.array(label_tmp, dtype=int))[0]
for c in nonzero_index:
self.train_data.append(path_tmp)
label_tmp = ['1' if i == c else '0' for i in range(len(label_tmp))]
self.train_labels.append(label_tmp)
else:
self.train_data.append(path_tmp)
self.train_labels.append(label_tmp)
is_pkl = path_tmp.endswith('.pkl') # if save as pkl, pls make sure dont use different style of loading
if is_pkl:
self.loader = torch.load
self.train_data = np.array(self.train_data)
self.train_labels = np.array(self.train_labels, dtype=float)
if ratio != 1:
assert 0 < ratio < 1, 'data ratio is in between 0 and 1 exclusively'
N = len(self.train_data)
randidx = np.arange(N)
np.random.shuffle(randidx)
randidx = randidx[:int(ratio * N)]
self.train_data = self.train_data[randidx]
self.train_labels = self.train_labels[randidx]
logging.info(f'Number of data: {self.train_data.shape[0]}')
def filter_classes(self, classes): # only work for single class dataset
new_data = []
new_labels = []
for idx, c in enumerate(classes):
new_onehot = np.zeros(len(classes))
new_onehot[idx] = 1
cmask = self.train_labels.argmax(axis=1) == c
new_data.append(self.train_data[cmask])
new_labels.append(np.repeat([new_onehot], int(np.sum(cmask)), axis=0))
# new_labels.append(self.train_labels[cmask])
self.train_data = np.concatenate(new_data)
self.train_labels = np.concatenate(new_labels)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.train_data[index], self.train_labels[index]
target = torch.tensor(target)
img = self.loader(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, index
def __len__(self):
return len(self.train_data)
def get_img_paths(self):
return self.train_data
class IndexDatasetWrapper(BaseDataset):
def __init__(self, ds) -> None:
super(Dataset, self).__init__()
self.__dict__['ds'] = ds
def __setattr__(self, name, value):
setattr(self.ds, name, value)
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.ds, attr)
def __getitem__(self, index: int) -> Tuple:
"""
Args:
index (int): Index
Returns:
tuple: (image, target, index) where target is index of the target class.
"""
outs = self.ds.__getitem__(index)
return tuple(list(outs) + [index])
def __len__(self):
return len(self.ds)
def get_img_paths(self):
return self.ds.get_img_paths()
class Denormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
class InstanceDiscriminationDataset(BaseDataset):
def augment_image(self, img):
# if using this, run the script with --no-aug and --gpu-mean-transform
return self.transform(self.to_pil(img))
def weak_augment_image(self, img):
# if using this, run the script with --no-aug and --gpu-mean-transform
return self.weak_transform(self.to_pil(img))
def __init__(self, ds, tmode='simclr', imgsize=224, weak_mode=0) -> None:
super(Dataset, self).__init__()
self.__dict__['ds'] = ds
if 'simclr' in tmode:
s = 0.5
size = imgsize
color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
data_transforms = transforms.Compose([transforms.RandomResizedCrop(size=size, scale=(0.5, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([color_jitter], p=0.7),
transforms.RandomGrayscale(p=0.2),
GaussianBlurOpenCV(kernel_size=3),
# GaussianBlur(kernel_size=int(0.1 * size)),
transforms.ToTensor(),
# 0.2 * 224 = 44 pixels
transforms.RandomErasing(p=0.2, scale=(0.02, 0.2))])
self.transform = data_transforms
# lazy fix, could be made prettier and more general; CIBHash part 1/2
elif tmode == 'cibhash':
logging.info('CIBHash Augmentations')
s = 0.5
size = imgsize
color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
data_transforms = transforms.Compose([transforms.RandomResizedCrop(size=size, scale=(0.5, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([color_jitter], p=0.7),
transforms.RandomGrayscale(p=0.2),
GaussianBlurOpenCV(kernel_size=3),
# GaussianBlur(kernel_size=3),
transforms.ToTensor()])
self.transform = data_transforms
else:
raise ValueError(f'unknown mode {tmode}')
if weak_mode == 1:
logging.info(f'Weak mode {weak_mode} activated.')
self.weak_transform = transforms.Compose([
transforms.Resize(256), # temp lazy hard code
transforms.CenterCrop(imgsize),
transforms.ToTensor()
])
elif weak_mode == 2:
logging.info(f'Weak mode {weak_mode} activated.')
self.weak_transform = transforms.Compose([
transforms.Resize(256), # temp lazy hard code
transforms.RandomCrop(imgsize),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
self.weak_mode = weak_mode
self.tmode = tmode
self.imgsize = imgsize
self.to_pil = transforms.ToPILImage()
def __setattr__(self, name, value):
setattr(self.ds, name, value)
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.ds, attr)
def __getitem__(self, index: int) -> Tuple[Any, Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target, index) where target is index of the target class.
"""
out = self.ds.__getitem__(index)
img, target = out[:2] # exclude index
# if self.tmode == 'simclr':
# aug_imgs = [img, self.augment_image(img)]
# else:
if self.weak_mode != 0:
aug_imgs = [self.weak_augment_image(img), self.augment_image(img)]
else:
aug_imgs = [self.augment_image(img), self.augment_image(img)]
return torch.stack(aug_imgs, dim=0), target, index
def __len__(self):
return len(self.ds)
def get_img_paths(self):
return self.ds.get_img_paths()
class RotationDataset(BaseDataset):
@staticmethod
def rotate_img(img, rot):
img = np.transpose(img.numpy(), (1, 2, 0))
if rot == 0: # 0 degrees rotation
out = img
elif rot == 90: # 90 degrees rotation
out = np.flipud(np.transpose(img, (1, 0, 2)))
elif rot == 180: # 180 degrees rotation
out = np.fliplr(np.flipud(img))
elif rot == 270: # 270 degrees rotation / or -90
out = np.transpose(np.flipud(img), (1, 0, 2))
else:
raise ValueError('rotation should be 0, 90, 180, or 270 degrees')
return torch.from_numpy(np.transpose(out, (2, 0, 1)).copy())
def __init__(self, ds) -> None:
super(Dataset, self).__init__()
self.__dict__['ds'] = ds
def __setattr__(self, name, value):
setattr(self.ds, name, value)
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.ds, attr)
def __getitem__(self, index: int) -> Tuple[Any, Any, Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target, index) where target is index of the target class.
"""
out = self.ds.__getitem__(index)
img, target = out[:2] # exclude index
# rot_label = np.random.randint(0, 4) # .item()
rot_labels = [0, 1, 2, 3]
rots = [0, 90, 180, 270]
# rots = [0, rots[rot_label]]
rot_imgs = [self.rotate_img(img, rot) for rot in rots]
return torch.stack(rot_imgs, dim=0), torch.tensor(rot_labels), target, index
def __len__(self):
return len(self.ds)
def get_img_paths(self):
return self.ds.get_img_paths()
class LandmarkDataset(BaseDataset):
def __init__(self, root,
transform=None,
target_transform=None,
filename='train.csv',
onehot=False, return_id=False):
self.loader = pil_loader
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.filename = filename
self.train_labels = []
self.set_name = filename[:-4]
self.onehot = onehot
self.return_id = return_id
def get_path(i: str):
return os.path.join(root, self.set_name, i[0], i[1], i[2], i + ".jpg")
filename = os.path.join(self.root, self.filename)
self.df = read_csv(filename)
self.df['path'] = self.df['id'].apply(get_path)
self.max_index = self.df['landmark_id'].max() + 1
logging.info(f'Number of data: {len(self.df)}')
def to_onehot(self, i):
t = torch.zeros(self.max_index)
t[i] = 1
return t
def __getitem__(self, index):
img = self.df['path'][index]
if self.onehot:
target = self.to_onehot(self.df['landmark_id'][index])
else:
target = self.df['landmark_id'][index]
# target = torch.tensor(target)
img = self.loader(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.return_id:
return img, target, (self.df['id'][index], index)
return img, target
def __len__(self):
return len(self.df)
def get_img_paths(self):
return self.df['path'].to_numpy()
class SingleIDDataset(BaseDataset):
"""Dataset with only single class ID
To be merged with Landmark"""
def __init__(self, root,
transform=None,
target_transform=None,
filename='train.csv',
onehot=False):
self.loader = pil_loader
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.filename = filename
self.train_labels = []
self.set_name = filename[:-4]
self.onehot = onehot
def get_path(i: str):
return os.path.join(root, "imgs", i)
filename = os.path.join(self.root, self.filename)
self.df = read_csv(filename)
self.df['path'] = self.df['path'].apply(get_path)
self.max_index = self.df['class_id'].max() + 1
logging.info(f'Number of data: {len(self.df)}')
def to_onehot(self, i):
t = torch.zeros(self.max_index)
t[i] = 1
return t
def __getitem__(self, index):
img = self.df['path'][index]
if self.onehot:
target = self.to_onehot(self.df['class_id'][index])
else:
target = self.df['class_id'][index]
# target = torch.tensor(target)
img = self.loader(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, index
def __len__(self):
return len(self.df)
def get_img_paths(self):
return self.df['path'].to_numpy()
class ROxfordParisDataset(BaseDataset):
def __init__(self,
dataset='roxford5k',
filename='test.txt',
transform=None,
target_transform=None):
self.loader = pil_loader
self.transform = transform
self.target_transform = target_transform
assert filename in ['test.txt', 'database.txt']
self.set_name = filename
assert dataset in DATASETS
self.cfg = configdataset(dataset, os.path.join('data'))
logging.info(f'Number of data: {self.__len__()}')
def __getitem__(self, index):
if self.set_name == 'database.txt':
img = self.cfg['im_fname'](self.cfg, index)
elif self.set_name == 'test.txt':
img = self.cfg['qim_fname'](self.cfg, index)
img = self.loader(img)
if self.set_name == 'test.txt':
img = img.crop(self.cfg['gnd'][index]['bbx'])
if self.transform is not None:
img = self.transform(img)
return img, index, index # returning (img, None, index) would throw an error, so the index doubles as a dummy target
def __len__(self):
if self.set_name == 'test.txt':
return self.cfg['nq']
elif self.set_name == 'database.txt':
return self.cfg['n']
def get_img_paths(self):
raise NotImplementedError('Not supported.')
class DescriptorDataset(BaseDataset):
def __init__(self, root, filename, ratio=1):
self.data_dict = torch.load(os.path.join(root, filename), map_location=torch.device('cpu'))
self.filename = filename
self.root = root
self.ratio = ratio
if ratio != 1:
assert 0 < ratio < 1, 'data ratio is in between 0 and 1 exclusively'
N = len(self.data_dict['codes'])
randidx = np.arange(N)
np.random.shuffle(randidx)
randidx = randidx[:int(ratio * N)]
for key in self.data_dict:
self.data_dict[key] = self.data_dict[key][randidx]
logging.info(f'Number of data in {filename}: {self.__len__()}')
def __getitem__(self, index):
embed = self.data_dict['codes'][index]
label = self.data_dict['labels'][index] # labels are returned as-is here (no re-indexing)
return embed, label, index # returning (embed, None, index) would throw an error
def __len__(self):
return len(self.data_dict['codes'])
def get_img_paths(self):
raise NotImplementedError('Not supported for descriptor dataset. Please try usual Image Dataset if you want to get all image paths.')
class EmbeddingDataset(BaseDataset):
def __init__(self, root,
filename='train.txt'):
self.data_dict = torch.load(os.path.join(root, filename), map_location=torch.device('cpu'))
self.filename = filename
self.root = root
logging.info(f'Number of data in {filename}: {self.__len__()}')
def __getitem__(self, index):
embed = self.data_dict['codes'][index]
if self.filename == 'train.txt':
label = self.data_dict['labels'][index] - 1 # label is 1 indexed, convert to 0-indexed
else:
label = 0
landmark_id = self.data_dict['id'][index]
return embed, label, (landmark_id, index) # returning None as the target would throw an error
def __len__(self):
return len(self.data_dict['id'])
def get_img_paths(self):
raise NotImplementedError('Not supported for descriptor dataset. Please try usual Image Dataset if you want to get all image paths.')
class NeighbourDatasetWrapper(BaseDataset):
def __init__(self, ds, model, config) -> None:
super(Dataset, self).__init__()
self.ds = ds
device = config['device']
loader = DataLoader(ds, config['batch_size'],
shuffle=False,
drop_last=False,
num_workers=os.cpu_count())
model.eval()
pbar = tqdm(loader, desc='Obtain Codes', ascii=True, bar_format='{l_bar}{bar:10}{r_bar}',
disable=configs.disable_tqdm)
ret_feats = []
for i, (data, labels, index) in enumerate(pbar):
with torch.no_grad():
data, labels = data.to(device), labels.to(device)
x, code_logits, b = model(data)[:3]
ret_feats.append(x.cpu())
ret_feats = torch.cat(ret_feats)
mbank = SimpleMemoryBank(len(self.ds), model.backbone.in_features, device)
mbank.update(ret_feats)
neighbour_topk = config['dataset_kwargs'].get('neighbour_topk', 5)
indices = mbank.mine_nearest_neighbors(neighbour_topk)
self.indices = indices[:, 1:] # exclude itself
def __getitem__(self, index: int) -> Tuple[Any, Any, Any, Any, Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target, index) where target is index of the target class.
"""
img, target = self.ds.__getitem__(index)
randidx = np.random.choice(self.indices[index], 1)[0]
nbimg, nbtar = self.ds.__getitem__(randidx)
return img, target, index, nbimg, nbtar, randidx
def __len__(self):
return len(self.ds)
def get_img_paths(self):
return self.ds.get_img_paths()
def one_hot(nclass):
def f(index):
index = torch.tensor(int(index)).long()
return torch.nn.functional.one_hot(index, nclass)
return f
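# Example (added): one_hot(10)(3) returns a length-10 LongTensor with a 1 at
# index 3; the closure is passed to the torchvision datasets below as
# `target_transform`.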
def cifar(nclass, **kwargs):
transform = kwargs['transform']
ep = kwargs['evaluation_protocol']
fn = kwargs['filename']
reset = kwargs['reset']
CIFAR = CIFAR10 if int(nclass) == 10 else CIFAR100
traind = CIFAR(f'data/cifar{nclass}',
transform=transform, target_transform=one_hot(int(nclass)),
train=True, download=True)
traind = IndexDatasetWrapper(traind)
testd = CIFAR(f'data/cifar{nclass}',
transform=transform, target_transform=one_hot(int(nclass)),
train=False, download=True)
testd = IndexDatasetWrapper(testd)
if ep == 2: # using orig train and test
if fn == 'test.txt':
return testd
else: # train.txt and database.txt
return traind
combine_data = np.concatenate([traind.data, testd.data], axis=0)
combine_targets = np.concatenate([traind.targets, testd.targets], axis=0)
path = f'data/cifar{nclass}/0_0_{ep}_{fn}'
load_data = fn == 'train.txt'
load_data = load_data and (reset or not os.path.exists(path))
if not load_data:
logging.info(f'Loading {path}')
data_index = torch.load(path)
else:
train_data_index = []
query_data_index = []
db_data_index = []
data_id = np.arange(combine_data.shape[0]) # [0, 1, ...]
for i in range(nclass):
class_mask = combine_targets == i
index_of_class = data_id[class_mask].copy() # index of the class [2, 10, 656,...]
np.random.shuffle(index_of_class)
if ep == 1:
query_n = 100 # // (nclass // 10)
train_n = 500 # // (nclass // 10)
index_for_query = index_of_class[:query_n].tolist()
index_for_db = index_of_class[query_n:].tolist()
index_for_train = index_for_db[:train_n]
elif ep == 2: # ep2 = take all data
query_n = 1000 # // (nclass // 10)
index_for_query = index_of_class[:query_n].tolist()
index_for_db = index_of_class[query_n:].tolist()
index_for_train = index_for_db
elif ep == 3: # Bi-Half Cifar10(II)
query_n = 1000
train_n = 500
index_for_query = index_of_class[:query_n].tolist()
index_for_db = index_of_class[query_n:].tolist()
index_for_train = index_for_db[:train_n]
else:
raise NotImplementedError('')
train_data_index.extend(index_for_train)
query_data_index.extend(index_for_query)
db_data_index.extend(index_for_db)
train_data_index = np.array(train_data_index)
query_data_index = np.array(query_data_index)
db_data_index = np.array(db_data_index)
torch.save(train_data_index, f'data/cifar{nclass}/0_0_{ep}_train.txt')
torch.save(query_data_index, f'data/cifar{nclass}/0_0_{ep}_test.txt')
torch.save(db_data_index, f'data/cifar{nclass}/0_0_{ep}_database.txt')
data_index = {
'train.txt': train_data_index,
'test.txt': query_data_index,
'database.txt': db_data_index
}[fn]
traind.data = combine_data[data_index]
traind.targets = combine_targets[data_index]
return traind
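# Summary (added) of the CIFAR evaluation protocols implemented above:
#   ep == 1: per class, 100 query images; the remainder is the database and the
#            first 500 database images per class are used for training.
#   ep == 2: returns the original torchvision train/test splits directly
#            (early return above), i.e. query = test set, train = database.
#   ep == 3: per class, 1000 query images; the remainder is the database and the
#            first 500 database images per class are used for training
#            (the Bi-Half Cifar10(II) setting).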
def imagenet100(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
suffix = kwargs.get('dataset_name_suffix', '')
d = HashingDataset(f'data/imagenet{suffix}', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def cars(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
d = HashingDataset('data/cars', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def landmark(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
return_id = kwargs.get('return_id', False)
d = LandmarkDataset('data/landmark', transform=transform, filename=filename, return_id=return_id)
return d
def nuswide(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
separate_multiclass = kwargs.get('separate_multiclass', False)
suffix = kwargs.get('dataset_name_suffix', '')
d = HashingDataset(f'data/nuswide_v2_256{suffix}',
transform=transform,
filename=filename,
separate_multiclass=separate_multiclass,
ratio=kwargs.get('ratio', 1))
return d
def nuswide_single(**kwargs):
return nuswide(separate_multiclass=True, **kwargs)
def coco(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
suffix = kwargs.get('dataset_name_suffix', '')
d = HashingDataset(f'data/coco{suffix}', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def roxford5k(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
d = ROxfordParisDataset(dataset='roxford5k', filename=filename, transform=transform)
return d
def rparis6k(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
d = ROxfordParisDataset(dataset='rparis6k', filename=filename, transform=transform)
return d
def gldv2delgembed(**kwargs):
filename = kwargs['filename']
d = EmbeddingDataset('data/gldv2delgembed', filename=filename)
return d
def roxford5kdelgembed(**kwargs):
filename = kwargs['filename']
d = EmbeddingDataset('data/roxford5kdelgembed', filename=filename)
return d
def rparis6kdelgembed(**kwargs):
filename = kwargs['filename']
d = EmbeddingDataset('data/rparis6kdelgembed', filename=filename)
return d
def descriptor(**kwargs):
filename = kwargs['filename']
data_folder = kwargs['data_folder']
d = DescriptorDataset(data_folder, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def mirflickr(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
suffix = kwargs.get('dataset_name_suffix', '')
d = HashingDataset(f'data/mirflickr{suffix}', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def sop_instance(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
d = SingleIDDataset('data/sop_instance', transform=transform, filename=filename)
return d
def sop(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
suffix = kwargs.get('dataset_name_suffix', '')
d = HashingDataset(f'data/sop{suffix}', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def food101(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
d = HashingDataset('data/food-101', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
``` |
{
"source": "jiahuei/COMIC-Compact-Image-Captioning-with-Attention",
"score": 2
} |
#### File: COMIC-Compact-Image-Captioning-with-Attention/src/train.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys, argparse
CURR_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CURR_DIR, '..'))
sys.path.append(os.path.join(CURR_DIR, '..', 'common'))
import train_fn as train
import common.net_params as net_params
import common.utils as utils
pjoin = os.path.join
def create_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--name', type=str, default='lstm',
help='The logging name.')
parser.add_argument(
'--dataset_dir', type=str, default='',
help='The dataset directory.')
parser.add_argument(
'--dataset_file_pattern', type=str,
default='mscoco_{}_w5_s20_include_restval',
help='The dataset text files naming pattern.')
parser.add_argument(
'--train_mode', type=str, default='decoder',
choices=['decoder', 'cnn_finetune', 'scst'],
help='Str. The training regime.')
parser.add_argument(
'--legacy', type=bool, default=False,
help='If True, will match settings as described in paper.')
parser.add_argument(
'--token_type', type=str, default='radix',
choices=['radix', 'word', 'char'],
help='The language model.')
parser.add_argument(
'--radix_base', type=int, default=256,
help='The base for Radix models.')
parser.add_argument(
'--cnn_name', type=str, default='inception_v1',
help='The CNN model name.')
parser.add_argument(
'--cnn_input_size', type=str, default='224,224',
help='The network input size.')
parser.add_argument(
'--cnn_input_augment', type=bool, default=True,
help='Whether to augment input images.')
parser.add_argument(
'--cnn_fm_attention', type=str, default='Mixed_4f',
help='String, name of feature map for attention.')
parser.add_argument(
'--cnn_fm_projection', type=str, default='tied',
choices=['none', 'independent', 'tied'],
help='String, feature map projection, from `none`, `independent`, `tied`.')
parser.add_argument(
'--rnn_name', type=str, default='LSTM',
choices=['LSTM', 'LN_LSTM', 'GRU'],
help='The type of RNN, from `LSTM`, `LN_LSTM` and `GRU`.')
parser.add_argument(
'--rnn_size', type=int, default=512,
help='Int, number of RNN units.')
parser.add_argument(
'--rnn_word_size', type=int, default=256,
help='The word size.')
parser.add_argument(
'--rnn_init_method', type=str, default='first_input',
choices=['project_hidden', 'first_input'],
help='The RNN init method.')
parser.add_argument(
'--rnn_recurr_dropout', type=bool, default=False,
help='Whether to enable variational recurrent dropout.')
parser.add_argument(
'--attn_num_heads', type=int, default=8,
help='The number of attention heads.')
parser.add_argument(
'--attn_context_layer', type=bool, default=False,
help='If True, add linear projection after multi-head attention.')
parser.add_argument(
'--attn_alignment_method', type=str, default='add_LN',
choices=['add_LN', 'add', 'dot'],
help='Str, The alignment method / composition method.')
parser.add_argument(
'--attn_probability_fn', type=str, default='softmax',
choices=['softmax', 'sigmoid'],
help='Str, The attention map probability function.')
parser.add_argument(
'--attn_keep_prob', type=float, default=0.9,
help='Float, The keep rate for attention map dropout.')
parser.add_argument(
'--initialiser', type=str, default='xavier',
choices=['xavier', 'he', 'none'],
help='The initialiser: `xavier`, `he`, tensorflow default.')
parser.add_argument(
'--optimiser', type=str, default='adam',
choices=['adam', 'sgd'],
help='The optimiser: `adam`, `sgd`.')
parser.add_argument(
'--batch_size_train', type=int, default=32,
help='The batch size for training.')
# Divisors of 25010: 1, 2, 5, 10, 41, 61, 82, 122, 205, 305, 410, 610, 2501, 5002, 12505, 25010
parser.add_argument(
'--batch_size_eval', type=int, default=61,
help='The batch size for validation.')
parser.add_argument(
'--max_epoch', type=int, default=30,
help='The max epoch training.')
parser.add_argument(
'--lr_start', type=float, default=1e-2,
help='Float, determines the starting learning rate.')
parser.add_argument(
'--lr_end', type=float, default=1e-5,
help='Float, determines the ending learning rate.')
parser.add_argument(
'--cnn_grad_multiplier', type=float, default=1.0,
help='Float, determines the gradient multiplier when back-prop thru CNN.')
parser.add_argument(
'--adam_epsilon', type=float, default=1e-2,
help='Float, determines the epsilon value of ADAM.')
parser.add_argument(
'--scst_beam_size', type=int, default=7,
help='The beam size for SCST sampling.')
parser.add_argument(
'--scst_weight_ciderD', type=float, default=1.0,
help='The weight for CIDEr-D metric during SCST training.')
parser.add_argument(
'--scst_weight_bleu', type=str, default='0,0,0,2',
help='The weight for BLEU metrics during SCST training.')
parser.add_argument(
'--freeze_scopes', type=str, default='Model/encoder/cnn',
help='The scopes to freeze / do not train.')
parser.add_argument(
'--checkpoint_path', type=str, default=None,
help='The checkpoint path.')
parser.add_argument(
'--checkpoint_exclude_scopes', type=str, default='',
help='The scopes to exclude when restoring from checkpoint.')
parser.add_argument(
'--gpu', type=str, default='0',
help='The gpu number.')
parser.add_argument(
'--run', type=int, default=1,
help='The run number.')
return parser
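# Example (assumed invocation): train the decoder with mostly default settings.
#   python src/train.py --train_mode decoder --token_type radix --radix_base 256 \
#       --attn_num_heads 8 --cnn_fm_projection tied --gpu 0 --run 1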
if __name__ == '__main__':
parser = create_parser()
args = parser.parse_args()
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
for k in ['cnn_input_size']:
args.__dict__[k] = [int(v) for v in args.__dict__[k].split(',')]
if args.legacy:
print('LEGACY mode enabled. Some arguments will be overridden.')
args.cnn_name = 'inception_v1'
args.cnn_input_size = '224,224'
args.cnn_input_augment = True
args.cnn_fm_attention = 'Mixed_4f'
args.rnn_name = 'LSTM'
args.rnn_size = 512
args.rnn_word_size = 256
args.rnn_init_method = 'project_hidden'
args.rnn_recurr_dropout = False
args.attn_context_layer = False
args.attn_alignment_method = 'add_LN'
args.attn_probability_fn = 'softmax'
args.attn_keep_prob = 1.0
args.lr_start = 1e-3
args.lr_end = 2e-4
args.lr_reduce_every_n_epochs = 4
args.cnn_grad_multiplier = 1.0
args.initialiser = 'xavier'
args.optimiser = 'adam'
args.batch_size_train = 32
args.adam_epsilon = 1e-6
if args.run == 1:
rand_seed = 48964896
elif args.run == 2:
rand_seed = 88888888
elif args.run == 3:
rand_seed = 123456789
dataset = args.dataset_file_pattern.split('_')[0]
log_root = pjoin(os.path.dirname(CURR_DIR), 'experiments', dataset)
if args.dataset_dir == '':
args.dataset_dir = pjoin(os.path.dirname(CURR_DIR), 'datasets', dataset)
if args.token_type == 'radix':
token = 'radix_b{}'.format(args.radix_base)
else:
token = args.token_type
name = '_'.join([
token,
args.attn_alignment_method,
args.attn_probability_fn,
'h{}'.format(args.attn_num_heads),
args.cnn_fm_projection[:3],
args.name,
])
if args.legacy:
name = 'legacy_' + name
dec_dir = pjoin(log_root, '{}_run_{:02d}'.format(name, args.run))
cnnft_dir = pjoin(log_root, '{}_cnnFT_run_{:02d}'.format(name, args.run))
train_fn = train.train_fn
if args.train_mode == 'decoder':
assert args.freeze_scopes == 'Model/encoder/cnn'
# Maybe download weights
net = net_params.get_net_params(args.cnn_name, ckpt_dir_or_file=args.checkpoint_path)
utils.maybe_get_ckpt_file(net)
args.checkpoint_path = net['ckpt_path']
log_path = dec_dir
elif args.train_mode == 'cnn_finetune':
# CNN fine-tune
if args.legacy: raise NotImplementedError
if not os.path.exists(dec_dir):
raise ValueError('Decoder training log path not found: {}'.format(dec_dir))
args.lr_start = 1e-3
args.max_epoch = 10
args.freeze_scopes = ''
args.checkpoint_path = dec_dir
log_path = cnnft_dir
elif args.train_mode == 'scst':
# SCST fine-tune (after CNN fine-tune)
if args.legacy: raise NotImplementedError
if not os.path.exists(cnnft_dir):
raise ValueError('CNN finetune log path not found: {}'.format(cnnft_dir))
args.scst_weight_bleu = [float(w) for w in args.scst_weight_bleu.split(',')]
args.batch_size_train = 10
args.lr_start = 1e-3
args.max_epoch = 10
args.freeze_scopes = 'Model/encoder/cnn'
args.checkpoint_path = cnnft_dir
scst = 'beam_{}_CrD_{}_B1_{}_B4_{}'.format(
args.scst_beam_size,
args.scst_weight_ciderD,
args.scst_weight_bleu[0], args.scst_weight_bleu[-1])
scst_dir = pjoin(log_root, '{}_cnnFT_SCST_{}_run_{:02d}'.format(
name, scst, args.run))
log_path = scst_dir
train_fn = train.train_fn_scst
args.resume_training = overwrite = os.path.exists(log_path)
###
# NoneType checking and conversion
for k, v in args.__dict__.items():  # dict.items() works on both Python 2 and 3 (iteritems is Python 2 only)
if v == 'none':
args.__dict__[k] = None
kwargs = dict(
rnn_layers = 1,
dropout_rnn_in = 0.35,
dropout_rnn_out = 0.35,
rnn_map_loss_scale = 1.0,
l2_decay = 1e-5,
clip_gradient_norm = 0,
max_saves = 12,
num_logs_per_epoch = 100,
per_process_gpu_memory_fraction = None,
rand_seed = rand_seed,
add_image_summaries = True,
add_vars_summaries = False,
add_grad_summaries = False,
log_path = log_path,
save_path = pjoin(log_path, 'model'),
)
kwargs.update(args.__dict__)
###
train.try_to_train(
train_fn = train_fn,
try_block = True,
overwrite = overwrite,
**kwargs)
``` |
{
"source": "jiahuei/document-scanner-opencv-ws",
"score": 3
} |
#### File: scanner/utils/video.py
```python
import logging
import cv2
import numpy as np
from imutils import video as video_utils
logger = logging.getLogger(__name__)
class WebCam:
def __init__(self, cam_index=0):
# start the video stream thread
logger.info("Starting video stream")
self.vs = video_utils.VideoStream(src=cam_index)
def vid_stream(self):
webcam = self.vs.start()
while webcam.stopped is False:
yield webcam.read()
# cv2.waitKey() returns a 32 bit integer value (might be dependent on the platform).
# The key input is in ASCII which is an 8 bit integer value
# So you only care about these 8 bits and want all other bits to be 0
# https://stackoverflow.com/a/39203128
key = cv2.waitKey(delay=1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
logger.info("Ending video stream")
cv2.destroyAllWindows()
webcam.stop()
def read_video_file(path):
vid = video_utils.FileVideoStream(path=path).start()
all_frames = []
while vid.running():
frame = vid.read()
if frame is None:
continue
all_frames.append(frame)
vid.stop()
return all_frames
def write_video_file(path, frame_list):
assert isinstance(frame_list, (list, tuple)), \
f"Expected `frame_list` of type `list` or `tuple`, saw {type(frame_list)}"
if len(frame_list) == 0:
return
assert all(isinstance(_, np.ndarray) for _ in frame_list), \
f"`frame_list` must contain only `np.ndarray`"
assert len(set(_.shape for _ in frame_list)) == 1, \
f"All array in `frame_list` must have the same shape."
assert frame_list[0].ndim == 3 and frame_list[0].shape[2] == 3, \
f"All array in `frame_list` must have shape (M, N, 3), saw {frame_list[0].shape}"
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
vid = cv2.VideoWriter(path, fourcc, round(30), frame_list[0].shape[:2][::-1])
for frame in frame_list:
vid.write(frame)
vid.release()
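# Example (assumed file names): round-trip a clip through the two helpers above.
#   frames = read_video_file("input.mp4")
#   write_video_file("output.mp4", frames)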
```
#### File: document-scanner-opencv-ws/tests/test_utils.py
```python
import unittest
import numpy as np
from scanner.utils import misc
class TestUtils(unittest.TestCase):
def test_round_to_nearest_odd(self):
""" Round to nearest odd number. Round up for ties. """
inputs = [(_ - 25) / 4 for _ in range(0, 50)]
for x in inputs:
with self.subTest(f"Positive float: {x}"):
y = misc.round_to_nearest_odd(x)
self.assertEqual(y % 2, 1)
self.assertLessEqual(abs(x - y), 1)
# Complex floats
with self.subTest(f"Complex float"):
with self.assertRaises(TypeError):
misc.round_to_nearest_odd(5.2 + 4j)
def test_numpy_tolist(self):
""" Convert NumPy array to Python list, with specified precision. """
with self.subTest(f"Floats"):
y = misc.numpy_tolist(np.float32([[3.21, 4.5], [5.9124, 21]]), 1)
self.assertEqual(y, [[3.2, 4.5], [5.9, 21.0]])
with self.subTest(f"Integers"):
y = misc.numpy_tolist(np.int8([[3.21, 4.5], [5.9124, 21]]), 1)
self.assertEqual(y, [[3, 4], [5, 21]])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jiahuei/moses",
"score": 3
} |
#### File: moses/baselines/hmm.py
```python
import pickle
import numpy as np
from pomegranate import HiddenMarkovModel, DiscreteDistribution
import moses
class HMM:
def __init__(self, n_components=200,
epochs=100,
batches_per_epoch=100,
seed=0, verbose=False,
n_jobs=1):
"""
Creates a Hidden Markov Model
Arguments:
            n_components: number of states in HMM
epochs: number of iterations to train model for
batches_per_epoch: number of batches for minibatch training
seed: seed for initializing the model
verbose: if True, will log training statistics
n_jobs: number of threads for training HMM model
"""
self.n_components = n_components
self.epochs = epochs
self.batches_per_epoch = batches_per_epoch
self.seed = seed
self.verbose = verbose
self.n_jobs = n_jobs
self.fitted = False
def fit(self, data):
"""
Fits a model---learns transition and emission probabilities
Arguments:
data: list of SMILES
"""
list_data = [list(smiles) for smiles in data]
self.model = HiddenMarkovModel.from_samples(
DiscreteDistribution, n_components=self.n_components,
end_state=True, X=list_data,
init='kmeans||', verbose=self.verbose, n_jobs=self.n_jobs,
max_iterations=self.epochs,
batches_per_epoch=self.batches_per_epoch,
random_state=self.seed
)
self.fitted = True
return self
def save(self, path):
"""
Saves a model using pickle
Arguments:
path: path to .pkl file for saving
"""
if not self.fitted:
raise RuntimeError("Can't save empty model."
" Fit the model first")
json = self.model.to_json()
with open(path, "wb") as f:
pickle.dump({
'model': json,
'n_components': self.n_components,
'epochs': self.epochs,
'batches_per_epoch': self.batches_per_epoch,
'verbose': self.verbose,
}, f)
@classmethod
def load(cls, path):
"""
Loads saved model
Arguments:
path: path to saved .pkl file
Returns:
Loaded HMM
"""
with open(path, "rb") as f:
data = pickle.load(f)
hmm = data['model']
del data['model']
model = cls(**data)
model.model = HiddenMarkovModel.from_json(hmm)
model.fitted = True
return model
def generate_one(self):
"""
Generates a SMILES string using a trained HMM
        Returns:
SMILES string
"""
return ''.join(self.model.sample())
def reproduce(seed, samples_path=None, metrics_path=None,
n_jobs=1, device='cpu', verbose=False,
samples=30000):
data = moses.get_dataset('train')[:100000]
if verbose:
print("Training...")
model = HMM(n_jobs=n_jobs, seed=seed, verbose=verbose)
model.fit(data)
    np.random.seed(seed)
    if verbose:
        print(f"Sampling for seed {seed}")
samples = [model.generate_one()
for _ in range(samples)]
if samples_path is not None:
with open(samples_path, 'w') as f:
f.write('SMILES\n')
for sample in samples:
f.write(sample+'\n')
if verbose:
print(f"Computing metrics for seed {seed}")
metrics = moses.get_all_metrics(
samples, n_jobs=n_jobs, device=device)
if metrics_path is not None:
        with open(metrics_path, 'w') as f:
for key, value in metrics.items():
f.write("%s,%f\n" % (key, value))
return samples, metrics
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
"Reproduce HMM experiment for one seed (~24h with n_jobs=32)")
parser.add_argument(
'--n_jobs', type=int, required=False,
default=1, help='Number of threads for computing metrics')
parser.add_argument(
'--device', type=str, required=False,
default='cpu', help='Device for computing metrics')
parser.add_argument(
'--samples', type=int, required=False,
default=30000, help='Number of samples for metrics')
parser.add_argument(
'--metrics_path', type=str, required=False,
default='.', help='Path to save metrics')
parser.add_argument(
'--seed', type=int, required=False,
default=1, help='Random seed')
parser.add_argument(
'--model_save', type=str, required=False,
help='File for saving the model')
args = parser.parse_known_args()[0]
reproduce(
        seed=args.seed, metrics_path=args.metrics_path,
n_jobs=args.n_jobs, device=args.device,
verbose=True, samples=args.samples
)
```
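A minimal usage sketch for the `HMM` baseline above, assuming this file is importable as `moses.baselines.hmm` and that the MOSES data files are installed; the reduced hyperparameters and the checkpoint file name are illustrative.
```python
# Fit a small HMM on a slice of the training set, round-trip it through pickle,
# and sample one SMILES string. Hyperparameters are reduced to keep this fast.
import moses
from moses.baselines.hmm import HMM

train = moses.get_dataset('train')[:1000]
model = HMM(n_components=20, epochs=5, batches_per_epoch=10, n_jobs=1)
model.fit(train)
model.save('hmm_small.pkl')

restored = HMM.load('hmm_small.pkl')
print(restored.generate_one())
```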
#### File: moses/baselines/ngram.py
```python
import pickle
import numpy as np
from tqdm.auto import tqdm
import moses
from moses import CharVocab
class NGram:
def __init__(self, max_context_len=10, verbose=False):
self.max_context_len = max_context_len
self._dict = dict()
self.vocab = None
self.default_probs = None
self.zero_probs = None
self.verbose = verbose
def fit(self, data):
self.vocab = CharVocab.from_data(data)
self.default_probs = np.hstack([np.ones(len(self.vocab)-4),
np.array([0., 1., 0., 0.])])
self.zero_probs = np.zeros(len(self.vocab))
if self.verbose:
print('fitting...')
data = tqdm(data, total=len(data))
for line in data:
t_line = tuple(self.vocab.string2ids(line, True, True))
for i in range(len(t_line)):
for shift in range(self.max_context_len):
if i + shift + 1 >= len(t_line):
break
context = t_line[i:i+shift+1]
cid = t_line[i+shift+1]
probs = self._dict.get(context, self.zero_probs.copy())
probs[cid] += 1.
self._dict[context] = probs
def fit_update(self, data):
if self.verbose:
print('fitting...')
data = tqdm(data, total=len(data))
for line in data:
t_line = tuple(self.vocab.string2ids(line, True, True))
for i in range(len(t_line)):
for shift in range(self.max_context_len):
if i + shift + 1 >= len(t_line):
break
context = t_line[i:i+shift+1]
cid = t_line[i+shift+1]
probs = self._dict.get(context, self.zero_probs.copy())
probs[cid] += 1.
self._dict[context] = probs
def generate_one(self, l_smooth=0.01, context_len=None, max_len=100):
if self.vocab is None:
raise RuntimeError('Error: Fit the model before generating')
if context_len is None:
context_len = self.max_context_len
elif context_len <= 0 or context_len > self.max_context_len:
context_len = self.max_context_len
res = [self.vocab.bos]
while res[-1] != self.vocab.eos and len(res) < max_len:
begin_index = max(len(res)-context_len, 0)
context = tuple(res[begin_index:])
while context not in self._dict:
context = context[1:]
probs = self._dict[context]
smoothed = probs + self.default_probs*l_smooth
normed = smoothed / smoothed.sum()
next_symbol = np.random.choice(len(self.vocab), p=normed)
res.append(next_symbol)
return self.vocab.ids2string(res)
def nll(self, smiles, l_smooth=0.01, context_len=None):
if self.vocab is None:
raise RuntimeError('Error: model is not trained')
if context_len is None:
context_len = self.max_context_len
elif context_len <= 0 or context_len > self.max_context_len:
context_len = self.max_context_len
tokens = tuple(self.vocab.string2ids(smiles, True, True))
likelihood = 0.
for i in range(1, len(tokens)):
begin_index = max(i-context_len, 0)
context = tokens[begin_index:i]
while context not in self._dict:
context = context[1:]
probs = self._dict[context] + self.default_probs
normed = probs / probs.sum()
prob = normed[tokens[i]]
if prob == 0.:
return np.inf
likelihood -= np.log(prob)
return likelihood
def generate(self, n, l_smooth=0.01, context_len=None, max_len=100):
generator = (self.generate_one(l_smooth,
context_len,
max_len) for i in range(n))
if self.verbose:
print('generating...')
generator = tqdm(generator, total=n)
return list(generator)
def save(self, path):
"""
Saves a model using pickle
Arguments:
path: path to .pkl file for saving
"""
if self.vocab is None:
raise RuntimeError("Can't save empty model."
" Fit the model first")
data = {
'_dict': self._dict,
'vocab': self.vocab,
'default_probs': self.default_probs,
'zero_probs': self.zero_probs,
'max_context_len': self.max_context_len
}
with open(path, 'wb') as f:
pickle.dump(data, f)
@classmethod
def load(cls, path):
"""
Loads saved model
Arguments:
path: path to saved .pkl file
Returns:
Loaded NGramGenerator
"""
with open(path, "rb") as f:
data = pickle.load(f)
model = cls()
model._dict = data['_dict']
model.vocab = data['vocab']
model.default_probs = data['default_probs']
model.zero_probs = data['zero_probs']
model.max_context_len = data['max_context_len']
return model
def reproduce(seed, samples_path=None, metrics_path=None,
n_jobs=1, device='cpu', verbose=False,
samples=30000):
data = moses.get_dataset('train')
model = NGram(10, verbose=verbose)
model.fit(data)
np.random.seed(seed)
smiles = model.generate(samples, l_smooth=0.01)
metrics = moses.get_all_metrics(smiles, n_jobs=n_jobs, device=device)
if samples_path is not None:
with open(samples_path, 'w') as out:
out.write('SMILES\n')
for s in smiles:
out.write(s+'\n')
if metrics_path is not None:
with open(metrics_path, 'w') as out:
for key, value in metrics.items():
out.write("%s,%f\n" % (key, value))
return smiles, metrics
```
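A minimal usage sketch for the `NGram` baseline above, assuming this file is importable as `moses.baselines.ngram` and that the MOSES data files are installed.
```python
# Fit a character n-gram model on a slice of the training set, sample a few
# SMILES strings, and score each sample with its negative log-likelihood.
import moses
from moses.baselines.ngram import NGram

train = moses.get_dataset('train')[:1000]
model = NGram(max_context_len=5, verbose=True)
model.fit(train)

for smiles in model.generate(5, l_smooth=0.01):
    print(smiles, model.nll(smiles))
```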
#### File: moses/dataset/dataset.py
```python
import os
import numpy as np
import pandas as pd
AVAILABLE_SPLITS = ['train', 'test', 'test_scaffolds']
def get_dataset(split='train'):
"""
Loads MOSES dataset
Arguments:
split (str or list): split to load. If str, must be
one of: 'train', 'test', 'test_scaffolds'. If
list, will load all splits from the list.
None by default---loads all splits
Returns:
dict with splits. Keys---split names, values---lists
of SMILES strings.
"""
if split not in AVAILABLE_SPLITS:
raise ValueError(
f"Unknown split {split}. "
f"Available splits: {AVAILABLE_SPLITS}"
)
base_path = os.path.dirname(__file__)
if split not in AVAILABLE_SPLITS:
raise ValueError(
f"Unknown split {split}. "
f"Available splits: {AVAILABLE_SPLITS}")
path = os.path.join(base_path, 'data', split+'.csv.gz')
smiles = pd.read_csv(path, compression='gzip')['SMILES'].values
return smiles
def get_statistics(split='test'):
base_path = os.path.dirname(__file__)
path = os.path.join(base_path, 'data', split+'_stats.npz')
return np.load(path, allow_pickle=True)['stats'].item()
```
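A short usage sketch for the loaders above; `moses.get_dataset` is used this way elsewhere in the repository, while the direct module import for `get_statistics` is an assumption based on the file layout.
```python
# Load the training split and the precomputed test statistics.
import moses
from moses.dataset.dataset import get_statistics

train_smiles = moses.get_dataset('train')   # array of SMILES strings
print(len(train_smiles), train_smiles[0])

test_stats = get_statistics('test')         # precomputed reference statistics
print(type(test_stats))
```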
#### File: moses/scripts/run.py
```python
import os
import argparse
import sys
import importlib.util
import pandas as pd
from moses.models_storage import ModelsStorage
def load_module(name, path):
dirname = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(dirname, path)
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
MODELS = ModelsStorage()
split_dataset = load_module('split_dataset', 'split_dataset.py')
eval_script = load_module('eval', 'eval.py')
trainer_script = load_module('train', 'train.py')
sampler_script = load_module('sample', 'sample.py')
def get_model_path(config, model):
return os.path.join(
config.checkpoint_dir, model + config.experiment_suff + '_model.pt'
)
def get_log_path(config, model):
return os.path.join(
config.checkpoint_dir, model + config.experiment_suff + '_log.txt'
)
def get_config_path(config, model):
return os.path.join(
config.checkpoint_dir, model + config.experiment_suff + '_config.pt'
)
def get_vocab_path(config, model):
return os.path.join(
config.checkpoint_dir, model + config.experiment_suff + '_vocab.pt'
)
def get_generation_path(config, model):
return os.path.join(
config.checkpoint_dir,
model + config.experiment_suff + '_generated.csv'
)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='all',
choices=['all'] + MODELS.get_model_names(),
help='Which model to run')
parser.add_argument('--test_path',
type=str, required=False,
help='Path to test molecules csv')
parser.add_argument('--test_scaffolds_path',
type=str, required=False,
help='Path to scaffold test molecules csv')
parser.add_argument('--train_path',
type=str, required=False,
help='Path to train molecules csv')
parser.add_argument('--ptest_path',
type=str, required=False,
help='Path to precalculated test npz')
parser.add_argument('--ptest_scaffolds_path',
type=str, required=False,
help='Path to precalculated scaffold test npz')
parser.add_argument('--checkpoint_dir', type=str, default='./checkpoints',
help='Directory for checkpoints')
parser.add_argument('--n_samples', type=int, default=30000,
help='Number of samples to sample')
parser.add_argument('--n_jobs', type=int, default=1,
help='Number of threads')
parser.add_argument('--device', type=str, default='cpu',
help='GPU device index in form `cuda:N` (or `cpu`)')
parser.add_argument('--metrics', type=str, default='metrics.csv',
help='Path to output file with metrics')
parser.add_argument('--train_size', type=int, default=None,
help='Size of training dataset')
parser.add_argument('--test_size', type=int, default=None,
help='Size of testing dataset')
parser.add_argument('--experiment_suff', type=str, default='',
help='Experiment suffix to break ambiguity')
return parser
def train_model(config, model, train_path, test_path):
print('Training...')
model_path = get_model_path(config, model)
config_path = get_config_path(config, model)
vocab_path = get_vocab_path(config, model)
log_path = get_log_path(config, model)
if os.path.exists(model_path) and \
os.path.exists(config_path) and \
os.path.exists(vocab_path):
return
trainer_parser = trainer_script.get_parser()
args = [
'--device', config.device,
'--model_save', model_path,
'--config_save', config_path,
'--vocab_save', vocab_path,
'--log_file', log_path,
'--n_jobs', str(config.n_jobs)
]
if train_path is not None:
args.extend(['--train_load', train_path])
if test_path is not None:
args.extend(['--val_load', test_path])
trainer_config = trainer_parser.parse_known_args(
[model] + sys.argv[1:] + args
)[0]
trainer_script.main(model, trainer_config)
def sample_from_model(config, model):
print('Sampling...')
model_path = get_model_path(config, model)
config_path = get_config_path(config, model)
vocab_path = get_vocab_path(config, model)
assert os.path.exists(model_path), (
"Can't find model path for sampling: '{}'".format(model_path)
)
assert os.path.exists(config_path), (
"Can't find config path for sampling: '{}'".format(config_path)
)
assert os.path.exists(vocab_path), (
"Can't find vocab path for sampling: '{}'".format(vocab_path)
)
sampler_parser = sampler_script.get_parser()
sampler_config = sampler_parser.parse_known_args(
[model] + sys.argv[1:] +
['--device', config.device,
'--model_load', model_path,
'--config_load', config_path,
'--vocab_load', vocab_path,
'--gen_save', get_generation_path(config, model),
'--n_samples', str(config.n_samples)]
)[0]
sampler_script.main(model, sampler_config)
def eval_metrics(config, model, test_path, test_scaffolds_path,
ptest_path, ptest_scaffolds_path, train_path):
print('Computing metrics...')
eval_parser = eval_script.get_parser()
args = [
'--gen_path', get_generation_path(config, model),
'--n_jobs', str(config.n_jobs),
'--device', config.device,
]
if test_path is not None:
args.extend(['--test_path', test_path])
if test_scaffolds_path is not None:
args.extend(['--test_scaffolds_path', test_scaffolds_path])
if ptest_path is not None:
args.extend(['--ptest_path', ptest_path])
if ptest_scaffolds_path is not None:
args.extend(['--ptest_scaffolds_path', ptest_scaffolds_path])
if train_path is not None:
args.extend(['--train_path', train_path])
eval_config = eval_parser.parse_args(args)
metrics = eval_script.main(eval_config, print_metrics=False)
return metrics
def main(config):
if not os.path.exists(config.checkpoint_dir):
os.mkdir(config.checkpoint_dir)
train_path = config.train_path
test_path = config.test_path
test_scaffolds_path = config.test_scaffolds_path
ptest_path = config.ptest_path
ptest_scaffolds_path = config.ptest_scaffolds_path
models = (MODELS.get_model_names()
if config.model == 'all'
else [config.model])
for model in models:
train_model(config, model, train_path, test_path)
sample_from_model(config, model)
for model in models:
model_metrics = eval_metrics(config, model,
test_path, test_scaffolds_path,
ptest_path, ptest_scaffolds_path,
train_path)
table = pd.DataFrame([model_metrics]).T
if len(models) == 1:
metrics_path = ''.join(
os.path.splitext(config.metrics)[:-1])+f'_{model}.csv'
else:
metrics_path = config.metrics
table.to_csv(metrics_path, header=False)
if __name__ == '__main__':
parser = get_parser()
config = parser.parse_known_args()[0]
main(config)
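    # Example invocation (model name and paths are placeholders; valid model
    # names come from ModelsStorage at runtime):
    #   python run.py --model <model_name> --device cuda:0 --n_jobs 4 \
    #       --checkpoint_dir ./checkpoints --metrics metrics.csv
    # With --model all (the default), every registered model is trained,
    # sampled from, and evaluated in turn.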
``` |
{
"source": "jiahuei/test-caption-actions",
"score": 2
} |
#### File: caption_vae/models/att_model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from argparse import ArgumentParser, _ArgumentGroup
from typing import Union, Dict
from functools import reduce
from models import register_model
from models.caption_model import CaptionModel
from data.collate import AttCollate
from utils.model_utils import repeat_tensors, pack_wrapper
from tokenizer import Tokenizer
bad_endings = [
'a', 'an', 'the', 'in', 'for', 'at', 'of', 'with', 'before', 'after', 'on', 'upon', 'near', 'to', 'is',
    'are', 'am'
]
# noinspection PyAbstractClass,PyAttributeOutsideInit,PyMethodMayBeStatic
class AttModel(CaptionModel):
def __init__(self, config, tokenizer: Tokenizer = None):
super(AttModel, self).__init__()
self.config = config
self.input_encoding_size = config.input_encoding_size
self.rnn_size = config.rnn_size
# self.num_layers = config.num_layers
self.drop_prob_lm = config.drop_prob_lm
self.seq_length = config.max_seq_length
self.fc_feat_size = config.fc_feat_size
self.att_feat_size = config.att_feat_size
self.att_hid_size = config.att_hid_size
self.vocab_size = config.vocab_size
self.eos_idx = config.eos_token_id
self.bos_idx = config.bos_token_id
self.unk_idx = config.unk_token_id
self.pad_idx = config.pad_token_id
self.use_bn = config.get('use_bn', 0)
        self.ss_prob = 0.0  # Scheduled sampling probability
# For remove bad ending
if tokenizer is None:
self.bad_endings_ix = []
else:
self.bad_endings_ix = [tokenizer.token_to_id(w) for w in bad_endings]
self.make_model()
def make_model(self):
self.embed = nn.Sequential(
nn.Embedding(self.vocab_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)
)
self.fc_embed = nn.Sequential(
nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)
)
self.att_embed = nn.Sequential(
*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ()) +
(
nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)
) +
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn == 2 else ())
)
)
self.logit_layers = self.config.get('logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size)
else:
self.logit = [
[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(self.drop_prob_lm)]
for _ in range(self.config.logit_layers - 1)
]
self.logit = nn.Sequential(
*(reduce(lambda x, y: x + y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size)])
)
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (
weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size)
)
def clip_att(self, att_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
return att_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# Project the attention feats first to reduce memory and computation consumptions.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, seqs, att_masks=None, **kwargs):
batch_size = fc_feats.size(0)
if seqs.ndim == 3: # B * seq_per_img * seq_len
seqs = seqs.reshape(-1, seqs.shape[2])
seq_per_img = seqs.shape[0] // batch_size
state = self.init_hidden(batch_size * seq_per_img)
outputs = fc_feats.new_zeros(batch_size * seq_per_img, seqs.size(1), self.vocab_size)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = repeat_tensors(
seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seqs.size(1)):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
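                # Scheduled sampling: for a random subset of examples (probability
                # ss_prob), feed a token sampled from the model's previous output
                # distribution instead of the ground-truth token.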
sample_prob = fc_feats.new(batch_size * seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seqs[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seqs[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i - 1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seqs[:, i].clone()
# break if all the sequences end
if i >= 1 and seqs[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
def _sample(self, fc_feats, att_feats, att_masks=None, opt=None, **kwargs):
if opt is None:
opt = {}
num_random_sample = opt.get("num_random_sample", 0)
beam_size = opt.get("beam_size", 1)
temperature = opt.get("temperature", 1.0)
decoding_constraint = opt.get("decoding_constraint", 0)
batch_size = att_feats.shape[0]
fc_feats, att_feats, p_att_feats, att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
state = self.init_hidden(batch_size)
if num_random_sample <= 0 and beam_size > 1:
assert beam_size <= self.vocab_size
it = att_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
seq_logprobs = att_feats.new_zeros(batch_size, beam_size, self.seq_length)
seq = att_feats.new_full((batch_size, beam_size, self.seq_length), self.pad_idx, dtype=torch.long)
# first step, feed bos
logprobs, state = self.get_logprobs_state(it, fc_feats, att_feats, p_att_feats, att_masks, state)
fc_feats, att_feats, p_att_feats, att_masks = repeat_tensors(
beam_size,
[fc_feats, att_feats, p_att_feats, att_masks]
)
self.done_beams = self.batch_beam_search(
state, logprobs, fc_feats, att_feats, p_att_feats, att_masks, opt=opt
)
for k in range(batch_size):
for b in range(beam_size):
res = self.done_beams[k][b]
seq_len = res["seq"].shape[0]
seq[k, b, :seq_len] = res["seq"]
seq_logprobs[k, b, :seq_len] = res["logps"].gather(1, res["seq"].unsqueeze(1)).squeeze(1)
# top_seq = self.done_beams[k][0]["seq"]
# seq_len = top_seq.shape[0]
# seq[k, :seq_len] = top_seq # the first beam has highest cumulative score
# seq_logprobs[k, :seq_len] = self.done_beams[k][0]["logps"].gather(1, top_seq.unsqueeze(1)).squeeze(1)
# return the samples and their log likelihoods
return seq, seq_logprobs
# Greedy search or random sample
if num_random_sample > 0:
assert beam_size < 1, f"Beam size must be < 1, saw {beam_size}"
batch_size *= num_random_sample
fc_feats, att_feats, p_att_feats, att_masks = repeat_tensors(
n=num_random_sample, x=(fc_feats, att_feats, p_att_feats, att_masks)
)
# (self.num_layers, bsz, self.rnn_size)
state = repeat_tensors(n=num_random_sample, x=state, dim=1)
# state = tuple(_.repeat_interleave(num_random_sample, dim=1) for _ in state)
else:
assert beam_size == 1, f"Beam size must be 1, saw {beam_size}"
it = att_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
seq_logprobs = att_feats.new_zeros(batch_size, self.seq_length)
seq = att_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long)
unfinished = it != self.eos_idx
for t in range(self.seq_length + 1):
logprobs, state = self.get_logprobs_state(it, fc_feats, att_feats, p_att_feats, att_masks, state)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(batch_size, self.vocab_size)
tmp.scatter_(1, seq[:, t - 1].data.unsqueeze(1), float("-inf"))
logprobs = logprobs + tmp
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
if num_random_sample > 0:
if temperature == 1.0:
prob_prev = torch.exp(logprobs.data) # fetch prev distribution: shape Nx(M+1)
else:
# scale logprobs by temperature
prob_prev = torch.exp(torch.div(logprobs.data, temperature))
it = torch.multinomial(prob_prev, 1)
sample_logprobs = logprobs.gather(1, it) # gather the logprobs at sampled positions
it = it.view(-1).long() # and flatten indices for downstream processing
else:
# greedy search
sample_logprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
# stop when all finished
seq[:, t] = it * unfinished.type_as(it)
unfinished = unfinished * (it != self.eos_idx)
seq_logprobs[:, t] = sample_logprobs.view(-1)
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
if num_random_sample > 0:
seq = seq.view(-1, num_random_sample, self.seq_length)
seq_logprobs = seq_logprobs.view(-1, num_random_sample, self.seq_length)
else:
seq = seq.view(-1, 1, self.seq_length)
seq_logprobs = seq_logprobs.view(-1, 1, self.seq_length)
return seq, seq_logprobs
# noinspection PyAbstractClass
class Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.rnn_size = config.rnn_size
self.att_hid_size = config.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
# noinspection PyAbstractClass
class UpDownCore(nn.Module):
def __init__(self, config, use_maxout=False):
super(UpDownCore, self).__init__()
self.config = config
self.drop_prob_lm = config.drop_prob_lm
self.att_lstm = nn.LSTMCell(
config.input_encoding_size + config.rnn_size * 2, config.rnn_size
) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(config.rnn_size * 2, config.rnn_size) # h^1_t, \hat v
self.attention = Attention(config)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
        # lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1)  # unclear alternative: dropout on h_att (kept disabled)
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
# noinspection PyAbstractClass,PyAttributeOutsideInit
@register_model("up_down_lstm")
class UpDownModel(AttModel):
COLLATE_FN = AttCollate
def __init__(self, config, tokenizer: Tokenizer = None):
self.num_layers = 2
super().__init__(config, tokenizer)
self.core = UpDownCore(self.config)
@staticmethod
def add_argparse_args(parser: Union[_ArgumentGroup, ArgumentParser]):
# fmt: off
UpDownModel.COLLATE_FN.add_argparse_args(parser)
# CaptionModel args
parser.add_argument(
"--rnn_size", type=int, default=1000,
help="int: Size of the RNN (number of units)."
)
# parser.add_argument(
# "--num_layers", type=int, default=6,
# help="int: Number of RNN layers in the model"
# )
parser.add_argument(
"--input_encoding_size", type=int, default=1000,
help="int: The encoding size of each token in the vocabulary, and the image."
)
parser.add_argument(
"--att_feat_size", type=int, default=2048,
help="int: 2048 for resnet, 512 for vgg"
)
parser.add_argument(
"--drop_prob_lm", type=float, default=0.5,
help="float: Strength of dropout in the Language Model RNN"
)
# AttModel args
parser.add_argument(
"--att_hid_size", type=int, default=512,
help="int: The hidden size of the attention MLP for show_attend_tell; 0 if not using hidden layer"
)
parser.add_argument(
"--fc_feat_size", type=int, default=2048,
help="int: 2048 for resnet, 4096 for vgg"
)
parser.add_argument(
"--logit_layers", type=int, default=1,
help="int: Number of layers in the RNN"
)
# fmt: on
# return parser
```
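A small shape-check sketch for the additive `Attention` module above; the import path, config namespace, and tensor sizes are illustrative assumptions, not repository defaults.
```python
# Run the attention module on random tensors and check the output shape.
import torch
from types import SimpleNamespace
from models.att_model import Attention   # assumes the repo root is on sys.path

config = SimpleNamespace(rnn_size=1000, att_hid_size=512)
attn = Attention(config)

batch, n_regions = 2, 36
h = torch.randn(batch, config.rnn_size)                            # decoder hidden state
att_feats = torch.randn(batch, n_regions, config.rnn_size)         # embedded region features
p_att_feats = torch.randn(batch, n_regions, config.att_hid_size)   # pre-projected features
att_masks = torch.ones(batch, n_regions)

context = attn(h, att_feats, p_att_feats, att_masks)
print(context.shape)   # torch.Size([2, 1000])
```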
#### File: caption_vae/models/relation_transformer_computer.py
```python
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from argparse import ArgumentParser, _ArgumentGroup
from typing import Optional, Union, Dict
from copy import deepcopy
from models import register_model
from models.transformer import (
CachedMultiHeadedAttention, MultiHeadedAttention, PositionwiseFeedForward, PositionalEncoding,
InputEmbedding as Embeddings, OutputEmbedding as Generator,
LayerNorm, SublayerConnection,
Decoder, DecoderLayer, CachedTransformerBase
)
from data.collate import ObjectRelationCollate
from utils.model_utils import repeat_tensors, pack_wrapper, clones
from utils.misc import str_to_bool
logger = logging.getLogger(__name__)
# noinspection PyAbstractClass
class EncoderComputerDecoder(nn.Module):
"""
    An Encoder-Computer-Decoder architecture: the encoder output is compressed
    by the computer into a fixed set of learned vectors that the decoder attends to.
"""
def __init__(self, encoder, computer, decoder, src_embed, tgt_embed, generator):
super().__init__()
self.encoder = encoder
self.computer = computer
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def forward(self, src, boxes, tgt, src_mask, tgt_mask):
"""Take in and process masked src and target sequences."""
enc_out = self.encode(src, boxes, src_mask)
assert enc_out.size(0) == src_mask.size(0)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f"{self.__class__.__name__}: "
f"Encoder output shape = `{enc_out.shape}` "
f"Target shape = `{tgt.shape}`"
)
com_out = self.compute(enc_out, src_mask)
if com_out.size(0) != tgt.size(0):
assert tgt.size(0) % com_out.size(0) == 0
seq_per_img = int(tgt.size(0) / com_out.size(0))
com_out = repeat_tensors(seq_per_img, com_out)
return self.decode(com_out, None, tgt, tgt_mask), com_out
def encode(self, src, boxes, src_mask):
return self.encoder(self.src_embed(src), boxes, src_mask)
def compute(self, memory, src_mask):
return self.computer(memory, src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask):
return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
# noinspection PyAbstractClass
class Computer(nn.Module):
"""Generic N layer decoder with masking."""
def __init__(self, layer, N):
super().__init__()
self.inputs = nn.Parameter(torch.Tensor(1, 5, layer.size))
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, memory, src_mask):
x = self.inputs
x = repeat_tensors(memory.size(0), x)
for layer in self.layers:
x = layer(x, memory, src_mask)
return self.norm(x)
# noinspection PyAbstractClass
class ComputerLayer(nn.Module):
"""Decoder is made of self-attn, src-attn, and feed forward (defined below)"""
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super().__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 3)
def forward(self, x, memory, src_mask):
"""Follow Figure 1 (right) for connections."""
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](x, self.feed_forward)
# noinspection PyAbstractClass
class Encoder(nn.Module):
"""Core encoder is a stack of N layers"""
def __init__(self, layer, N):
super().__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, box, mask):
"""Pass the input (and mask) through each layer in turn."""
for layer in self.layers:
x = layer(x, box, mask)
return self.norm(x)
# noinspection PyAbstractClass
class EncoderLayer(nn.Module):
"""Encoder is made up of self-attn and feed forward (defined below)"""
def __init__(self, size, self_attn, feed_forward, dropout):
super().__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, box, mask):
"""Follow Figure 1 (left) for connections."""
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, box, mask))
return self.sublayer[1](x, self.feed_forward)
# noinspection PyAbstractClass
class BoxMultiHeadedAttention(nn.Module):
"""
Self-attention layer with relative position weights.
Following the paper "Relation Networks for Object Detection" in https://arxiv.org/pdf/1711.11575.pdf
"""
def __init__(self, h, d_model, trigonometric_embedding=True, dropout=0.1):
"""Take in model size and number of heads."""
super().__init__()
assert d_model % h == 0
self.trigonometric_embedding = trigonometric_embedding
# We assume d_v always equals d_k
self.h = h
self.d_k = d_model // h
if self.trigonometric_embedding:
self.dim_g = 64
else:
self.dim_g = 4
geo_feature_dim = self.dim_g
# matrices W_q, W_k, W_v, and one last projection layer
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.WGs = clones(nn.Linear(geo_feature_dim, 1, bias=True), 8)
# self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, input_query, input_key, input_value, input_box, mask=None):
"""Implements Figure 2 of Relation Network for Object Detection"""
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = input_query.size(0)
# tensor with entries R_mn given by a hardcoded embedding of the relative position between bbox_m and bbox_n
relative_geometry_embeddings = self.BoxRelationalEmbedding(
input_box,
trigonometric_embedding=self.trigonometric_embedding
)
flatten_relative_geometry_embeddings = relative_geometry_embeddings.view(-1, self.dim_g)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [
l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (input_query, input_key, input_value))
]
box_size_per_head = list(relative_geometry_embeddings.shape[:3])
box_size_per_head.insert(1, 1)
relative_geometry_weights_per_head = [
ly(flatten_relative_geometry_embeddings).view(box_size_per_head) for ly in self.WGs
]
relative_geometry_weights = torch.cat(relative_geometry_weights_per_head, 1)
relative_geometry_weights = F.relu(relative_geometry_weights)
# 2) Apply attention on all the projected vectors in batch.
x, box_attn = self.box_attention(
query, key, value, relative_geometry_weights, mask=mask, dropout=self.dropout
)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
# # Legacy
# x = input_value + x
return self.linears[-1](x)
@staticmethod
def BoxRelationalEmbedding(f_g, dim_g=64, wave_len=1000, trigonometric_embedding=True):
"""
Given a tensor with bbox coordinates for detected objects on each batch image,
this function computes a matrix for each image
with entry (i,j) given by a vector representation of the
displacement between the coordinates of bbox_i, and bbox_j
        input: torch.Tensor of shape (batch_size, max_nr_bounding_boxes, 4)
        output: torch.Tensor of shape (batch_size, max_nr_bounding_boxes, max_nr_bounding_boxes, dim_g),
            where dim_g is 64 with the trigonometric embedding and 4 without it
"""
# returns a relational embedding for each pair of bboxes, with dimension = dim_g
# follow implementation of https://github.com/heefe92/Relation_Networks-pytorch/blob/master/model.py#L1014-L1055
batch_size = f_g.size(0)
x_min, y_min, x_max, y_max = torch.chunk(f_g, 4, dim=-1)
cx = (x_min + x_max) * 0.5
cy = (y_min + y_max) * 0.5
w = (x_max - x_min) + 1.
h = (y_max - y_min) + 1.
# cx.view(1,-1) transposes the vector cx, and so dim(delta_x) = (dim(cx), dim(cx))
delta_x = cx - cx.view(batch_size, 1, -1)
delta_x = torch.clamp(torch.abs(delta_x / w), min=1e-3)
delta_x = torch.log(delta_x)
delta_y = cy - cy.view(batch_size, 1, -1)
delta_y = torch.clamp(torch.abs(delta_y / h), min=1e-3)
delta_y = torch.log(delta_y)
delta_w = torch.log(w / w.view(batch_size, 1, -1))
delta_h = torch.log(h / h.view(batch_size, 1, -1))
matrix_size = delta_h.size()
delta_x = delta_x.view(batch_size, matrix_size[1], matrix_size[2], 1)
delta_y = delta_y.view(batch_size, matrix_size[1], matrix_size[2], 1)
delta_w = delta_w.view(batch_size, matrix_size[1], matrix_size[2], 1)
delta_h = delta_h.view(batch_size, matrix_size[1], matrix_size[2], 1)
position_mat = torch.cat((delta_x, delta_y, delta_w, delta_h), -1)
if trigonometric_embedding:
feat_range = torch.arange(dim_g / 8, device=f_g.device)
dim_mat = feat_range / (dim_g / 8)
dim_mat = 1. / (torch.pow(wave_len, dim_mat))
dim_mat = dim_mat.view(1, 1, 1, -1)
position_mat = position_mat.view(batch_size, matrix_size[1], matrix_size[2], 4, -1)
position_mat = 100. * position_mat
mul_mat = position_mat * dim_mat
mul_mat = mul_mat.view(batch_size, matrix_size[1], matrix_size[2], -1)
sin_mat = torch.sin(mul_mat)
cos_mat = torch.cos(mul_mat)
embedding = torch.cat((sin_mat, cos_mat), -1)
else:
embedding = position_mat
return embedding
@staticmethod
def box_attention(query, key, value, box_relation_embds_matrix, mask=None, dropout=None):
"""
Compute 'Scaled Dot Product Attention as in paper Relation Networks for Object Detection'.
Follow the implementation in
https://github.com/heefe92/Relation_Networks-pytorch/blob/master/model.py#L1026-L1055
"""
N = value.size()[:2]
dim_k = key.size(-1)
dim_g = box_relation_embds_matrix.size()[-1]
w_q = query
w_k = key.transpose(-2, -1)
w_v = value
# attention weights
scaled_dot = torch.matmul(w_q, w_k)
scaled_dot = scaled_dot / np.sqrt(dim_k)
if mask is not None:
scaled_dot = scaled_dot.masked_fill(mask == 0, -1e9)
# w_g = box_relation_embds_matrix.view(N,N)
w_g = box_relation_embds_matrix
w_a = scaled_dot
# w_a = scaled_dot.view(N,N)
# multiplying log of geometric weights by feature weights
w_mn = torch.log(torch.clamp(w_g, min=1e-6)) + w_a
w_mn = torch.nn.Softmax(dim=-1)(w_mn)
if dropout is not None:
w_mn = dropout(w_mn)
output = torch.matmul(w_mn, w_v)
return output, w_mn
# noinspection PyAbstractClass,PyAttributeOutsideInit
@register_model("ort_computer")
class ORTComputerModel(CachedTransformerBase):
COLLATE_FN = ObjectRelationCollate
def __init__(self, config):
super().__init__(config)
self.box_trigonometric_embedding = True
self.make_model()
def make_model(self, h=8, dropout=0.1):
"""Helper: Construct a model from hyperparameters."""
bbox_attn = BoxMultiHeadedAttention(h, self.d_model, self.box_trigonometric_embedding)
attn = CachedMultiHeadedAttention(h, self.d_model)
self_attn = deepcopy(attn)
self_attn.self_attention = True
ff = PositionwiseFeedForward(self.d_model, self.dim_feedforward, dropout)
position = PositionalEncoding(self.d_model, dropout)
model = EncoderComputerDecoder(
Encoder(EncoderLayer(
self.d_model, deepcopy(bbox_attn), deepcopy(ff), dropout), self.num_layers
),
Computer(ComputerLayer(
self.d_model, MultiHeadedAttention(h, self.d_model), MultiHeadedAttention(h, self.d_model),
deepcopy(ff), dropout), self.num_layers
),
Decoder(DecoderLayer(
self.d_model, self_attn, attn, deepcopy(ff), dropout), self.num_layers
),
lambda x: x, # nn.Sequential(Embeddings(self.d_model, src_vocab), deepcopy(position)),
nn.Sequential(Embeddings(self.d_model, self.vocab_size), deepcopy(position)),
Generator(self.d_model, self.vocab_size)
)
self.att_embed = nn.Sequential(
nn.Linear(self.att_feat_size, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_src)
)
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
self.model = model
def _prepare_feature(
self,
att_feats: Tensor,
att_masks: Optional[Tensor] = None,
boxes: Optional[Tensor] = None,
seq: Optional[Tensor] = None
):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
if att_masks is None:
att_masks = att_feats.new_ones(att_feats.shape[:2], dtype=torch.long)
att_masks = att_masks.unsqueeze(-2)
if seq is not None:
# crop the last one
seq = seq[:, :-1]
seq_mask = seq.data.ne(self.pad_idx) # seq_mask: torch.Tensor
seq_mask = seq_mask.unsqueeze(-2)
seq_mask = seq_mask & self.subsequent_mask(seq.size(-1)).to(seq_mask)
else:
seq_mask = None
return att_feats, boxes, seq, att_masks, seq_mask
# noinspection PyMethodOverriding
def _forward(self, att_feats, boxes, seqs, att_masks=None, **kwargs):
att_feats, boxes, seq, att_masks, seq_mask = self._prepare_feature(att_feats, att_masks, boxes, seqs)
out = self.model(att_feats, boxes, seq, att_masks, seq_mask)
outputs = self.model.generator(out[0])
return outputs #, self.model.generator(out[1])
def get_logprobs_state(self, it, memory, mask, state):
"""
state = [ys.unsqueeze(0)]
"""
ys = it.unsqueeze(1)
        if state is not None:
            # Retrieve the reordered caches from state and update the layers
            self._update_caches(state[1:])
out = self.model.decode(
memory, mask, ys, self.subsequent_mask(ys.size(1)).to(memory.device)
)
logprobs = self.model.generator(out[:, -1])
# Add layer cache into state list, transposed so that beam_step can reorder them
return logprobs, [ys.unsqueeze(0)] + self._retrieve_caches()
# noinspection PyMethodOverriding
def _sample(self, att_feats, boxes, att_masks=None, opt=None, **kwargs):
if opt is None:
opt = {}
att_feats, boxes, seq, att_masks, seq_mask = self._prepare_feature(att_feats, att_masks, boxes)
memory = self.model.encode(att_feats, boxes, att_masks)
memory = self.model.compute(memory, att_masks)
state = None
mask = torch.ones(size=(memory.size(0), 1, memory.size(1)), dtype=memory.dtype, device=memory.device)
return self._generate_captions(att_feats, mask, memory, state, opt)
@staticmethod
def clip_att(att_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
return att_feats, att_masks
@staticmethod
def subsequent_mask(size):
"""Mask out subsequent positions."""
attn_shape = (1, size, size)
mask = torch.triu(torch.ones(attn_shape), diagonal=1).eq(0)
return mask
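        # e.g. subsequent_mask(3) -> shape (1, 3, 3):
        # [[[ True, False, False],
        #   [ True,  True, False],
        #   [ True,  True,  True]]]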
@staticmethod
def add_argparse_args(parser: Union[_ArgumentGroup, ArgumentParser]):
# fmt: off
ORTComputerModel.COLLATE_FN.add_argparse_args(parser)
CachedTransformerBase.add_argparse_args(parser)
# Relation args
parser.add_argument(
"--box_trigonometric_embedding", type=str_to_bool,
default=True
)
# fmt: on
# return parser
```
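A small shape-check sketch for the geometric embedding above: for random boxes in (x_min, y_min, x_max, y_max) format it returns one relation vector per box pair. The import path is an assumption based on the file layout.
```python
# Build random well-formed boxes and inspect the pairwise relation embedding.
import torch
from models.relation_transformer_computer import BoxMultiHeadedAttention

batch, n_boxes = 2, 5
xy_min = torch.rand(batch, n_boxes, 2) * 100
wh = torch.rand(batch, n_boxes, 2) * 50 + 1
boxes = torch.cat([xy_min, xy_min + wh], dim=-1)   # guarantees x_max > x_min, y_max > y_min

emb = BoxMultiHeadedAttention.BoxRelationalEmbedding(boxes)
print(emb.shape)   # torch.Size([2, 5, 5, 64]) with the trigonometric embedding
```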
#### File: caption_vae/models/transformer.py
```python
import logging
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from argparse import ArgumentParser, _ArgumentGroup
from typing import Union, Dict, Callable
from copy import deepcopy
from itertools import chain
from models import register_model
from models.caption_model import CaptionModel
from data.collate import UpDownCollate
from utils.model_utils import repeat_tensors, clones
logger = logging.getLogger(__name__)
# noinspection PyAbstractClass
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture.
Base for this and many other models.
"""
def __init__(
self, encoder: Callable, decoder: Callable,
src_embed: Callable, tgt_embed: Callable, generator: Callable,
autoregressive: bool = True, pad_idx: int = 0
):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
self.autoregressive = autoregressive
self.pad_idx = pad_idx
def forward(self, src: Tensor, src_mask: Tensor, tgt: Tensor):
"""
Args:
src: (N, S, E)
src_mask: (N, S)
tgt: (N, T)
Returns:
"""
memory, memory_mask = self.encode(src, src_mask)
decoder_output = self.decode(tgt, memory, memory_mask)
outputs = self.generator(decoder_output)
return outputs
def encode(self, src: Tensor, src_mask: Tensor):
"""
Args:
src: (N, S, E)
src_mask: (N, S)
Returns:
"""
assert src_mask.ndimension() == 2, (
f"{self.__class__.__name__}: Expected `src_mask` has shape (N, S), saw `{src_mask.shape}`"
)
src_mask = src_mask.unsqueeze(-2)
src = self.src_embed(src)
encoder_output = self.encoder(x=src, mask=src_mask)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f"{self.__class__.__name__}: "
f"src.shape = `{src.shape}` "
f"src_mask.shape = `{src_mask.shape}` "
f"encoder_output.shape = `{encoder_output.shape}` "
)
return encoder_output, src_mask
def decode(self, tgt: Tensor, memory: Tensor, memory_mask: Tensor):
"""
Args:
tgt: (N, T)
memory: (N, S, E)
memory_mask: (N, S)
Returns:
"""
assert tgt.ndimension() == 2, (
f"{self.__class__.__name__}: Expected `tgt` has shape (N, T), saw `{tgt.shape}`"
)
if memory.size(0) != tgt.size(0):
assert tgt.size(0) % memory.size(0) == 0
seq_per_img = int(tgt.size(0) / memory.size(0))
memory, memory_mask = repeat_tensors(seq_per_img, (memory, memory_mask))
tgt_mask = tgt.ne(self.pad_idx).unsqueeze(-2)
if self.autoregressive:
subsequent_mask = memory.new_ones((1, tgt.size(-1), tgt.size(-1)))
subsequent_mask = torch.triu(subsequent_mask, diagonal=1).eq(0)
tgt_mask = tgt_mask & subsequent_mask
tgt_embed = self.tgt_embed(tgt)
decoder_output = self.decoder(
x=tgt_embed, memory=memory, src_mask=memory_mask, tgt_mask=tgt_mask
)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f"{self.__class__.__name__}: "
f"tgt.shape = `{tgt.shape}` "
f"tgt_mask.shape = `{tgt_mask.shape}` "
f"tgt_embed.shape = `{tgt_embed.shape}` "
f"memory.shape = `{memory.shape}` "
f"memory_mask.shape = `{memory_mask.shape}` "
f"decoder_output.shape = `{decoder_output.shape}` "
)
return decoder_output
def generate(self, x):
return self.generator(x)
# noinspection PyAbstractClass
class Encoder(nn.Module):
"""
Core encoder is a stack of N layers
"""
def __init__(self, layer, N):
super().__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"""Pass the input (and mask) through each layer in turn."""
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
# noinspection PyAbstractClass
class EncoderLayer(nn.Module):
"""
Encoder is made up of self-attn and feed forward
"""
def __init__(self, size, self_attn, feed_forward, dropout):
super().__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"""Follow Figure 1 (left) for connections."""
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
# noinspection PyAbstractClass
class Decoder(nn.Module):
"""Generic N layer decoder with masking."""
def __init__(self, layer, N):
super().__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, memory, src_mask, tgt_mask):
for layer in self.layers:
x = layer(x, memory, src_mask, tgt_mask)
return self.norm(x)
# noinspection PyAbstractClass
class DecoderLayer(nn.Module):
"""Decoder is made of self-attn, src-attn, and feed forward (defined below)"""
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super().__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 3)
def forward(self, x, memory, src_mask, tgt_mask):
"""Follow Figure 1 (right) for connections."""
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](x, self.feed_forward)
# noinspection PyAbstractClass
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1, self_attention=False):
"""Take in model size and number of heads."""
super().__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.self_attention = self_attention
self.dropout = nn.Dropout(p=dropout)
self.cache = [None, None]
self.cache_size = 2
def forward(self, query, key, value, mask=None):
"""Implements Figure 2"""
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query = self._project_qkv(self.linears[0], query)
        # Repeat the cached key/value along the batch dim if the key batch has been expanded (e.g. for beam search)
if isinstance(self.cache[0], Tensor) and self.cache[0].size(0) != key.size(0):
cache_batch = self.cache[0].size(0)
assert cache_batch < key.size(0), (
f"cat_output_with_cache: "
f"Expected dim {0} of cached tensor to be smaller than that of key. "
f"Saw self.cache[0] = {self.cache[0].size()}, key = {key.size()}"
)
assert key.size(0) % cache_batch == 0, (
f"cat_output_with_cache: "
f"Expected dim {0} of key tensor to be divisible by that of cached tensor. "
f"Saw self.cache[0] = {self.cache[0].size()}, key = {key.size()}"
)
self.cache = repeat_tensors(key.size(0) // cache_batch, self.cache)
# Only encoder-attention may skip projection and directly reuse from cache
if not self.self_attention and isinstance(self.cache[0], Tensor):
key, value = self.cache
else:
key = self._project_qkv(self.linears[1], key)
value = self._project_qkv(self.linears[2], value)
if self.self_attention and isinstance(self.cache[0], Tensor):
# Concat with previous keys and values
key = torch.cat((self.cache[0], key), dim=2)
value = torch.cat((self.cache[1], value), dim=2)
mask = None
# Cache key and value tensors
if getattr(self, "incremental_decoding", False):
self.cache = [key, value]
# 2) Apply attention on all the projected vectors in batch.
x, attn = self.attention(query, key, value, mask=mask, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
def _project_qkv(self, layer, x):
return layer(x).view(x.size(0), -1, self.h, self.d_k).transpose(1, 2)
@staticmethod
def attention(query, key, value, mask=None, dropout=None):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
# noinspection PyAbstractClass
class CachedMultiHeadedAttention(MultiHeadedAttention):
def __init__(self, *args, **kwargs):
"""Take in model size and number of heads."""
super().__init__(*args, **kwargs)
self.incremental_decoding = False
def reset_cache(self):
self.cache = [None, None]
# Aliases
MHA = MultiHeadedAttention
CMHA = CachedMultiHeadedAttention
# noinspection PyAbstractClass
class PositionwiseFeedForward(nn.Module):
"""Implements FFN equation."""
def __init__(self, d_model, d_ff, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
# noinspection PyAbstractClass
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-6):
super().__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
# noinspection PyAbstractClass
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super().__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"""Apply residual connection to any sublayer with the same size."""
return x + self.dropout(sublayer(self.norm(x)))
# noinspection PyAbstractClass
class PositionalEncoding(nn.Module):
"""Implement the PE function."""
def __init__(self, d_model, dropout, max_len=5000):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(
torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer("pe", pe)
self.incremental_decoding = False
self.current_time_step = 0
def reset_cache(self):
self.current_time_step = 0
def forward(self, x):
if self.incremental_decoding:
assert x.size(1) == 1, \
f"{self.__class__.__name__}: Expected input to have shape (M, 1, N), saw {x.shape}"
x = x + self.pe[:, self.current_time_step:self.current_time_step + 1]
self.current_time_step += 1
else:
x = x + self.pe[:, :x.size(1)]
return self.dropout(x)
# noinspection PyAbstractClass
class InputEmbedding(nn.Module):
def __init__(self, d_model, vocab):
super().__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, x):
return self.lut(x) * math.sqrt(self.d_model)
# noinspection PyAbstractClass
class OutputEmbedding(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, d_model, vocab):
super().__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
# noinspection PyAbstractClass,PyAttributeOutsideInit
class CachedTransformerBase(CaptionModel):
def __init__(self, config):
super().__init__()
self.config = config
self.d_model = config.d_model # default: 512
self.dim_feedforward = config.dim_feedforward # default: 2048
self.num_layers = config.num_layers # default: 6
self.drop_prob_src = config.drop_prob_src
self.seq_length = config.max_seq_length
self.att_feat_size = config.att_feat_size
self.vocab_size = config.vocab_size
self.eos_idx = config.eos_token_id
self.bos_idx = config.bos_token_id
self.unk_idx = config.unk_token_id
self.pad_idx = config.pad_token_id
assert self.num_layers > 0, "num_layers should be greater than 0"
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
@staticmethod
def enable_incremental_decoding(module):
if hasattr(module, "incremental_decoding"):
module.incremental_decoding = True
module.reset_cache()
@staticmethod
def disable_incremental_decoding(module):
if hasattr(module, "incremental_decoding"):
module.incremental_decoding = False
module.reset_cache()
def _modules_with_cache(self):
return filter(
lambda x: getattr(x, "incremental_decoding", False) and hasattr(x, "cache"),
self.modules()
)
def _retrieve_caches(self):
caches = [m.cache for m in self._modules_with_cache()]
caches = [_.transpose(0, 1) for _ in chain.from_iterable(caches)]
return caches
def _update_caches(self, caches):
idx = 0
for i, m in enumerate(self._modules_with_cache()):
m.cache = [_.transpose(0, 1) for _ in caches[idx: idx + m.cache_size]]
idx += m.cache_size
def _generate_captions(self, att_feats, att_masks, memory, state, opt):
num_random_sample = opt.get("num_random_sample", 0)
beam_size = opt.get("beam_size", 1)
temperature = opt.get("temperature", 1.0)
decoding_constraint = opt.get("decoding_constraint", 0)
batch_size = att_feats.shape[0]
# Enable incremental decoding for faster decoding
self.apply(self.enable_incremental_decoding)
if num_random_sample <= 0 and beam_size > 1:
assert beam_size <= self.vocab_size
it = att_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
seq_logprobs = att_feats.new_zeros(batch_size, beam_size, self.seq_length)
seq = att_feats.new_full((batch_size, beam_size, self.seq_length), self.pad_idx, dtype=torch.long)
# first step, feed bos
logprobs, state = self.get_logprobs_state(it, memory, att_masks, state)
memory, att_masks = repeat_tensors(beam_size, [memory, att_masks])
self.done_beams = self.batch_beam_search(state, logprobs, memory, att_masks, opt=opt)
for k in range(batch_size):
for b in range(beam_size):
res = self.done_beams[k][b]
seq_len = res["seq"].shape[0]
seq[k, b, :seq_len] = res["seq"]
seq_logprobs[k, b, :seq_len] = res["logps"].gather(1, res["seq"].unsqueeze(1)).squeeze(1)
# top_seq = self.done_beams[k][0]["seq"]
# seq_len = top_seq.shape[0]
# seq[k, :seq_len] = top_seq # the first beam has highest cumulative score
# seq_logprobs[k, :seq_len] = self.done_beams[k][0]["logps"].gather(1, top_seq.unsqueeze(1)).squeeze(1)
# Disable incremental decoding so that regular training can continue
self.apply(self.disable_incremental_decoding)
# return the samples and their log likelihoods
return seq, seq_logprobs
# Greedy search or random sample
if num_random_sample > 0:
assert beam_size < 1, f"Beam size must be < 1, saw {beam_size}"
batch_size *= num_random_sample
memory = memory.repeat_interleave(num_random_sample, dim=0)
att_masks = att_masks.repeat_interleave(num_random_sample, dim=0)
else:
assert beam_size == 1, f"Beam size must be 1, saw {beam_size}"
it = att_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
seq_logprobs = att_feats.new_zeros(batch_size, self.seq_length)
seq = att_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long)
unfinished = it != self.eos_idx
for t in range(self.seq_length + 1):
logprobs, state = self.get_logprobs_state(it, memory, att_masks, state)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(batch_size, self.vocab_size)
tmp.scatter_(1, seq[:, t - 1].data.unsqueeze(1), float("-inf"))
logprobs = logprobs + tmp
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
if num_random_sample > 0:
if temperature == 1.0:
prob_prev = torch.exp(logprobs.data) # fetch prev distribution: shape Nx(M+1)
else:
# scale logprobs by temperature
prob_prev = torch.exp(torch.div(logprobs.data, temperature))
it = torch.multinomial(prob_prev, 1)
sample_logprobs = logprobs.gather(1, it) # gather the logprobs at sampled positions
it = it.view(-1).long() # and flatten indices for downstream processing
else:
# greedy search
sample_logprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
# stop when all finished
seq[:, t] = it * unfinished.type_as(it)
unfinished = unfinished * (it != self.eos_idx)
seq_logprobs[:, t] = sample_logprobs.view(-1)
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
if num_random_sample > 0:
seq = seq.view(-1, num_random_sample, self.seq_length)
seq_logprobs = seq_logprobs.view(-1, num_random_sample, self.seq_length)
else:
seq = seq.view(-1, 1, self.seq_length)
seq_logprobs = seq_logprobs.view(-1, 1, self.seq_length)
# Disable incremental decoding so that regular training can continue
self.apply(self.disable_incremental_decoding)
return seq, seq_logprobs
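    # Illustrative note (not in the original file): the `opt` dict read above selects the
    # decoding mode. Based on the branches in _generate_captions, a hypothetical caller
    # might pass, e.g.:
    #   opt={"beam_size": 3}                          -> beam search
    #   opt={"beam_size": 1}                          -> greedy decoding
    #   opt={"beam_size": 0, "num_random_sample": 5}  -> multinomial sampling
    # In every case `seq` and `seq_logprobs` are returned with shape
    # (batch, samples_per_image, seq_length).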
@staticmethod
def add_argparse_args(parser: Union[_ArgumentGroup, ArgumentParser]):
# fmt: off
parser.add_argument(
"--d_model", type=int, default=512,
help="int: The token and feature embedding size."
)
parser.add_argument(
"--dim_feedforward", type=int, default=2048,
help="int: Size of feedforward layers."
)
parser.add_argument(
"--num_layers", type=int, default=6,
help="int: Number of transformer layers."
)
parser.add_argument(
"--drop_prob_src", type=float, default=0.5,
help="float: Dropout rate applied to source embedding at the Encoder."
)
parser.add_argument(
"--att_feat_size", type=int, default=2048,
help="int: Number of channels of CNN features (ResNet = 2048, VGG = 512)."
)
# fmt: on
# noinspection PyAbstractClass,PyAttributeOutsideInit
@register_model("transformer")
class Transformer(CachedTransformerBase):
COLLATE_FN = UpDownCollate
def __init__(self, config):
super().__init__(config)
self.make_model()
def make_model(self):
nhead = 8
dropout = 0.1
ff = PositionwiseFeedForward(self.d_model, self.dim_feedforward, dropout)
position = PositionalEncoding(self.d_model, dropout)
self.core = EncoderDecoder(
src_embed=nn.Sequential(
nn.Linear(self.att_feat_size, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_src)
),
encoder=Encoder(
EncoderLayer(self.d_model, MHA(nhead, self.d_model), deepcopy(ff), dropout),
self.num_layers
),
tgt_embed=nn.Sequential(InputEmbedding(self.d_model, self.vocab_size), deepcopy(position)),
decoder=Decoder(
DecoderLayer(
self.d_model,
CMHA(nhead, self.d_model, self_attention=True), CMHA(nhead, self.d_model),
deepcopy(ff), dropout
),
self.num_layers
),
generator=OutputEmbedding(self.d_model, self.vocab_size),
autoregressive=True,
pad_idx=self.pad_idx
)
self._reset_parameters()
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def _forward(self, att_feats: Tensor, att_masks: Tensor, seqs: Tensor, **kwargs):
"""
Args:
att_feats: (N, S, E)
att_masks: (N, S)
seqs: (N, T, E)
Returns:
"""
if seqs is not None:
# crop the last one
seqs = seqs[:, :-1]
return self.core(src=att_feats, src_mask=att_masks, tgt=seqs)
def get_logprobs_state(self, it, memory, mask, state):
"""
state = [ys.unsqueeze(0)]
"""
ys = it.unsqueeze(1)
if state is None:
pass
else:
# Retrieve reordered cache from state, and update them
self._update_caches(state[1:])
# noinspection PyUnresolvedReferences
decoder_output = self.core.decode(tgt=ys, memory=memory, memory_mask=mask)
logprobs = self.core.generate(decoder_output[:, -1])
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f"{self.__class__.__name__}: "
f"it.shape = `{it.shape}` "
f"ys.shape = `{ys.shape}` "
f"len(state) = `{len(state) if state is not None else None}` "
f"decoder_output.shape = `{decoder_output.shape}` "
f"logprobs.shape = `{logprobs.shape}` "
)
# Add layer cache into state list, transposed so that beam_step can reorder them
return logprobs, [ys.unsqueeze(0)] + self._retrieve_caches()
def _sample(self, att_feats: Tensor, att_masks: Tensor, opt=None, **kwargs):
if opt is None:
opt = {}
memory, att_masks = self.core.encode(src=att_feats, src_mask=att_masks)
state = None
return self._generate_captions(att_feats, att_masks, memory, state, opt)
@staticmethod
def add_argparse_args(parser: Union[_ArgumentGroup, ArgumentParser]):
# fmt: off
Transformer.COLLATE_FN.add_argparse_args(parser)
CachedTransformerBase.add_argparse_args(parser)
# fmt: on
```
#### File: caption_vae/pruning/masked_layer.py
```python
import logging
import math
import torch
from torch import Tensor
from torch import nn
from torch.nn import init, functional as F
from torch.nn.parameter import Parameter
from torch.nn import Module
from copy import deepcopy
from typing import Tuple, List, Union, Optional
from pruning import prune, sampler
logger = logging.getLogger(__name__)
# noinspection PyAttributeOutsideInit
class MaskMixin:
mask_type: str
mask_init_value: float
mask_trainable: bool
training: bool
def setup_masks(
self,
parameters: Union[str, List[str], Tuple[str, ...]],
mask_type: str,
mask_init_value: float = 1.0,
bypass_sigmoid_grad: bool = False,
) -> None:
if not isinstance(parameters, (list, tuple)):
parameters = (parameters,)
assert all(isinstance(_, str) for _ in parameters)
self.mask_parameters = []
for name in parameters:
weight = getattr(self, name, None)
assert weight is not None, f"Invalid weight attribute name: {name}"
if not isinstance(weight, Parameter):
logger.warning(
f"{self.__class__.__name__}: "
f"Retrieved weight tensor of type {type(weight)}, converting it into a Parameter."
)
weight = Parameter(weight)
mask_name = f"{name}_pruning_mask"
setattr(self, mask_name, deepcopy(weight))
self.mask_parameters.append(getattr(self, mask_name, None))
assert all(_ is not None for _ in self.mask_parameters)
assert mask_type in prune.VALID_MASKS, \
f"`mask_type` must be one of {prune.VALID_MASKS}, saw `{mask_type}`"
self.mask_type = mask_type
if self.mask_type in prune.SUPER_MASKS:
assert isinstance(mask_init_value, (float, int)), \
"`mask_init_value` must be provided as a float or int."
self.mask_init_value = float(mask_init_value)
self.mask_train_sample_fn = lambda x: sampler.bernoulli_sample_sigmoid(x, bypass_sigmoid_grad)
self.mask_eval_sample_fn = lambda x: sampler.rounding_sigmoid(x, bypass_sigmoid_grad)
self.mask_trainable = True
else:
if mask_init_value is not None:
logger.info(
f"{self.__class__.__name__}: "
f"`mask_init_value` is always 1.0 for mask_type = `{self.mask_type}`"
)
# Regular pruning
self.mask_init_value = 1.
self.mask_train_sample_fn = self.mask_eval_sample_fn = None
self.mask_trainable = self.mask_type == prune.SNIP
for mask in self.mask_parameters:
mask.requires_grad = self.mask_trainable
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f"{self.__class__.__name__}: Init: "
f"mask_type = {mask_type} "
f"mask_init_value = {mask_init_value} "
f"mask_trainable = {self.mask_trainable}"
)
self.reset_masks()
def reset_masks(self) -> None:
for mask in self.mask_parameters:
init.constant_(mask, self.mask_init_value)
def get_masked_weight(self, weight_name: str):
# Get weight and its corresponding mask
weight = getattr(self, weight_name, None)
assert weight is not None, f"Invalid weight attribute name: {weight_name}"
mask_name = f"{weight_name}_pruning_mask"
mask = getattr(self, mask_name, None)
        assert mask is not None, f"Invalid mask attribute name: {mask_name}"
# TODO: consider caching sampled mask for reuse, and clear cache when sparsity_loss is called
if self.mask_type in prune.SUPER_MASKS:
if self.training:
sample_fn = self.mask_train_sample_fn
else:
sample_fn = self.mask_eval_sample_fn
sampled_mask = sample_fn(mask)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f"{self.__class__.__name__}: "
f"Mask type = {self.mask_type} "
f"Sample fn = {sample_fn}"
)
else:
sampled_mask = mask
masked_weight = sampled_mask * weight
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f"{self.__class__.__name__}: "
f"Mask type = {self.mask_type} "
f"Sampled mask = {sampled_mask} "
f"Masked weight = {masked_weight}"
)
return masked_weight
@staticmethod
def assert_in_kwargs(key, kwargs):
assert key in kwargs, f"{key} not found in provided keyword arguments: {kwargs}"
# noinspection PyAbstractClass
class MaskedLinear(MaskMixin, nn.Linear):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
"""
__constants__ = nn.Linear.__constants__ + ['mask_type', 'mask_init_value', 'bypass_sigmoid_grad']
def __init__(
self, in_features: int, out_features: int,
mask_type: str, mask_init_value: float,
bypass_sigmoid_grad: bool = False,
**kwargs
) -> None:
super().__init__(in_features, out_features, **kwargs)
self.setup_masks("weight", mask_type, mask_init_value, bypass_sigmoid_grad)
def forward(self, input: Tensor) -> Tensor:
return F.linear(input, self.get_masked_weight("weight"), self.bias)
# noinspection PyAbstractClass
class MaskedEmbedding(MaskMixin, nn.Embedding):
r"""A simple lookup table that stores embeddings of a fixed dictionary and size.
This module is often used to store word embeddings and retrieve them using indices.
The input to the module is a list of indices, and the output is the corresponding
word embeddings.
"""
__constants__ = nn.Embedding.__constants__ + ['mask_type', 'mask_init_value', 'bypass_sigmoid_grad']
def __init__(
self, num_embeddings: int, embedding_dim: int,
mask_type: str, mask_init_value: float,
bypass_sigmoid_grad: bool = False,
**kwargs
) -> None:
super().__init__(num_embeddings, embedding_dim, **kwargs)
self.setup_masks("weight", mask_type, mask_init_value, bypass_sigmoid_grad)
def forward(self, input: Tensor) -> Tensor:
return F.embedding(
input, self.get_masked_weight("weight"), self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse
)
@classmethod
def from_pretrained(cls, *args, **kwargs):
r"""Creates Embedding instance from given 2-dimensional FloatTensor.
"""
raise NotImplementedError
# noinspection PyAbstractClass
class MaskedLSTMCell(MaskMixin, nn.LSTMCell):
r"""
A masked long short-term memory (LSTM) cell.
self.weight_ih = Parameter(torch.Tensor(num_chunks * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(num_chunks * hidden_size, hidden_size))
"""
def __init__(
self, input_size: int, hidden_size: int,
mask_type: str, mask_init_value: float,
bypass_sigmoid_grad: bool = False,
**kwargs
) -> None:
super().__init__(input_size, hidden_size, **kwargs)
self.setup_masks(("weight_ih", "weight_hh"), mask_type, mask_init_value, bypass_sigmoid_grad)
def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]:
self.check_forward_input(input)
if hx is None:
zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
hx = (zeros, zeros)
self.check_forward_hidden(input, hx[0], '[0]')
self.check_forward_hidden(input, hx[1], '[1]')
return torch._VF.lstm_cell(
input, hx,
self.get_masked_weight("weight_ih"), self.get_masked_weight("weight_hh"),
self.bias_ih, self.bias_hh,
)
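# Illustrative usage sketch (an assumption about typical usage, not part of the original
# file; the concrete mask-type strings live in pruning/prune.py):
#   layer = MaskedLinear(512, 256, mask_type=<one of prune.VALID_MASKS>, mask_init_value=1.0)
#   out = layer(torch.randn(8, 512))  # forward uses get_masked_weight("weight") = mask * weight
# For mask types in prune.SUPER_MASKS the mask is a trainable logit sampled through a
# sigmoid/Bernoulli during training and rounded at eval time; for the remaining types the
# mask is held at 1.0 and is trainable only for SNIP, as set up in MaskMixin.setup_masks.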
```
#### File: caption_vae/scripts/plot_line.py
```python
r"""
Created on 25 Mar 2021 19:34:40
@author: jiahuei
"""
import os
import math
import seaborn as sns
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
# https://chrisalbon.com/python/data_visualization/seaborn_color_palettes/
gray3 = sns.color_palette("gray_r", n_colors=3)
crest3 = sns.color_palette("crest_r", n_colors=3)
summer3 = sns.color_palette("summer_r", n_colors=4)[1:]
mako3 = sns.color_palette("mako_r", n_colors=3)
flare3 = sns.color_palette("flare", n_colors=3)
blue3 = sns.cubehelix_palette(3, start=.5, rot=-.5)
cranberry3 = sns.dark_palette("#b2124d", n_colors=3, reverse=True)[:3]
coffee3 = sns.dark_palette("#a6814c", n_colors=4, reverse=True)[:3]
# sns.palplot([
# *sns.color_palette("OrRd", 3),
# *sns.dark_palette("#a6814c", n_colors=4, reverse=True),
# ])
# sns.set_theme(style="darkgrid", rc={"legend.loc": "lower left", "legend.framealpha": "0.6"})
sns.set_theme(
style="whitegrid",
rc={
"axes.edgecolor": ".3", "grid.color": "0.9", # "axes.grid.axis": "y",
"legend.loc": "lower left", "legend.framealpha": "0.6"
}
)
# print(plt.rcParams)
def get_lim(series, margin=(0.10, 0.05), min_threshold=None):
max_score = series.max()
if isinstance(min_threshold, (float, int)):
series = series[series > max_score * min_threshold]
min_score = series.min()
score_range = max_score - min_score
lim = (min_score - score_range * margin[0], max_score + score_range * margin[1])
return lim
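# e.g. get_lim(pd.Series([0.5, 0.9, 1.0]), min_threshold=0.8) keeps only values > 0.8,
# giving a range of 0.1 and limits of (0.89, 1.005) with the default margins.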
def get_midpoint(series):
mid = (series.max() - series.min()) / 2 + series.min()
return mid
def set_style(ax, linestyle=None, marker=None):
if linestyle is None and marker is None:
return ax
if linestyle is None:
linestyle = [None] * len(marker)
if marker is None:
marker = [None] * len(linestyle)
legend_hdl, legend_lbl = ax.get_legend_handles_labels()
for line, leg_line, ls, m in zip(ax.lines, legend_hdl, linestyle, marker):
if ls is not None:
line.set_linestyle(ls)
leg_line.set_linestyle(ls)
if m is not None:
line.set_marker(m)
leg_line.set_marker(m)
return ax
def is_white_style():
return plt.rcParams["axes.facecolor"] == "white"
def despine_white(fig):
# Despine whitegrid
if is_white_style():
sns.despine(fig=fig, top=False, right=False, left=False, bottom=False, offset=None, trim=False)
def process_output_path(output_path):
output_name, output_ext = os.path.splitext(output_path)
if is_white_style():
output_name += " (w)"
else:
output_name += " (d)"
output_path = output_name + output_ext
return output_path
def plot_performance(
df, palette,
score_name, output_path, fig_title="",
output_dpi=600, min_threshold=0.8,
context="paper", fig_scale=1.5,
):
sns.set_context(context)
methods = [_ for _ in df.columns.tolist() if _.lower() != "nnz"]
line_styles = []
for m in methods:
m = m.lower()
if "baseline" in m:
line_styles.append(":")
elif m.startswith("hard"):
line_styles.append("--")
else:
line_styles.append(None)
marker_styles = []
for m in methods:
m = m.lower()
if "baseline" in m:
marker_styles.append(None)
elif "proposed" in m:
marker_styles.append("o")
elif m.startswith("lottery"):
marker_styles.append("^")
elif m.startswith("snip"):
marker_styles.append("v")
else:
marker_styles.append("X")
# Main chart
series_name = "Prune method"
xaxis_name = "Sparsity"
yaxis_name = score_name
df2 = df[methods].stack().reset_index(level=1).rename(columns={"level_1": series_name, 0: yaxis_name})
df2[series_name] = df2[series_name].map(lambda x: "Dense baseline" if x.lower() == "baseline" else x)
df2.index = df2.index.map(lambda x: f"{x * 100:.1f} %")
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4. * fig_scale, 3. * fig_scale))
ax.set(ylim=get_lim(df2.loc[:, yaxis_name], min_threshold=min_threshold))
ax = sns.lineplot(
data=df2, x=xaxis_name, y=yaxis_name, hue=series_name, ax=ax, palette=palette,
)
# Lines and legends
ax = set_style(ax, line_styles, marker_styles)
legend_xoffset = 0.15 if "soft-" in output_path.lower() and "SNIP" in methods else 0
ax.legend(loc=plt.rcParams["legend.loc"], bbox_to_anchor=(legend_xoffset, 0))
# NNZ axis
df2 = df.set_index("NNZ")[methods]
df2 = df2.stack().reset_index(level=1).rename(columns={"level_1": series_name, 0: yaxis_name})
with sns.axes_style(None, rc={"axes.grid": False}):
# print(sns.axes_style())
ax2 = ax.twiny()
sns.lineplot(
data=df2, x="NNZ", y=yaxis_name, hue=series_name, ax=ax2, legend=None, visible=False
)
# Title
if fig_title:
ax.set_title(fig_title, pad=plt.rcParams["font.size"] * 1.5)
despine_white(fig)
# Adjust margins and layout
plt.tight_layout(pad=1.5)
plt.savefig(process_output_path(output_path), dpi=output_dpi) # , plt.show()
plt.clf()
plt.close("all")
def plot_progression(
df, palette,
output_path, fig_title="",
output_dpi=600, linewidth=2.,
context="paper", fig_scale=1.5,
):
sns.set_context(context)
layers = df.columns.tolist()
line_styles = []
for m in layers:
m = m.lower()
if "target" in m:
line_styles.append("--")
else:
line_styles.append(None)
# Main chart
series_name = "Layer"
xaxis_name = "Training step"
yaxis_name = "Value" if any("loss" in _ for _ in layers) else "Sparsity"
df2 = df.stack().reset_index(level=1).rename(columns={"level_1": series_name, 0: yaxis_name})
# df2.index = df2.index.map(str)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4. * fig_scale, 3. * fig_scale))
# ax.set(ylim=get_ylim(df2, yaxis_name, min_threshold=min_threshold))
ax = sns.lineplot(
data=df2, x=xaxis_name, y=yaxis_name, hue=series_name, ax=ax,
palette=palette, linewidth=linewidth,
)
ax = set_style(ax, line_styles)
# Title
if fig_title:
ax.set_title(fig_title, pad=plt.rcParams["font.size"] * 1.5)
despine_white(fig)
# Adjust margins and layout
plt.tight_layout(pad=1.5)
plt.savefig(process_output_path(output_path), dpi=output_dpi) # , plt.show()
plt.clf()
plt.close("all")
def plot_layerwise(
df, palette,
output_path, fig_title="",
output_dpi=600, linewidth=2.,
context="paper", fig_scale=1.5,
):
sns.set_context(context)
layers = df.columns.tolist()
line_styles = []
for m in layers:
m = m.lower()
if "hard" in m:
line_styles.append("--")
else:
line_styles.append(None)
# Main chart
series_name = "Method"
xaxis_name = "Layer"
yaxis_name = "Sparsity"
df2 = df.stack().reset_index(level=1).rename(columns={"level_1": series_name, 0: yaxis_name})
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4. * fig_scale, 3. * fig_scale))
ax = sns.lineplot(
data=df2, x=xaxis_name, y=yaxis_name, hue=series_name, ax=ax,
palette=palette, linewidth=linewidth,
)
ax = set_style(ax, line_styles)
# Group Inception layers
if "lstm" in output_path.lower():
xticklabels = [
"Embedding", "Query", "Key", "Value", "QK", "Initial state", "LSTM", "Output"
]
ax.set_xticklabels(xticklabels, fontsize="x-small")
else:
xticks = []
xticklabels = []
layers = set()
for i, ly in enumerate(df.index.tolist()):
ly = ly.split("/")[0].split("_")[1]
if ly not in layers:
xticks.append(i)
xticklabels.append(ly)
layers.add(ly)
ax.set_xticks(xticks)
rotation = 90 if "inception" in output_path.lower() else 0
ax.set_xticklabels(xticklabels, rotation=rotation, fontsize="x-small")
# Title
if fig_title:
ax.set_title(fig_title, pad=plt.rcParams["font.size"] * 1.5)
despine_white(fig)
# Adjust margins and layout
plt.tight_layout(pad=1.5)
plt.savefig(process_output_path(output_path), dpi=output_dpi) # , plt.show()
plt.clf()
plt.close("all")
def plot_overview(
df, palette,
output_path, fig_title="",
output_dpi=600,
context="paper", fig_scale=1.5,
):
# https://datavizpyr.com/how-to-make-bubble-plot-with-seaborn-scatterplot-in-python/
sns.set_context(context)
# Main chart
series_name = "Method"
size_name = "Decoder Size (MB)"
xaxis_name = "Decoder NNZ (M)"
yaxis_name = "CIDEr"
sizes = (20, 600)
df2 = df
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4. * fig_scale, 3. * fig_scale))
ax.set(
xlim=get_lim(df2.index, margin=(0.1, 0.1)),
ylim=get_lim(df2.loc[:, yaxis_name], margin=(0.2, 0.2))
)
# Bubble plot
ax = sns.scatterplot(
data=df2, x=xaxis_name, y=yaxis_name, size=size_name, hue=series_name,
palette=palette, linewidth=0, sizes=sizes, alpha=1, ax=ax, legend="full"
)
# Line
ax = sns.lineplot(
data=df2, x=xaxis_name, y=yaxis_name, hue=series_name,
linewidth=0.8, linestyle=":", alpha=0.9,
ax=ax, palette=palette, legend=None,
)
# Annotate
for i in range(0, len(df2)):
x_offset = 0
y_offset = math.sqrt(df2[size_name].iloc[i] / math.pi)
if "99.1" in df2["Annotation"].iloc[i]:
x_offset = -1.5
y_offset = -y_offset - 6
elif "95" in df2["Annotation"].iloc[i]:
x_offset = 1.5
# Size in MB
ax.annotate(
f"{df2[size_name].iloc[i]} MB",
(df2.index[i] + x_offset, df2[yaxis_name].iloc[i] + y_offset / 6),
fontsize="x-small", va="bottom", ha="center"
)
ax.annotate(
"Pruned to 95% and 99.1% sparsities\nusing proposed Supermask Pruning", (20, 126), # (10, 116),
fontsize="small", linespacing=1.5, va="bottom", ha="center", color=cranberry3[1]
)
ax.annotate(
"Dense (original)", (46, 121.5),
fontsize="small", linespacing=1.5, va="bottom", ha="center", color=cranberry3[2]
)
# ax = set_style(ax, line_styles)
hdl, lbl = ax.get_legend_handles_labels()
size_idx = lbl.index(size_name)
# https://stackoverflow.com/a/53438726
# config A
method_legend = ax.legend(
hdl[:size_idx], lbl[:size_idx], ncol=5, loc="upper center",
bbox_to_anchor=(0.5, -0.3),
)
size_legend = ax.legend(
hdl[size_idx::2], lbl[size_idx::2], ncol=5, loc="lower center", borderpad=1,
bbox_to_anchor=(0.5, -0.33),
)
ax.add_artist(method_legend)
# Title
if fig_title:
ax.set_title(fig_title, pad=plt.rcParams["font.size"] * 1.5)
despine_white(fig)
# Adjust margins and layout
plt.tight_layout(pad=1.5)
plt.savefig(process_output_path(output_path), dpi=output_dpi) # , plt.show()
plt.clf()
plt.close("all")
def main():
d = os.path.join("plot_data", "performance")
for f in tqdm(sorted(os.listdir(d))):
min_threshold = 0.8
fname_low = f.lower()
if "inception" in fname_low:
# This must be first condition
palette = ["#9b59b6", *cranberry3, *mako3, mako3[1], mako3[2]]
min_threshold = 0.5
elif "soft-attention" in fname_low or "ort" in fname_low:
palette = ["#9b59b6", cranberry3[0], flare3[0], mako3[2], *mako3, "#9b59b6"]
elif "up-down" in fname_low:
palette = ["#9b59b6", cranberry3[0], *flare3, mako3[2], *mako3, "#9b59b6"]
else:
raise ValueError(f"Invalid file: {f}")
df = pd.read_csv(os.path.join(d, f), sep="\t", header=0, index_col=0)
fname = os.path.splitext(f)[0]
title, metric = fname.split(" --- ")
plot_performance(df, palette, metric, f"{fname}.png", min_threshold=min_threshold)
d = os.path.join("plot_data", "progression")
for f in tqdm(sorted(os.listdir(d))):
df = pd.read_csv(os.path.join(d, f), sep="\t", header=0, index_col=0)
fname = os.path.splitext(f)[0]
plot_progression(df, "deep", f"{fname}.png", linewidth=0.8)
d = os.path.join("plot_data", "layerwise")
for f in tqdm(sorted(os.listdir(d))):
fname_low = f.lower()
if "mobilenet" in fname_low:
palette = [cranberry3[0], cranberry3[1]]
else:
palette = [cranberry3[0], mako3[2], mako3[0]]
df = pd.read_csv(os.path.join(d, f), sep="\t", header=0, index_col=0)
fname = os.path.splitext(f)[0]
plot_layerwise(df, palette, f"{fname}.png", linewidth=0.8)
for f in tqdm(range(1)):
# Just for the progress bar
fname = "Pruning Image Captioning Models (MS-COCO)"
df = pd.read_csv(os.path.join("plot_data", f"{fname}.tsv"), sep="\t", header=0, index_col=0)
plot_overview(df, "icefire", f"{fname}.png")
if __name__ == "__main__":
main()
```
#### File: caption_vae/scripts/plot_nonzero_weights_kde.py
```python
import os
import logging
import torch
import numpy as np
import seaborn as sns
from scipy.stats import mstats
from matplotlib import pyplot as plt
from argparse import ArgumentParser, Namespace, ArgumentDefaultsHelpFormatter
from pruning import prune
from utils.model_utils import densify_state_dict
from utils.misc import replace_from_right, configure_logging
from utils.file import list_dir
from utils.config import Config
logger = logging.getLogger(__name__)
gray3 = sns.color_palette("gray_r", n_colors=3)
crest3 = sns.color_palette("crest_r", n_colors=3)
summer3 = sns.color_palette("summer_r", n_colors=4)[1:]
mako3 = sns.color_palette("mako_r", n_colors=3)
flare3 = sns.color_palette("flare", n_colors=3)
blue3 = sns.cubehelix_palette(3, start=.5, rot=-.5)
cranberry3 = sns.dark_palette("#b2124d", n_colors=3, reverse=True)[:3]
coffee3 = sns.dark_palette("#a6814c", n_colors=4, reverse=True)[:3]
# sns.set_theme(style="darkgrid", rc={"legend.loc": "lower left", "legend.framealpha": 0.7})
sns.set_theme(
style="whitegrid",
rc={
"axes.edgecolor": ".3", "grid.color": "0.9", # "axes.grid.axis": "y",
"legend.loc": "lower left", "legend.framealpha": "0.6"
}
)
def is_white_style():
return plt.rcParams["axes.facecolor"] == "white"
def despine_white(fig):
# Despine whitegrid
if is_white_style():
sns.despine(fig=fig, top=False, right=False, left=False, bottom=False, offset=None, trim=False)
def process_output_path(output_path):
output_name, output_ext = os.path.splitext(output_path)
if is_white_style():
output_name += " (w)"
else:
output_name += " (d)"
output_path = output_name + output_ext
return output_path
class KDE:
CONTEXT = "paper"
FIG_SCALE = 1.5
FIG_DPI = 600
PRUNE_TYPE_TITLE = {
prune.REGULAR: "Proposed",
prune.MAG_GRAD_BLIND: "Gradual (blind)",
prune.MAG_GRAD_UNIFORM: "Gradual (uniform)",
prune.MAG_GRAD_DIST: "Gradual (distribution)",
prune.LOTTERY_MASK_FREEZE: "Lottery (gradual)", # For now, we only pair this with MAG_GRAD_UNIFORM
prune.LOTTERY_MAG_BLIND: "Lottery (hard-blind)",
prune.LOTTERY_MAG_UNIFORM: "Lottery (hard-uniform)",
prune.LOTTERY_MAG_DIST: "Lottery (hard-distribution)",
prune.MAG_BLIND: "Hard-blind",
prune.MAG_UNIFORM: "Hard-uniform",
prune.MAG_DIST: "Hard-distribution",
prune.SNIP: "SNIP",
}
def __init__(self):
self.config = self.parse_opt()
self.config.model_file = self.config.model_file.split(",")
def __call__(self, model_dir, visualise_weights_only=True):
print(f"Processing `{model_dir}`")
try:
model_config = Config.load_config_json(os.path.join(model_dir, "config.json"))
ckpt_path = [os.path.join(model_dir, _) for _ in self.config.model_file]
ckpt_path = list(filter(os.path.isfile, ckpt_path))
if len(ckpt_path) > 0:
ckpt_path = ckpt_path[0]
else:
return None
state_dict = densify_state_dict(torch.load(ckpt_path, map_location=torch.device("cpu")))
print(f"Model weights loaded from `{ckpt_path}`")
if visualise_weights_only:
state_dict = {k: v for k, v in state_dict.items() if "weight" in k}
flat_weights_np = np.concatenate([_.view(-1).numpy() for _ in state_dict.values()])
except FileNotFoundError:
flat_weights_np = np.load(os.path.join(model_dir, "nonzero_weights_flat.npy"))
model_config = {
# Just hard-code this for now
"caption_model": "Soft-Attention LSTM",
"prune_type": prune.REGULAR if "REG" in model_dir else prune.MAG_GRAD_BLIND,
"prune_sparsity_target": 0.975
}
nonzero_weights = flat_weights_np[flat_weights_np != 0]
np.save(os.path.join(model_dir, "nonzero_weights_flat.npy"), nonzero_weights)
# Output Naming
net_name = model_config.get("caption_model", None)
if net_name.endswith("_prune"):
net_name = replace_from_right(net_name, "_prune", "", 1)
# net_name = net_name.replace("net", "Net")
output_suffix = net_name
fig_title = ""
pruning_type = model_config.get("prune_type", "")
if pruning_type:
if pruning_type == prune.MASK_FREEZE:
logger.warning(f"Mask type = {prune.MASK_FREEZE} not supported")
return None
try:
fig_title = f"{self.PRUNE_TYPE_TITLE[pruning_type]}, "
except KeyError:
raise ValueError(f"Invalid pruning type: `{pruning_type}`")
sparsity = model_config.get("prune_sparsity_target", 0) * 100
fig_title += f"{sparsity:.1f}% sparse, "
# TexStudio cannot accept filename with dot
output_suffix += f"_{int(sparsity)}_{pruning_type}"
fig_title += " ".join(_.title() for _ in net_name.split("_"))
fig_title = fig_title.replace("Lstm", "LSTM")
# TexStudio will annoyingly highlight underscores in filenames
output_suffix = output_suffix.replace("_", "-")
# Histogram and KDE
for i, clip_pct in enumerate([0.005, 0.001]):
# noinspection PyTypeChecker
self.plot_kde(
data=mstats.winsorize(nonzero_weights, limits=clip_pct),
# TexStudio will annoyingly highlight underscores in filenames
output_fig_path=process_output_path(os.path.join(model_dir, f"KDE-{i}-{output_suffix}.png")),
fig_title="",
fig_footnote=f"* {clip_pct * 100:.1f}% winsorization",
)
logger.info(f"Saved graph: clip percent = {clip_pct} (as float between 0. and 1.)")
print("")
def plot_kde(self, data, output_fig_path, fig_title, fig_footnote=None):
sns.set_context(self.CONTEXT)
# colours = ("goldenrod", "sandybrown", "chocolate", "peru")
# colours = ("c", "cadetblue", "lightseagreen", "skyblue")
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4. * self.FIG_SCALE, 3. * self.FIG_SCALE))
ax = sns.kdeplot(
data,
fill=True, common_norm=False, # palette="crest",
alpha=.5, linewidth=0,
color="c",
ax=ax,
)
if fig_title:
ax.set_title(fig_title, pad=plt.rcParams["font.size"] * 1.5)
if isinstance(fig_footnote, str):
plt.figtext(
0.90, 0.025,
fig_footnote,
horizontalalignment="right",
fontsize="xx-small",
)
despine_white(fig)
# Adjust margins and layout
plt.tight_layout(pad=1.5)
plt.savefig(output_fig_path, dpi=self.FIG_DPI)
print(f"Saved figure: `{output_fig_path}`")
plt.clf()
plt.close("all")
@staticmethod
def parse_opt() -> Namespace:
# fmt: off
# noinspection PyTypeChecker
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--log_dir", type=str, default="",
help="str: Logging / Saving directory."
)
parser.add_argument(
"--id", type=str, default="",
help="An id identifying this run/job."
)
parser.add_argument(
"--model_file", type=str, default="model_best_pruned_sparse.pth,model_best.pth",
help="str: Model checkpoint file."
)
parser.add_argument(
"--logging_level",
type=str,
default="INFO",
choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"],
help="str: Logging level.",
)
args = parser.parse_args()
return args
if __name__ == '__main__':
configure_logging("WARNING")
kde = KDE()
if kde.config.id:
dirs = [os.path.join(kde.config.log_dir, kde.config.id)]
else:
dirs = list(filter(os.path.isdir, list_dir(kde.config.log_dir)))
for d in dirs:
kde(d)
```
#### File: caption_vae/scst/scorers.py
```python
import logging
import numpy as np
from typing import Union, Tuple, List
from coco_caption.pycocoevalcap.bleu.bleu import Bleu
from coco_caption.pycocoevalcap.bleu.bleu_scorer import BleuScorer
from scst.cider.pyciderevalcap.ciderD.ciderD import CiderD
# from scst.cider.pyciderevalcap.cider.cider import Cider
_DEBUG = False
logger = logging.getLogger(__name__)
class CaptionScorer(object):
"""
An object that encapsulates the different scorers to provide a unified
interface.
"""
def __init__(
self, path_to_cached_tokens: str,
cider_weight: float = 1.0,
bleu_weight: Union[List, Tuple] = None
):
assert isinstance(cider_weight, float)
if bleu_weight is None:
bleu_weight = [0.] * 4
else:
assert isinstance(bleu_weight, (list, tuple))
assert len(bleu_weight) == 4
self.path_to_cached_tokens = path_to_cached_tokens
self.scorers = None
self.weights = {
"ciderD": cider_weight,
"bleu": bleu_weight,
}
@staticmethod
def input_check(inputs, same_sub_len=True):
assert isinstance(inputs, (list, tuple))
assert all(isinstance(_, (list, tuple)) for _ in inputs)
if same_sub_len:
lens = set(len(_) for _ in inputs)
assert len(lens) == 1, (
f"Each image should have the same number of captions."
f"Received captions per image: {lens}"
)
def __call__(self, refs, sample, greedy=None):
if self.scorers is None:
self.scorers = {
"ciderD": CiderD(df=self.path_to_cached_tokens),
"bleu": BleuSilent(4),
}
self.input_check(refs, same_sub_len=False)
self.input_check(sample)
assert len(refs) == len(sample), (
f"`ref` and `sample` have different lengths: "
f"refs = {len(refs)}, sample = {len(sample)}"
)
if greedy:
self.input_check(greedy)
assert len(sample) == len(greedy), (
f"`sample` and `greedy` have different lengths: "
f"sample = {len(sample)}, greedy = {len(greedy)}"
)
else:
assert greedy is None, "`greedy` should be one of: None, list or tuple."
weights = self.weights
num_greedy = len(greedy) if greedy else 0
num_sample_per_img = len(sample[0])
gts = {}
res = {}
item_id = 0
for i in range(num_greedy):
gts[item_id], res[item_id] = refs[i], greedy[i]
item_id += 1
for i in range(len(sample)):
for j in range(num_sample_per_img):
gts[item_id], res[item_id] = refs[i], sample[i][j: j + 1]
item_id += 1
num_items = item_id
assert (len(sample) * num_sample_per_img + num_greedy) == num_items
assert len(gts.keys()) == num_items
scores = {}
for metric in self.scorers:
wg = weights[metric]
if isinstance(wg, (float, int)) and wg <= 0:
continue
if isinstance(wg, (list, tuple)) and max(wg) <= 0:
continue
_, sc = self.scorers[metric].compute_score(gts, res)
if isinstance(wg, (list, tuple)):
for i, w in enumerate(wg):
scores[f"{metric}_{i}"] = np.array(sc[i]) * w
else:
scores[metric] = sc * wg
scores = sum(scores.values()) # Sum across metrics
assert len(scores) == num_items
sc_greedy = scores[:num_greedy] if greedy else np.array([0])
sc_sample = scores[num_greedy:]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f"{self.__class__.__name__}: Captions: greedy = `{greedy}` sampled = `{sample}`"
)
logger.debug(
f"{self.__class__.__name__}: "
f"Average scores: greedy = `{sc_greedy.mean()}` sampled = `{sc_sample.mean()}`"
)
greedy_num_words = 0
sample_num_words = 0
for i in range(num_items):
if i < num_greedy:
greedy_num_words += len(res[i][0].split(" "))
else:
sample_num_words += len(res[i][0].split(" "))
logger.debug(
f"{self.__class__.__name__}: Average # of words: "
f"greedy = `{greedy_num_words / num_greedy if greedy else 0:.2f}` "
f"sampled = `{sample_num_words / (num_items - num_greedy):.2f}`"
)
return sc_sample, sc_greedy
class BleuSilent(Bleu):
# noinspection PyMethodOverriding
def compute_score(self, gts, res):
return super().compute_score(gts=gts, res=res, verbose=0)
"""
Cross-entropy loss derivative is p_i - y_i,
where p is the output of softmax and y is the one-hot label.
That is, the gradient w.r.t. the logit of class i is (p_i - 1) for the target class and p_i otherwise.
SCST loss derivative is
[r(sampled) - r(greedy)] * [p(sample @ t) - oneHot(sample @ t)]
This means it is equivalent to a weighted version of XE loss, where
the labels are sampled captions, and the weights are baselined rewards.
dec_log_ppl = tf.contrib.seq2seq.sequence_loss(
logits=sampled_logits,
targets=sampled_onehot,
weights=sampled_masks,
average_across_batch=False)
dec_log_ppl = tf.reduce_mean(dec_log_ppl * rewards)
"""
``` |
{
"source": "jiahuei/tf-sparse-captioning",
"score": 2
} |
#### File: caption_COMIC/utils/collect_sparsity_from_tb_dump.py
```python
import os
import numpy as np
pjoin = os.path.join
LAYERS = [
'rnn_initial_state_kernel_mask',
'basic_lstm_cell_kernel_mask',
'memory_layer_kernel_mask',
'value_layer_kernel_mask',
'query_layer_kernel_mask',
'MultiHeadAdd_attention_v_mask',
'rnn_decoder_embedding_map_mask',
'output_projection_kernel_mask',
'total_sparsity',
'total_nnz',
]
LAST_GSTEP = [177000, 531177]
def _valid_csv(dir_name):
for l in LAYERS:
if l in dir_name:
return l
return None
tb_dump_dir = r'C:\Users\snipe\Documents\GitHub\phd-papers-pruning\resources\tensorboard dumps'
dirs = [pjoin(tb_dump_dir, d) for d in os.listdir(tb_dump_dir)]
# Collect final sparsity values
data = dict(experiments=[])
for d in sorted(dirs):
if not os.path.isdir(d):
continue
exp_name = os.path.basename(d)
data['experiments'].append(exp_name)
for f in os.listdir(d):
layer_name = _valid_csv(f)
if not layer_name:
continue
fpath = pjoin(d, f)
_, gstep, sparsity = np.genfromtxt(fpath, delimiter=',', skip_header=1)[-1, :]
assert gstep in LAST_GSTEP
if layer_name not in data:
data[layer_name] = []
data[layer_name].append(sparsity)
# Write output file
fpath = pjoin(tb_dump_dir, 'final_sparsity_values.csv')
out = []
for i, e in enumerate(data['experiments']):
sps = []
for l in LAYERS:
sps.append('{:9.7f}'.format(data[l][i]))
sps = ','.join(sps)
out.append(','.join([e, sps]))
headers = ','.join(['Experiments'] + LAYERS)
out = '\n'.join([headers] + out)
with open(fpath, 'w') as f:
f.write(out)
```
#### File: caption_COMIC/utils/inspect_config.py
```python
from link_dirs import BASE_DIR, pjoin
import argparse
import os
from common.configuration_v1 import load_config
def main(args):
print('')
a = args
default_exp_dir = pjoin(BASE_DIR, 'experiments')
if a.log_dir == '':
a.log_dir = default_exp_dir
if a.inspect_attributes == '':
print('\nAttribute list is empty.\n')
return None
else:
inspect_attributes = a.inspect_attributes.split(',')
# List experiments
exp_names = os.listdir(a.log_dir)
all_run_dirs = []
for n in exp_names:
exp_dir = pjoin(a.log_dir, n)
if os.path.isdir(exp_dir):
sub_dirs = [pjoin(a.log_dir, n, d) for d in os.listdir(exp_dir)]
run_dirs = [d for d in sub_dirs if 'infer' not in os.path.split(d)[1]]
all_run_dirs += run_dirs
# List config files
# all_cfg_files = []
# for d in all_run_dirs:
# cfg_file = [f for f in os.listdir(d) if 'config' and '.pkl' in f]
# assert len(cfg_file) == 1
# all_cfg_files.append(pjoin(d, cfg_file[0]))
all_cfg_files = [pjoin(d, 'config.pkl') for d in all_run_dirs]
# Inspect
for attr in inspect_attributes:
print('\nInspecting attribute: `{}`\n'.format(attr))
for cpath in all_cfg_files:
try:
c = vars(load_config(cpath))
except IOError:
continue
print(os.path.sep.join(cpath.split(os.path.sep)[-3:-1]))
if attr in c:
print(c[attr])
else:
print('`{}` not found.'.format(attr))
print('\nAttribute inspection completed.\n')
# noinspection PyTypeChecker
def _create_parser():
_parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter)
_parser.add_argument(
'--log_dir', '-l', type=str, default='',
help='The logging directory.')
_parser.add_argument(
'--inspect_attributes', '-a', type=str, default='',
help='Comma-separated list of attributes to inspect.')
return _parser
if __name__ == '__main__':
parser = _create_parser()
main(parser.parse_args())
```
#### File: tf-sparse-captioning/common/configuration_v1.py
```python
import os
import pickle
from time import localtime, strftime
from common.natural_sort import natural_keys
VOCAB_DICT = ['wtoi', 'itow', 'ctoi', 'itoc', 'radix_wtoi', 'radix_itow']
class Config(object):
""" Configuration object."""
def __init__(self, **kwargs):
for key, value in sorted(kwargs.items()):
setattr(self, key, value)
# noinspection PyUnresolvedReferences
def save_config_to_file(self):
params = vars(self)
keys = sorted(params.keys(), key=natural_keys)
txt_dump = ['%s = %s' % (k, params[k]) for k in keys if k not in VOCAB_DICT]
config_name = 'config___%s.txt' % strftime('%Y-%m-%d_%H-%M-%S', localtime())
with open(os.path.join(self.log_path, config_name), 'w') as f:
f.write('\r\n'.join(txt_dump))
# Save the dictionary instead of the object for maximum flexibility
# Avoid this error:
# https://stackoverflow.com/questions/27732354/unable-to-load-files-using-pickle-and-multiple-modules
with open(os.path.join(self.log_path, 'config.pkl'), 'wb') as f:
pickle.dump(params, f, pickle.HIGHEST_PROTOCOL)
# noinspection PyUnresolvedReferences
def overwrite_safety_check(self, overwrite):
""" Exits if log_path exists but `overwrite` is set to `False`."""
path_exists = os.path.exists(self.log_path)
if path_exists:
if not overwrite:
print('\nINFO: log_path already exists. '
'Set `overwrite` to True? Exiting now.')
raise SystemExit
else:
print('\nINFO: log_path already exists. '
'The directory will be overwritten.')
else:
print('\nINFO: log_path does not exist. '
'The directory will be created.')
os.makedirs(self.log_path)
def load_config(config_filepath):
with open(config_filepath, 'rb') as f:
c_dict = pickle.load(f)
config = Config(**c_dict)
return config
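# Illustrative usage (attribute names other than `log_path` are hypothetical examples):
#   c = Config(log_path='experiments/run_01', lr=1e-4)
#   c.overwrite_safety_check(overwrite=True)   # creates or reuses log_path
#   c.save_config_to_file()                    # writes config___<timestamp>.txt and config.pkl
#   c2 = load_config('experiments/run_01/config.pkl')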
```
#### File: common/mask_prune/masked_convolution.py
```python
import functools
import six
from common.mask_prune.masked_layer_v4 import generate_masks, MaskedDense
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
# from tensorflow.python.eager import context
# from tensorflow.python.framework import constant_op
# from tensorflow.python.framework import dtypes
# from tensorflow.python.framework import function
from tensorflow.python.framework import ops
# from tensorflow.python.framework import sparse_tensor
# from tensorflow.python.framework import tensor_shape
# from tensorflow.python.layers import base
from tensorflow.python.layers import convolutional as convolutional_layers
from tensorflow.python.layers import core as core_layers
# from tensorflow.python.layers import normalization as normalization_layers
# from tensorflow.python.layers import pooling as pooling_layers
from tensorflow.python.ops import array_ops
# from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
# from tensorflow.python.ops import linalg_ops
# from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
# from tensorflow.python.ops import sparse_ops
# from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
# from tensorflow.python.training import moving_averages
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
DATA_FORMAT_NCDHW = 'NCDHW'
DATA_FORMAT_NDHWC = 'NDHWC'
@add_arg_scope
def convolution(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None,
conv_dims=None,
mask_type=None,
mask_init_value=None,
mask_bern_sample=None):
"""Adds an N-D convolution followed by an optional batch_norm layer.
It is required that 1 <= N <= 3.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
  variable would be created and added to the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: A Tensor of rank N+2 of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
rate: A sequence of N positive integers specifying the dilation rate to use
for atrous convolution. Can be a single integer to specify the same
value for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
conv_dims: Optional convolution dimensionality, when set it would use the
corresponding convolution (e.g. 2 for Conv 2D, 3 for Conv 3D, ..). When
leaved to None it would select the convolution dimensionality based on
the input rank (i.e. Conv ND, with N = input_rank - 2).
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
ValueError: Both 'rate' and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope, 'Conv', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if conv_dims is not None and conv_dims + 2 != input_rank:
raise ValueError('Convolution expects input with rank %d, got %d' %
(conv_dims + 2, input_rank))
if input_rank == 3:
layer_class = convolutional_layers.Convolution1D
elif input_rank == 4:
layer_class = convolutional_layers.Convolution2D
elif input_rank == 5:
layer_class = convolutional_layers.Convolution3D
else:
raise ValueError('Convolution not supported for input with rank',
input_rank)
df = ('channels_first'
if data_format and data_format.startswith('NC') else 'channels_last')
layer = layer_class(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
# Insert masks for pruning
layer.build(inputs.get_shape())
gen_mask_kwargs = dict(mask_bern_sample=mask_bern_sample,
mask_type=mask_type,
mask_shape=None,
mask_init_value=mask_init_value,
dtype=inputs.dtype.base_dtype,
get_var_fn=None)
masked_kernel, masked_bias = generate_masks(kernel=layer.kernel, bias=layer.bias, **gen_mask_kwargs)
layer.kernel_copy = layer.kernel
layer.bias_copy = layer.bias
layer.kernel = masked_kernel
layer.bias = masked_bias
outputs = layer.apply(inputs) # Compute
layer.kernel = layer.kernel_copy
layer.bias = layer.bias_copy
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.use_bias:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def convolution1d(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None,
mask_type=None,
mask_init_value=None,
mask_bern_sample=None):
return convolution(inputs,
num_outputs,
kernel_size,
stride,
padding,
data_format,
rate,
activation_fn,
normalizer_fn,
normalizer_params,
weights_initializer,
weights_regularizer,
biases_initializer,
biases_regularizer,
reuse,
variables_collections,
outputs_collections,
trainable,
scope,
conv_dims=1,
mask_type=mask_type,
mask_init_value=mask_init_value,
mask_bern_sample=mask_bern_sample)
convolution1d.__doc__ = convolution.__doc__
@add_arg_scope
def convolution2d(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None,
mask_type=None,
mask_init_value=None,
mask_bern_sample=None):
return convolution(inputs,
num_outputs,
kernel_size,
stride,
padding,
data_format,
rate,
activation_fn,
normalizer_fn,
normalizer_params,
weights_initializer,
weights_regularizer,
biases_initializer,
biases_regularizer,
reuse,
variables_collections,
outputs_collections,
trainable,
scope,
conv_dims=2,
mask_type=mask_type,
mask_init_value=mask_init_value,
mask_bern_sample=mask_bern_sample)
convolution2d.__doc__ = convolution.__doc__
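# Illustrative usage sketch (assumes a TF1 graph context; the accepted `mask_type` strings
# are defined in common.mask_prune.masked_layer_v4 and are an assumption here):
#   net = convolution2d(images, num_outputs=64, kernel_size=[3, 3],
#                       mask_type=<mask type constant>, mask_init_value=1.0,
#                       mask_bern_sample=False)
# Behaviourally this is tf.contrib.layers.conv2d with the kernel (and bias) swapped for the
# masked versions produced by generate_masks during the forward pass, so pruning can zero
# out weights without changing the layer's variable layout.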
@add_arg_scope
def convolution3d(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None,
mask_type=None,
mask_init_value=None,
mask_bern_sample=None):
return convolution(inputs,
num_outputs,
kernel_size,
stride,
padding,
data_format,
rate,
activation_fn,
normalizer_fn,
normalizer_params,
weights_initializer,
weights_regularizer,
biases_initializer,
biases_regularizer,
reuse,
variables_collections,
outputs_collections,
trainable,
scope,
conv_dims=3,
mask_type=mask_type,
mask_init_value=mask_init_value,
mask_bern_sample=mask_bern_sample)
convolution3d.__doc__ = convolution.__doc__
def _model_variable_getter(getter,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
rename=None,
use_resource=None,
**_):
"""Getter that uses model_variable for compatibility with core layers."""
short_name = name.split('/')[-1]
if rename and short_name in rename:
name_components = name.split('/')
name_components[-1] = rename[short_name]
name = '/'.join(name_components)
return variables.model_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=collections,
trainable=trainable,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=getter,
use_resource=use_resource)
def _build_variable_getter(rename=None):
"""Build a model variable getter that respects scope getter and renames."""
# VariableScope will nest the getters
def layer_variable_getter(getter, *args, **kwargs):
kwargs['rename'] = rename
return _model_variable_getter(getter, *args, **kwargs)
return layer_variable_getter
def _add_variable_to_collections(variable, collections_set, collections_name):
"""Adds variable (or all its parts) to all collections with that name."""
collections = utils.get_variable_collections(collections_set,
collections_name) or []
variables_list = [variable]
if isinstance(variable, tf_variables.PartitionedVariable):
variables_list = [v for v in variable]
for collection in collections:
for var in variables_list:
if var not in ops.get_collection(collection):
ops.add_to_collection(collection, var)
@add_arg_scope
def fully_connected(inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None,
mask_type=None,
mask_init_value=None,
mask_bern_sample=None):
"""Adds a fully connected layer.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
None and a `biases_initializer` is provided then a `biases` variable would be
  created and added to the hidden units. Finally, if `activation_fn` is not `None`,
it is applied to the hidden units as well.
Note: that if `inputs` have a rank greater than 2, then `inputs` is flattened
prior to the initial matrix multiply by `weights`.
Args:
inputs: A tensor of at least rank 2 and static value for the last dimension;
i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer or long, the number of output units in the layer.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
The tensor variable representing the result of the series of operations.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
"""
if not isinstance(num_outputs, six.integer_types):
raise ValueError('num_outputs should be int or long, got %s.' %
(num_outputs,))
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope,
'fully_connected', [inputs],
reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
if mask_type is not None:
dense_cls = MaskedDense
mask_kwargs = dict(mask_type=mask_type,
mask_init_value=mask_init_value,
mask_bern_sample=mask_bern_sample)
else:
dense_cls = core_layers.Dense
mask_kwargs = {}
layer = dense_cls(
units=num_outputs,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse,
**mask_kwargs)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
# Apply normalizer function / layer.
if normalizer_fn is not None:
if not normalizer_params:
normalizer_params = {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
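# --- Added usage sketch (not part of the original file) ----------------------
# A minimal, hedged example of wiring the masked `fully_connected` defined
# above. The mask argument values below are illustrative assumptions; consult
# MaskedDense for the values it actually accepts.
def _fully_connected_usage_sketch(inputs):
  """Builds a masked hidden layer followed by a plain linear output layer."""
  net = fully_connected(inputs, num_outputs=256, scope='fc_masked',
                        mask_type='weight', mask_init_value=5.0,
                        mask_bern_sample=False)
  logits = fully_connected(net, num_outputs=10, activation_fn=None,
                           scope='fc_out')
  return logits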
@add_arg_scope
def separable_convolution2d(
inputs,
num_outputs,
kernel_size,
depth_multiplier,
stride=1,
padding='SAME',
data_format=DATA_FORMAT_NHWC,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None,
mask_type=None,
mask_init_value=None,
mask_bern_sample=None):
"""Adds a depth-separable 2D convolution with optional batch_norm layer.
This op first performs a depthwise convolution that acts separately on
channels, creating a variable called `depthwise_weights`. If `num_outputs`
is not None, it adds a pointwise convolution that mixes channels, creating a
variable called `pointwise_weights`. Then, if `normalizer_fn` is None,
it adds bias to the result, creating a variable called 'biases', otherwise,
the `normalizer_fn` is applied. It finally applies an activation function
to produce the end result.
Args:
inputs: A tensor of size [batch_size, height, width, channels].
num_outputs: The number of pointwise convolution output filters. If is
None, then we skip the pointwise convolution stage.
    kernel_size: A list of length 2: [kernel_height, kernel_width] of the
      filters. Can be an int if both values are the same.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `num_filters_in * depth_multiplier`.
stride: A list of length 2: [stride_height, stride_width], specifying the
depthwise convolution stride. Can be an int if both strides are the same.
padding: One of 'VALID' or 'SAME'.
data_format: A string. `NHWC` (default) and `NCHW` are supported.
rate: A list of length 2: [rate_height, rate_width], specifying the dilation
rates for atrous convolution. Can be an int if both rates are the same.
If any value is larger than one, then both stride values need to be one.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
    trainable: Whether or not the variables should be trainable.
scope: Optional scope for variable_scope.
Returns:
A `Tensor` representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
"""
if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
raise ValueError('data_format has to be either NCHW or NHWC.')
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'depthwise_kernel': 'depthwise_weights',
'pointwise_kernel': 'pointwise_weights'
})
gen_mask_kwargs = dict(mask_bern_sample=mask_bern_sample,
mask_type=mask_type,
mask_shape=None,
mask_init_value=mask_init_value,
dtype=inputs.dtype.base_dtype,
get_var_fn=None)
with variable_scope.variable_scope(
scope,
'SeparableConv2d', [inputs],
reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
df = ('channels_first'
if data_format and data_format.startswith('NC') else 'channels_last')
if num_outputs is not None:
# Apply separable conv using the SeparableConvolution2D layer.
layer = convolutional_layers.SeparableConvolution2D(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=utils.two_element_tuple(rate),
activation=None,
depth_multiplier=depth_multiplier,
use_bias=not normalizer_fn and biases_initializer,
depthwise_initializer=weights_initializer,
pointwise_initializer=weights_initializer,
bias_initializer=biases_initializer,
depthwise_regularizer=weights_regularizer,
pointwise_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
# Insert masks for pruning
layer.build(inputs.get_shape())
masked_depthwise_kernel, masked_bias = generate_masks(
kernel=layer.depthwise_kernel, bias=layer.bias, **gen_mask_kwargs)
masked_pointwise_kernel, _ = generate_masks(
kernel=layer.pointwise_kernel, bias=None, **gen_mask_kwargs)
layer.depthwise_kernel_copy = layer.depthwise_kernel
layer.pointwise_kernel_copy = layer.pointwise_kernel
layer.bias_copy = layer.bias
layer.depthwise_kernel = masked_depthwise_kernel
layer.pointwise_kernel = masked_pointwise_kernel
layer.bias = masked_bias
outputs = layer.apply(inputs) # Compute
layer.depthwise_kernel = layer.depthwise_kernel_copy
layer.pointwise_kernel = layer.pointwise_kernel_copy
layer.bias = layer.bias_copy
# Add variables to collections.
_add_variable_to_collections(layer.depthwise_kernel,
variables_collections, 'weights')
_add_variable_to_collections(layer.pointwise_kernel,
variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections,
'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
# Actually apply depthwise conv instead of separable conv.
dtype = inputs.dtype.base_dtype
kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
stride_h, stride_w = utils.two_element_tuple(stride)
num_filters_in = utils.channel_dimension(
inputs.get_shape(), df, min_rank=4)
weights_collections = utils.get_variable_collections(
variables_collections, 'weights')
depthwise_shape = [kernel_h, kernel_w, num_filters_in, depth_multiplier]
depthwise_weights = variables.model_variable(
'depthwise_weights',
shape=depthwise_shape,
dtype=dtype,
initializer=weights_initializer,
regularizer=weights_regularizer,
trainable=trainable,
collections=weights_collections)
masked_depthwise_weights, _ = generate_masks(
kernel=depthwise_weights, bias=None, **gen_mask_kwargs)
strides = [1, 1, stride_h,
stride_w] if data_format.startswith('NC') else [
1, stride_h, stride_w, 1
]
outputs = nn.depthwise_conv2d(
inputs,
masked_depthwise_weights,
strides,
padding,
rate=utils.two_element_tuple(rate),
data_format=data_format)
num_outputs = depth_multiplier * num_filters_in
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
else:
if biases_initializer is not None:
biases_collections = utils.get_variable_collections(
variables_collections, 'biases')
biases = variables.model_variable(
'biases',
shape=[
num_outputs,
],
dtype=dtype,
initializer=biases_initializer,
regularizer=biases_regularizer,
trainable=trainable,
collections=biases_collections)
# TODO: bias is not masked currently
outputs = nn.bias_add(outputs, biases, data_format=data_format)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
# Simple aliases which remove the activation_fn parameter.
elu = functools.partial(fully_connected, activation_fn=nn.elu)
relu = functools.partial(fully_connected, activation_fn=nn.relu)
relu6 = functools.partial(fully_connected, activation_fn=nn.relu6)
linear = functools.partial(fully_connected, activation_fn=None)
# Simple alias.
conv2d = convolution2d
conv3d = convolution3d
# conv2d_in_plane = convolution2d_in_plane
separable_conv2d = separable_convolution2d
```
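A minimal usage sketch of the masked `separable_conv2d` wrapper defined above (added for illustration; the placeholder shape and the mask argument values are assumptions, and the import path depends on the actual package layout):
```python
import tensorflow as tf
# from <package>.masked_layers import separable_conv2d  # adjust to the real module path

images = tf.placeholder(tf.float32, [None, 224, 224, 3])  # NHWC input (assumed shape)
net = separable_conv2d(images, num_outputs=64, kernel_size=3, depth_multiplier=1,
                       stride=2, mask_type='weight', mask_init_value=5.0,
                       mask_bern_sample=False, scope='sep_conv1')
```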
#### File: tf-sparse-captioning/common/rnn_cells_v1.py
```python
import logging
import tensorflow as tf
from common.mask_prune import masked_layer
logger = logging.getLogger(__name__)
def get_rnn_cell(name,
num_units,
reuse,
use_fused_cell=False,
use_masked_cell=False,
use_sparse_cell=False,
masked_cell_kwargs=None):
if use_masked_cell:
assert masked_cell_kwargs is not None
assert len(masked_cell_kwargs) > 0
if not use_fused_cell:
logger.warning('Masked cells always use fused variants.')
if use_sparse_cell:
            logger.warning('Masked cells do not have sparse variants.')
if name == 'LSTM':
return masked_layer.MaskedBasicLSTMCell(num_units=num_units,
reuse=reuse,
**masked_cell_kwargs)
elif name == 'LN_LSTM':
raise ValueError('LayerNormLSTM is not implemented for Supermasks.')
elif name == 'GRU':
return masked_layer.MaskedGRUCell(num_units=num_units,
reuse=reuse,
**masked_cell_kwargs)
else:
raise ValueError('Invalid RNN choice.')
elif use_sparse_cell:
assert not use_masked_cell
if use_fused_cell:
            logger.warning('NOTE: Sparse cells do not have fused variants.')
pass
else:
assert not use_masked_cell
if name == 'LSTM':
if use_fused_cell:
return tf.contrib.rnn.LSTMBlockCell(num_units=num_units,
forget_bias=1.0,
cell_clip=-1,
use_peephole=False,
reuse=reuse,
name='basic_lstm_cell')
else:
return tf.contrib.rnn.BasicLSTMCell(num_units=num_units,
state_is_tuple=True,
reuse=reuse)
elif name == 'LN_LSTM':
if use_fused_cell:
                logger.warning('`LN_LSTM` cells do not have fused variants.')
return tf.contrib.rnn.LayerNormBasicLSTMCell(num_units=num_units, reuse=reuse)
elif name == 'GRU':
if use_fused_cell:
return tf.contrib.rnn.GRUBlockCellV2(num_units=num_units, reuse=reuse, name='gru_cell')
else:
return tf.contrib.rnn.GRUCell(num_units=num_units, reuse=reuse)
# elif name == 'R_GRU':
# return ResidualGRUCell(num_units=num_units, reuse=reuse)
# elif name == 'RRN':
# return ResidualRNNCell(num_units=num_units, reuse=reuse)
# elif name == 'LN_RRN':
# return ResidualLayerNormRNNCell(num_units=num_units, reuse=reuse)
# elif name == 'LN_RRN_B':
# return ResidualLayerNormRNNCellB(num_units=num_units, reuse=reuse)
# elif name == 'preLN_RRN_a1':
# return ResidualPreLayerNormRNNCellA1(num_units=num_units, reuse=reuse)
# elif name == 'preLN_RRN_a2':
# return ResidualPreLayerNormRNNCellA2(num_units=num_units, reuse=reuse)
# elif name == 'preLN_RRN_b1':
# return ResidualPreLayerNormRNNCellB1(num_units=num_units, reuse=reuse)
# elif name == 'preLN_RRN_b2':
# return ResidualPreLayerNormRNNCellB2(num_units=num_units, reuse=reuse)
# elif name == 'preLN_RRN_c1':
# return ResidualPreLayerNormRNNCellC1(num_units=num_units, reuse=reuse)
# elif name == 'preLN_RRN_c2':
# return ResidualPreLayerNormRNNCellC2(num_units=num_units, reuse=reuse)
else:
raise ValueError('Invalid RNN choice.')
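# --- Added usage sketch (not part of the original file) ----------------------
# Hedged example of requesting a masked LSTM cell through get_rnn_cell. The
# contents of masked_cell_kwargs are assumptions about what
# masked_layer.MaskedBasicLSTMCell expects; adjust to its real signature.
def _get_rnn_cell_usage_sketch():
    return get_rnn_cell('LSTM',
                        num_units=512,
                        reuse=False,
                        use_masked_cell=True,
                        masked_cell_kwargs=dict(mask_type='weight',
                                                mask_init_value=5.0,
                                                mask_bern_sample=False))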
class ResidualGRUCell(tf.contrib.rnn.GRUCell):
def call(self, inputs, state):
"""Gated recurrent unit (GRU) with nunits cells."""
gate_inputs = tf.matmul(tf.concat([inputs, state], 1), self._gate_kernel)
gate_inputs = tf.nn.bias_add(gate_inputs, self._gate_bias)
value = tf.nn.sigmoid(gate_inputs)
r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
candidate = tf.matmul(tf.concat([inputs, r_state], 1), self._candidate_kernel)
candidate = tf.nn.bias_add(candidate, self._candidate_bias)
c = self._activation(candidate)
res_h = u * state + (1 - u) * c
new_h = tf.add(state, res_h)
return new_h, new_h
class ResidualRNNCell(tf.contrib.rnn.BasicRNNCell):
def call(self, inputs, state):
"""
Residual Recurrent Network: output = new_state = old_state + act(W * input + U * state + B)
"""
res_state = tf.matmul(tf.concat([inputs, state], 1), self._kernel)
res_state = tf.nn.bias_add(res_state, self._bias)
res_state = self._activation(res_state)
output = tf.add(res_state, state)
return output, output
class ResidualLayerNormRNNCell(tf.contrib.rnn.BasicRNNCell):
def __init__(self,
num_units,
activation=None,
reuse=None,
name=None,
dtype=None,
norm_gain=1.0,
norm_shift=0.0):
super(ResidualLayerNormRNNCell, self).__init__(
num_units=num_units,
activation=activation,
reuse=reuse,
name=name,
dtype=dtype)
self._norm_gain = norm_gain
self._norm_shift = norm_shift
# noinspection PyAttributeOutsideInit
def build(self, inputs_shape):
if inputs_shape[1].value is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
input_depth = inputs_shape[1].value
kernel_s = [input_depth + self._num_units, self._num_units]
self._kernel = self.add_variable("kernel", shape=kernel_s)
self.built = True
# Residual Recurrent Network with Layer Norm
def _norm(self, inp, scope, dtype=tf.float32):
shape = inp.get_shape()[-1:]
gamma_init = tf.constant_initializer(self._norm_gain)
beta_init = tf.constant_initializer(self._norm_shift)
with tf.variable_scope(scope):
# Initialize beta and gamma for use by layer_norm.
tf.get_variable("gamma", shape=shape, initializer=gamma_init, dtype=dtype)
tf.get_variable("beta", shape=shape, initializer=beta_init, dtype=dtype)
normalized = tf.contrib.layers.layer_norm(inp, reuse=True, scope=scope)
return normalized
def call(self, inputs, state):
"""
Residual Recurrent Network with Layer Normalisation:
output = new_state = old_state + act( LN( W * input + U * state ) )
"""
res_state = tf.matmul(tf.concat([inputs, state], 1), self._kernel)
res_state = self._norm(res_state, 'state_LN', dtype=inputs.dtype)
res_state = self._activation(res_state)
output = tf.add(res_state, state)
return output, output
class ResidualLayerNormRNNCellB(ResidualLayerNormRNNCell):
# noinspection PyAttributeOutsideInit
def build(self, inputs_shape):
if inputs_shape[1].value is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
input_depth = inputs_shape[1].value
kernel_s = [input_depth + self._num_units, 2 * self._num_units]
self._kernel = self.add_variable("kernel", shape=kernel_s)
self.built = True
def call(self, inputs, state):
"""
Residual Recurrent Network with Layer Normalisation:
        output = new_state = old_state + sigmoid(g) * act(h),  where [g, h] = LN( W * [input, state] )
"""
res_state = tf.matmul(tf.concat([inputs, state], 1), self._kernel)
res_state = self._norm(res_state, 'state_LN', dtype=inputs.dtype)
res_state = tf.split(value=res_state, num_or_size_splits=2, axis=1)
res_state = tf.multiply(tf.nn.sigmoid(res_state[0]), self._activation(res_state[1]))
output = tf.add(res_state, state)
return output, output
class ResidualPreLayerNormRNNCellA1(ResidualLayerNormRNNCell):
def call(self, inputs, state):
"""
Residual Recurrent Network with pre Layer Normalisation (type A1):
output = new_state = old_state + act( W * LN( [input, state] ) )
"""
res_state = self._norm(tf.concat([inputs, state], 1), 'pre_LN', dtype=inputs.dtype)
res_state = tf.matmul(res_state, self._kernel)
res_state = self._activation(res_state)
output = tf.add(res_state, state)
return output, output
class ResidualPreLayerNormRNNCellA2(ResidualLayerNormRNNCell):
def call(self, inputs, state):
"""
Residual Recurrent Network with pre Layer Normalisation (type A2):
output = new_state = old_state + act( W * input + U * LN( state ) )
"""
res_state = tf.concat([inputs, self._norm(state, 'pre_LN', dtype=inputs.dtype)], 1)
res_state = tf.matmul(res_state, self._kernel)
res_state = self._activation(res_state)
output = tf.add(res_state, state)
return output, output
class ResidualPreLayerNormRNNCellB1(ResidualLayerNormRNNCell):
def call(self, inputs, state):
"""
Residual Recurrent Network with pre Layer Normalisation (type B1):
output = new_state = old_state + W * act( LN( [input, state] ) )
"""
res_state = self._norm(tf.concat([inputs, state], 1), 'pre_LN', dtype=inputs.dtype)
res_state = self._activation(res_state)
res_state = tf.matmul(res_state, self._kernel)
output = tf.add(res_state, state)
return output, output
class ResidualPreLayerNormRNNCellB2(ResidualLayerNormRNNCell):
def call(self, inputs, state):
"""
Residual Recurrent Network with pre Layer Normalisation (type B2):
output = new_state = old_state + W * act( [input, LN( state )] )
"""
res_state = tf.concat([inputs, self._norm(state, 'pre_LN', dtype=inputs.dtype)], 1)
res_state = self._activation(res_state)
res_state = tf.matmul(res_state, self._kernel)
output = tf.add(res_state, state)
return output, output
class ResidualPreLayerNormRNNCellC1(ResidualLayerNormRNNCell):
# noinspection PyAttributeOutsideInit
def build(self, inputs_shape):
if inputs_shape[1].value is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
input_depth = inputs_shape[1].value
kernel_s = [input_depth + self._num_units, 2 * self._num_units]
self._kernel = self.add_variable("kernel", shape=kernel_s)
self.built = True
def call(self, inputs, state):
"""
Residual Recurrent Network with pre Layer Normalisation (type C1):
        output = new_state = old_state + sigmoid(g) * act(h),  where [g, h] = W * LN( [input, state] )
"""
res_state = self._norm(tf.concat([inputs, state], 1), 'pre_LN', dtype=inputs.dtype)
res_state = tf.matmul(res_state, self._kernel)
res_state = tf.split(value=res_state, num_or_size_splits=2, axis=1)
res_state = tf.multiply(tf.nn.sigmoid(res_state[0]), self._activation(res_state[1]))
output = tf.add(res_state, state)
return output, output
class ResidualPreLayerNormRNNCellC2(ResidualLayerNormRNNCell):
# noinspection PyAttributeOutsideInit
def build(self, inputs_shape):
if inputs_shape[1].value is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" % inputs_shape)
input_depth = inputs_shape[1].value
kernel_s = [input_depth + self._num_units, 2 * self._num_units]
self._kernel = self.add_variable("kernel", shape=kernel_s)
self.built = True
def call(self, inputs, state):
"""
Residual Recurrent Network with pre Layer Normalisation (type C2):
        output = new_state = old_state + sigmoid(g) * act(h),  where [g, h] = W * [input, LN( state )]
"""
res_state = tf.concat([inputs, self._norm(state, 'pre_LN', dtype=inputs.dtype)], 1)
res_state = tf.matmul(res_state, self._kernel)
res_state = tf.split(value=res_state, num_or_size_splits=2, axis=1)
res_state = tf.multiply(tf.nn.sigmoid(res_state[0]), self._activation(res_state[1]))
output = tf.add(res_state, state)
return output, output
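# --- Added usage sketch (not part of the original file) ----------------------
# Hedged illustration of driving one of the residual cells above with
# tf.nn.dynamic_rnn; the shapes and unit count are arbitrary example values.
def _residual_cell_usage_sketch(inputs):
    """inputs: float32 tensor of shape [batch, time, features]."""
    cell = ResidualLayerNormRNNCell(num_units=256)
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    return outputs, final_state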
``` |
{
"source": "jiahui890/chexpert-aml",
"score": 3
} |
#### File: src/data/batchloader.py
```python
import pandas as pd
import numpy as np
class BatchLoader:
def __init__(self, dataset, batch_size, return_labels=None, without_image=False, return_X_y=True):
self.dataset = dataset
self.batch_size = batch_size
self.return_labels = return_labels
self.data_size = len(dataset)
self.start = 0
self.without_image = without_image
self.return_X_y = return_X_y
def __iter__(self):
self.start = 0
return self
def __next__(self):
if self.start >= self.data_size:
raise StopIteration
start = self.start
end = min(start + self.batch_size, self.data_size)
all_features = []
image_features = []
all_labels = []
if self.without_image:
            feature_columns = ['Path'] + self.dataset._feature_header.tolist()
if self.return_labels:
labels_columns = self.return_labels
else:
labels_columns = self.dataset._label_header.tolist()
if self.return_X_y:
                features = self.dataset.df.iloc[start:end][feature_columns]
labels = self.dataset.df.iloc[start:end][labels_columns]
self.start = end
return features, labels
else:
                columns = feature_columns + labels_columns
data = self.dataset.df.iloc[start:end][columns]
self.start = end
return data
else:
for i in range(start, end):
features, image_feature, labels = self.dataset[i]
all_features.append(features)
image_features.append(image_feature)
all_labels.append(labels)
x_features, x_image = pd.DataFrame(all_features, columns=self.dataset._feature_header), pd.DataFrame(image_features)
y = pd.DataFrame(all_labels, columns=self.dataset._label_header)
if self.return_labels:
if len(self.return_labels) > 1:
y = y[self.return_labels]
else:
y = y[self.return_labels[0]]
self.start = end
return x_features, x_image, y
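# --- Added usage sketch (not part of the original file) ----------------------
# Hedged example of iterating a BatchLoader. `dataset` is assumed to follow the
# interface used above (indexing yields (features, image_feature, labels) and it
# exposes _feature_header / _label_header); the label name is a placeholder.
def _batchloader_usage_sketch(dataset):
    loader = BatchLoader(dataset, batch_size=32, return_labels=['Cardiomegaly'])
    for x_features, x_image, y in loader:
        print(x_features.shape, x_image.shape, y.shape)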
``` |
{
"source": "jiahuiguo/AIOpenNERO",
"score": 2
} |
#### File: AIOpenNERO/Maze/client.py
```python
from OpenNero import *
# add the key and mouse bindings
from inputConfig import createInputMapping
from common import *
import common.gui as gui
from common.module import openWiki
from Maze.module import getMod, delMod
from Maze.constants import *
from Maze.environment import EgocentricMazeEnvironment, GranularMazeEnvironment
# Agents and the functions that start them
AGENTS = [
('Depth First Search', lambda: getMod().start_dfs(), False),
('Breadth First Search', lambda: getMod().start_bfs(), False),
('A* Search', lambda: getMod().start_astar(), False),
('A* Search with Teleporting', lambda: getMod().start_astar2(), False),
('A* Search with Front', lambda: getMod().start_astar3(), False),
('IDA* Search', lambda:getMod().start_IDAstar(), False),
# ('Random Actions', lambda: getMod().start_random(), False),
# ('Sarsa RL', lambda: getMod().start_sarsa(), True),
# ('Q-Learning RL', lambda: getMod().start_qlearning(), True),
# ('Q-Learning RL (more continuous)', lambda: getMod().start_qlearning(GranularMazeEnvironment), True),
('Q-Learning (Coarse)', lambda: getMod().start_customrl(), True),
('Q-Learning (Fine)', lambda: getMod().start_customrl(GranularMazeEnvironment), True),
('First Person (Coarse)', lambda: getMod().start_fps(), False),
('First Person (Fine)', lambda: getMod().start_fps_granular(), False),
]
class UI:
pass
def CreateGui(guiMan):
guiMan.setTransparency(1.0)
guiMan.setFont("data/gui/fonthaettenschweiler.bmp")
ui = UI()
window_width = 300 # width
control_height = 30 # height
# AGENT SELECTION BOX
x, y = 5, 4 * control_height + 5
w, h = window_width - 15, control_height - 10
ui.agentBoxLabel = gui.create_text(guiMan, 'agentLabel', Pos2i(x,y), Pos2i(3*w/10,h), 'Agent Type:')
ui.agentComboBox = gui.create_combo_box(guiMan, "agentComboBox", Pos2i(x + 5 + 3*w/10, y), Pos2i(7*w/10, h))
for agent_name, agent_function, ee_enabled in AGENTS:
ui.agentComboBox.addItem(agent_name)
# EXPLORE/EXPLOIT TRADE-OFF SLIDER
x, y = 5, 0 * control_height + 5
w, h = window_width - 20, control_height - 5
epsilon_percent = int(INITIAL_EPSILON * 100)
ui.epsilonLabel = gui.create_text(guiMan, 'epsilonLabel', Pos2i(x, y), Pos2i(3*w/10, h), 'Exploit-Explore:')
ui.epsilonScroll = gui.create_scroll_bar(guiMan, 'epsilonScroll', Pos2i(x + 3*w/10 + 5, y), Pos2i(6*w/10, h - 5), True)
ui.epsilonValue = gui.create_text(guiMan, 'epsilonEditBox', Pos2i(x + 9*w/10 + 10, y), Pos2i(w/10, h), str(epsilon_percent))
ui.epsilonScroll.setMax(100)
ui.epsilonScroll.setLargeStep(10)
ui.epsilonScroll.setSmallStep(1)
ui.epsilonScroll.setPos(epsilon_percent)
ui.epsilonScroll.enabled = False
ui.epsilonValue.visible = False
ui.epsilonLabel.visible = False
ui.epsilonScroll.visible = False
getMod().set_epsilon(INITIAL_EPSILON)
ui.epsilonScroll.OnScrollBarChange = epsilon_adjusted(ui)
# START/RESET AND PAUSE/CONTINUE AGENT BUTTONS
x, y = 5, 3 * control_height
w, h = (window_width - 15) / 2, control_height - 5
ui.startAgentButton = gui.create_button(guiMan, 'startAgentButton', Pos2i(x, y), Pos2i(w, h), '')
ui.pauseAgentButton = gui.create_button(guiMan, 'pauseAgentButton', Pos2i(x + w + 5, y), Pos2i(w, h), '')
ui.startAgentButton.text = 'Start'
ui.pauseAgentButton.text = 'Pause'
ui.pauseAgentButton.enabled = False
ui.startAgentButton.OnMouseLeftClick = startAgent(ui)
ui.pauseAgentButton.OnMouseLeftClick = pauseAgent(ui)
# HELP BUTTON
w, h = (window_width - 15) / 2, control_height - 5
x, y = 5, 2 * control_height
ui.helpButton = gui.create_button(guiMan, 'helpButton', Pos2i(x, y), Pos2i(w, h), '')
ui.helpButton.text = 'Help'
ui.helpButton.OnMouseLeftClick = openWiki('MazeMod')
# NEW MAZE BUTTON
x = 10 + w
ui.newMazeButton = gui.create_button(guiMan, 'newMazeButton', Pos2i(x, y), Pos2i(w, h), '')
ui.newMazeButton.text = 'New Maze'
ui.newMazeButton.OnMouseLeftClick = lambda: getMod().generate_new_maze()
# SPEEDUP SLIDER
x, y = 5, 1 * control_height
w, h = window_width - 20, control_height - 5
ui.speedupLabel = gui.create_text(guiMan, 'speedupLabel', Pos2i(x, y), Pos2i(3*w/10, h), 'Speedup:')
ui.speedupScroll = gui.create_scroll_bar(guiMan, 'speedupScroll', Pos2i(x + 5 + 3*w/10, y), Pos2i(3*w/5, h-5), True)
ui.speedupValue = gui.create_text(guiMan, 'speedupEditBox', Pos2i(x + 10 + 9*w/10, y), Pos2i(w/10, h), str(0))
ui.speedupScroll.setMax(100)
ui.speedupScroll.setLargeStep(10)
ui.speedupScroll.setSmallStep(1)
ui.speedupScroll.setPos(0)
getMod().set_speedup(0)
ui.speedupScroll.OnScrollBarChange = speedup_adjusted(ui)
# THE WINDOW THAT HOLDS ALL THE CONTROLS ABOVE
ui.agentWindow = gui.create_window(guiMan, 'agentWindow', Pos2i(10, 10), Pos2i(window_width, 5*control_height+25), 'Agent')
ui.agentWindow.addChild(ui.agentBoxLabel)
ui.agentWindow.addChild(ui.agentComboBox)
ui.agentWindow.addChild(ui.newMazeButton)
ui.agentWindow.addChild(ui.startAgentButton)
ui.agentWindow.addChild(ui.pauseAgentButton)
ui.agentWindow.addChild(ui.helpButton)
ui.agentWindow.addChild(ui.epsilonLabel)
ui.agentWindow.addChild(ui.epsilonScroll)
ui.agentWindow.addChild(ui.epsilonValue)
ui.agentWindow.addChild(ui.speedupLabel)
ui.agentWindow.addChild(ui.speedupScroll)
ui.agentWindow.addChild(ui.speedupValue)
def epsilon_adjusted(ui):
""" generate a closure that will be called whenever the epsilon slider is adjusted """
ui.epsilonValue.text = str(ui.epsilonScroll.getPos())
getMod().set_epsilon(float(ui.epsilonScroll.getPos())/100)
def closure():
"""called whenever the epsilon slider is adjusted"""
ui.epsilonValue.text = str(ui.epsilonScroll.getPos())
getMod().set_epsilon(float(ui.epsilonScroll.getPos())/100)
return closure
def speedup_adjusted(ui):
"""generate a closure that will be called whenever the speedup slider is adjusted"""
ui.speedupValue.text = str(ui.speedupScroll.getPos())
getMod().set_speedup(float(ui.speedupScroll.getPos())/100)
def closure():
"""called whenever the speedup slider is adjusted"""
ui.speedupValue.text = str(ui.speedupScroll.getPos())
getMod().set_speedup(float(ui.speedupScroll.getPos())/100)
return closure
def startAgent(ui):
""" return a function that starts or stops the agent """
def closure():
"""starts or stops the agent"""
if ui.startAgentButton.text == 'Start':
i = ui.agentComboBox.getSelected()
(agent_name, agent_function, ee_enabled) = AGENTS[i]
if ee_enabled:
ui.epsilonScroll.enabled = True
ui.epsilonValue.visible = True
ui.epsilonLabel.visible = True
ui.epsilonScroll.visible = True
print 'Starting', agent_name
agent_function()
ui.pauseAgentButton.text = 'Pause'
ui.pauseAgentButton.enabled = True
ui.startAgentButton.text = 'Reset'
ui.agentComboBox.enabled = False
else:
getMod().stop_maze()
disable_ai()
ui.epsilonScroll.enabled = False
ui.epsilonValue.visible = False
ui.epsilonLabel.visible = False
ui.epsilonScroll.visible = False
get_environment().cleanup()
ui.startAgentButton.text = 'Start'
ui.pauseAgentButton.text = 'Pause'
ui.pauseAgentButton.enabled = False
ui.agentComboBox.enabled = True
return closure
def pauseAgent(ui):
""" return a function that pauses and continues the agent """
def closure():
"""pauses and continues the agent"""
if ui.pauseAgentButton.text == 'Continue':
ui.pauseAgentButton.text = 'Pause'
enable_ai()
else:
ui.pauseAgentButton.text = 'Continue'
disable_ai()
return closure
def recenter(cam):
""" return a function that recenters the camera """
def closure():
"""recenters the camera"""
cam.setPosition(Vector3f(-20, -20, 80))
cam.setTarget(Vector3f(GRID_DX * ROWS / 2, GRID_DY * COLS / 2, 0))
return closure
def ClientMain():
# create fog effect
getSimContext().setFog()
# add a camera
camRotateSpeed = 100
camMoveSpeed = 3000
camZoomSpeed = 500
cam = getSimContext().addCamera(camRotateSpeed, camMoveSpeed, camZoomSpeed)
cam.setFarPlane(5000)
cam.setEdgeScroll(False)
recenter_cam = recenter(cam) # create a closure to avoid having a global variable
recenter_cam() # call the recenter function
# load the background
addObject("data/terrain/Sea.xml", Vector3f(-3000 + NUDGE_X,-3000 + NUDGE_Y,-20))
addObject("data/terrain/IslandTerrain.xml", Vector3f(-1100 + NUDGE_X, -2400 + NUDGE_Y, -17), Vector3f(0,0,-45))
addSkyBox("data/sky/irrlicht2")
# load the maze
getSimContext().addLightSource(Vector3f(-500,-500,1000), 1500)
getMod().add_maze()
# load the GUI
CreateGui(getGuiManager())
# create the key binding
ioMap = createInputMapping()
ioMap.BindKey( "KEY_SPACE", "onPress", recenter_cam )
getSimContext().setInputMapping(ioMap)
```
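The GUI wiring above leans on closure factories (`epsilon_adjusted`, `startAgent`, `pauseAgent`, `recenter`) that capture `ui` or `cam` and hand back zero-argument callbacks. A standalone sketch of that pattern, using hypothetical names only:
```python
def make_callback(ui_state):
    """Return a zero-argument callback that captures ui_state by closure."""
    def closure():
        ui_state['clicks'] = ui_state.get('clicks', 0) + 1
    return closure

ui_state = {}
on_click = make_callback(ui_state)
on_click()
assert ui_state['clicks'] == 1
```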
#### File: AIOpenNERO/Maze/environment.py
```python
import time
from math import *
from copy import copy
from mazer import Maze
from constants import *
from OpenNero import *
from common import *
from collections import deque
from Maze.agent import MoveForwardAndStopAgent
class MazeRewardStructure:
""" This defines the reward that the agents get for running the maze """
def null_move(self, agent):
""" a null move is -1 """
return -1
def valid_move(self, agent):
""" a valid move is just a -1 (to reward shorter routes) """
return -1
def out_of_bounds(self, agent):
""" reward for running out of bounds of the maze (hitting the outer wall) """
return -1
def hit_wall(self, agent):
""" reward for hitting any other wall """
return -1
def goal_reached(self, agent):
""" reward for reaching the goal """
print 'GOAL REACHED!'
# reaching a goal is great!
return 100
def last_reward(self, agent):
""" reward for ending without reaching the goal """
#pos = agent.state.position
#(r,c) = get_environment().maze.xy2rc(pos.x, pos.y)
#print 'EPISODE ENDED AT', r, c
#return 100.0*(r+c)/(ROWS+COLS)
return -1
class MazeEnvironment(Environment):
maze = Maze.generate(ROWS, COLS, GRID_DX, GRID_DY)
"""
The environment is a 2-D maze.
In the discrete version, the agent moves from cell to cell.
* Actions (1 discrete action)
* 0 - move in the +r direction
* 1 - move in the -r direction
* 2 - move in the +c direction
* 3 - move in the -c direction
* 4 - do nothing
* Observations (6 discrete observations)
* o[0] - the current row position
* o[1] - the current col position
* o[2] - obstacle in the +r direction?
* o[3] - obstacle in the -r direction?
* o[4] - obstacle in the +c direction?
* o[5] - obstacle in the -c direction?
"""
def __init__(self):
"""
generate the maze
"""
Environment.__init__(self)
self.rewards = MazeRewardStructure()
action_info = FeatureVectorInfo()
observation_info = FeatureVectorInfo()
reward_info = FeatureVectorInfo()
action_info.add_discrete(0, len(MAZE_MOVES)-1) # select from the moves we can make
observation_info.add_discrete(0, ROWS-1)
observation_info.add_discrete(0, COLS-1)
observation_info.add_discrete(0,1)
observation_info.add_discrete(0,1)
observation_info.add_discrete(0,1)
observation_info.add_discrete(0,1)
reward_info.add_continuous(-100,100)
self.agent_info = AgentInitInfo(observation_info, action_info, reward_info)
self.max_steps = MAX_STEPS
self.speedup = 0
self.marker_map = {} # a map of cells and markers so that we don't have more than one per cell
self.marker_states = {} # states of the marker agents that run for one cell and stop
self.agent_map = {} # agents active on the map
self.agents_at_goal = set() # the set of agents that have reached the goal
        self.handles = {} # handles for the objects used to draw q-values
print 'Initialized MazeEnvironment'
def can_move(self, agent, move):
"""
Figure out if the agent can make the specified move
"""
pos = agent.state.position
(r,c) = MazeEnvironment.maze.xy2rc(pos.x, pos.y)
(dr,dc) = move
        return MazeEnvironment.maze.rc_bounds(r+dr, c+dc) and not MazeEnvironment.maze.is_wall(r,c,dr,dc)
def get_next_rotation(self, move):
"""
Figure out which way the agent should be facing in order to make the specified move
"""
return Vector3f(0,0,degrees(atan2(move[1], move[0])))
def reset(self, agent):
"""
reset the environment to its initial state
"""
print 'Episode %d complete' % agent.episode
(x,y) = MazeEnvironment.maze.rc2xy(0,0)
pos = Vector3f(x,y,0)
agent.state.position = pos
agent.state.rotation = Vector3f(0,0,0)
return True
def get_agent_info(self, agent):
return self.agent_info
def set_animation(self, agent, animation):
"""
set the agent's animation sequence to that named by animation
"""
if agent.state.animation != animation:
agent.state.animation = animation
delay = getSimContext().delay
animation_speed = agent.state.animation_speed
if delay > 0:
agent.state.animation_speed = animation_speed / delay
def set_position(self, agent, new_pose):
"""
set the next agent position to new_pose = (r,c,h)
"""
new_r, new_c, new_heading = new_pose
(new_x, new_y) = MazeEnvironment.maze.rc2xy(new_r, new_c)
pos = agent.state.position
if pos.x == new_x and pos.y == new_y:
self.set_animation(agent, 'stand')
else:
pos.x, pos.y = new_x, new_y
agent.state.position = pos
self.set_animation(agent, 'run')
def step(self, agent, action):
"""
Discrete version
"""
(r,c) = MazeEnvironment.maze.xy2rc(agent.state.position.x, agent.state.position.y)
# check if we reached the goal
if r == ROWS - 1 and c == COLS - 1 and not isinstance(agent, MoveForwardAndStopAgent):
self.agents_at_goal.add(agent)
return self.rewards.goal_reached(agent)
# check if we ran out of time
elif agent.step >= self.max_steps - 1 and not isinstance(agent, MoveForwardAndStopAgent):
return self.rewards.last_reward(agent)
if not self.agent_info.actions.validate(action):
# check if we ran out of time
if agent.step >= self.max_steps - 1 and not isinstance(agent, MoveForwardAndStopAgent):
return self.rewards.last_reward(agent)
# check if we reached the goal
elif r == ROWS - 1 and c == COLS - 1 and not isinstance(agent, MoveForwardAndStopAgent):
self.agents_at_goal.add(agent)
return self.rewards.goal_reached(agent)
else:
self.set_animation(agent, 'stand')
return self.rewards.null_move(agent)
# check for null action
a = int(round(action[0]))
if a == MAZE_NULL_MOVE:
self.set_animation(agent, 'stand')
return self.rewards.null_move(agent)
# calculate new pose
(dr, dc) = MAZE_MOVES[a]
new_r, new_c = r + dr, c + dc
next_rotation = self.get_next_rotation((dr,dc))
new_heading = next_rotation.z
rotation = agent.state.rotation
prev_heading = rotation.z
# if the heading is right
if new_heading == prev_heading:
# check if we are in bounds
if not MazeEnvironment.maze.rc_bounds(new_r, new_c):
self.set_animation(agent, 'jump')
return self.rewards.out_of_bounds(agent)
# check if there is a wall in the way
elif MazeEnvironment.maze.is_wall(r,c,dr,dc):
self.set_animation(agent, 'jump')
return self.rewards.hit_wall(agent)
# if the heading is right, change the position
self.set_position(agent, (new_r, new_c, new_heading))
else:
# if the heading is not right, just change the heading and run the
# rotation animation:
# "run" "stand" "turn_r_xc" "turn_l_xc" "turn_r_lx" "turn_l_lx"
# "turn_r_xxx" "turn_l_xxx" "pick_up" "put_down"
# "hold_run" "hold_stand" "hold_r_xc" "hold_l_xc"
# "hold_turn_r_lx" "hold_turn_l_lx" "hold_turn_r_xxx" "hold_turn_l_xxx"
# "jump" "hold_jump"
if new_heading - prev_heading > 0:
if new_heading - prev_heading > 90:
new_heading = prev_heading + 90
self.set_animation(agent, 'turn_l_lx')
else:
if new_heading - prev_heading < 90:
new_heading = prev_heading - 90
self.set_animation(agent, 'turn_r_lx')
rot0 = copy(agent.state.rotation)
rot0.z = new_heading
agent.state.rotation = rot0
agent.skip() # don't get a new action, just retry this one
return self.rewards.valid_move(agent)
# check if we reached the goal
if new_r == ROWS - 1 and new_c == COLS - 1 and not isinstance(agent, MoveForwardAndStopAgent):
self.agents_at_goal.add(agent)
return self.rewards.goal_reached(agent)
# check if we ran out of time
elif agent.step >= self.max_steps - 1 and not isinstance(agent, MoveForwardAndStopAgent):
return self.rewards.last_reward(agent)
# return a normal reward
return self.rewards.valid_move(agent)
def teleport(self, agent, r, c):
"""
move the agent to a new location
"""
(x,y) = MazeEnvironment.maze.rc2xy(r,c)
pos = agent.state.position
pos.x = x
pos.y = y
agent.state.position = pos
agent.teleport()
def sense(self, agent, obs):
"""
Discrete version
"""
p0 = agent.state.position
(r,c) = MazeEnvironment.maze.xy2rc(p0.x, p0.y)
obs[0] = r
obs[1] = c
offset = GRID_DX/10.0
for i, (dr, dc) in enumerate(MAZE_MOVES):
direction = Vector3f(dr, dc, 0)
ray = (p0 + direction * offset, p0 + direction * GRID_DX)
# we only look for objects of type 1, which means walls
objects = getSimContext().findInRay(ray[0], ray[1], 1, False)
obs[2 + i] = int(len(objects) > 0)
return obs
def is_episode_over(self, agent):
pos = agent.state.position
(r,c) = MazeEnvironment.maze.xy2rc(pos.x, pos.y)
if self.max_steps != 0 and agent.step >= self.max_steps:
return True
elif agent.__class__.__name__ == 'MoveForwardAndStopAgent':
return False
elif r == ROWS-1 and c == COLS-1:
if hasattr(agent, "highlight_path"):
disable_ai() # stop running
agent.highlight_path() # mark the final path
self.set_animation(agent, 'stand') # stop animation
return True
else:
return False
def mark_maze(self, r, c, marker):
""" mark a maze cell with the specified color """
# remove the previous object, if necessary
if (r,c) in self.marker_map:
removeObject(self.marker_map[(r,c)])
# remember the ID of the marker
self.marker_map[(r,c)] = addObject(marker, Vector3f( (r+1) * GRID_DX, (c+1) * GRID_DY, -1))
def mark_maze_blue(self, r, c):
self.mark_maze(r,c,"data/shapes/cube/BlueCube.xml")
def mark_maze_green(self, r, c):
self.mark_maze(r,c,"data/shapes/cube/GreenCube.xml")
def mark_maze_yellow(self, r, c):
self.mark_maze(r,c,"data/shapes/cube/YellowCube.xml")
def mark_maze_white(self, r, c):
self.mark_maze(r,c,"data/shapes/cube/WhiteCube.xml")
def unmark_maze_agent(self, r, c):
""" mark a maze cell with the specified color """
# remove the previous object, if necessary
if (r,c) in self.agent_map:
removeObject(self.agent_map[(r,c)])
del self.marker_states[self.agent_map[(r,c)]]
del self.agent_map[(r,c)]
def mark_maze_agent(self, agent, r1, c1, r2, c2):
""" mark a maze cell with an agent moving from r1, c1 to r2, c2 """
# remove the previous object, if necessary
self.unmark_maze_agent(r2,c2)
# add a new marker object
position = Vector3f( (r1+1) * GRID_DX, (c1+1) * GRID_DY, 0)
rotation = self.get_next_rotation( (r2-r1, c2-c1) )
agent_id = addObject(agent, position = position, rotation = rotation)
self.marker_states[agent_id] = ((r1, c1), (r2, c2))
self.agent_map[(r2,c2)] = agent_id
def cleanup(self):
# remove the marker blocks
for id in self.marker_map.values():
removeObject(id)
self.marker_map = {}
for id in self.agent_map.values():
removeObject(id)
for o in self.handles:
for a in range(len(self.handles[o])):
h = self.handles[o][a]
if h is not None:
removeObject(h)
self.handles = {}
self.agent_map = {}
def draw_q(self, o, Q):
aa = Q[o] # get the action values
min_a = min(aa) # minimum of the action values
aa = [a - min_a for a in aa] # shift to make all >= 0
sum_a = sum(aa) # sum of action values
if sum_a != 0: aa = [a/sum_a for a in aa] # normalize
if o not in self.handles: # create handles list
self.handles[o] = [None, None, None, None, None]
(x, y) = self.maze.rc2xy(o[0], o[1])
for a, (dr, dc) in enumerate(MAZE_MOVES):
p = Vector3f(x, y, 0)
value = aa[a] * 5
if dr == 0: dr = 0.1
else: p.x += dr*value
if dc == 0: dc = 0.1
else: p.y += dc*value
if value == 0 and self.handles[o][a] is not None:
# don't show 0 values
removeObject(self.handles[o][a])
self.handles[o][a] = None
elif self.handles[o][a] is None:
# create the cube to show the value
self.handles[o][a] = \
addObject("data/shapes/cube/BlueCube.xml", \
p, Vector3f(0, 0, 0), scale=Vector3f(0.5, 0.5, 0.5))
else:
# move the existing cube
getSimContext().setObjectPosition(self.handles[o][a], p)
center = len(MAZE_MOVES)
if self.handles[o][center] is None:
self.handles[o][center] = \
addObject("data/shapes/cube/YellowCube.xml", \
Vector3f(x, y, 0), \
scale=Vector3f(0.6,0.6,0.6))
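# --- Added illustration (not part of the original file) ----------------------
# The discrete observation vector documented above is laid out as
# [row, col, wall_in_+r, wall_in_-r, wall_in_+c, wall_in_-c]; the values below
# are hypothetical sensed values, shown only to make the layout concrete.
def _observation_layout_example():
    names = ['row', 'col', 'wall_+r', 'wall_-r', 'wall_+c', 'wall_-c']
    example_obs = [2, 3, 1, 0, 0, 1]
    return dict(zip(names, example_obs))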
class EgocentricMazeEnvironment(MazeEnvironment):
"""
The environment is a 2-D maze.
    This is a slightly more continuous version
* Actions (1 discrete action)
* 0 - move forward by WALK_BY
* 1 - turn CW by TURN_BY and move forward by WALK_BY
* 2 - turn CCW by TURN_BY and move forward by WALK_BY
* 3 - move backward by WALK_BY
    * Observations (4 + CONT_MAZE_N_RAYS continuous observations)
        * o[0] - the current x position
        * o[1] - the current y position
        * o[2] - the distance to the target
        * o[3] - the angle to the target
        * o[4] - o[7] - ray sensors cast around the agent (starting with straight ahead and going clockwise)
"""
def __init__(self, granularity = 1):
"""
Constructor
@param granularity - the number of steps it takes to cover the whole WALK_BY distance
"""
MazeEnvironment.__init__(self)
action_info = FeatureVectorInfo() # describes the actions
observation_info = FeatureVectorInfo() # describes the observations
reward_info = FeatureVectorInfo() # describes the rewards
action_info.add_discrete(0, CONT_MAZE_N_ACTIONS-1) # action
( (xmin, ymin), (xmax, ymax) ) = MazeEnvironment.maze.xy_limits()
print 'MAZE LIMITS', ( (xmin, ymin), (xmax, ymax) )
observation_info.add_continuous(xmin, xmax) # x-coord
observation_info.add_continuous(ymin, ymax) # y-coord
observation_info.add_continuous(0, CONT_MAZE_MAX_DISTANCE ) # distance to target
observation_info.add_continuous(-180, 180) # angle to target
for i in range(CONT_MAZE_N_RAYS):
observation_info.add_continuous(0,1) # ray sensor
reward_info.add_continuous(-100,100)
self.agent_info = AgentInitInfo(observation_info, action_info, reward_info)
self.granularity = granularity
self.max_steps = MAX_STEPS * 15 * self.granularity # allow 15 * g actions per cell
print 'Initialized EgocentricMazeEnvironment'
def reset(self, agent):
"""
reset the environment to its initial state
"""
(x,y) = MazeEnvironment.maze.rc2xy(0,0)
agent.state.position = Vector3f(x,y,0)
agent.state.rotation = Vector3f(0,0,0)
self.agents_at_goal.discard(agent)
print 'Episode %d complete' % agent.episode
return True
def step(self, agent, action):
"""
Continuous version
"""
if not self.agent_info.actions.validate(action):
if agent.step >= self.max_steps - 1:
return self.max_steps * self.rewards.last_reward(agent)
else:
return self.rewards.null_move(agent)
a = int(round(action[0]))
pos = agent.state.position # current position
rot = agent.state.rotation # current rotation
(x,y,heading) = (pos.x, pos.y, rot.z) # current pose
new_x, new_y, new_heading = x, y, heading # pose to be computed
dx, dy = 0, 0
if a == CONT_MAZE_ACTIONS['CW']: # clockwise
new_heading = wrap_degrees(heading, -CONT_MAZE_TURN_BY)
elif a == CONT_MAZE_ACTIONS['CCW']: # counter-clockwise
new_heading = wrap_degrees(heading, CONT_MAZE_TURN_BY)
elif a == CONT_MAZE_ACTIONS['FWD']: # forward
dx = CONT_MAZE_WALK_BY * cos(radians(new_heading)) / self.granularity
dy = CONT_MAZE_WALK_BY * sin(radians(new_heading)) / self.granularity
elif a == CONT_MAZE_ACTIONS['BCK']: # backward
dx = -CONT_MAZE_WALK_BY * cos(radians(new_heading)) / self.granularity
dy = -CONT_MAZE_WALK_BY * sin(radians(new_heading)) / self.granularity
if dx != 0 or dy != 0:
new_x, new_y = x + dx, y + dy # this is where we are moving to
print 'move test', x, y, dx, dy,
# leave a buffer of space to check in the right direction
if dx != 0: dx = dx * 1.1 # leave a buffer for testing
if dy != 0: dy = dy * 1.1 # leave a buffer for testing
test_x, test_y = x + dx, y + dy # this is to check if there are walls there
print dx, dy, test_x, test_y
if not MazeEnvironment.maze.xy_bounds(test_x, test_y):
print "could not move, out of bounds"
self.set_animation(agent, 'stand')
return self.rewards.out_of_bounds(agent)
elif not MazeEnvironment.maze.xy_valid(x,y,test_x,test_y):
print "could not move, hit a wall"
self.set_animation(agent, 'stand')
return self.rewards.hit_wall(agent)
if new_x != x or new_y != y:
self.set_animation(agent, 'run')
# move the agent
agent.state.rotation = Vector3f(0,0,new_heading)
pos0 = agent.state.position
pos0.x = new_x
pos0.y = new_y
agent.state.position = pos0
(new_r, new_c) = MazeEnvironment.maze.xy2rc(new_x, new_y)
if new_r == ROWS - 1 and new_c == COLS - 1:
self.agents_at_goal.add(agent)
return self.max_steps * self.rewards.goal_reached(agent)
elif agent.step >= self.max_steps - 1:
return self.max_steps * self.rewards.last_reward(agent)
return self.rewards.valid_move(agent)
def sense(self, agent, obs):
"""
Continuous version
"""
pos = agent.state.position # current position
rot = agent.state.rotation # current rotation
(x,y,heading) = (pos.x, pos.y, rot.z) # current pose
obs[0] = x # the agent can observe its position
obs[1] = y # the agent can observe its position
(tx, ty) = MazeEnvironment.maze.rc2xy(ROWS-1,COLS-1) # coordinates of target
tx, ty = tx - x, ty - y # line to target
obs[2] = hypot(tx, ty) # distance to target
angle_to_target = degrees(atan2(ty, tx)) # angle to target from +x, in degrees
angle_to_target = wrap_degrees(angle_to_target, -heading) # heading to target relative to us
obs[3] = angle_to_target
d_angle = 360.0 / CONT_MAZE_N_RAYS
p0 = agent.state.position
for i in range(CONT_MAZE_N_RAYS):
angle = radians(heading + i * d_angle)
direction = Vector3f(cos(angle), sin(angle), 0) # direction of ray
ray = (p0, p0 + direction * GRID_DX)
# we only look for objects of type 1, which means walls
result = getSimContext().findInRay(ray[0], ray[1], 1, False)
# we can now return a continuous sensor since FindInRay returns the hit point
if len(result) > 0:
(sim, hit) = result
len1 = (ray[1] - ray[0]).getLength() # max extent
len2 = (hit - ray[0]).getLength() # actual extent
if len1 != 0:
obs[4+i] = len2/len1
else:
obs[4+i] = 0
else:
obs[4+i] = 1
if not self.agent_info.sensors.validate(obs):
            print 'ERROR: incorrect observation!', obs
print ' should be:', self.agent_info.sensors
return obs
class GranularMazeEnvironment(MazeEnvironment):
"""
The environment is a 2-D maze.
    In this granular version, the agent moves within cells in steps of GRID_DX / granularity.
* Actions (1 discrete action)
* 0 - move in the +r direction
* 1 - move in the -r direction
* 2 - move in the +c direction
* 3 - move in the -c direction
    * Observations (6 observations)
* o[0] - the current x position
* o[1] - the current y position
* o[2] - obstacle in the +r direction?
* o[3] - obstacle in the -r direction?
* o[4] - obstacle in the +c direction?
* o[5] - obstacle in the -c direction?
"""
def __init__(self, granularity = 8):
"""
generate the maze
"""
MazeEnvironment.__init__(self)
action_info = FeatureVectorInfo() # describes the actions
observation_info = FeatureVectorInfo() # describes the observations
reward_info = FeatureVectorInfo() # describes the rewards
action_info.add_discrete(0, CONT_MAZE_N_ACTIONS-1) # action
( (xmin, ymin), (xmax, ymax) ) = MazeEnvironment.maze.xy_limits()
print 'MAZE LIMITS', ( (xmin, ymin), (xmax, ymax) )
observation_info.add_continuous(xmin, xmax) # x-coord
observation_info.add_continuous(ymin, ymax) # y-coord
for (dr, dc) in MAZE_MOVES:
observation_info.add_continuous(0,1) # movement sensor
reward_info.add_continuous(-100,100)
self.agent_info = AgentInitInfo(observation_info, action_info, reward_info)
self.max_steps = MAX_STEPS * (granularity * 2) # allow 2x granularity steps per cell
self.granularity = granularity
print 'Initialized GranularMazeEnvironment'
def reset(self, agent):
"""
reset the environment to its initial state
"""
(x,y) = MazeEnvironment.maze.rc2xy(0,0)
agent.state.position = Vector3f(x,y,0)
agent.state.rotation = Vector3f(0,0,0)
self.agents_at_goal.discard(agent)
print 'Episode %d complete' % agent.episode
return True
def set_position(self, agent, new_pose):
"""
set the next agent position to new_pose = (x,y,h)
"""
new_x, new_y, new_heading = new_pose
pos = agent.state.position
if pos.x == new_x and pos.y == new_y:
self.set_animation(agent, 'stand')
else:
pos.x, pos.y = new_x, new_y
agent.state.position = pos
self.set_animation(agent, 'run')
def step(self, agent, action):
"""
Granular version
"""
(x,y) = agent.state.position.x, agent.state.position.y
(r,c) = MazeEnvironment.maze.xy2rc(x, y)
a = int(action[0])
# check if we reached the goal
if r == ROWS - 1 and c == COLS - 1:
self.agents_at_goal.add(agent)
return self.rewards.goal_reached(agent) * self.max_steps
# check if we ran out of time
elif agent.step >= self.max_steps - 1:
return self.rewards.last_reward(agent) * self.max_steps
        # check if the action is valid
if not self.agent_info.actions.validate(action):
# check if we ran out of time
if agent.step >= self.max_steps - 1:
return self.rewards.last_reward(agent) * self.max_steps
# check if we reached the goal
elif r == ROWS - 1 and c == COLS - 1:
self.agents_at_goal.add(agent)
return self.rewards.goal_reached(agent) * self.max_steps
else:
self.set_animation(agent, 'stand')
return self.rewards.null_move(agent)
# calculate new pose
(dr, dc) = MAZE_MOVES[a]
dx, dy = dr * GRID_DX / self.granularity, dc * GRID_DY / self.granularity
new_x, new_y = x + dx, y + dy
(new_r, new_c) = MazeEnvironment.maze.xy2rc(new_x, new_y)
next_rotation = self.get_next_rotation((dx,dy))
new_heading = next_rotation.z
rotation = agent.state.rotation
prev_heading = rotation.z
# if the heading is right
if new_heading == prev_heading:
# check if we are in bounds
if not MazeEnvironment.maze.xy_bounds(new_x, new_y):
self.set_animation(agent, 'jump')
return self.rewards.out_of_bounds(agent)
# check if there is a wall in the way
elif MazeEnvironment.maze.is_wall(r,c, new_r - r, new_c - c):
self.set_animation(agent, 'jump')
return self.rewards.hit_wall(agent)
# if the heading is right, change the position
self.set_position(agent, (new_x, new_y, new_heading))
else:
# if the heading is not right, just change the heading and run the
# rotation animation:
# "run" "stand" "turn_r_xc" "turn_l_xc" "turn_r_lx" "turn_l_lx"
# "turn_r_xxx" "turn_l_xxx" "pick_up" "put_down"
# "hold_run" "hold_stand" "hold_r_xc" "hold_l_xc"
# "hold_turn_r_lx" "hold_turn_l_lx" "hold_turn_r_xxx" "hold_turn_l_xxx"
# "jump" "hold_jump"
if new_heading - prev_heading > 0:
if new_heading - prev_heading > 90:
new_heading = prev_heading + 90
self.set_animation(agent, 'turn_l_lx')
else:
if new_heading - prev_heading < 90:
new_heading = prev_heading - 90
self.set_animation(agent, 'turn_r_lx')
rot0 = copy(agent.state.rotation)
rot0.z = new_heading
agent.state.rotation = rot0
agent.skip() # don't get a new action, just retry this one
return self.rewards.valid_move(agent)
# check if we reached the goal
if new_r == ROWS - 1 and new_c == COLS - 1:
self.agents_at_goal.add(agent)
return self.rewards.goal_reached(agent) * self.max_steps
# check if we ran out of time
elif agent.step >= self.max_steps - 1:
return self.rewards.last_reward(agent) * self.max_steps
# return a normal reward
return self.rewards.valid_move(agent)
def sense(self, agent, obs):
"""
Compute granular maze environment sensor observations.
Fill in the obs vector with 6 values, where:
obs[0] is the x position
obs[1] is the y position
        obs[2:6] are "free space" sensors in +r, -r, +c, -c
directions, with 0.0 meaning "wall is very near" and 1.0
meaning "free as far as the sensor can see"
"""
pos = agent.state.position # current position
rot = agent.state.rotation # current rotation
(x,y,heading) = (pos.x, pos.y, rot.z) # current pose
obs[0] = x # the agent can observe its position
obs[1] = y # the agent can observe its position
# current position of the agent
p0 = agent.state.position
# calculate free space in the possible move directions
for i, (dr, dc) in enumerate(MAZE_MOVES):
direction = Vector3f(dr, dc, 0)
p1 = p0 + direction * GRID_DX / self.granularity
hit_result = getSimContext().findInRay(p0, p1, 1, True)
if len(hit_result) > 0:
# if the ray hit a wall, return what fraction was clear
(sim, hit) = hit_result
len1 = (p1 - p0).getLength() # max extent
len2 = (hit - p0).getLength() # actual extent
obs[2 + i] = len2/len1 if len1 != 0 else 0.0
else:
# if the ray did not hit a wall, return 1.0
obs[2 + i] = 1.0
if not self.agent_info.sensors.validate(obs):
            print 'ERROR: incorrect observation!', obs
print ' should be:', self.agent_info.sensors
return obs
def draw_q(self, o, Q):
aa = Q[o] # get the action values
min_a = min(aa) # minimum of the action values
aa = [a - min_a for a in aa] # shift to make all >= 0
sum_a = sum(aa) # sum of action values
if sum_a != 0: aa = [a/sum_a for a in aa] # normalize
if o not in self.handles: # create handles list
self.handles[o] = [None, None, None, None, None]
x, y = o[0], o[1]
for a, (dr, dc) in enumerate(MAZE_MOVES):
p = Vector3f(x, y, 0)
value = aa[a] * 10 / self.granularity
p.x += dr*value
p.y += dc*value
if self.handles[o][a] is None:
self.handles[o][a] = \
addObject("data/shapes/cube/BlueCube.xml", \
p, Vector3f(0, 0, 0), \
scale=Vector3f(0.5, 0.5, 0.5)/self.granularity)
else:
getSimContext().setObjectPosition(self.handles[o][a], p)
center = len(MAZE_MOVES)
if self.handles[o][center] is None:
self.handles[o][center] = \
addObject("data/shapes/cube/YellowCube.xml", \
Vector3f(x, y, 0), \
scale=Vector3f(0.4,0.4,0.4)/self.granularity)
def wrap_degrees(a,da):
a2 = a + da
if a2 > 180:
a2 = -180 + (a2 % 180)
elif a2 < -180:
a2 = 180 - (abs(a2) % 180)
return a2
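# --- Added illustration (not part of the original file) ----------------------
# Expected behaviour of wrap_degrees on a few sample inputs.
def _wrap_degrees_examples():
    assert wrap_degrees(10, 20) == 30      # no wrapping needed
    assert wrap_degrees(170, 20) == -170   # wraps past +180
    assert wrap_degrees(-170, -20) == 170  # wraps past -180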
``` |
{
"source": "JiahuiLei/Pix2Surf",
"score": 3
} |
#### File: Pix2Surf/config/cmdline.py
```python
import argparse
def merge_from_cmdline(cfg):
"""
Merge some usually changed settings from cmd
"""
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default='None', help="Choose config file")
parser.add_argument('--gpu', type=str, default=None, help="Set GPU device, type=str, e.g: --gpu=0,1,2 ")
parser.add_argument('--logdir', type=str, default=None, help='log dir name in $project/log/XXXX/.... e.g. exp')
cmd = vars(parser.parse_args())
    if cmd['config'] != 'None':
cfg.CONFIG_FILE = cmd['config']
if cmd['gpu'] is not None:
cfg.GPU = [int(id) for id in cmd['gpu'].split(",")]
if cmd['logdir'] is not None:
cfg.LOG_DIR = cmd['logdir']
return cfg
```
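A quick standalone sketch of the GPU-string parsing that `merge_from_cmdline` performs (added for illustration; the flag value is an arbitrary example):
```python
# What --gpu=0,1,2 provides on the command line:
gpu_arg = "0,1,2"
gpu_ids = [int(i) for i in gpu_arg.split(",")]
assert gpu_ids == [0, 1, 2]
```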
#### File: Pix2Surf/config/__init__.py
```python
from .default import get_default_cfg
from .cmdline import merge_from_cmdline
from .startup import startup
from os.path import join
def get_cfg(filename=None, interactive=True):
"""
config priority: cmd>cfg-file>default
:param filename: filename XXX in $project/config/config_files/XXX.yaml
:return: a frozen configuration
"""
# get default
cfg = get_default_cfg()
# get config file parameter, will do another time later to overwrite local file config
cfg = merge_from_cmdline(cfg)
# merge local file, cmd>file
    if cfg.CONFIG_FILE != 'None':
filepath = join(cfg.ROOT_DIR, 'config', 'config_files', cfg.CONFIG_FILE)
cfg.merge_from_file(filepath)
elif filename is not None:
filepath = join(cfg.ROOT_DIR, 'config', 'config_files', filename)
cfg.merge_from_file(filepath)
# merge cmd line
cfg = merge_from_cmdline(cfg)
# startup
startup(cfg, interactive)
cfg.freeze()
return cfg
```
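A hedged usage sketch of `get_cfg` (added for illustration): the YAML file name is a placeholder, and the import assumes `$project` is on `PYTHONPATH` so that the `config` package above is importable.
```python
from config import get_cfg

cfg = get_cfg(filename='example_experiment.yaml', interactive=False)  # placeholder file name
print(cfg.GPU, cfg.LOG_DIR)
```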
#### File: Pix2Surf/core/evaluation.py
```python
import numpy as np
from sklearn.neighbors import NearestNeighbors
from multiprocessing.dummy import Pool as ThreadPool
import cv2 as cv
from copy import deepcopy
def eval_warp(batch, method_name, nox_gt_key, nox_pred_key):
"""
Parameters
----------
batch: the batch in post-processing, must be a multi-view like batch
method_name: string of name e.g 'pix2surf-sv' that will be written to the xls
nox_gt_key: the name of nox-gt; WARNING! the nocs-map should have white background
nox_pred_key: see above, for prediction
Returns: the batch that added the 'metric-report' the report xls
-------
"""
    # multi-thread eval (initial motivation was slow computations such as normal estimation)
n_batch = batch[nox_gt_key][0].shape[0]
n_view = len(batch[nox_gt_key])
# make the parameter tuple list for multi-thread
TASKS, RESULTS = [], []
id = 0
for bdx in range(n_batch):
for vdx in range(n_view):
arg = [id]
id += 1
_nocs_v_gt = batch[nox_gt_key][vdx][bdx].detach().cpu().numpy().transpose(1, 2, 0)
arg.append(_nocs_v_gt)
_nox_v_pred = batch[nox_pred_key][vdx][bdx].detach().cpu().numpy().transpose(1, 2, 0)
arg.append(_nox_v_pred)
TASKS.append(tuple(arg))
assert id == n_batch * n_view
with ThreadPool(max(id, 16)) as pool:
_results = [pool.apply_async(eval_thread, t) for t in TASKS]
RESULTS = [r.get() for r in _results]
ordered_results = []
for idx in range(id):
for r in RESULTS:
if r[0] == idx:
ordered_results.append(r)
assert len(ordered_results) == len(RESULTS)
accuracy_list, correspondence_error_list, discontinuity_score_list = [], [], []
id = 0
for bdx in range(n_batch):
_cd, _corr_l2, _disconti_score = 0, 0, 0
for vdx in range(n_view):
r = ordered_results[id]
id += 1
# for different viewpoint of each object, average across views
_cd += r[1] / n_view
_corr_l2 += r[2] / n_view
_disconti_score += r[3] / n_view
accuracy_list.append(_cd)
correspondence_error_list.append(_corr_l2)
discontinuity_score_list.append(_disconti_score)
# make xls
report_xls = dict()
if 'metric-report' in batch.keys():
report_xls = batch['metric-report']
report_xls[method_name + '-accuracy'] = accuracy_list
report_xls[method_name + '-2D3D-correspondence'] = correspondence_error_list
report_xls[method_name + '-discontinuity-score'] = discontinuity_score_list
batch['metric-report'] = report_xls
return batch
def eval_thread(bid, nocs_gt, nocs_pred):
"""
Parameters
----------
    bid: batch idx, used to reassemble the results in the right order
    nocs_gt: numpy, [H,W,3]
    nocs_pred: numpy, [H,W,3]
    Returns
    -------
    a tuple (bid, accuracy, correspondence error, continuity score)
"""
nocs_gt_pts = nocs_gt.reshape(-1, 3)
mask_gt_pts = np.sum(nocs_gt_pts, axis=1) < 3.0
nocs_gt_pts = nocs_gt_pts[mask_gt_pts, :]
nocs_pred_pts = nocs_pred.reshape(-1, 3)
mask_pred_pts = np.sum(nocs_pred_pts, axis=1) < 3.0
nocs_pred_pts = nocs_pred_pts[mask_pred_pts, :]
# based on xyz, find nearest neighbor in each other
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(nocs_pred_pts)
gt_nn_distance, gt_nn_index_of_pred = neigh.kneighbors(nocs_gt_pts, return_distance=True)
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(nocs_gt_pts)
pred_nn_distance, pred_nn_index_of_gt = neigh.kneighbors(nocs_pred_pts, return_distance=True)
gt_nn_index_of_pred = gt_nn_index_of_pred.squeeze(1)
pred_nn_index_of_gt = pred_nn_index_of_gt.squeeze(1)
# Compute 2 way Chamfer distance
cd_dist_gt2pred = np.sum((nocs_gt_pts - nocs_pred_pts[gt_nn_index_of_pred, :]) ** 2, axis=1)
cd_dist_pred2gt = np.sum((nocs_pred_pts - nocs_gt_pts[pred_nn_index_of_gt, :]) ** 2, axis=1)
visible_2way_chamfer_distance = cd_dist_gt2pred.mean() + cd_dist_pred2gt.mean()
# Compute Correspondence error
mask_gt = (nocs_gt.sum(2) < 3.0).astype(np.float)
mask_pred = (nocs_pred.sum(2) < 3.0).astype(np.float)
mask_intersection = mask_gt * mask_pred # H,W,1
xyz_dif = np.sum((deepcopy(nocs_gt) - deepcopy(nocs_pred)) ** 2, axis=2)
xyz_correspondence_distance = (xyz_dif * mask_intersection).sum() / (mask_intersection.sum() + 1e-5)
# Compute Discontinuity score
pair_dist_gt = compute_pixel_neighbor_diff(deepcopy(nocs_gt))
pair_dist_pred = compute_pixel_neighbor_diff(deepcopy(nocs_pred))
k1, k2 = 30, 20
th = 0.05
gt_hist_normalized, gt_count = pair_dist2hist(pair_dist_gt, k1, k2, th)
pred_hist_normalized, pred_count = pair_dist2hist(pair_dist_pred, k1, k2, th)
large_dist_pairs_conv = np.sum((gt_count[k1:] / (gt_count[k1:].sum() + 1e-5)) * \
(pred_count[k1:] / (pred_count[k1:].sum() + 1e-5)))
# Cross-View Consistency Error is computed outside here, in the network
return (
bid,
visible_2way_chamfer_distance, # accuracy
xyz_correspondence_distance, # correspondence
large_dist_pairs_conv, # continuity
)
def compute_pixel_neighbor_diff(nocs_map):
mask = (np.sum(nocs_map, axis=2) < 3.0).astype(np.float)
kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
mask_smaller = cv.erode(mask, kernel, iterations=1)
d_r = deepcopy(nocs_map)
d_r[:, :-1, :] -= deepcopy(nocs_map)[:, 1:, :]
d_r = np.sqrt(np.sum(d_r ** 2, axis=2)) * mask_smaller
d_l = deepcopy(nocs_map)
d_l[:, 1:, :] -= deepcopy(nocs_map)[:, :-1, :]
d_l = np.sqrt(np.sum(d_l ** 2, axis=2)) * mask_smaller
d_d = deepcopy(nocs_map)
d_d[:-1, :, :] -= deepcopy(nocs_map)[1:, :, :]
d_d = np.sqrt(np.sum(d_d ** 2, axis=2)) * mask_smaller
d_u = deepcopy(nocs_map)
d_u[1:, :, :] -= deepcopy(nocs_map)[:-1, :, :]
d_u = np.sqrt(np.sum(d_u ** 2, axis=2)) * mask_smaller
select_mask = mask_smaller.reshape(-1) > 0.5
dr = d_r.reshape(-1)[select_mask]
dl = d_l.reshape(-1)[select_mask]
du = d_u.reshape(-1)[select_mask]
dd = d_d.reshape(-1)[select_mask]
distance = np.concatenate((dr, dl, du, dd), 0)
return distance
def pair_dist2hist(pair_dist, k1=20, k2=20, th=0.04):
bin1 = np.linspace(0, th, k1)
bin2 = np.linspace(th, np.sqrt(3), k2)
bin = np.concatenate((bin1, bin2, np.array([np.sqrt(3)])), 0)
    count_list = []
    for idx in range(k1 + k2):
        mask = (pair_dist >= bin[idx]).astype(np.float) * (pair_dist < bin[idx + 1]).astype(np.float)
        count_list.append(mask.sum())
    count = np.array(count_list)
hist = count / (len(pair_dist) + 1e-5)
return hist, count
```
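A self-contained sanity check for the discontinuity statistics above, run on a synthetic NOCS map (the random map and the import path are purely illustrative):
```python
import numpy as np
from core.evaluation import compute_pixel_neighbor_diff, pair_dist2hist  # assumed import path

# Fake 64x64 NOCS map: white (1,1,1) background, random foreground square with channel sum < 3.
nocs = np.ones((64, 64, 3), dtype=np.float32)
nocs[16:48, 16:48, :] = np.random.rand(32, 32, 3) * 0.9

pair_dist = compute_pixel_neighbor_diff(nocs)        # neighbor distances of eroded-mask pixels
hist, count = pair_dist2hist(pair_dist, k1=30, k2=20, th=0.05)
print(pair_dist.shape, count.sum(), round(hist.sum(), 3))  # hist sums to ~1 over the populated bins
```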
#### File: core/models/modelbase_v2.py
```python
import torch.nn as nn
from torch.nn import init
import torch
class ModelBase(object):
def __init__(self, cfg):
"""
        Key components:
            gpu_list: GPU ids from cfg
            optimizer
            network: one network per model; the loss is computed inside each network on different gpus
        Key functions to implement:
            _preprocess
            _predict
            _postprocess
            train/vali_batch: composed of the three methods above
"""
self.model_name = 'Model Base'
# init device
self.gpu_list = cfg.GPU
self.__dataparallel_flag__ = False # flag: whether use DataParallel
# init optimizer
self.lr = cfg.LR
self.optimizer = None
# init network
self.network = None
# clarify output meanings
self.output_info_dict = {
'metric': list(),
}
# models core methods #########################################################
def _preprocess(self, batch):
"""
get a batch from dataset.__getitem__ following a collate_fn
Pass batch dict from CPU to GPU
"""
device = torch.device("cuda")
for k in batch.keys():
batch[k] = batch[k].to(device)
return batch
def _predict(self, batch, is_train=True):
"""
forward through the network,
"""
nn_out = self.network(batch['to-nn'], is_train=is_train)
for k, v in nn_out.items():
batch[k] = v
return batch
def _postprocess(self, batch):
"""
Post process, get multi GPU batch dicts, process on one gpu or cpu
:return: a dictionary
"""
return batch
def train_batch(self, batch):
batch = self._preprocess(batch)
self.set_train()
self.zero_grad()
batch = self._predict(batch)
batch = self._postprocess(batch)
if self.__dataparallel_flag__:
for k in batch.keys():
if k.endswith('loss') or k in self.output_info_dict['metric']:
if isinstance(batch[k], list):
                        for idx in range(len(batch[k])):
batch[k][idx] = batch[k][idx].mean()
else:
batch[k] = batch[k].mean()
batch['batch-loss'].backward()
self.optimizer.step()
return batch
def vali_batch(self, batch):
batch = self._preprocess(batch)
self.set_eval()
with torch.no_grad():
batch = self._predict(batch, is_train=False)
batch = self._postprocess(batch)
if self.__dataparallel_flag__:
for k in batch.keys():
if k.endswith('loss') or k in self.output_info_dict['metric']:
if isinstance(batch[k], list):
                        for idx in range(len(batch[k])):
batch[k][idx] = batch[k][idx].mean()
else:
batch[k] = batch[k].mean()
return batch
def test_batch(self, batch):
raise NotImplementedError
def zero_grad(self):
self.optimizer.zero_grad()
# models I/O methods ##########################################################
# load, save, init refers to https://github.com/xiumingzhang/GenRe-ShapeHD
def load_model(self, loadpath, current_model_state='cpu', load_optimizer=False, strict=True):
assert current_model_state in ['cpu'], "Model Loading Error!"
print("Load Model from: ")
print(loadpath)
device = torch.device(current_model_state)
if isinstance(loadpath, list):
for path in loadpath:
checkpoint = torch.load(path, map_location=device)
if self.__dataparallel_flag__:
for k in self.network.module.network_dict.keys():
if k in checkpoint.keys():
self.network.module.network_dict[k].load_state_dict(checkpoint[k], strict=strict)
if load_optimizer:
                        self.optimizer.load_state_dict(checkpoint['optimizer'])
else:
for k in self.network.network_dict.keys():
if k in checkpoint.keys():
self.network.network_dict[k].load_state_dict(checkpoint[k], strict=strict)
if load_optimizer:
                        self.optimizer.load_state_dict(checkpoint['optimizer'])
else:
path = loadpath
checkpoint = torch.load(path, map_location=device)
if self.__dataparallel_flag__:
for k in self.network.module.network_dict.keys():
if k in checkpoint.keys():
self.network.module.network_dict[k].load_state_dict(checkpoint[k])
if load_optimizer:
                    self.optimizer.load_state_dict(checkpoint['optimizer'])
else:
for k in self.network.network_dict.keys():
if k in checkpoint.keys():
self.network.network_dict[k].load_state_dict(checkpoint[k])
if load_optimizer:
self.optimizer.load_state_dict(checkpoint['optimizer'])
return checkpoint
def save_model(self, filepath, additional_dict=None):
save_dict = {}
# add additional info
if additional_dict is not None:
for k, v in additional_dict.items():
save_dict[k] = v
# save all self.__networks_dict param
if self.__dataparallel_flag__:
for net_name in self.network.module.network_dict.keys():
save_dict[net_name] = self.network.module.network_dict[net_name].state_dict()
else:
for net_name in self.network.network_dict.keys():
save_dict[net_name] = self.network.network_dict[net_name].state_dict()
save_dict['optimizer'] = self.optimizer.state_dict()
# save
torch.save(save_dict, filepath)
def init_weight(self, net=None, init_type='kaiming', init_param=0.02):
"""
Use nn.Module.apply(fn) to recursively apply initialization to the Model
        If a network is passed in, initialize that network;
        otherwise, initialize self.network
"""
def init_func(m, init_type=init_type):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_param)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_param)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orth':
init.orthogonal_(m.weight.data, gain=init_param)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm') != -1:
try:
init.normal_(m.weight.data, 1.0, init_param)
init.constant_(m.bias.data, 0.0)
except:
try:
init.normal_(m.bn.weight.data, 1.0, init_param)
init.constant_(m.bn.bias.data, 0.0)
except:
raise ValueError("Can't Initialize BN")
if net is not None:
net.apply(init_func)
else:
self.network.apply(init_func)
# models state methods ########################################################
def to_gpus(self):
        # move the network to the GPU(s); wrap with DataParallel when more than one GPU id is configured
device = torch.device("cuda")
if len(self.gpu_list) > 1:
self.network = nn.DataParallel(self.network)
self.__dataparallel_flag__ = True
self.network.to(device)
def set_train(self):
"""
Set models's network to train mode
"""
self.network.train()
def set_eval(self):
"""
Set models's network to eval mode
WARNING! THIS METHOD IS VERY IMPORTANT DURING VALIDATION BECAUSE OF PYTORCH BN MECHANISM
"""
self.network.eval()
class Network(nn.Module):
def __init__(self):
super(Network, self).__init__()
self.network_dict = None
def forward(self, *input):
raise NotImplementedError
```
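The initialization helper can be smoke-tested in isolation; `cfg` here is a throwaway namespace carrying only the two fields `__init__` reads, not the real project config, and the import path is an assumption:
```python
from types import SimpleNamespace
import torch.nn as nn
from core.models.modelbase_v2 import ModelBase  # assumed import path

base = ModelBase(SimpleNamespace(GPU=[0], LR=1e-3))       # only GPU and LR are touched in __init__
toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
base.init_weight(net=toy, init_type='kaiming')            # recursively applies init_func via .apply()
print(toy[1].bias.abs().sum().item())                     # BatchNorm bias is reset to zero
```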
#### File: core/models/neurips_baseline.py
```python
from .modelbase_v2 import ModelBase
from .modelbase_v2 import Network as NetBase
from core.net_bank.xnocs_segnet import SegNet
from core.net_bank.loss import MaskL2Loss
import os
import torch
from torch import nn
class Model(ModelBase):
def __init__(self, cfg):
super(Model, self).__init__(cfg)
self.name = 'neurips-nox'
self.cfg = cfg
self.network = Network()
self.optimizer = torch.optim.Adam(params=self.network.parameters(), lr=self.lr,
betas=(self.cfg.ADAM_BETA1, self.cfg.ADAM_BETA2))
self.resume = cfg.RESUME
if self.resume:
self.resume_id = cfg.RESUME_EPOCH_ID
load_path = os.path.join(cfg.ROOT_DIR, 'log', cfg.LOG_DIR, 'model',
'epoch_%d' % cfg.RESUME_EPOCH_ID + '.model')
self.load_model(loadpath=load_path, current_model_state='cpu')
elif cfg.MODEL_INIT_PATH != ['None']:
self.load_model(loadpath=cfg.MODEL_INIT_PATH)
self.to_gpus()
# config output meaning
self.output_info_dict = {
'metric': ['batch-loss', 'reg-v-loss', 'reg-x-loss', 'mask-v-loss', 'mask-x-loss'],
'image': ['rgb-v', 'nox-v-gt', 'nox-x-gt', 'nox-v-pred', 'nox-x-pred', 'mask-v', 'mask-x'],
}
def _preprocess(self, in_batch):
device = torch.device("cuda")
nox_v = in_batch['nox-v'].float().permute(0, 3, 1, 2).to(device) / 255.0 # [0,1]
nox_x = in_batch['nox-x'].float().permute(0, 3, 1, 2).to(device) / 255.0 # [0,1]
rgb_v = in_batch['rgb-v'].float().permute(0, 3, 1, 2).to(device) / 255.0 # [0,1]
rgb_x = in_batch['rgb-x'].float().permute(0, 3, 1, 2).to(device) / 255.0 # [0,1]
mask_v = in_batch['mask-v'].float().permute(0, 3, 1, 2).to(device) # 0,1
mask_x = in_batch['mask-x'].float().permute(0, 3, 1, 2).to(device) # 0,1
pack = {'rgb-v': rgb_v, 'rgb-x': rgb_x, 'nox-v': nox_v, 'nox-x': nox_x,
'mask-v': mask_v, 'mask-x': mask_x}
return {'to-nn': pack, 'meta-info': in_batch['info']}
class Network(NetBase):
def __init__(self):
super(Network, self).__init__()
net_dict = {
'seg-net': SegNet(out_channels=10)
}
self.network_dict = nn.ModuleDict(net_dict)
# loss
self.cls_criterion = nn.CrossEntropyLoss() # not masked, for all pixel
self.ml2_criterion = MaskL2Loss() # masked for nocs regression
def forward(self, pack, is_train=True):
batch = dict()
# make cnn prediction
pred = self.network_dict['seg-net'](pack['rgb-v'])
pred_nox_v = pred[:, :3, :, :]
pred_nox_x = pred[:, 3:6, :, :]
pred_score_v = pred[:, 6:8, :, :]
pred_score_x = pred[:, 8:10, :, :]
mask1c_v = pack['mask-v'][:, 0, :, :].unsqueeze(1).detach()
mask_v_loss = self.cls_criterion(pred_score_v, mask1c_v.squeeze(1).long().detach())
pred_mask_v = torch.argmax(pred_score_v, dim=1, keepdim=True).float()
mask1c_x = pack['mask-x'][:, 0, :, :].unsqueeze(1).detach()
mask_x_loss = self.cls_criterion(pred_score_x, mask1c_x.squeeze(1).long().detach())
pred_mask_x = torch.argmax(pred_score_x, dim=1, keepdim=True).float()
reg_v_loss = self.ml2_criterion(pred_nox_v, pack['nox-v'], mask1c_v, True)
reg_x_loss = self.ml2_criterion(pred_nox_x, pack['nox-x'], mask1c_x, True)
# summary
batch['batch-loss'] = ((reg_v_loss + reg_x_loss) * 0.3 + (mask_v_loss + mask_x_loss) * 0.7).unsqueeze(0)
batch['reg-v-loss'] = reg_v_loss.detach().unsqueeze(0)
batch['reg-x-loss'] = reg_x_loss.detach().unsqueeze(0)
batch['mask-v-loss'] = mask_v_loss.detach().unsqueeze(0)
batch['mask-x-loss'] = mask_x_loss.detach().unsqueeze(0)
batch['nox-v-gt'] = pack['nox-v'] * pack['mask-v'] + (1.0 - pack['mask-v'])
batch['nox-x-gt'] = pack['nox-x'] * pack['mask-x'] + (1.0 - pack['mask-x'])
batch['nox-v-pred'] = pred_nox_v * pred_mask_v + (1.0 - pred_mask_v)
batch['nox-x-pred'] = pred_nox_x * pred_mask_x + (1.0 - pred_mask_x)
batch['mask-v'] = pred_mask_v
batch['mask-x'] = pred_mask_x
batch['rgb-v'] = pack['rgb-v']
return batch
```
#### File: core/models/pix2surf_mv.py
```python
from .modelbase_v2 import ModelBase
from .modelbase_v2 import Network as NetBase
from core.models.utils import *
from core.net_bank.pix2surf_cnn import SegNetGroup
from core.net_bank.mlp import NOCS_AMP_MLP
from core.net_bank.loss import MaskL2Loss
import os
import torch
from torch import nn
class Model(ModelBase):
def __init__(self, cfg):
super(Model, self).__init__(cfg)
self.name = 'pix2surf-mv'
self.cfg = cfg
# register key component
self.network = Network()
self.optimizer = torch.optim.Adam(params=self.network.parameters(), lr=self.lr,
betas=(self.cfg.ADAM_BETA1, self.cfg.ADAM_BETA2))
# initialize models
self.resume = cfg.RESUME
if self.resume:
self.resume_id = cfg.RESUME_EPOCH_ID
load_path = os.path.join(cfg.ROOT_DIR, 'log', cfg.LOG_DIR, 'model',
'epoch_%d' % cfg.RESUME_EPOCH_ID + '.model')
self.load_model(loadpath=load_path, current_model_state='cpu', strict=False)
elif cfg.MODEL_INIT_PATH != ['None']:
self.load_model(loadpath=cfg.MODEL_INIT_PATH, strict=False)
self.to_gpus()
# config output meaning
self.output_info_dict = {
'metric': ['batch-loss', 'reg-v-loss', 'reg-x-loss', 'mask-v-loss', 'mask-x-loss',
'sp-loss', 'crr-xyz-loss'],
'image': ['rgb-v', 'nox-v-gt', 'mask-v', 'sp-image'] + # sp-image is a nocs map
['unwrapped-chart', 'unwrapped-chart-uni', 'learned-chart'],
# learned-chart is color coded uv in image space, unwrapped-chart is in uv space visualization
}
def _preprocess(self, in_batch):
return load_multiview_batch(in_batch)
class Network(NetBase):
def __init__(self):
super(Network, self).__init__()
net_dict = {
'seg-net': SegNetGroup(out_channels=10, additional=2),
'global-code': nn.Sequential(
nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=0, stride=1),
nn.BatchNorm2d(512),
nn.ELU(),
nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, padding=0, stride=1),
nn.MaxPool2d(kernel_size=(3, 6))
),
'mlp': NOCS_AMP_MLP(latent_dim=1024, amp_dim=256, p_in=2, c_out=3)
}
self.network_dict = nn.ModuleDict(net_dict)
# loss
self.cls_criterion = nn.CrossEntropyLoss() # not masked, for all pixel
self.ml2_criterion = MaskL2Loss()
self.sgmd = nn.Sigmoid()
# visualization resolution
self.vis_chart_res = 256
self.vis_chart_container = torch.zeros(1, 3, self.vis_chart_res, self.vis_chart_res)
def forward(self, pack, is_train=True):
batch = dict()
n_batch = pack['nox-v'][0].shape[0]
n_view = len(pack['rgb-v'])
code_list = list()
pred_list, featuremap_list = self.network_dict['seg-net'](pack['rgb-v'], return_code=True)
for fm in featuremap_list: # do for each view
code_list.append(self.network_dict['global-code'](fm).reshape(n_batch, -1, 1).contiguous())
global_z = torch.max(torch.cat(code_list, 2), dim=2).values.contiguous()
# prepare gather container
pred_nox_v_list, pred_nox_x_list, pred_mask_v_list, pred_mask_x_list = [], [], [], []
pred_xyz_list, pred_uv_list = [], []
learned_chart, unwrapped_chart_list, sp_image = [], [], []
reg_v_loss, reg_x_loss, mask_v_loss, mask_x_loss, sp_loss = 0, 0, 0, 0, 0
for ii in range(n_view):
mask_v = pack['mask-v'][ii]
mask_x = pack['mask-x'][ii]
# make cnn prediction
pred = pred_list[ii]
pred_nox_v = pred[:, :3, :, :]
pred_nox_x = pred[:, 3:6, :, :]
pred_score_v = pred[:, 6:8, :, :]
pred_score_x = pred[:, 8:10, :, :]
learned_uv = self.sgmd(pred[:, 10:12, :, :])
# make NOCS-regression branch
mask1c_v = mask_v[:, 0, :, :].unsqueeze(1).detach()
mask_v_loss = mask_v_loss + self.cls_criterion(pred_score_v, mask1c_v.squeeze(1).long().detach()) / n_view
pred_mask_v = torch.argmax(pred_score_v, dim=1, keepdim=True).float()
mask1c_x = mask_x[:, 0, :, :].unsqueeze(1).detach()
mask_x_loss = mask_x_loss + self.cls_criterion(pred_score_x, mask1c_x.squeeze(1).long().detach()) / n_view
pred_mask_x = torch.argmax(pred_score_x, dim=1, keepdim=True).float()
reg_v_loss = reg_v_loss + self.ml2_criterion(pred_nox_v, pack['nox-v'][ii], mask1c_v, True) / n_view
reg_x_loss = reg_x_loss + self.ml2_criterion(pred_nox_x, pack['nox-x'][ii], mask1c_x, True) / n_view
# make mlp prediction
eachview_z = code_list[ii].squeeze(2)
latent_dim = eachview_z.shape[1]
c = torch.cat((eachview_z[:, :latent_dim // 2], global_z[:, latent_dim // 2:]), dim=1)
queried_uv = query_feature(learned_uv, pack['uv-v'][ii])
pred_xyz = self.network_dict['mlp'](c, queried_uv, unique_code=True)
pred_xyz = self.sgmd(pred_xyz)
sp_loss = sp_loss + self.ml2_criterion(pred_xyz, pack['uv-xyz-v'][ii], pack['uv-mask-v'][ii]) / n_view
# vis
unwrapped_chart = self.vis_chart_container.repeat(n_batch, 1, 1, 1).cuda()
unwrapped_chart = spread_feature(unwrapped_chart, learned_uv, pack['rgb-v'][ii], pack['mask-v'][ii])
vis_sampled_xyz = torch.ones_like(pack['rgb-v'][ii]).float()
uv = pack['uv-v'][ii]
uv[:, 0, :, :] = uv[:, 0, :, :] * mask1c_v.shape[2]
uv[:, 1, :, :] = uv[:, 1, :, :] * mask1c_v.shape[3]
uv = uv.long()
idx = uv[:, 0, :, :] * mask1c_v.shape[3] + uv[:, 1, :, :] # B,N,1
idx = idx.permute(0, 2, 1) # B,1,N
vis_sampled_xyz = vis_sampled_xyz.reshape(n_batch, 3, -1) # B,3,R*R
vis_sampled_xyz = vis_sampled_xyz.scatter(dim=2, index=idx.repeat(1, 3, 1), src=pred_xyz.squeeze(3))
vis_sampled_xyz = vis_sampled_xyz.reshape(n_batch, 3, mask1c_v.shape[2], mask1c_v.shape[3])
# gather
pred_nox_v_list.append(pred_nox_v)
pred_nox_x_list.append(pred_nox_x)
pred_mask_v_list.append(pred_mask_v)
pred_mask_x_list.append(pred_mask_x)
pred_xyz_list.append(pred_xyz)
pred_uv_list.append(queried_uv)
unwrapped_chart_list.append(unwrapped_chart)
learned_chart.append(learned_uv.repeat(1, 2, 1, 1)[:, :3, :, :] * mask1c_v + (1.0 - mask1c_v))
sp_image.append(vis_sampled_xyz)
# make naive multi-view constrain:
_p1_list, _p2_list, _m_list = [], [], []
_uv1_list, _uv2_list = [], []
for base_view_id in range(len(pack['crr-idx-mtx'])):
for query_view_id in range(len(pack['crr-idx-mtx'][base_view_id])):
base_pc = pred_xyz_list[base_view_id]
query_pc = pred_xyz_list[base_view_id + query_view_id + 1]
base_uv = pred_uv_list[base_view_id]
query_uv = pred_uv_list[base_view_id + query_view_id + 1]
pair_idx = pack['crr-idx-mtx'][base_view_id][query_view_id].squeeze(3)
paired_pc_from_base_to_query = torch.gather(base_pc.squeeze(3), dim=2,
index=pair_idx.repeat(1, 3, 1)).unsqueeze(3)
paired_uv_from_base_to_query = torch.gather(base_uv.squeeze(3), dim=2,
index=pair_idx.repeat(1, 2, 1)).unsqueeze(3)
_p1_list.append(paired_pc_from_base_to_query)
_p2_list.append(query_pc)
_uv1_list.append(paired_uv_from_base_to_query)
_uv2_list.append(query_uv)
_m_list.append(pack['crr-mask-mtx'][base_view_id][query_view_id])
crr_xyz_loss = self.ml2_criterion(torch.cat(_p1_list, dim=2).contiguous(),
torch.cat(_p2_list, dim=2).contiguous(),
torch.cat(_m_list, dim=2).contiguous(), detach=False)
crr_uv_loss = self.ml2_criterion(torch.cat(_uv1_list, dim=2).contiguous(),
torch.cat(_uv2_list, dim=2).contiguous(),
torch.cat(_m_list, dim=2).contiguous(), detach=False) # not used
# summary
batch['batch-loss'] = (((reg_v_loss + reg_x_loss) * 0.1 + (mask_v_loss + mask_x_loss) * 0.1) * 0.1 + \
sp_loss * 0.9 + crr_xyz_loss * 0.9).unsqueeze(0) # + crr_uv_loss * 0.1
batch['reg-v-loss'] = reg_v_loss.detach().unsqueeze(0)
batch['reg-x-loss'] = reg_x_loss.detach().unsqueeze(0)
batch['mask-v-loss'] = mask_v_loss.detach().unsqueeze(0)
batch['mask-x-loss'] = mask_x_loss.detach().unsqueeze(0)
batch['sp-loss'] = sp_loss.detach().unsqueeze(0)
batch['crr-xyz-loss'] = crr_xyz_loss.detach().unsqueeze(0)
batch['nox-v-gt'] = torch.cat([p * m + (1.0 - m) for p, m in zip(pack['nox-v'], pack['mask-v'])], 3)
batch['nox-x-gt'] = torch.cat([p * m + (1.0 - m) for p, m in zip(pack['nox-x'], pack['mask-x'])], 3)
batch['mask-v'] = torch.cat(pred_mask_v_list, 3)
batch['mask-x'] = torch.cat(pred_mask_x_list, 3)
batch['rgb-v'] = torch.cat(pack['rgb-v'], 3)
batch['sp-image'] = torch.cat(sp_image, 3)
batch['unwrapped-chart'] = torch.cat(unwrapped_chart_list, 3)
# vis all learned chart in one unified uv space, curious to see what happens
vis_nsc_uni = unwrapped_chart_list[0]
for new_scatter in unwrapped_chart_list:
vis_nsc_uni = torch.max(new_scatter, vis_nsc_uni)
batch['unwrapped-chart-uni'] = vis_nsc_uni
batch['learned-chart'] = torch.cat(learned_chart, 3)
return batch
```
#### File: models/utils/render_utils.py
```python
import torch
import cv2 as cv
import numpy as np
from sklearn.neighbors import NearestNeighbors
from .model_utils import spread_feature
def optimize_image_mask(image_mask, sp_image, nK=4, th=1e-2):
mask_pts = image_mask.reshape(-1)
xyz_pts = sp_image.reshape(-1, 3)
xyz_pts = xyz_pts[mask_pts > 0.5, :]
Neighbors = NearestNeighbors(n_neighbors=nK + 1, algorithm='kd_tree').fit(xyz_pts)
nn_dist, nn_idx = Neighbors.kneighbors(xyz_pts) # N,nK
nn_dist = nn_dist[:, 1:]
valid = (np.sum((nn_dist < th).astype(np.float), axis=1) == nK).astype(np.float)
optimized_mask = image_mask.reshape(-1)
optimized_mask[mask_pts > 0.5] = valid
optimized_mask = optimized_mask.reshape(image_mask.shape[0], image_mask.shape[1])
return optimized_mask
def generate_final_mask(image_learned_uv, image_mask,
image_resize_factor, mask_container_low_res, final_gim):
"""
Post Process Algorithm to generate mask of the unwrapped chart
Parameters
----------
image_learned_uv: [H,W,2]
image_mask: [H,W]
image_resize_factor: float
mask_container_low_res: a predefined tensor with intermediate low resolution
final_gim: a predefined tensor with target high resolution
"""
# resize (larger) rgb and uv with Bi-linear up-sampling
resized_uv = cv.resize(image_learned_uv, dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_LINEAR)
resized_mask = cv.resize(image_mask, dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_LINEAR)
resized_mask = (resized_mask > 0.5).astype(np.float)
# use gradient to remove the edge
discontinuous_mask_u = cv.Laplacian(image_learned_uv[..., 0], ddepth=cv.CV_32F) # small gradient map
discontinuous_mask_v = cv.Laplacian(image_learned_uv[..., 1], ddepth=cv.CV_32F) # small gradient map
    # use the max and min of the latent u and v to find the threshold
u_max = (image_learned_uv[..., 0] * image_mask).max()
v_max = (image_learned_uv[..., 1] * image_mask).max()
u_min = (image_learned_uv[..., 0] * image_mask + (1.0 - image_mask)).min()
v_min = (image_learned_uv[..., 1] * image_mask + (1.0 - image_mask)).min()
u_th = (u_max - u_min) / 30
v_th = (v_max - v_min) / 30
discontinuous_mask_u = (discontinuous_mask_u > u_th).astype(np.float) * image_mask
discontinuous_mask_v = (discontinuous_mask_v > v_th).astype(np.float) * image_mask
discontinuous_mask = ((discontinuous_mask_u + discontinuous_mask_v) > 0).astype(np.float)
# use the mask to remove the boundary
boundary_recovery_mask = (cv.Laplacian(image_mask, ddepth=cv.CV_32F) > 0.01).astype(np.float)
discontinuous_mask = discontinuous_mask * (1.0 - boundary_recovery_mask)
resized_discontinuous_mask = cv.resize(discontinuous_mask,
dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_NEAREST)
# make the small mask & texture
high_res_mask = torch.from_numpy(resized_mask * (1.0 - resized_discontinuous_mask)) \
.unsqueeze(0).unsqueeze(0).cuda().float() # 1,1,R,R
high_res_uv = torch.from_numpy(resized_uv).permute(2, 0, 1).unsqueeze(0).cuda().float()
low_res_mask = mask_container_low_res.cuda()
low_res_mask = spread_feature(low_res_mask, high_res_uv, high_res_mask, high_res_mask)
# use close to remove the holes in small mask and then resize
low_res_mask_closed = low_res_mask.detach().cpu().squeeze(0).squeeze(0).numpy() # R,R
close_k_size = int(final_gim.shape[2] / 100)
close_kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (close_k_size, close_k_size))
final_mask_np = cv.resize(low_res_mask_closed, dsize=(final_gim.shape[2],
final_gim.shape[2]),
interpolation=cv.INTER_NEAREST) # R,R,3
final_mask_np = (final_mask_np > 0).astype(np.float)
final_mask_np = cv.morphologyEx(final_mask_np, cv.MORPH_OPEN, close_kernel)
return final_mask_np
def generate_texture(sp_image, full_gim, image_rgb, image_mask, final_mask_np, final_res, nK=4, th=1e-2):
# prepare root and query points form the image and from the high-res chart
root_xyz_np = sp_image.reshape(-1, 3) # H*W,3
root_rgb_np = image_rgb.reshape(-1, 3) # H*W,3
_image_mask = image_mask.reshape(-1) # H*W
    root_xyz_np = root_xyz_np[_image_mask > 0.5, :]  # M,3 [0,1]
root_rgb_np = root_rgb_np[_image_mask > 0.5, :] # M,3 [0,1]
query_xyz_np = full_gim.reshape(-1, 3) # R*R,3
_final_mask_np = final_mask_np.reshape(-1) # R*R
query_xyz_np = query_xyz_np[_final_mask_np > 0.5, :] # N,3 [0,1]
# finding nearest root pixel points
Neighbors = NearestNeighbors(n_neighbors=nK, algorithm='kd_tree').fit(root_xyz_np)
nn_dist, nn_idx = Neighbors.kneighbors(query_xyz_np) # N,nK
# optimize the gim mask
valid = (nn_dist[:, 0] < th).astype(np.float)
optimized_final_mask_np = final_mask_np.reshape(-1).copy()
optimized_final_mask_np[_final_mask_np > 0.5] = valid
optimized_final_mask_np = optimized_final_mask_np.reshape(final_mask_np.shape[0], final_mask_np.shape[1])
# do interpolation based on chart distance
interpolation_weight = nn_dist.copy()
interpolation_weight = 1 - interpolation_weight / np.sum(interpolation_weight, 1, keepdims=True)
interpolation_weight = interpolation_weight / np.sum(interpolation_weight, 1, keepdims=True)
query_rgb_np = np.zeros((query_xyz_np.shape[0], 3))
for kdx in range(nK):
nn_color = root_rgb_np[nn_idx[:, kdx], :]
query_rgb_np += nn_color * interpolation_weight[:, kdx][..., np.newaxis]
final_texture_np = np.ones((final_res ** 2, 3))
final_texture_np[_final_mask_np > 0.5, :] = query_rgb_np
final_texture_np = final_texture_np.reshape(final_res, final_res, 3)
return final_texture_np, optimized_final_mask_np
```
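A tiny synthetic check of the mask-cleaning step above (shapes and values are made up; the import path is an assumption):
```python
import numpy as np
from core.models.utils.render_utils import optimize_image_mask  # assumed import path

# 32x32 mask with a dense cluster of identical surface points plus one far-away outlier.
mask = np.zeros((32, 32), dtype=np.float32)
mask[8:16, 8:16] = 1.0
sp = np.zeros((32, 32, 3), dtype=np.float32)
sp[8:16, 8:16] = 0.5
sp[8, 8] = 5.0                                    # isolated outlier point

cleaned = optimize_image_mask(mask.copy(), sp, nK=4, th=1e-2)
print(mask.sum(), cleaned.sum())                  # 64.0 63.0 -> the outlier pixel is pruned
```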
#### File: core/net_bank/pix2surf_cnn.py
```python
import torch.nn as nn
import torchvision.models as models
from core.net_bank.modules import segnetDown2, segnetDown3, segnetUp2, segnetUp3
import torch
class SegNet(nn.Module):
def __init__(self, out_channels=8, in_channels=3,
pretrained=True, withSkipConnections=True, new_version=False, additional=None):
"""
:param out_channels:
:param in_channels:
:param pretrained:
:param withSkipConnections:
:param new_version:
        :param additional: channel count of an extra output head; additional heads are always built as new-version (last) layers
"""
super().__init__()
self.in_channels = in_channels
self.withSkipConnections = withSkipConnections
self.down1 = segnetDown2(self.in_channels, 64, withFeatureMap=self.withSkipConnections)
self.down2 = segnetDown2(64, 128, withFeatureMap=self.withSkipConnections)
self.down3 = segnetDown3(128, 256, withFeatureMap=self.withSkipConnections)
self.down4 = segnetDown3(256, 512, withFeatureMap=self.withSkipConnections)
self.down5 = segnetDown3(512, 512, withFeatureMap=self.withSkipConnections)
self.up5 = segnetUp3(512, 512, withSkipConnections=self.withSkipConnections)
self.up4 = segnetUp3(512, 256, withSkipConnections=self.withSkipConnections)
self.up3 = segnetUp3(256, 128, withSkipConnections=self.withSkipConnections)
self.up2 = segnetUp2(128, 64, withSkipConnections=self.withSkipConnections)
self.up1 = segnetUp2(64, out_channels, last_layer=True if new_version else False,
withSkipConnections=self.withSkipConnections)
if additional is not None:
self.additional_last_layer = segnetUp2(64, additional, last_layer=True,
withSkipConnections=self.withSkipConnections)
self.additional = True
else:
self.additional = False
if pretrained:
vgg16 = models.vgg16(pretrained=True)
Arch = 'SegNet'
if self.withSkipConnections:
Arch = 'SegNetSkip'
print('[ INFO ]: Using pre-trained weights from VGG16 with {}.'.format(Arch))
self.init_vgg16_params(vgg16)
def forward(self, inputs, return_code=False):
down1, indices_1, unpool_shape1, FM1 = self.down1(inputs)
down2, indices_2, unpool_shape2, FM2 = self.down2(down1)
down3, indices_3, unpool_shape3, FM3 = self.down3(down2)
down4, indices_4, unpool_shape4, FM4 = self.down4(down3)
down5, indices_5, unpool_shape5, FM5 = self.down5(down4)
up5 = self.up5(down5, indices_5, unpool_shape5, SkipFeatureMap=FM5)
up4 = self.up4(up5, indices_4, unpool_shape4, SkipFeatureMap=FM4)
up3 = self.up3(up4, indices_3, unpool_shape3, SkipFeatureMap=FM3)
up2 = self.up2(up3, indices_2, unpool_shape2, SkipFeatureMap=FM2)
up1 = self.up1(up2, indices_1, unpool_shape1, SkipFeatureMap=FM1)
# # DEBUG: print sizes
# print('down1:', down1.size())
# print('down2:', down2.size())
# print('down3:', down3.size())
# print('down4:', down4.size())
# print('down5:', down5.size())
#
# print('up5:', up5.size())
# print('up4:', up4.size())
# print('up3:', up3.size())
# print('up2:', up2.size())
# print('up1:', up1.size())
if self.additional:
add = self.additional_last_layer(up2, indices_1, unpool_shape1, SkipFeatureMap=FM1)
up1 = torch.cat((up1, add), dim=1)
if return_code:
return up1, down5
else:
return up1
def init_vgg16_params(self, vgg16):
blocks = [self.down1, self.down2, self.down3, self.down4, self.down5]
features = list(vgg16.features.children())
vgg_layers = []
for _layer in features:
if isinstance(_layer, nn.Conv2d):
vgg_layers.append(_layer)
merged_layers = []
for idx, conv_block in enumerate(blocks):
if idx < 2:
units = [conv_block.conv1.cbr_unit, conv_block.conv2.cbr_unit]
else:
units = [
conv_block.conv1.cbr_unit,
conv_block.conv2.cbr_unit,
conv_block.conv3.cbr_unit,
]
for _unit in units:
for _layer in _unit:
if isinstance(_layer, nn.Conv2d):
merged_layers.append(_layer)
assert len(vgg_layers) == len(merged_layers)
for l1, l2 in zip(vgg_layers, merged_layers):
if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
assert l1.weight.size() == l2.weight.size()
assert l1.bias.size() == l2.bias.size()
l2.weight.data = l1.weight.data
l2.bias.data = l1.bias.data
class SegNetGroup(nn.Module):
def __init__(self, out_channels=8, in_channels=3,
pretrained=True, withSkipConnections=True, new_version=False, additional=None):
"""
:param out_channels:
:param in_channels:
:param pretrained:
:param withSkipConnections:
:param new_version:
        :param additional: channel count of an extra output head; additional heads are always built as new-version (last) layers
"""
super().__init__()
self.in_channels = in_channels
self.withSkipConnections = withSkipConnections
self.down1 = segnetDown2(self.in_channels, 64, withFeatureMap=self.withSkipConnections)
self.down2 = segnetDown2(64, 128, withFeatureMap=self.withSkipConnections)
self.down3 = segnetDown3(128, 256, withFeatureMap=self.withSkipConnections)
self.down4 = segnetDown3(256, 512, withFeatureMap=self.withSkipConnections)
self.down5 = segnetDown3(512, 512, withFeatureMap=self.withSkipConnections)
self.up5 = segnetUp3(512, 512, withSkipConnections=self.withSkipConnections)
self.up4 = segnetUp3(512, 256, withSkipConnections=self.withSkipConnections)
self.up3 = segnetUp3(256, 128, withSkipConnections=self.withSkipConnections)
self.up2 = segnetUp2(128, 64, withSkipConnections=self.withSkipConnections)
self.up1 = segnetUp2(64, out_channels, last_layer=True if new_version else False,
withSkipConnections=self.withSkipConnections)
if additional is not None:
self.additional_last_layer = segnetUp2(64, additional, last_layer=True,
withSkipConnections=self.withSkipConnections)
self.additional = True
else:
self.additional = False
if pretrained:
vgg16 = models.vgg16(pretrained=True)
Arch = 'SegNet'
if self.withSkipConnections:
Arch = 'SegNetSkip'
print('[ INFO ]: Using pre-trained weights from VGG16 with {}.'.format(Arch))
self.init_vgg16_params(vgg16)
def forward(self, inputs, return_code=False):
"""
        :param inputs: a list of RGB tensors, one per view
        :param return_code: if True, also return the per-view encoder features (taken before the cross-view max pooling)
:return:
"""
l1, l2, l3, l4, l5 = [], [], [], [], []
l5_feature = []
for rgb in inputs:
down1, indices_1, unpool_shape1, FM1 = self.down1(rgb)
down2, indices_2, unpool_shape2, FM2 = self.down2(down1)
down3, indices_3, unpool_shape3, FM3 = self.down3(down2)
down4, indices_4, unpool_shape4, FM4 = self.down4(down3)
down5, indices_5, unpool_shape5, FM5 = self.down5(down4)
l1.append([indices_1, unpool_shape1, FM1])
l2.append([indices_2, unpool_shape2, FM2])
l3.append([indices_3, unpool_shape3, FM3])
l4.append([indices_4, unpool_shape4, FM4])
l5.append([indices_5, unpool_shape5, FM5])
l5_feature.append(down5.unsqueeze(0))
max_pooled_feature = torch.max(torch.cat(l5_feature, 0), dim=0).values
f_dim = max_pooled_feature.shape[1]
pred_list = []
for i in range(len(inputs)):
down5 = torch.cat((max_pooled_feature[:, :f_dim // 2, :, :],
l5_feature[i].squeeze(0)[:, f_dim // 2:, :, :]), dim=1)
up5 = self.up5(down5, *l5[i])
up4 = self.up4(up5, *l4[i])
up3 = self.up3(up4, *l3[i])
up2 = self.up2(up3, *l2[i])
up1 = self.up1(up2, *l1[i])
if self.additional:
add = self.additional_last_layer(up2, *l1[i])
up1 = torch.cat((up1, add), dim=1)
pred_list.append(up1)
feature_list = [item.squeeze(0) for item in l5_feature]
if return_code:
return pred_list, feature_list
else:
return pred_list
def init_vgg16_params(self, vgg16):
blocks = [self.down1, self.down2, self.down3, self.down4, self.down5]
features = list(vgg16.features.children())
vgg_layers = []
for _layer in features:
if isinstance(_layer, nn.Conv2d):
vgg_layers.append(_layer)
merged_layers = []
for idx, conv_block in enumerate(blocks):
if idx < 2:
units = [conv_block.conv1.cbr_unit, conv_block.conv2.cbr_unit]
else:
units = [
conv_block.conv1.cbr_unit,
conv_block.conv2.cbr_unit,
conv_block.conv3.cbr_unit,
]
for _unit in units:
for _layer in _unit:
if isinstance(_layer, nn.Conv2d):
merged_layers.append(_layer)
assert len(vgg_layers) == len(merged_layers)
for l1, l2 in zip(vgg_layers, merged_layers):
if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
assert l1.weight.size() == l2.weight.size()
assert l1.bias.size() == l2.bias.size()
l2.weight.data = l1.weight.data
l2.bias.data = l1.bias.data
class SegNetEncoder(nn.Module):
def __init__(self, in_channels=3, withSkipConnections=True):
"""
:param in_channels:
:param pretrained:
:param withSkipConnections:
"""
super().__init__()
self.in_channels = in_channels
self.withSkipConnections = withSkipConnections
self.down1 = segnetDown2(self.in_channels, 64, withFeatureMap=self.withSkipConnections)
self.down2 = segnetDown2(64, 128, withFeatureMap=self.withSkipConnections)
self.down3 = segnetDown3(128, 256, withFeatureMap=self.withSkipConnections)
self.down4 = segnetDown3(256, 512, withFeatureMap=self.withSkipConnections)
self.down5 = segnetDown3(512, 512, withFeatureMap=self.withSkipConnections)
def forward(self, inputs, return_code=False):
down1, indices_1, unpool_shape1, FM1 = self.down1(inputs)
down2, indices_2, unpool_shape2, FM2 = self.down2(down1)
down3, indices_3, unpool_shape3, FM3 = self.down3(down2)
down4, indices_4, unpool_shape4, FM4 = self.down4(down3)
down5, indices_5, unpool_shape5, FM5 = self.down5(down4)
return down5
if __name__ == '__main__':
import torch
net = SegNetGroup(withSkipConnections=True, out_channels=8).cuda()
x = torch.rand(2, 3, 320, 240).cuda()
y, f = net([x, x], return_code=True)
    print(f[0].shape)  # encoder feature of the first view
    print(y[0].shape)  # prediction for the first view
```
#### File: logger/logger_meta/base_logger.py
```python
class BaseLogger(object):
def __init__(self, tb_logger, log_path, cfg):
super().__init__()
self.cfg = cfg
self.NAME = 'base'
self.tb = tb_logger
self.log_path = log_path
# make dir
def log_phase(self):
pass
def log_batch(self, batch):
pass
```
#### File: logger/logger_meta/xls_logger.py
```python
import pandas as pd
from .base_logger import BaseLogger
import os
class XLSLogger(BaseLogger):
def __init__(self, tb_logger, log_path, cfg):
super().__init__(tb_logger, log_path, cfg)
self.NAME = 'xls'
os.makedirs(self.log_path, exist_ok=True)
self.visual_interval_epoch = cfg.VIS_PER_N_EPOCH
self.record_interval_batch = cfg.VIS_PER_N_BATCH
self.visual_one = cfg.VIS_ONE_PER_BATCH
self.visual_train_interval_batch = cfg.VIS_TRAIN_PER_BATCH
self.pd_container = dict()
self.current_epoch = 1
self.current_phase = ''
def log_batch(self, batch):
# get data
if not self.NAME in batch['parser'].keys():
return
keys_list = batch['parser'][self.NAME]
if len(keys_list) == 0:
return
data = batch['data']
phase = batch['phase']
current_epoch = batch['epoch-id']
self.current_epoch = current_epoch
self.current_phase = phase
meta_info = batch['meta-info']
# for each key (file)
for sheet_key in keys_list:
kdata = data[sheet_key]
assert isinstance(kdata, dict)
if sheet_key not in self.pd_container.keys():
self.pd_container[sheet_key] = pd.DataFrame()
add_list = list()
count = len(meta_info['object'])
for ii in range(count):
data = dict()
for k, v in meta_info.items():
data[k] = v[ii]
for k, v in kdata.items():
data[k] = v[ii]
prefix = ""
for k, v in meta_info.items():
prefix += k + "_" + str(v[ii]) + "_"
data['prefix'] = prefix
add_list.append(data)
self.pd_container[sheet_key] = self.pd_container[sheet_key].append(add_list, ignore_index=True)
def log_phase(self):
for k in self.pd_container.keys():
self.pd_container[k].to_excel(
os.path.join(self.log_path, k + '_' + str(self.current_epoch) + '_' + self.current_phase + '.xls'))
self.pd_container[k] = pd.DataFrame()
``` |
{
"source": "JiahuiSun/image-segmentation",
"score": 2
} |
#### File: image-segmentation/models/__init__.py
```python
from .fcn import FCN8, FCN16, FCN32
from .unet import UNet
from .fcn_res import RES32, RES16, RES8
__factory = {
'fcn8': FCN8,
'fcn16': FCN16,
'fcn32': FCN32,
'unet': UNet,
'res32': RES32,
'res16': RES16,
'res8': RES8
}
def get_names():
return __factory.keys()
def get_model(name, *args, **kwargs):
if name not in __factory.keys():
raise KeyError("Unknown model: {}".format(name))
return __factory[name](*args, **kwargs)
```
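A quick factory check (the `n_classes` value is arbitrary and only for illustration):
```python
from models import get_model, get_names

print(sorted(get_names()))             # ['fcn16', 'fcn32', 'fcn8', 'res16', 'res32', 'res8', 'unet']
net = get_model('unet', n_classes=21)  # builds the UNet defined in models/unet.py
```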
#### File: image-segmentation/models/unet.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.utils import initialize_weights
from models.utils import unetConv2, unetUp
class _EncoderBlock(nn.Module):
def __init__(self, in_channels, out_channels, dropout=False):
super(_EncoderBlock, self).__init__()
layers = [
nn.Conv2d(in_channels, out_channels, kernel_size=3),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=3),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
]
if dropout:
layers.append(nn.Dropout())
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
self.encode = nn.Sequential(*layers)
def forward(self, x):
return self.encode(x)
class _DecoderBlock(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels):
super(_DecoderBlock, self).__init__()
self.decode = nn.Sequential(
nn.Conv2d(in_channels, middle_channels, kernel_size=3),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.Conv2d(middle_channels, middle_channels, kernel_size=3),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=2, stride=2),
)
def forward(self, x):
return self.decode(x)
class UNet(nn.Module):
def __init__(self, n_classes=21):
super(UNet, self).__init__()
self.enc1 = _EncoderBlock(3, 64)
self.enc2 = _EncoderBlock(64, 128)
self.enc3 = _EncoderBlock(128, 256)
self.enc4 = _EncoderBlock(256, 512, dropout=True)
self.center = _DecoderBlock(512, 1024, 512)
self.dec4 = _DecoderBlock(1024, 512, 256)
self.dec3 = _DecoderBlock(512, 256, 128)
self.dec2 = _DecoderBlock(256, 128, 64)
self.dec1 = nn.Sequential(
nn.Conv2d(128, 64, kernel_size=3),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
)
self.final = nn.Conv2d(64, n_classes, kernel_size=1)
initialize_weights(self)
def forward(self, x):
enc1 = self.enc1(x)
enc2 = self.enc2(enc1)
enc3 = self.enc3(enc2)
enc4 = self.enc4(enc3)
center = self.center(enc4)
dec4 = self.dec4(torch.cat([center, F.upsample(enc4, center.size()[2:], mode='bilinear')], 1))
dec3 = self.dec3(torch.cat([dec4, F.upsample(enc3, dec4.size()[2:], mode='bilinear')], 1))
dec2 = self.dec2(torch.cat([dec3, F.upsample(enc2, dec3.size()[2:], mode='bilinear')], 1))
dec1 = self.dec1(torch.cat([dec2, F.upsample(enc1, dec2.size()[2:], mode='bilinear')], 1))
final = self.final(dec1)
return F.upsample(final, x.size()[2:], mode='bilinear')
class unets(nn.Module):
def __init__(
self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True
):
        super(unets, self).__init__()
self.is_deconv = is_deconv
self.in_channels = in_channels
self.is_batchnorm = is_batchnorm
self.feature_scale = feature_scale
filters = [64, 128, 256, 512, 1024]
filters = [int(x / self.feature_scale) for x in filters]
# downsampling
self.conv1 = unetConv2(self.in_channels, filters[0], self.is_batchnorm)
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
self.conv2 = unetConv2(filters[0], filters[1], self.is_batchnorm)
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
self.conv3 = unetConv2(filters[1], filters[2], self.is_batchnorm)
self.maxpool3 = nn.MaxPool2d(kernel_size=2)
self.conv4 = unetConv2(filters[2], filters[3], self.is_batchnorm)
self.maxpool4 = nn.MaxPool2d(kernel_size=2)
self.center = unetConv2(filters[3], filters[4], self.is_batchnorm)
# upsampling
self.up_concat4 = unetUp(filters[4], filters[3], self.is_deconv)
self.up_concat3 = unetUp(filters[3], filters[2], self.is_deconv)
self.up_concat2 = unetUp(filters[2], filters[1], self.is_deconv)
self.up_concat1 = unetUp(filters[1], filters[0], self.is_deconv)
# final conv (without any concat)
self.final = nn.Conv2d(filters[0], n_classes, 1)
def forward(self, inputs):
conv1 = self.conv1(inputs)
maxpool1 = self.maxpool1(conv1)
conv2 = self.conv2(maxpool1)
maxpool2 = self.maxpool2(conv2)
conv3 = self.conv3(maxpool2)
maxpool3 = self.maxpool3(conv3)
conv4 = self.conv4(maxpool3)
maxpool4 = self.maxpool4(conv4)
center = self.center(maxpool4)
up4 = self.up_concat4(conv4, center)
up3 = self.up_concat3(conv3, up4)
up2 = self.up_concat2(conv2, up3)
up1 = self.up_concat1(conv1, up2)
final = self.final(up1)
return final
```
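A shape sanity check for the skip-connection UNet above (input resolution is arbitrary; the import path is an assumption):
```python
import torch
from models.unet import UNet  # assumed import path within the repo

net = UNet(n_classes=21).eval()
with torch.no_grad():
    out = net(torch.randn(1, 3, 256, 256))
print(out.shape)  # torch.Size([1, 21, 256, 256]) -- logits upsampled back to the input size
```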
#### File: JiahuiSun/image-segmentation/test.py
```python
import numpy as np
import torch
import os, time, sys
from os.path import join as pjoin
from PIL import Image
import argparse
from torch.utils.data import DataLoader
from torchvision.transforms import ToPILImage
import models
from utils import convert_state_dict, Logger
from dataset.dataset import VOC12
def main(args):
# ========= Setup device and seed ============
np.random.seed(42)
torch.manual_seed(42)
if args.cuda:
torch.cuda.manual_seed_all(42)
device = 'cuda' if args.cuda else 'cpu'
logger = Logger(pjoin(args.save_dir, args.model, 'test.log'))
logger.write(f'\nTesting configs: {args}')
# ================= Load processed data ===================
val_dataset = VOC12(args.data_dir, img_size=args.img_size, split='test')
val_loader = DataLoader(val_dataset, num_workers=8, batch_size=1)
n_classes = val_dataset.n_classes
# ================= Init model ====================
model = models.get_model(name=args.model, n_classes=n_classes)
model = model.to(device)
state = convert_state_dict(torch.load(args.model_path)["model_state"])
model.load_state_dict(state)
model.eval()
# ====================== Only one image ==========================
if args.eval:
with torch.no_grad():
img = Image.open(args.img_path)
origin = img.size
if args.img_size:
img = img.resize((val_dataset.img_size[0], val_dataset.img_size[1]))
img = val_dataset.input_transform(img).unsqueeze(0).to(device)
out = model(img)
pred = np.squeeze(out.data.max(1)[1].cpu().numpy(), axis=0)
decoded = val_dataset.decode_segmap(pred)
img_out = ToPILImage()(decoded).resize(origin)
img_out.save(pjoin(args.save_dir, args.model, f'eval_{args.img_size}.png'))
return
# ====================== Testing Many images ==============================
with torch.no_grad():
for idx, (name, img) in enumerate(val_loader):
img = img.to(device)
out = model(img)
pred = out.data.max(1)[1].squeeze_(1).squeeze_(0).cpu().numpy()
decoded = val_dataset.decode_segmap(pred)
ToPILImage()(decoded).save(pjoin(args.save_dir, args.model, f'{name[0]}_{args.img_size}.png'))
if __name__ == '__main__':
parser = argparse.ArgumentParser('Image Segmentation')
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--model', type=str, default='fcn8')
parser.add_argument('--data-dir', type=str, default='/home/jinHM/sunjiahui/MachineLearning/dataset/VOCdevkit')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--model-path', type=str, default='./saved')
parser.add_argument('--img-path', type=str, default='./visual/2007_000129.jpg')
parser.add_argument('--save-dir', type=str, default='./saved')
parser.add_argument('--img-size', type=int, default=256)
args = parser.parse_args()
main(args)
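    # Example invocations (model/checkpoint paths are hypothetical):
    #   python test.py --cuda --model unet --model-path ./saved/unet/model.pkl --img-size 256
    #   python test.py --model fcn8 --eval --img-path ./visual/2007_000129.jpg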
``` |
{
"source": "jiahwa/pycookbook",
"score": 3
} |
#### File: pycookbook/todo/goals.py
```python
from win32com.client import Dispatch
import openpyxl
import xlrd
from xlutils.copy import copy
def getGoal(name, goals):
name, sep, tail = name.partition('_')
for item in goals:
_item, sep, tail = item[0].partition('_')
if name == _item:
return item[1]
# define constant
AREA_DICT = {}
# AREA_DICT["demo"] = (('read_path', 'read_sheet', ('read_row_begin', 'read_row_end'), ('read_col_select', 'read_col_copy')),
# ('write_path', 'write_sheet', ('write_row_begin', 'write_row_end'), ('write_col_select', 'write_col_copy'))
# times 0
# AREA_DICT["nanjing"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\2021应届毕业生前端培训阅卷.xlsx', '3.考核pc', (116, 179), (1, 23)),
# ('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\南京应届生名单-阅卷成绩.xls', '南京分配-成绩', (1, 62), (4, 5)))
# AREA_DICT["shenzhen"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\2021应届毕业生前端培训阅卷.xlsx', '3.考核pc', (242, 289), (1, 23)),
# ('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\2021届深圳花名册-阅卷成绩.xlsx', 'Sheet3', (1, 47), (1, 11)))
# AREA_DICT["beijing"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\2021应届毕业生前端培训阅卷.xlsx', '3.考核pc', (4, 116), (1, 23)),
# ('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\北京地区应届生PC实战-阅卷成绩.xlsx', '技术人员', (1, 113), (2, 3)))
# AREA_DICT["shanghai"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\2021应届毕业生前端培训阅卷.xlsx', '3.考核pc', (204, 239), (1, 23)),
# ('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\上海培训人员名单-阅卷成绩.xlsx', 'Sheet1', (1, 35), (2, 3)))
# AREA_DICT["suzhou"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\2021应届毕业生前端培训阅卷.xlsx', '3.考核pc', (182, 203), (1, 23)),
# ('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\苏州2021年应届生名单-前端阅卷成绩.xls', 'Sheet1', (1, 22), (1, 2)))
# AREA_DICT["chengdu"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\2021应届毕业生前端培训阅卷.xlsx', '3.考核pc', (292, 326), (1, 23)),
# ('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\成都现场培训名单(更正)-阅卷成绩.xlsx', 'Sheet1', (1, 34), (2, 8)))
# times 1
# AREA_DICT["nanjing"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\考试成绩20210803-09_19_05.xlsx', 'Sheet1', (1, 310), (2, 4)),
# ('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\2021应届毕业生前端培训阅卷-北京.xlsx', '南京', (1, 6), (0, 5)))
# AREA_DICT["beijing"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\考试成绩20210803-09_19_05.xlsx', 'Sheet1', (1, 310), (2, 4)),
# ('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\2021应届毕业生前端培训阅卷-北京.xlsx', '北京', (1, 112), (0, 5)))
# times 2
AREA_DICT["nanjing"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\理论考试成绩20210803-09_19_05.xlsx', 'Sheet1', (1, 310), (2, 4)),
('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\南京应届生名单-阅卷成绩.xls', '南京分配-成绩', (1, 62), (4, 6)))
AREA_DICT["shenzhen"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\理论考试成绩20210803-09_19_05.xlsx', 'Sheet1', (1, 310), (2, 4)),
('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\2021届深圳花名册-阅卷成绩.xlsx', 'Sheet3', (1, 47), (1, 12)))
AREA_DICT["beijing"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\理论考试成绩20210803-09_19_05.xlsx', 'Sheet1', (1, 310), (2, 4)),
('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\北京地区应届生PC实战-阅卷成绩.xlsx', '技术人员', (1, 113), (2, 4)))
AREA_DICT["shanghai"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\理论考试成绩20210803-09_19_05.xlsx', 'Sheet1', (1, 310), (2, 4)),
('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\上海培训人员名单-阅卷成绩.xlsx', 'Sheet1', (1, 35), (2, 4)))
AREA_DICT["suzhou"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\理论考试成绩20210803-09_19_05.xlsx', 'Sheet1', (1, 310), (2, 4)),
('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\苏州2021年应届生名单-前端阅卷成绩.xls', 'Sheet1', (1, 22), (1, 3)))
AREA_DICT["chengdu"] = (('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\理论考试成绩20210803-09_19_05.xlsx', 'Sheet1', (1, 310), (2, 4)),
('C:\\Users\\Zz\\Desktop\\python处理考试成绩\\成都现场培训名单(更正)-阅卷成绩.xlsx', 'Sheet1', (1, 34), (2, 9)))
def main(area):
print(area)
read_file = AREA_DICT[area][0][0]
read_sheet = AREA_DICT[area][0][1]
write_file = AREA_DICT[area][1][0]
write_sheet = AREA_DICT[area][1][1]
# initial open and close
xlApp = Dispatch("Excel.Application")
xlApp.Visible = False
xlBook = xlApp.Workbooks.Open(read_file)
xlBook.Save()
xlBook.Close()
# read only
workbook_read = openpyxl.load_workbook(read_file, data_only=True)
sheet_read = workbook_read.get_sheet_by_name(read_sheet)
# define save goals
goals = []
read_row_begin = AREA_DICT[area][0][2][0]
read_row_end = AREA_DICT[area][0][2][1]
read_col_select = AREA_DICT[area][0][3][0]
read_col_copy = AREA_DICT[area][0][3][1]
for row in range(read_row_begin, read_row_end):
col1 = sheet_read.cell(row=row, column=read_col_select).value
col23 = sheet_read.cell(row=row, column=read_col_copy).value
# print(sheet_read.cell(row=row, column=1).value, , end=' ')
# print('\r')
goals.append((col1,col23))
print(goals)
# write only
# workbook_write = xlrd.open_workbook(write_file, formatting_info=True)
    workbook_write = xlrd.open_workbook(write_file)  # for .xlsx files formatting_info=True must be omitted, otherwise xlrd raises NotImplementedError("formatting_info=True not yet implemented")
sheet_write = workbook_write.sheet_by_name(write_sheet)
# nrows = sheet_write.nrows
    # make a writable copy of the workbook
copy_xlsx = copy(workbook_write)
target_sheet = copy_xlsx.get_sheet(0)
# target_sheet = copy_xlsx.get_sheet_by_name
write_row_begin = AREA_DICT[area][1][2][0]
write_row_end = AREA_DICT[area][1][2][1]
write_col_select = AREA_DICT[area][1][3][0]
write_col_copy = AREA_DICT[area][1][3][1]
for row in range(write_row_begin, write_row_end):
col4 = sheet_write.cell(row, write_col_select).value
goal = getGoal(col4, goals)
if goal is not None:
# print(row, write_col_copy, goal)
target_sheet.write(row, write_col_copy, goal)
# save
copy_xlsx.save(write_file)
# can be modified here
# area = 'nanjing'
# area = 'shenzhen'
# area = 'beijing'
# area = 'shanghai'
# area = 'suzhou'
# area = 'chengdu'
# execute six areas
for i in AREA_DICT:
main(i)
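# Illustrative matching behaviour of getGoal (names are made up):
#   getGoal('alice_nanjing', [('alice_0001', 88), ('bob_0002', 92)]) -> 88
#   getGoal('carol_0003', [('alice_0001', 88)]) -> None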
``` |
{
"source": "jiahy0825/scikit-multiflow",
"score": 3
} |
#### File: tests/data/test_data_stream.py
```python
import os
import numpy as np
import pandas as pd
import pytest
from skmultiflow.data.data_stream import DataStream
def test_data_stream(test_path, package_path):
test_file = os.path.join(package_path, 'src/skmultiflow/data/datasets/sea_stream.csv')
raw_data = pd.read_csv(test_file)
stream = DataStream(raw_data, name='Test')
assert not stream._Y_is_defined
stream.prepare_for_use()
assert stream.n_remaining_samples() == 40000
expected_names = ['attrib1', 'attrib2', 'attrib3']
assert stream.feature_names == expected_names
expected_targets = [0, 1]
assert stream.target_values == expected_targets
assert stream.target_names == ['class']
assert stream.n_features == 3
assert stream.n_cat_features == 0
assert stream.n_num_features == 3
assert stream.n_targets == 1
assert stream.get_data_info() == 'Test: 1 target(s), 2 classes'
assert stream.has_more_samples() is True
assert stream.is_restartable() is True
# Load test data corresponding to first 10 instances
test_file = os.path.join(test_path, 'sea_stream_file.npz')
data = np.load(test_file)
X_expected = data['X']
y_expected = data['y']
X, y = stream.next_sample()
assert np.alltrue(X[0] == X_expected[0])
assert np.alltrue(y[0] == y_expected[0])
X, y = stream.last_sample()
assert np.alltrue(X[0] == X_expected[0])
assert np.alltrue(y[0] == y_expected[0])
stream.restart()
X, y = stream.next_sample(10)
assert np.alltrue(X == X_expected)
assert np.alltrue(y == y_expected)
assert stream.n_targets == np.array(y).ndim
assert stream.n_features == X.shape[1]
assert 'stream' == stream._estimator_type
expected_info = "DataStream(n_targets=-1, target_idx=1, cat_features=None, name='Test')"
assert stream.get_info() == expected_info
def test_data_stream_X_y(test_path, package_path):
test_file = os.path.join(package_path, 'src/skmultiflow/data/datasets/sea_stream.csv')
raw_data = pd.read_csv(test_file)
y = raw_data.iloc[:, -1:]
X = raw_data.iloc[:, :-1]
stream = DataStream(X, y)
assert stream._Y_is_defined
stream.prepare_for_use()
assert stream.n_remaining_samples() == 40000
expected_names = ['attrib1', 'attrib2', 'attrib3']
assert stream.feature_names == expected_names
expected_targets = [0, 1]
assert stream.target_values == expected_targets
assert stream.target_names == ['class']
assert stream.n_features == 3
assert stream.n_cat_features == 0
assert stream.n_num_features == 3
assert stream.n_targets == 1
assert stream.get_data_info() == '1 target(s), 2 classes'
assert stream.has_more_samples() is True
assert stream.is_restartable() is True
# Load test data corresponding to first 10 instances
test_file = os.path.join(test_path, 'sea_stream_file.npz')
data = np.load(test_file)
X_expected = data['X']
y_expected = data['y']
X, y = stream.next_sample()
assert np.alltrue(X[0] == X_expected[0])
assert np.alltrue(y[0] == y_expected[0])
X, y = stream.last_sample()
assert np.alltrue(X[0] == X_expected[0])
assert np.alltrue(y[0] == y_expected[0])
stream.restart()
X, y = stream.next_sample(10)
assert np.alltrue(X == X_expected)
assert np.alltrue(y == y_expected)
assert stream.n_targets == np.array(y).ndim
assert stream.n_features == X.shape[1]
def test_check_data():
# Test if data contains non-numeric values
data = pd.DataFrame(np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 'invalid', 13, 14, 15]]))
with pytest.raises(ValueError):
DataStream(data=data, allow_nan=False).prepare_for_use()
# Test if data contains NaN values
data = pd.DataFrame(np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, np.nan, 13, 14, 15]]))
with pytest.raises(ValueError):
DataStream(data=data, allow_nan=False).prepare_for_use()
# Test warning for NaN values
with pytest.warns(UserWarning):
DataStream(data=data, allow_nan=True).prepare_for_use()
```
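A minimal, self-contained illustration of the same API on a toy frame (values chosen arbitrarily; assumes the same scikit-multiflow version these tests target):
```python
import numpy as np
import pandas as pd
from skmultiflow.data.data_stream import DataStream

df = pd.DataFrame({'a': np.arange(5.0), 'b': np.arange(5.0) * 2, 'class': [0, 1, 0, 1, 0]})
stream = DataStream(df, name='toy')   # the last column is taken as the target by default
stream.prepare_for_use()
X, y = stream.next_sample(3)
print(X.shape, y)                     # (3, 2) [0 1 0]
```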
#### File: tests/trees/test_hoeffding_tree.py
```python
import numpy as np
from array import array
import os
from skmultiflow.data import RandomTreeGenerator, SEAGenerator
from skmultiflow.trees import HoeffdingTree
def test_hoeffding_tree_nb(test_path):
stream = RandomTreeGenerator(
tree_random_state=23, sample_random_state=12, n_classes=4,
n_cat_features=2, n_num_features=5, n_categories_per_cat_feature=5,
max_tree_depth=6, min_leaf_depth=3, fraction_leaves_per_level=0.15
)
stream.prepare_for_use()
nominal_attr_idx = [x for x in range(5, stream.n_features)]
learner = HoeffdingTree(
nominal_attributes=nominal_attr_idx, leaf_prediction='nb'
)
cnt = 0
max_samples = 5000
predictions = array('i')
proba_predictions = []
wait_samples = 100
while cnt < max_samples:
X, y = stream.next_sample()
# Test every n samples
if (cnt % wait_samples == 0) and (cnt != 0):
predictions.append(learner.predict(X)[0])
proba_predictions.append(learner.predict_proba(X)[0])
learner.partial_fit(X, y)
cnt += 1
expected_predictions = array('i', [0, 1, 3, 0, 0, 3, 0, 1, 1, 2, 0, 2, 1,
1, 2, 1, 3, 0, 1, 1, 1, 1, 0, 3, 1, 2,
1, 1, 3, 2, 1, 2, 2, 2, 1, 1, 1, 0, 1,
2, 0, 2, 0, 0, 0, 0, 1, 3, 2])
assert np.alltrue(predictions == expected_predictions)
expected_info = "HoeffdingTree(binary_split=False, grace_period=200, leaf_prediction='nb',\n" \
" max_byte_size=33554432, memory_estimate_period=1000000,\n" \
" nb_threshold=0, no_preprune=False,\n" \
" nominal_attributes=[5, 6, 7, 8, 9, 10, 11, 12, 13, 14],\n" \
" remove_poor_atts=False, split_confidence=1e-07,\n" \
" split_criterion='info_gain', stop_mem_management=False,\n" \
" tie_threshold=0.05)"
assert learner.get_info() == expected_info
def test_hoeffding_tree_nba(test_path):
stream = RandomTreeGenerator(tree_random_state=23, sample_random_state=12, n_classes=4, n_cat_features=2,
n_num_features=5, n_categories_per_cat_feature=5, max_tree_depth=6, min_leaf_depth=3,
fraction_leaves_per_level=0.15)
stream.prepare_for_use()
nominal_attr_idx = [x for x in range(5, stream.n_features)]
learner = HoeffdingTree(nominal_attributes=nominal_attr_idx)
cnt = 0
max_samples = 5000
predictions = array('i')
proba_predictions = []
wait_samples = 100
while cnt < max_samples:
X, y = stream.next_sample()
# Test every n samples
if (cnt % wait_samples == 0) and (cnt != 0):
predictions.append(learner.predict(X)[0])
proba_predictions.append(learner.predict_proba(X)[0])
learner.partial_fit(X, y)
cnt += 1
expected_predictions = array('i', [0, 1, 3, 0, 0, 3, 0, 1, 1, 2,
0, 2, 1, 1, 2, 1, 3, 0, 1, 1,
1, 1, 0, 3, 1, 2, 1, 1, 3, 2,
1, 2, 2, 2, 1, 1, 1, 0, 1, 2,
0, 2, 0, 0, 0, 0, 1, 3, 2])
test_file = os.path.join(test_path, 'test_hoeffding_tree.npy')
data = np.load(test_file)
assert np.alltrue(predictions == expected_predictions)
assert np.allclose(proba_predictions, data)
expected_info = "HoeffdingTree(binary_split=False, grace_period=200, leaf_prediction='nba',\n" \
" max_byte_size=33554432, memory_estimate_period=1000000,\n" \
" nb_threshold=0, no_preprune=False,\n" \
" nominal_attributes=[5, 6, 7, 8, 9, 10, 11, 12, 13, 14],\n" \
" remove_poor_atts=False, split_confidence=1e-07,\n" \
" split_criterion='info_gain', stop_mem_management=False,\n" \
" tie_threshold=0.05)"
assert learner.get_info() == expected_info
expected_model_1 = 'Leaf = Class 1.0 | {0.0: 1423.0, 1.0: 1745.0, 2.0: 978.0, 3.0: 854.0}\n'
assert (learner.get_model_description() == expected_model_1)
assert type(learner.predict(X)) == np.ndarray
assert type(learner.predict_proba(X)) == np.ndarray
X, y = stream.next_sample(20000)
learner.split_criterion = 'hellinger'
learner.partial_fit(X, y)
expected_rules = 'Att (5) == 0.000 and Att (12) == 0.000 | class: 1\n' + \
'Att (5) == 0.000 and Att (12) == 1.000 | class: 1\n' + \
'Att (5) == 1.000 and Att (13) == 0.000 and Att (1) <= 0.550 and Att (3) <= 0.730 | class: 0\n' +\
'Att (5) == 1.000 and Att (13) == 0.000 and Att (1) <= 0.550 and Att (3) > 0.730 | class: 2\n' + \
'Att (5) == 1.000 and Att (13) == 0.000 and Att (1) > 0.550 and Att (1) <= 0.800 | class: 0\n' + \
'Att (5) == 1.000 and Att (13) == 0.000 and Att (1) > 0.550 and Att (1) > 0.800 and Att (14) == 0.000 | class: 0\n' + \
'Att (5) == 1.000 and Att (13) == 0.000 and Att (1) > 0.550 and Att (1) > 0.800 and Att (14) == 1.000 | class: 1\n' + \
'Att (5) == 1.000 and Att (13) == 1.000 and Att (3) <= 0.730 | class: 1\n' + \
'Att (5) == 1.000 and Att (13) == 1.000 and Att (3) > 0.730 | class: 0\n'
assert expected_rules == learner.get_rules_description()
def test_hoeffding_tree_coverage():
# Cover memory management
stream = SEAGenerator(random_state=1, noise_percentage=0.05)
stream.prepare_for_use()
X, y = stream.next_sample(5000)
learner = HoeffdingTree(max_byte_size=30, memory_estimate_period=100, grace_period=10, leaf_prediction='mc')
learner.partial_fit(X, y, classes=stream.target_values)
learner.reset()
# Cover nominal attribute observer
stream = RandomTreeGenerator(tree_random_state=1, sample_random_state=1, n_num_features=0,
n_categories_per_cat_feature=2)
stream.prepare_for_use()
X, y = stream.next_sample(1000)
learner = HoeffdingTree(leaf_prediction='mc', nominal_attributes=[i for i in range(10)])
learner.partial_fit(X, y, classes=stream.target_values)
def test_hoeffding_tree_model_information():
stream = SEAGenerator(random_state=1, noise_percentage=0.05)
stream.prepare_for_use()
X, y = stream.next_sample(5000)
nominal_attr_idx = [x for x in range(5, stream.n_features)]
learner = HoeffdingTree(nominal_attributes=nominal_attr_idx)
learner.partial_fit(X, y, classes=stream.target_values)
expected_info = {
'Tree size (nodes)': 5,
'Tree size (leaves)': 3,
'Active learning nodes': 3,
'Tree depth': 2,
'Active leaf byte size estimate': 0.0,
'Inactive leaf byte size estimate': 0.0,
'Byte size estimate overhead': 1.0
}
observed_info = learner.get_model_measurements
for k in expected_info:
assert k in observed_info
assert expected_info[k] == observed_info[k]
expected_description = "if Attribute 0 <= 4.549969620513424:\n" \
" if Attribute 1 <= 5.440182925299016:\n" \
" Leaf = Class 0 | {0: 345.54817975126275, 1: 44.43855503614928}\n" \
" if Attribute 1 > 5.440182925299016:\n" \
" Leaf = Class 1 | {0: 54.451820248737235, 1: 268.5614449638507}\n" \
"if Attribute 0 > 4.549969620513424:\n" \
" Leaf = Class 1 | {0: 390.5845685762964, 1: 2372.3747376855454}\n" \
assert expected_description == learner.get_model_description()
def test_hoeffding_tree_categorical_features(test_path):
data_path = os.path.join(test_path, 'ht_categorical_features_testcase.npy')
stream = np.load(data_path)
# Removes the last two columns (regression targets)
stream = stream[:, :-2]
X, y = stream[:, :-1], stream[:, -1]
nominal_attr_idx = np.arange(7).tolist()
learner = HoeffdingTree(nominal_attributes=nominal_attr_idx)
learner.partial_fit(X, y, classes=np.unique(y))
expected_description = "if Attribute 0 = -15.0:\n" \
" Leaf = Class 2 | {2: 350.0}\n" \
"if Attribute 0 = 0.0:\n" \
" Leaf = Class 0 | {0: 420.0, 1: 252.0}\n" \
"if Attribute 0 = 1.0:\n" \
" Leaf = Class 1 | {0: 312.0, 1: 332.0}\n" \
"if Attribute 0 = 2.0:\n" \
" Leaf = Class 1 | {0: 236.0, 1: 383.0}\n" \
"if Attribute 0 = 3.0:\n" \
" Leaf = Class 1 | {0: 168.0, 1: 459.0}\n" \
"if Attribute 0 = -30.0:\n" \
" Leaf = Class 3.0 | {3.0: 46.0, 4.0: 42.0}\n"
assert learner.get_model_description() == expected_description
``` |
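The Hoeffding tree tests above interleave prediction and training every `wait_samples` instances. The same idea, written as a compact prequential (test-then-train) loop, is sketched below purely as an illustration of the API the tests use:

```python
# Illustrative sketch, assuming the same skmultiflow API used by the tests above.
from skmultiflow.data import SEAGenerator
from skmultiflow.trees import HoeffdingTree

stream = SEAGenerator(random_state=1, noise_percentage=0.05)
stream.prepare_for_use()
learner = HoeffdingTree(leaf_prediction='nba')   # naive Bayes adaptive leaves

correct = 0
n_samples = 2000
for i in range(n_samples):
    X, y = stream.next_sample()
    if i > 0:                                             # test first ...
        correct += int(learner.predict(X)[0] == y[0])
    learner.partial_fit(X, y, classes=stream.target_values)  # ... then train
print('prequential accuracy: {:.3f}'.format(correct / float(n_samples - 1)))
```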
{
"source": "Jiaion/salt",
"score": 2
} |
#### File: road/raet/transacting.py
```python
import socket
import binascii
import struct
try:
import simplejson as json
except ImportError:
import json
# Import ioflo libs
from ioflo.base.odicting import odict
from ioflo.base import aiding
from . import raeting
from . import nacling
from . import packeting
from . import estating
from ioflo.base.consoling import getConsole
console = getConsole()
class Transaction(object):
'''
RAET protocol transaction class
'''
Timeout = 5.0 # default timeout
def __init__(self, stack=None, kind=None, timeout=None,
reid=None, rmt=False, bcst=False, sid=None, tid=None,
txData=None, txPacket=None, rxPacket=None):
'''
Setup Transaction instance
timeout of 0.0 means no timeout go forever
'''
self.stack = stack
self.kind = kind or raeting.PACKET_DEFAULTS['tk']
if timeout is None:
timeout = self.Timeout
self.timeout = timeout
self.timer = aiding.StoreTimer(self.stack.store, duration=self.timeout)
# local estate is the .stack.estate
self.reid = reid # remote estate eid
self.rmt = rmt
self.bcst = bcst
self.sid = sid
self.tid = tid
self.txData = txData or odict() # data used to prepare last txPacket
self.txPacket = txPacket # last tx packet needed for retries
self.rxPacket = rxPacket # last rx packet needed for index
@property
def index(self):
'''
Property is transaction tuple (rf, le, re, si, ti, bf,)
'''
le = self.stack.estate.eid
if le == 0: #bootstrapping onto channel use ha
le = self.stack.estate.ha
re = self.reid
if re == 0: #bootstrapping onto channel use ha
re = self.stack.estates[self.reid].ha
return ((self.rmt, le, re, self.sid, self.tid, self.bcst,))
def process(self):
'''
Process time based handling of transaction like timeout or retries
'''
pass
def receive(self, packet):
'''
Process received packet. Subclasses should super call this.
'''
self.rxPacket = packet
def transmit(self, packet):
'''
Queue tx duple on stack transmit queue
'''
try:
self.stack.txUdp(packet.packed, self.reid)
except raeting.StackError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat(self.statKey())
self.remove(packet.index)
return
self.txPacket = packet
def add(self, index=None):
'''
Add self to stack transactions
'''
if not index:
index = self.index
self.stack.addTransaction(index, self)
def remove(self, index=None):
'''
Remove self from stack transactions
'''
if not index:
index = self.index
self.stack.removeTransaction(index, transaction=self)
def statKey(self):
'''
Return the stat name key from class name
'''
return ("{0}_transaction_failure".format(self.__class__.__name__.lower()))
class Initiator(Transaction):
'''
RAET protocol initiator transaction class
'''
def __init__(self, **kwa):
'''
Setup Transaction instance
'''
kwa['rmt'] = False # force rmt to False
super(Initiator, self).__init__(**kwa)
def process(self):
'''
Process time based handling of transaction like timeout or retries
'''
if self.timeout > 0.0 and self.timer.expired:
self.stack.removeTransaction(self.index, transaction=self)
class Correspondent(Transaction):
'''
RAET protocol correspondent transaction class
'''
Requireds = ['sid', 'tid', 'rxPacket']
def __init__(self, **kwa):
'''
Setup Transaction instance
'''
kwa['rmt'] = True # force rmt to True
missing = []
for arg in self.Requireds:
if arg not in kwa:
missing.append(arg)
if missing:
emsg = "Missing required keyword arguments: '{0}'".format(missing)
raise TypeError(emsg)
super(Correspondent, self).__init__(**kwa)
class Staler(Initiator):
'''
RAET protocol Staler initiator transaction class
'''
def __init__(self, **kwa):
'''
Setup Transaction instance
'''
for key in ['kind', 'reid', 'sid', 'tid', 'rxPacket']:
if key not in kwa:
emsg = "Missing required keyword argumens: '{0}'".format(key)
raise TypeError(emsg)
super(Staler, self).__init__(**kwa)
self.prep()
def prep(self):
'''
Prepare .txData for nack to stale
'''
self.txData.update( sh=self.stack.estate.host,
sp=self.stack.estate.port,
dh=self.rxPacket.data['sh'],
dp=self.rxPacket.data['sp'],
se=self.stack.estate.eid,
de=self.reid,
tk=self.kind,
cf=self.rmt,
bf=self.bcst,
si=self.sid,
ti=self.tid,
ck=raeting.coatKinds.nada,
fk=raeting.footKinds.nada)
def nack(self):
'''
Send nack to stale packet from correspondent.
This is used when a correspondent packet is received but no matching
Initiator transaction is found. So create a dummy initiator and send
a nack packet back. Do not add transaction so don't need to remove it.
'''
ha = (self.rxPacket.data['sh'], self.rxPacket.data['sp'])
emsg = "{0} Stale Transaction from {1} dropping ...".format(self.stack.name, ha )
console.terse(emsg + '\n')
self.stack.incStat('stale_correspondent_attempt')
if self.reid not in self.stack.estates:
emsg = "Unknown correspondent estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('unknown_correspondent_eid')
#return #maybe we should return and not respond at all in this case
body = odict()
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.nack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
return
self.stack.txes.append((packet.packed, ha))
console.terse("Nack stale correspondent at {0}\n".format(self.stack.store.stamp))
self.stack.incStat('stale_correspondent_nack')
class Joiner(Initiator):
'''
RAET protocol Joiner Initiator class Dual of Joinent
'''
RedoTimeoutMin = 1.0 # initial timeout
RedoTimeoutMax = 4.0 # max timeout
def __init__(self, mha = None, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
'''
Setup Transaction instance
'''
kwa['kind'] = raeting.trnsKinds.join
super(Joiner, self).__init__(**kwa)
if mha is None:
mha = ('127.0.0.1', raeting.RAET_PORT)
self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
self.redoTimer = aiding.StoreTimer(self.stack.store,
duration=self.redoTimeoutMin)
if self.reid is None:
if not self.stack.estates: # no channel master so make one
master = estating.RemoteEstate(eid=0, ha=mha)
try:
self.stack.addRemote(master)
except raeting.StackError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat(self.statKey())
return
self.reid = self.stack.estates.values()[0].eid # zeroth is channel master
self.sid = 0
self.tid = self.stack.estates[self.reid].nextTid()
self.prep()
self.add(self.index)
def receive(self, packet):
"""
Process received packet belonging to this transaction
"""
super(Joiner, self).receive(packet) # self.rxPacket = packet
if packet.data['tk'] == raeting.trnsKinds.join:
if packet.data['pk'] == raeting.pcktKinds.ack: #pending
self.pend() #set timer for redo
elif packet.data['pk'] == raeting.pcktKinds.response:
self.accept()
elif packet.data['pk'] == raeting.pcktKinds.nack: #rejected
self.rejected()
def process(self):
'''
Perform time based processing of transaction
'''
if self.timeout > 0.0 and self.timer.expired:
if self.txPacket and self.txPacket.data['pk'] == raeting.pcktKinds.request:
self.remove(self.txPacket.index) #index changes after accept
else:
self.remove(self.index) # in case never sent txPacket
console.concise("Joiner timed out at {0}\n".format(self.stack.store.stamp))
return
# need keep sending join until accepted or timed out
if self.redoTimer.expired:
duration = min(
max(self.redoTimeoutMin,
self.redoTimer.duration) * 2.0,
self.redoTimeoutMax)
self.redoTimer.restart(duration=duration)
if (self.txPacket and
self.txPacket.data['pk'] == raeting.pcktKinds.request):
self.transmit(self.txPacket) #redo
console.concise("Joiner Redo Join at {0}\n".format(self.stack.store.stamp))
def prep(self):
'''
Prepare .txData
'''
self.txData.update( sh=self.stack.estate.host,
sp=self.stack.estate.port,
dh=self.stack.estates[self.reid].host,
dp=self.stack.estates[self.reid].port,
se=self.stack.estate.eid,
de=self.reid,
tk=self.kind,
cf=self.rmt,
bf=self.bcst,
si=self.sid,
ti=self.tid,
ck=raeting.coatKinds.nada,
fk=raeting.footKinds.nada)
def join(self):
'''
Send join request
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat(self.statKey())
self.remove()
return
body = odict([('name', self.stack.estate.name),
('verhex', self.stack.estate.signer.verhex),
('pubhex', self.stack.estate.priver.pubhex)])
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.request,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
console.concise("Joiner Do Join at {0}\n".format(self.stack.store.stamp))
def pend(self):
'''
Process ack to join packet
'''
if not self.stack.parseInner(self.rxPacket):
return
pass
def accept(self):
'''
Perform acceptance in response to join response packet
'''
if not self.stack.parseInner(self.rxPacket):
return
data = self.rxPacket.data
body = self.rxPacket.body.data
leid = body.get('leid')
if not leid:
emsg = "Missing local estate id in accept packet"
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_accept')
self.remove(self.txPacket.index)
return
reid = body.get('reid')
if not reid:
emsg = "Missing remote estate id in accept packet"
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_accept')
self.remove(self.txPacket.index)
return
name = body.get('name')
if not name:
emsg = "Missing remote name in accept packet"
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_accept')
self.remove(self.txPacket.index)
return
verhex = body.get('verhex')
if not verhex:
emsg = "Missing remote verifier key in accept packet"
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_accept')
self.remove(self.txPacket.index)
return
pubhex = body.get('pubhex')
if not pubhex:
emsg = "Missing remote crypt key in accept packet"
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_accept')
self.remove(self.txPacket.index)
return
self.stack.estate.eid = leid
self.stack.dumpLocal()
remote = self.stack.estates[self.reid]
if remote.eid != reid: #move remote estate to new index
try:
self.stack.moveRemote(old=remote.eid, new=reid)
except raeting.StackError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat(self.statKey())
self.remove(self.txPacket.index)
return
if remote.name != name: # rename remote estate to new name
try:
self.stack.renameRemote(old=remote.name, new=name)
except raeting.StackError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat(self.statKey())
self.remove(self.txPacket.index)
return
self.reid = reid
# we are assuming for now that the joiner cannot talk peer to peer only
# to main estate otherwise we need to ensure unique eid, name, and ha on road
# check if remote keys of main estate are accepted here
status = self.stack.safe.statusRemoteEstate(remote,
verhex=verhex,
pubhex=pubhex,
main=False)
if status == raeting.acceptances.rejected:
self.nackAccept()
else:
remote.joined = True #accepted
remote.nextSid()
self.ackAccept()
self.stack.dumpRemote(remote)
def rejected(self):
'''
Process nack to join packet
'''
if not self.stack.parseInner(self.rxPacket):
return
self.remove(self.txPacket.index)
console.terse("Joiner Rejected at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
def ackAccept(self):
'''
Send ack to accept response
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove(self.txPacket.index)
return
body = odict()
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.ack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove(self.txPacket.index)
return
self.transmit(packet)
self.remove(self.rxPacket.index)
console.concise("Joiner Do Accept at {0}\n".format(self.stack.store.stamp))
self.stack.incStat("join_initiate_complete")
def nackAccept(self):
'''
Send nack to accept response
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove(self.txPacket.index)
return
body = odict()
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.nack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove(self.txPacket.index)
return
self.transmit(packet)
self.remove(self.txPacket.index)
console.terse("Joiner Do Reject at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
class Joinent(Correspondent):
'''
RAET protocol Joinent transaction class, dual of Joiner
'''
RedoTimeoutMin = 0.1 # initial timeout
RedoTimeoutMax = 2.0 # max timeout
def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
'''
Setup Transaction instance
'''
kwa['kind'] = raeting.trnsKinds.join
super(Joinent, self).__init__(**kwa)
self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
self.redoTimer = aiding.StoreTimer(self.stack.store, duration=0.0)
self.prep()
# Since corresponding bootstrap transaction use packet.index not self.index
self.add(self.rxPacket.index)
def receive(self, packet):
"""
Process received packet belonging to this transaction
"""
super(Joinent, self).receive(packet) # self.rxPacket = packet
if packet.data['tk'] == raeting.trnsKinds.join:
if packet.data['pk'] == raeting.pcktKinds.ack: #accepted by joiner
self.joined()
elif packet.data['pk'] == raeting.pcktKinds.nack: #rejected
self.rejected()
def process(self):
'''
Perform time based processing of transaction
'''
if self.timeout > 0.0 and self.timer.expired:
self.nackJoin()
console.concise("Joinent timed out at {0}\n".format(self.stack.store.stamp))
return
# need to perform the check for accepted status and then send accept
if self.redoTimer.expired:
duration = min(
max(self.redoTimeoutMin,
self.redoTimer.duration) * 2.0,
self.redoTimeoutMax)
self.redoTimer.restart(duration=duration)
if (self.txPacket and
self.txPacket.data['pk'] == raeting.pcktKinds.response):
self.transmit(self.txPacket) #redo
console.concise("Joinent Redo Accept at {0}\n".format(self.stack.store.stamp))
else: #check to see if status has changed to accept
remote = self.stack.estates[self.reid]
if remote:
data = self.stack.safe.loadRemoteEstate(remote)
if data:
status = self.stack.safe.statusRemoteEstate(remote,
data['verhex'],
data['pubhex'])
if status == raeting.acceptances.accepted:
self.accept()
def prep(self):
'''
Prepare .txData
'''
#since bootstrap transaction use the reversed seid and deid from packet
self.txData.update( sh=self.stack.estate.host,
sp=self.stack.estate.port,
se=self.rxPacket.data['de'],
de=self.rxPacket.data['se'],
tk=self.kind,
cf=self.rmt,
bf=self.bcst,
si=self.sid,
ti=self.tid,
ck=raeting.coatKinds.nada,
fk=raeting.footKinds.nada,)
def join(self):
'''
Process join packet
Respond based on acceptance status of remote estate.
Rules for Colliding Estates
Apply the rules to ensure no colliding estates on (host, port)
If matching name estate found then return
Rules:
Only one estate with given eid is allowed on road
Only one estate with given name is allowed on road.
Only one estate with given ha on road is allowed on road.
Are multiple estates with same keys but different name (ha) allowed?
Current logic ignores same keys or not
Since creating new estate assigns unique eid,
we are looking for preexisting estates with any eid.
Processing steps:
I) Search remote estates for matching name
A) Found remote
1) HA not match
Search remotes for other matching HA but different name
If found other delete
Reuse found remote to be updated and joined
B) Not found
Search remotes for other matching HA
If found delete for now
Create new remote and update
'''
if not self.stack.parseInner(self.rxPacket):
return
data = self.rxPacket.data
body = self.rxPacket.body.data
name = body.get('name')
if not name:
emsg = "Missing remote name in join packet"
console.terse(emsg + '\n')
self.stack.incStat('invalid_join')
self.remove(self.rxPacket.index)
return
#raise raeting.TransactionError(emsg)
verhex = body.get('verhex')
if not verhex:
emsg = "Missing remote verifier key in join packet"
console.terse(emsg + '\n')
self.stack.incStat('invalid_join')
self.remove(self.rxPacket.index)
return
#raise raeting.TransactionError(emsg)
pubhex = body.get('pubhex')
if not pubhex:
emsg = "Missing remote crypt key in join packet"
console.terse(emsg + '\n')
self.stack.incStat('invalid_join')
self.remove(self.rxPacket.index)
return
#raise raeting.TransactionError(emsg)
host = data['sh']
port = data['sp']
self.txData.update( dh=host, dp=port,) # responses use received host port
remote = self.stack.fetchRemoteByName(name)
if remote:
if not (host == remote.host and port == remote.port):
other = self.stack.fetchRemoteByHostPort(host, port)
if other and other is not remote: #may need to terminate transactions
try:
self.stack.removeRemote(other.eid)
except raeting.StackError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat(self.statKey())
self.remove(self.rxPacket.index)
return
remote.host = host
remote.port = port
remote.rsid = self.sid
remote.rtid = self.tid
status = self.stack.safe.statusRemoteEstate(remote,
verhex=verhex,
pubhex=pubhex)
else:
other = self.stack.fetchRemoteByHostPort(host, port)
if other: #may need to terminate transactions
try:
self.stack.removeRemote(other.eid)
except raeting.StackError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat(self.statKey())
self.remove(self.rxPacket.index)
return
remote = estating.RemoteEstate( stack=self.stack,
name=name,
host=host,
port=port,
acceptance=None,
verkey=verhex,
pubkey=pubhex,
rsid=self.sid,
rtid=self.tid, )
try:
self.stack.addRemote(remote) #provisionally add .accepted is None
except raeting.StackError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat(self.statKey())
self.remove(self.rxPacket.index)
return
status = self.stack.safe.statusRemoteEstate(remote,
verhex=verhex,
pubhex=pubhex)
self.stack.dumpRemote(remote)
self.reid = remote.eid # auto generated at instance creation above
if status == None or status == raeting.acceptances.pending:
self.ackJoin()
elif status == raeting.acceptances.accepted:
duration = min(
max(self.redoTimeoutMin,
self.redoTimer.duration) * 2.0,
self.redoTimeoutMax)
self.redoTimer.restart(duration=duration)
self.accept()
else:
self.nackJoin()
emsg = "Estate {0} eid {1} keys rejected\n".format(
remote.name, remote.eid)
console.terse(emsg)
def ackJoin(self):
'''
Send ack to join request
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove(self.rxPacket.index)
return
#since bootstrap transaction use updated self.reid
#self.txData.update( dh=self.stack.estates[self.reid].host,
#dp=self.stack.estates[self.reid].port,)
body = odict()
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.ack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove(self.rxPacket.index)
return
self.transmit(packet)
console.concise("Joinent Pending Accept at {0}\n".format(self.stack.store.stamp))
def accept(self):
'''
Send accept response to join request
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove(self.rxPacket.index)
return
remote = self.stack.estates[self.reid]
body = odict([ ('leid', self.reid),
('reid', self.stack.estate.eid),
('name', self.stack.estate.name),
('verhex', self.stack.estate.signer.verhex),
('pubhex', self.stack.estate.priver.pubhex)])
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.response,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove(self.rxPacket.index)
return
self.transmit(packet)
console.concise("Joinent Do Accept at {0}\n".format(self.stack.store.stamp))
def joined(self):
'''
process ack to accept response
'''
if not self.stack.parseInner(self.rxPacket):
return
remote = self.stack.estates[self.reid]
remote.joined = True # accepted
remote.nextSid()
self.stack.dumpRemote(remote)
self.remove(self.rxPacket.index)
self.stack.incStat("join_correspond_complete")
def rejected(self):
'''
Process nack to accept response or stale
'''
if not self.stack.parseInner(self.rxPacket):
return
remote = self.stack.estates[self.reid]
# use presence to remove remote
self.remove(self.rxPacket.index)
console.terse("Joinent Rejected at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
def nackJoin(self):
'''
Send nack to join request
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove(self.rxPacket.index)
return
body = odict()
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.nack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove(self.rxPacket.index)
return
self.transmit(packet)
self.remove(self.rxPacket.index)
console.terse("Joinent Reject at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
class Allower(Initiator):
'''
RAET protocol Allower Initiator class Dual of Allowent
CurveCP handshake
'''
Timeout = 4.0
RedoTimeoutMin = 0.25 # initial timeout
RedoTimeoutMax = 1.0 # max timeout
def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
'''
Setup instance
'''
kwa['kind'] = raeting.trnsKinds.allow
super(Allower, self).__init__(**kwa)
self.oreo = None # cookie from correspondent needed until handshake completed
self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
self.redoTimer = aiding.StoreTimer(self.stack.store,
duration=self.redoTimeoutMin)
if self.reid is None:
self.reid = self.stack.estates.values()[0].eid # zeroth is channel master
remote = self.stack.estates[self.reid]
if not remote.joined:
emsg = "Must be joined first"
console.terse(emsg + '\n')
self.stack.incStat('unjoined_allow_attempt')
return
#raise raeting.TransactionError(emsg)
remote.refresh() # refresh short term keys and .allowed
self.sid = remote.sid
self.tid = remote.nextTid()
self.prep() # prepare .txData
self.add(self.index)
def receive(self, packet):
"""
Process received packet belonging to this transaction
"""
super(Allower, self).receive(packet) # self.rxPacket = packet
if packet.data['tk'] == raeting.trnsKinds.allow:
if packet.data['pk'] == raeting.pcktKinds.cookie:
self.cookie()
elif packet.data['pk'] == raeting.pcktKinds.ack:
self.allow()
elif packet.data['pk'] == raeting.pcktKinds.nack: # rejected
self.rejected()
def process(self):
'''
Perform time based processing of transaction
'''
if self.timeout > 0.0 and self.timer.expired:
self.remove()
console.concise("Allower timed out at {0}\n".format(self.stack.store.stamp))
return
# need keep sending join until accepted or timed out
if self.redoTimer.expired:
duration = min(
max(self.redoTimeoutMin,
self.redoTimer.duration) * 2.0,
self.redoTimeoutMax)
self.redoTimer.restart(duration=duration)
if self.txPacket:
if self.txPacket.data['pk'] == raeting.pcktKinds.hello:
self.transmit(self.txPacket) # redo
console.concise("Allower Redo Hello at {0}\n".format(self.stack.store.stamp))
if self.txPacket.data['pk'] == raeting.pcktKinds.initiate:
self.transmit(self.txPacket) # redo
console.concise("Allower Redo Initiate at {0}\n".format(self.stack.store.stamp))
if self.txPacket.data['pk'] == raeting.pcktKinds.ack:
self.transmit(self.txPacket) # redo
console.concise("Allower Redo Ack Final at {0}\n".format(self.stack.store.stamp))
def prep(self):
'''
Prepare .txData
'''
remote = self.stack.estates[self.reid]
self.txData.update( sh=self.stack.estate.host,
sp=self.stack.estate.port,
dh=remote.host,
dp=remote.port,
se=self.stack.estate.eid,
de=self.reid,
tk=self.kind,
cf=self.rmt,
bf=self.bcst,
si=self.sid,
ti=self.tid, )
def hello(self):
'''
Send hello request
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
remote = self.stack.estates[self.reid]
plain = binascii.hexlify("".rjust(32, '\x00'))
cipher, nonce = remote.privee.encrypt(plain, remote.pubber.key)
body = raeting.HELLO_PACKER.pack(plain, remote.privee.pubraw, cipher, nonce)
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.hello,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
console.concise("Allower Do Hello at {0}\n".format(self.stack.store.stamp))
def cookie(self):
'''
Process cookie packet
'''
if not self.stack.parseInner(self.rxPacket):
return
data = self.rxPacket.data
body = self.rxPacket.body.data
if not isinstance(body, basestring):
emsg = "Invalid format of cookie packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_cookie')
self.remove()
return
#raise raeting.TransactionError(emsg)
if len(body) != raeting.COOKIE_PACKER.size:
emsg = "Invalid length of cookie packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_cookie')
self.remove()
return
#raise raeting.TransactionError(emsg)
cipher, nonce = raeting.COOKIE_PACKER.unpack(body)
remote = self.stack.estates[self.reid]
msg = remote.privee.decrypt(cipher, nonce, remote.pubber.key)
if len(msg) != raeting.COOKIESTUFF_PACKER.size:
emsg = "Invalid length of cookie stuff"
console.terse(emsg + '\n')
self.stack.incStat('invalid_cookie')
self.remove()
return
#raise raeting.TransactionError(emsg)
shortraw, seid, deid, oreo = raeting.COOKIESTUFF_PACKER.unpack(msg)
if seid != remote.eid or deid != self.stack.estate.eid:
emsg = "Invalid seid or deid fields in cookie stuff"
console.terse(emsg + '\n')
self.stack.incStat('invalid_cookie')
self.remove()
return
#raeting.TransactionError(emsg)
self.oreo = binascii.hexlify(oreo)
remote.publee = nacling.Publican(key=shortraw)
self.initiate()
def initiate(self):
'''
Send initiate request to cookie response to hello request
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
remote = self.stack.estates[self.reid]
vcipher, vnonce = self.stack.estate.priver.encrypt(remote.privee.pubraw,
remote.pubber.key)
fqdn = remote.fqdn.ljust(128, ' ')
stuff = raeting.INITIATESTUFF_PACKER.pack(self.stack.estate.priver.pubraw,
vcipher,
vnonce,
fqdn)
cipher, nonce = remote.privee.encrypt(stuff, remote.publee.key)
oreo = binascii.unhexlify(self.oreo)
body = raeting.INITIATE_PACKER.pack(remote.privee.pubraw,
oreo,
cipher,
nonce)
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.initiate,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
console.concise("Allower Do Initiate at {0}\n".format(self.stack.store.stamp))
def allow(self):
'''
Process ackInitiate packet
Perform allowment in response to ack to initiate packet
'''
if not self.stack.parseInner(self.rxPacket):
return
self.stack.estates[self.reid].allowed = True
self.ackFinal()
#self.remove()
def rejected(self):
'''
Process nack packet
terminate in response to nack
'''
if not self.stack.parseInner(self.rxPacket):
return
self.remove()
console.concise("Allower rejected at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
def ackFinal(self):
'''
Send ack to ack Initiate to terminate transaction
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
body = ""
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.ack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
self.remove()
console.concise("Allower Ack Final at {0}\n".format(self.stack.store.stamp))
self.stack.incStat("allow_initiate_complete")
class Allowent(Correspondent):
'''
RAET protocol Allowent Correspondent class Dual of Allower
CurveCP handshake
'''
Timeout = 4.0
RedoTimeoutMin = 0.25 # initial timeout
RedoTimeoutMax = 1.0 # max timeout
def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
'''
Setup instance
'''
kwa['kind'] = raeting.trnsKinds.allow
if 'reid' not in kwa:
emsg = "Missing required keyword argumens: '{0}'".format('reid')
raise TypeError(emsg)
super(Allowent, self).__init__(**kwa)
self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
self.redoTimer = aiding.StoreTimer(self.stack.store,
duration=self.redoTimeoutMin)
remote = self.stack.estates[self.reid]
if not remote.joined:
emsg = "Must be joined first"
console.terse(emsg + '\n')
self.stack.incStat('unjoined_allow_attempt')
return
#raise raeting.TransactionError(emsg)
#Current .sid was set by stack from rxPacket.data sid so it is the new rsid
if not remote.validRsid(self.sid):
emsg = "Stale sid '{0}' in packet".format(self.sid)
console.terse(emsg + '\n')
self.stack.incStat('stale_sid_allow_attempt')
return
#raise raeting.TransactionError(emsg)
remote.rsid = self.sid #update last received rsid for estate
remote.rtid = self.tid #update last received rtid for estate
self.oreo = None #keep locally generated oreo around for redos
remote.refresh() # refresh short term keys and .allowed
self.prep() # prepare .txData
self.add(self.index)
def receive(self, packet):
"""
Process received packet belonging to this transaction
"""
super(Allowent, self).receive(packet) # self.rxPacket = packet
if packet.data['tk'] == raeting.trnsKinds.allow:
if packet.data['pk'] == raeting.pcktKinds.hello:
self.hello()
elif packet.data['pk'] == raeting.pcktKinds.initiate:
self.initiate()
elif packet.data['pk'] == raeting.pcktKinds.ack:
self.final()
elif packet.data['pk'] == raeting.pcktKinds.nack: # rejected
self.rejected()
def process(self):
'''
Perform time based processing of transaction
'''
if self.timeout > 0.0 and self.timer.expired:
self.nack()
console.concise("Allowent timed out at {0}\n".format(self.stack.store.stamp))
return
# need to perform the check for accepted status and then send accept
if self.redoTimer.expired:
duration = min(
max(self.redoTimeoutMin,
self.redoTimer.duration) * 2.0,
self.redoTimeoutMax)
self.redoTimer.restart(duration=duration)
if self.txPacket:
if self.txPacket.data['pk'] == raeting.pcktKinds.cookie:
self.transmit(self.txPacket) #redo
console.concise("Allowent Redo Cookie at {0}\n".format(self.stack.store.stamp))
if self.txPacket.data['pk'] == raeting.pcktKinds.ack:
self.transmit(self.txPacket) #redo
console.concise("Allowent Redo Ack at {0}\n".format(self.stack.store.stamp))
def prep(self):
'''
Prepare .txData
'''
remote = self.stack.estates[self.reid]
self.txData.update( sh=self.stack.estate.host,
sp=self.stack.estate.port,
dh=remote.host,
dp=remote.port,
se=self.stack.estate.eid,
de=self.reid,
tk=self.kind,
cf=self.rmt,
bf=self.bcst,
si=self.sid,
ti=self.tid, )
def hello(self):
'''
Process hello packet
'''
if not self.stack.parseInner(self.rxPacket):
return
data = self.rxPacket.data
body = self.rxPacket.body.data
if not isinstance(body, basestring):
emsg = "Invalid format of hello packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_hello')
self.remove()
return
#raise raeting.TransactionError(emsg)
if len(body) != raeting.HELLO_PACKER.size:
emsg = "Invalid length of hello packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_hello')
self.remove()
return
#raise raeting.TransactionError(emsg)
plain, shortraw, cipher, nonce = raeting.HELLO_PACKER.unpack(body)
remote = self.stack.estates[self.reid]
remote.publee = nacling.Publican(key=shortraw)
msg = self.stack.estate.priver.decrypt(cipher, nonce, remote.publee.key)
if msg != plain :
emsg = "Invalid plain not match decrypted cipher"
console.terse(emsg + '\n')
self.stack.incStat('invalid_hello')
self.remove()
return
#raise raeting.TransactionError(emsg)
self.cookie()
def cookie(self):
'''
Send Cookie Packet
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
remote = self.stack.estates[self.reid]
oreo = self.stack.estate.priver.nonce()
self.oreo = binascii.hexlify(oreo)
stuff = raeting.COOKIESTUFF_PACKER.pack(remote.privee.pubraw,
self.stack.estate.eid,
remote.eid,
oreo)
cipher, nonce = self.stack.estate.priver.encrypt(stuff, remote.publee.key)
body = raeting.COOKIE_PACKER.pack(cipher, nonce)
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.cookie,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
console.concise("Allowent Do Cookie at {0}\n".format(self.stack.store.stamp))
def initiate(self):
'''
Process initiate packet
'''
if not self.stack.parseInner(self.rxPacket):
return
data = self.rxPacket.data
body = self.rxPacket.body.data
if not isinstance(body, basestring):
emsg = "Invalid format of initiate packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
if len(body) != raeting.INITIATE_PACKER.size:
emsg = "Invalid length of initiate packet body"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
shortraw, oreo, cipher, nonce = raeting.INITIATE_PACKER.unpack(body)
remote = self.stack.estates[self.reid]
if shortraw != remote.publee.keyraw:
emsg = "Mismatch of short term public key in initiate packet"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
if (binascii.hexlify(oreo) != self.oreo):
emsg = "Stale or invalid cookie in initiate packet"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
msg = remote.privee.decrypt(cipher, nonce, remote.publee.key)
if len(msg) != raeting.INITIATESTUFF_PACKER.size:
emsg = "Invalid length of initiate stuff"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
pubraw, vcipher, vnonce, fqdn = raeting.INITIATESTUFF_PACKER.unpack(msg)
if pubraw != remote.pubber.keyraw:
emsg = "Mismatch of long term public key in initiate stuff"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
fqdn = fqdn.rstrip(' ')
if fqdn != self.stack.estate.fqdn:
emsg = "Mismatch of fqdn in initiate stuff"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
vouch = self.stack.estate.priver.decrypt(vcipher, vnonce, remote.pubber.key)
if vouch != remote.publee.keyraw or vouch != shortraw:
emsg = "Short term key vouch failed"
console.terse(emsg + '\n')
self.stack.incStat('invalid_initiate')
self.remove()
return
#raise raeting.TransactionError(emsg)
self.ackInitiate()
def ackInitiate(self):
'''
Send ack to initiate request
'''
if self.reid not in self.stack.estates:
msg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(msg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
body = ""
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.ack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
console.concise("Allowent Do Ack at {0}\n".format(self.stack.store.stamp))
self.allow()
def allow(self):
'''
Perform allowment
'''
self.stack.estates[self.reid].allowed = True
def final(self):
'''
Process ackFinal packet
'''
if not self.stack.parseInner(self.rxPacket):
return
self.remove()
console.concise("Allowent Do Final at {0}\n".format(self.stack.store.stamp))
self.stack.incStat("allow_correspond_complete")
def rejected(self):
'''
Process nack packet
terminate in response to nack
'''
if not self.stack.parseInner(self.rxPacket):
return
self.remove()
console.concise("Allowent rejected at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
def nack(self):
'''
Send nack to terminate allower transaction
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
body = odict()
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.nack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
self.remove()
console.concise("Allowent Reject at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
class Messenger(Initiator):
'''
RAET protocol Messenger Initiator class Dual of Messengent
Generic messages
'''
Timeout = 10.0
RedoTimeoutMin = 1.0 # initial timeout
RedoTimeoutMax = 3.0 # max timeout
def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
'''
Setup instance
'''
kwa['kind'] = raeting.trnsKinds.message
super(Messenger, self).__init__(**kwa)
self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
self.redoTimer = aiding.StoreTimer(self.stack.store,
duration=self.redoTimeoutMin)
if self.reid is None:
self.reid = self.stack.estates.values()[0].eid # zeroth is channel master
remote = self.stack.estates[self.reid]
if not remote.allowed:
emsg = "Must be allowed first"
console.terse(emsg + '\n')
self.stack.incStat('unallowed_message_attempt')
return
#raise raeting.TransactionError(emsg)
self.sid = remote.sid
self.tid = remote.nextTid()
self.prep() # prepare .txData
self.tray = packeting.TxTray(stack=self.stack)
self.add(self.index)
def receive(self, packet):
"""
Process received packet belonging to this transaction
"""
super(Messenger, self).receive(packet)
if packet.data['tk'] == raeting.trnsKinds.message:
if packet.data['pk'] == raeting.pcktKinds.ack:
self.again()
elif packet.data['pk'] == raeting.pcktKinds.nack: # rejected
self.rejected()
def process(self):
'''
Perform time based processing of transaction
'''
if self.timeout > 0.0 and self.timer.expired:
self.remove()
console.concise("Messenger timed out at {0}\n".format(self.stack.store.stamp))
return
# need keep sending message until completed or timed out
if self.redoTimer.expired:
duration = min(
max(self.redoTimeoutMin,
self.redoTimer.duration) * 2.0,
self.redoTimeoutMax)
self.redoTimer.restart(duration=duration)
if self.txPacket:
if self.txPacket.data['pk'] == raeting.pcktKinds.message:
self.transmit(self.txPacket) # redo
console.concise("Messenger Redo Segment {0} at {1}\n".format(
self.tray.current, self.stack.store.stamp))
def prep(self):
'''
Prepare .txData
'''
remote = self.stack.estates[self.reid]
self.txData.update( sh=self.stack.estate.host,
sp=self.stack.estate.port,
dh=remote.host,
dp=remote.port,
se=self.stack.estate.eid,
de=self.reid,
tk=self.kind,
cf=self.rmt,
bf=self.bcst,
si=self.sid,
ti=self.tid,)
def message(self, body=None):
'''
Send message
'''
if self.reid not in self.stack.estates:
emsg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(emsg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
if not self.tray.packets:
try:
self.tray.pack(data=self.txData, body=body)
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove()
return
if self.tray.current >= len(self.tray.packets):
return
packet = self.tray.packets[self.tray.current]
self.transmit(packet)
self.stack.incStat("message_segment_tx")
console.concise("Messenger Do Message Segment {0} at {1}\n".format(
self.tray.current, self.stack.store.stamp))
self.tray.current += 1
def again(self):
'''
Process ack packet
'''
if self.tray.current >= len(self.tray.packets):
self.complete()
else:
self.message()
def complete(self):
'''
Complete transaction and remove
'''
self.remove()
console.concise("Messenger Done at {0}\n".format(self.stack.store.stamp))
self.stack.incStat("message_initiate_complete")
def rejected(self):
'''
Process nack packet
terminate in response to nack
'''
if not self.stack.parseInner(self.rxPacket):
return
self.remove()
console.concise("Messenger rejected at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
class Messengent(Correspondent):
'''
RAET protocol Messengent Correspondent class Dual of Messenger
Generic Messages
'''
Timeout = 10.0
RedoTimeoutMin = 1.0 # initial timeout
RedoTimeoutMax = 3.0 # max timeout
def __init__(self, redoTimeoutMin=None, redoTimeoutMax=None, **kwa):
'''
Setup instance
'''
kwa['kind'] = raeting.trnsKinds.message
if 'reid' not in kwa:
emsg = "Missing required keyword argumens: '{0}'".format('reid')
raise TypeError(emsg)
super(Messengent, self).__init__(**kwa)
self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
self.redoTimer = aiding.StoreTimer(self.stack.store,
duration=self.redoTimeoutMin)
remote = self.stack.estates[self.reid]
if not remote.allowed:
emsg = "Must be allowed first"
console.terse(emsg + '\n')
self.stack.incStat('unallowed_message_attempt')
return
#raise raeting.TransactionError(emsg)
#Current .sid was set by stack from rxPacket.data sid so it is the new rsid
if not remote.validRsid(self.sid):
emsg = "Stale sid '{0}' in packet".format(self.sid)
console.terse(emsg + '\n')
self.stack.incStat('stale_sid_message_attempt')
return
#raise raeting.TransactionError(emsg)
remote.rsid = self.sid #update last received rsid for estate
remote.rtid = self.tid #update last received rtid for estate
self.prep() # prepare .txData
self.tray = packeting.RxTray(stack=self.stack)
self.add(self.index)
def receive(self, packet):
"""
Process received packet belonging to this transaction
"""
super(Messengent, self).receive(packet)
# resent message
if packet.data['tk'] == raeting.trnsKinds.message:
if packet.data['pk'] == raeting.pcktKinds.message:
self.message()
elif packet.data['pk'] == raeting.pcktKinds.nack: # rejected
self.rejected()
def process(self):
'''
Perform time based processing of transaction
'''
if self.timeout > 0.0 and self.timer.expired:
self.nack()
console.concise("Messengent timed out at {0}\n".format(self.stack.store.stamp))
return
# need to include current segment in ack or resend
#if self.redoTimer.expired:
#duration = min(
#max(self.redoTimeoutMin,
#self.redoTimer.duration) * 2.0,
#self.redoTimeoutMax)
#self.redoTimer.restart(duration=duration)
#if self.txPacket:
#if self.txPacket.data['pk'] == raeting.pcktKinds.ack:
#self.transmit(self.txPacket) #redo
#console.concise("Messengent Redo Ack at {0}\n".format(self.stack.store.stamp))
def prep(self):
'''
Prepare .txData
'''
remote = self.stack.estates[self.reid]
self.txData.update( sh=self.stack.estate.host,
sp=self.stack.estate.port,
dh=remote.host,
dp=remote.port,
se=self.stack.estate.eid,
de=self.reid,
tk=self.kind,
cf=self.rmt,
bf=self.bcst,
si=self.sid,
ti=self.tid,)
def message(self):
'''
Process message packet
'''
try:
body = self.tray.parse(self.rxPacket)
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat('parsing_message_error')
self.remove()
return
self.ackMessage()
if self.tray.complete:
console.verbose("{0} received message body\n{1}\n".format(
self.stack.name, body))
self.stack.rxMsgs.append(body)
self.complete()
def ackMessage(self):
'''
Send ack to message
'''
if self.reid not in self.stack.estates:
msg = "Invalid remote destination estate id '{0}'".format(self.reid)
#raise raeting.TransactionError(msg)
console.terse(emsg + '\n')
self.stack.incStat('invalid_remote_eid')
self.remove()
return
body = odict()
packet = packeting.TxPacket(stack=self.stack,
kind=raeting.pcktKinds.ack,
embody=body,
data=self.txData)
try:
packet.pack()
except raeting.PacketError as ex:
console.terse(str(ex) + '\n')
self.stack.incStat("packing_error")
self.remove()
return
self.transmit(packet)
self.stack.incStat("message_segment_rx")
console.concise("Messengent Do Ack Segment at {0}\n".format(
self.stack.store.stamp))
def complete(self):
'''
Complete transaction and remove
'''
self.remove()
console.concise("Messengent Complete at {0}\n".format(self.stack.store.stamp))
self.stack.incStat("messagent_correspond_complete")
def rejected(self):
'''
Process nack packet
terminate in response to nack
'''
if not self.stack.parseInner(self.rxPacket):
return
self.remove()
console.concise("Messengent rejected at {0}\n".format(self.stack.store.stamp))
self.stack.incStat(self.statKey())
```
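Every transaction class above combines an overall transaction timer with a redo timer whose interval doubles on each retransmission, clamped between `RedoTimeoutMin` and `RedoTimeoutMax`. The sketch below mirrors only that timing logic using the standard library; the names are illustrative and this is not the RAET or ioflo API:

```python
# Stand-alone illustration of the redo/backoff scheme used by the process()
# methods above. Not the RAET API: time.time() stands in for ioflo's StoreTimer,
# and resend() stands in for transmit(self.txPacket).
import time

class BackoffRedo(object):
    def __init__(self, redo_min=1.0, redo_max=4.0, timeout=5.0):
        self.redo_min = redo_min      # initial redo interval
        self.redo_max = redo_max      # cap for the doubled interval
        self.timeout = timeout        # overall transaction timeout (0.0 = never)
        self.duration = redo_min
        self.started = self.last_redo = time.time()

    def process(self, resend):
        now = time.time()
        if self.timeout > 0.0 and now - self.started >= self.timeout:
            return False                              # transaction timed out
        if now - self.last_redo >= self.duration:     # redo timer expired
            # double the interval, clamped to [redo_min, redo_max]
            self.duration = min(max(self.redo_min, self.duration) * 2.0,
                                self.redo_max)
            self.last_redo = now
            resend()                                  # retransmit last packet
        return True
```

With `redo_min=1.0`, `redo_max=4.0`, `timeout=5.0` this mirrors the Joiner defaults; Allower and Allowent shrink the redo window to 0.25–1.0 seconds under a 4.0 second transaction timeout.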
#### File: salt/utils/etcd_util.py
```python
import logging
# Import third party libs
try:
import etcd
HAS_LIBS = True
except Exception:
HAS_LIBS = False
# Set up logging
log = logging.getLogger(__name__)
def get_conn(opts, profile=None):
'''
Return a client object for accessing etcd
'''
if profile:
conf = opts.get(profile, {})
else:
conf = opts
host = conf.get('etcd.host', '127.0.0.1')
port = conf.get('etcd.port', 4001)
return etcd.Client(host, port)
def tree(client, path):
'''
Recurse through etcd and return all values
'''
ret = {}
items = client.get(path)
for item in items.children:
comps = str(item.key).split('/')
if item.dir is True:
if item.key == path:
continue
ret[comps[-1]] = tree(client, item.key)
else:
ret[comps[-1]] = item.value
return ret
``` |
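A hedged usage sketch for the helpers above (it assumes the `python-etcd` client is installed, an etcd server is reachable, and the module is importable as `salt.utils.etcd_util`; the profile name and key path are hypothetical):

```python
# Hypothetical profile name and path; only get_conn() and tree() come from the
# module above.
from salt.utils import etcd_util

opts = {'my_etcd_profile': {'etcd.host': '127.0.0.1', 'etcd.port': 4001}}
client = etcd_util.get_conn(opts, profile='my_etcd_profile')
config = etcd_util.tree(client, '/salt')   # nested dict of every key under /salt
print(config)
```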
{
"source": "jia-jerry/aliyun-openapi-python-sdk",
"score": 2
} |
#### File: request/v20170912/CreateCenRouteMapRequest.py
```python
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class CreateCenRouteMapRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateCenRouteMap','Cbn')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_CommunityMatchMode(self):
return self.get_query_params().get('CommunityMatchMode')
def set_CommunityMatchMode(self,CommunityMatchMode):
self.add_query_param('CommunityMatchMode',CommunityMatchMode)
def get_MapResult(self):
return self.get_query_params().get('MapResult')
def set_MapResult(self,MapResult):
self.add_query_param('MapResult',MapResult)
def get_NextPriority(self):
return self.get_query_params().get('NextPriority')
def set_NextPriority(self,NextPriority):
self.add_query_param('NextPriority',NextPriority)
def get_DestinationCidrBlockss(self):
return self.get_query_params().get('DestinationCidrBlockss')
def set_DestinationCidrBlockss(self, DestinationCidrBlockss):
for depth1 in range(len(DestinationCidrBlockss)):
if DestinationCidrBlockss[depth1] is not None:
self.add_query_param('DestinationCidrBlocks.' + str(depth1 + 1) , DestinationCidrBlockss[depth1])
def get_SourceInstanceIdss(self):
return self.get_query_params().get('SourceInstanceIdss')
def set_SourceInstanceIdss(self, SourceInstanceIdss):
for depth1 in range(len(SourceInstanceIdss)):
if SourceInstanceIdss[depth1] is not None:
self.add_query_param('SourceInstanceIds.' + str(depth1 + 1) , SourceInstanceIdss[depth1])
def get_SourceRegionIdss(self):
return self.get_query_params().get('SourceRegionIdss')
def set_SourceRegionIdss(self, SourceRegionIdss):
for depth1 in range(len(SourceRegionIdss)):
if SourceRegionIdss[depth1] is not None:
self.add_query_param('SourceRegionIds.' + str(depth1 + 1) , SourceRegionIdss[depth1])
def get_MatchAsnss(self):
return self.get_query_params().get('MatchAsnss')
def set_MatchAsnss(self, MatchAsnss):
for depth1 in range(len(MatchAsnss)):
if MatchAsnss[depth1] is not None:
self.add_query_param('MatchAsns.' + str(depth1 + 1) , MatchAsnss[depth1])
def get_Preference(self):
return self.get_query_params().get('Preference')
def set_Preference(self,Preference):
self.add_query_param('Preference',Preference)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Priority(self):
return self.get_query_params().get('Priority')
def set_Priority(self,Priority):
self.add_query_param('Priority',Priority)
def get_DestinationChildInstanceTypess(self):
return self.get_query_params().get('DestinationChildInstanceTypess')
def set_DestinationChildInstanceTypess(self, DestinationChildInstanceTypess):
for depth1 in range(len(DestinationChildInstanceTypess)):
if DestinationChildInstanceTypess[depth1] is not None:
self.add_query_param('DestinationChildInstanceTypes.' + str(depth1 + 1) , DestinationChildInstanceTypess[depth1])
def get_SourceRouteTableIdss(self):
return self.get_query_params().get('SourceRouteTableIdss')
def set_SourceRouteTableIdss(self, SourceRouteTableIdss):
for depth1 in range(len(SourceRouteTableIdss)):
if SourceRouteTableIdss[depth1] is not None:
self.add_query_param('SourceRouteTableIds.' + str(depth1 + 1) , SourceRouteTableIdss[depth1])
def get_SourceChildInstanceTypess(self):
return self.get_query_params().get('SourceChildInstanceTypess')
def set_SourceChildInstanceTypess(self, SourceChildInstanceTypess):
for depth1 in range(len(SourceChildInstanceTypess)):
if SourceChildInstanceTypess[depth1] is not None:
self.add_query_param('SourceChildInstanceTypes.' + str(depth1 + 1) , SourceChildInstanceTypess[depth1])
def get_CommunityOperateMode(self):
return self.get_query_params().get('CommunityOperateMode')
def set_CommunityOperateMode(self,CommunityOperateMode):
self.add_query_param('CommunityOperateMode',CommunityOperateMode)
def get_OperateCommunitySets(self):
return self.get_query_params().get('OperateCommunitySets')
def set_OperateCommunitySets(self, OperateCommunitySets):
for depth1 in range(len(OperateCommunitySets)):
if OperateCommunitySets[depth1] is not None:
self.add_query_param('OperateCommunitySet.' + str(depth1 + 1) , OperateCommunitySets[depth1])
def get_RouteTypess(self):
return self.get_query_params().get('RouteTypess')
def set_RouteTypess(self, RouteTypess):
for depth1 in range(len(RouteTypess)):
if RouteTypess[depth1] is not None:
self.add_query_param('RouteTypes.' + str(depth1 + 1) , RouteTypess[depth1])
def get_CidrMatchMode(self):
return self.get_query_params().get('CidrMatchMode')
def set_CidrMatchMode(self,CidrMatchMode):
self.add_query_param('CidrMatchMode',CidrMatchMode)
def get_CenId(self):
return self.get_query_params().get('CenId')
def set_CenId(self,CenId):
self.add_query_param('CenId',CenId)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_SourceInstanceIdsReverseMatch(self):
return self.get_query_params().get('SourceInstanceIdsReverseMatch')
def set_SourceInstanceIdsReverseMatch(self,SourceInstanceIdsReverseMatch):
self.add_query_param('SourceInstanceIdsReverseMatch',SourceInstanceIdsReverseMatch)
def get_DestinationRouteTableIdss(self):
return self.get_query_params().get('DestinationRouteTableIdss')
def set_DestinationRouteTableIdss(self, DestinationRouteTableIdss):
for depth1 in range(len(DestinationRouteTableIdss)):
if DestinationRouteTableIdss[depth1] is not None:
self.add_query_param('DestinationRouteTableIds.' + str(depth1 + 1) , DestinationRouteTableIdss[depth1])
def get_TransmitDirection(self):
return self.get_query_params().get('TransmitDirection')
def set_TransmitDirection(self,TransmitDirection):
self.add_query_param('TransmitDirection',TransmitDirection)
def get_DestinationInstanceIdss(self):
return self.get_query_params().get('DestinationInstanceIdss')
def set_DestinationInstanceIdss(self, DestinationInstanceIdss):
for depth1 in range(len(DestinationInstanceIdss)):
if DestinationInstanceIdss[depth1] is not None:
self.add_query_param('DestinationInstanceIds.' + str(depth1 + 1) , DestinationInstanceIdss[depth1])
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_DestinationInstanceIdsReverseMatch(self):
return self.get_query_params().get('DestinationInstanceIdsReverseMatch')
def set_DestinationInstanceIdsReverseMatch(self,DestinationInstanceIdsReverseMatch):
self.add_query_param('DestinationInstanceIdsReverseMatch',DestinationInstanceIdsReverseMatch)
def get_PrependAsPaths(self):
return self.get_query_params().get('PrependAsPaths')
def set_PrependAsPaths(self, PrependAsPaths):
for depth1 in range(len(PrependAsPaths)):
if PrependAsPaths[depth1] is not None:
self.add_query_param('PrependAsPath.' + str(depth1 + 1) , PrependAsPaths[depth1])
def get_AsPathMatchMode(self):
return self.get_query_params().get('AsPathMatchMode')
def set_AsPathMatchMode(self,AsPathMatchMode):
self.add_query_param('AsPathMatchMode',AsPathMatchMode)
def get_MatchCommunitySets(self):
return self.get_query_params().get('MatchCommunitySets')
def set_MatchCommunitySets(self, MatchCommunitySets):
for depth1 in range(len(MatchCommunitySets)):
if MatchCommunitySets[depth1] is not None:
self.add_query_param('MatchCommunitySet.' + str(depth1 + 1) , MatchCommunitySets[depth1])
def get_CenRegionId(self):
return self.get_query_params().get('CenRegionId')
def set_CenRegionId(self,CenRegionId):
self.add_query_param('CenRegionId',CenRegionId)
```
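These auto-generated request classes are normally executed through `aliyunsdkcore.client.AcsClient`. A minimal, hedged sketch follows; the credentials, region, CEN id, and route-map values are placeholders rather than values taken from this repository.
```python
from aliyunsdkcore.client import AcsClient

# All concrete values below are placeholders for illustration.
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = CreateCenRouteMapRequest()
request.set_CenId('cen-xxxxxxxxxxxxxxxx')
request.set_CenRegionId('cn-hangzhou')
request.set_TransmitDirection('RegionOut')
request.set_Priority(10)
request.set_MapResult('Permit')
response = client.do_action_with_exception(request)
print(response)
```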
#### File: request/v20190307/InitDeviceRequest.py
```python
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudauth.endpoint import endpoint_data
class InitDeviceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cloudauth', '2019-03-07', 'InitDevice','cloudauth')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Channel(self):
return self.get_query_params().get('Channel')
def set_Channel(self,Channel):
self.add_query_param('Channel',Channel)
def get_BizData(self):
return self.get_query_params().get('BizData')
def set_BizData(self,BizData):
self.add_query_param('BizData',BizData)
def get_Merchant(self):
return self.get_query_params().get('Merchant')
def set_Merchant(self,Merchant):
self.add_query_param('Merchant',Merchant)
def get_AppVersion(self):
return self.get_query_params().get('AppVersion')
def set_AppVersion(self,AppVersion):
self.add_query_param('AppVersion',AppVersion)
def get_DeviceToken(self):
return self.get_query_params().get('DeviceToken')
def set_DeviceToken(self,DeviceToken):
self.add_query_param('DeviceToken',DeviceToken)
def get_CertifyId(self):
return self.get_query_params().get('CertifyId')
def set_CertifyId(self,CertifyId):
self.add_query_param('CertifyId',CertifyId)
def get_OuterOrderNo(self):
return self.get_query_params().get('OuterOrderNo')
def set_OuterOrderNo(self,OuterOrderNo):
self.add_query_param('OuterOrderNo',OuterOrderNo)
def get_ProduceNode(self):
return self.get_query_params().get('ProduceNode')
def set_ProduceNode(self,ProduceNode):
self.add_query_param('ProduceNode',ProduceNode)
def get_ProductName(self):
return self.get_query_params().get('ProductName')
def set_ProductName(self,ProductName):
self.add_query_param('ProductName',ProductName)
def get_CertifyPrincipal(self):
return self.get_query_params().get('CertifyPrincipal')
def set_CertifyPrincipal(self,CertifyPrincipal):
self.add_query_param('CertifyPrincipal',CertifyPrincipal)
def get_MetaInfo(self):
return self.get_query_params().get('MetaInfo')
def set_MetaInfo(self,MetaInfo):
self.add_query_param('MetaInfo',MetaInfo)
```
#### File: request/v20200201/DescribeAlarmsRequest.py
```python
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudesl.endpoint import endpoint_data
class DescribeAlarmsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'cloudesl', '2020-02-01', 'DescribeAlarms','cloudesl')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StoreId(self):
return self.get_body_params().get('StoreId')
def set_StoreId(self,StoreId):
self.add_body_params('StoreId', StoreId)
def get_PageNumber(self):
return self.get_body_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_body_params('PageNumber', PageNumber)
def get_PageSize(self):
return self.get_body_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_body_params('PageSize', PageSize)
def get_AlarmType(self):
return self.get_body_params().get('AlarmType')
def set_AlarmType(self,AlarmType):
self.add_body_params('AlarmType', AlarmType)
def get_AlarmStatus(self):
return self.get_body_params().get('AlarmStatus')
def set_AlarmStatus(self,AlarmStatus):
self.add_body_params('AlarmStatus', AlarmStatus)
def get_ErrorType(self):
return self.get_body_params().get('ErrorType')
def set_ErrorType(self,ErrorType):
self.add_body_params('ErrorType', ErrorType)
def get_AlarmId(self):
return self.get_body_params().get('AlarmId')
def set_AlarmId(self,AlarmId):
self.add_body_params('AlarmId', AlarmId)
def get_DeviceMac(self):
return self.get_body_params().get('DeviceMac')
def set_DeviceMac(self,DeviceMac):
self.add_body_params('DeviceMac', DeviceMac)
```
#### File: aliyunsdkcore/endpoint/endpoint_resolver_rules.py
```python
from aliyunsdkcore.endpoint.local_config_regional_endpoint_resolver \
import LocalConfigRegionalEndpointResolver
class EndpointResolverRules(LocalConfigRegionalEndpointResolver):
def __init__(self, *args, **kwargs):
LocalConfigRegionalEndpointResolver.__init__(self)
self.region_headers = ['cn', 'ap', 'eu', 'rus', 'us', 'me']
self.product_code_valid = False
self.region_id_valid = False
self.endpoint_map = None
self.endpoint_regional = None
self.request_network = 'public'
self.product_suffix = ''
def resolve(self, request):
if request.endpoint_map is None or request.endpoint_regional is None:
return None
request_network = "public" if not request.request_network else request.request_network
endpoint_regional = request.endpoint_regional
endpoint = ""
if request_network == "public":
endpoint = request.endpoint_map.get(request.region_id, "")
if endpoint == "":
if endpoint_regional == "regional":
if not self.verify_region_id(request.region_id.lower()):
return
endpoint_domain = ".{region_id}.aliyuncs.com".format(
region_id=request.region_id.lower())
elif endpoint_regional == "central":
endpoint_domain = ".aliyuncs.com"
else:
return None
network = "" if request_network == "public" else "-" + request_network
suffix = "-" + request.product_suffix if request.product_suffix else ""
endpoint_param_list = [request.product_code_lower, suffix, network, endpoint_domain]
endpoint = "".join(list(filter(lambda x: x, endpoint_param_list)))
return endpoint
def verify_region_id(self, region_id):
region = region_id.split('-')
if len(region) >= 2 and region[0] in self.region_headers:
return True
def is_product_code_valid(self, request):
return self.product_code_valid
def is_region_id_valid(self, request):
return self.region_id_valid
@classmethod
def get_valid_region_ids_by_product(cls, product_code):
return None
```
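A minimal sketch of how `resolve()` composes an endpoint: the stand-in request class below is not part of the SDK and only carries the attributes the method actually reads.
```python
# Stand-in request object for illustration; real requests come from aliyunsdkcore.
class _FakeResolveRequest:
    endpoint_map = {}                # no explicit per-region endpoint configured
    endpoint_regional = 'regional'
    region_id = 'cn-shanghai'
    request_network = 'public'
    product_suffix = ''
    product_code_lower = 'ecs'

resolver = EndpointResolverRules()
print(resolver.resolve(_FakeResolveRequest()))  # -> ecs.cn-shanghai.aliyuncs.com
```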
#### File: tests/endpoint/test_chained_endpoint_resolver.py
```python
from tests import unittest
from aliyunsdkcore.endpoint.user_customized_endpoint_resolver import UserCustomizedEndpointResolver
from aliyunsdkcore.endpoint.chained_endpoint_resolver import ChainedEndpointResolver
from aliyunsdkcore.endpoint.resolver_endpoint_request import ResolveEndpointRequest
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.endpoint.local_config_regional_endpoint_resolver import \
LocalConfigRegionalEndpointResolver
class TestChainedEndpointResolver(unittest.TestCase):
def test_resolver(self):
user = UserCustomizedEndpointResolver()
chain = [
user
]
resolver = ChainedEndpointResolver(chain)
# can not be resolved
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "")
with self.assertRaises(ClientException) as ex:
resolver.resolve(request)
self.assertEqual(ex.exception.error_code, "SDK.EndpointResolvingError")
self.assertEqual(ex.exception.message,
"No endpoint for product 'ecs'.\n"
"Please check the product code, or set an endpoint for your request "
"explicitly.\n"
"See https://www.alibabacloud.com/help/doc-detail/92074.htm\n")
user.put_endpoint_entry("cn-huhehaote", "ecs",
"my-endpoint-for-cnhuhehaote-ecs")
# can not be resolved with cn-hangzhou
request = ResolveEndpointRequest("cn-hangzhou", "ecs", "", "")
with self.assertRaises(ClientException) as ex:
resolver.resolve(request)
self.assertEqual(ex.exception.error_code, "SDK.EndpointResolvingError")
self.assertEqual(
ex.exception.message, "No such region 'cn-hangzhou'. Please check your region ID.")
# cn-hangzhou and ecs is valid
user.put_endpoint_entry("cn-hangzhou", "rds",
"my-endpoint-for-cn-hangzhou-rds")
with self.assertRaises(ClientException) as ex:
resolver.resolve(request)
self.assertEqual(ex.exception.error_code, "SDK.EndpointResolvingError")
self.assertEqual(
ex.exception.message, "No endpoint in the region 'cn-hangzhou' for product 'ecs'.\n"
"You can set an endpoint for your request explicitly.\n"
"See https://www.alibabacloud.com/help/doc-detail/92074.htm\n")
# can be resolved
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "")
self.assertEqual(resolver.resolve(request),
"my-endpoint-for-cnhuhehaote-ecs")
chain = [
LocalConfigRegionalEndpointResolver(),
user
]
resolver = ChainedEndpointResolver(chain)
request.request_network = 'inner'
endpoint = resolver.resolve(request)
self.assertEqual('my-endpoint-for-cnhuhehaote-ecs', endpoint)
```
#### File: tests/endpoint/test_local_config_regional_endpoint_resolver.py
```python
from tests import unittest
from aliyunsdkcore.endpoint.local_config_regional_endpoint_resolver \
import LocalConfigRegionalEndpointResolver
from aliyunsdkcore.endpoint.resolver_endpoint_request import ResolveEndpointRequest
class TestLocalConfigRegionalEndpointResolver(unittest.TestCase):
def test_resolver(self):
resolver = LocalConfigRegionalEndpointResolver()
request = ResolveEndpointRequest("", "", "", "")
self.assertEqual(resolver.resolve(request), None)
self.assertEqual(resolver._make_endpoint_entry_key(
"ecs", "cn-huhehaote"), "ecs.cn-huhehaote")
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "")
self.assertEqual(resolver.resolve(request),
'ecs.cn-huhehaote.aliyuncs.com')
self.assertTrue(resolver.is_region_id_valid(request))
# resolver.put_endpoint_entry("ecs", "my-endpoint-for-cnhuhehaote-ecs")
# request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "")
# self.assertEqual(resolver.resolve(request), "my-endpoint-for-cnhuhehaote-ecs")
# self.assertTrue(resolver.is_region_id_valid(request))
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "innerAPI")
self.assertEqual(resolver.resolve(request), None)
# _get_normalized_product_code
self.assertEqual(resolver._get_normalized_product_code(
"cloudapi"), "apigateway")
self.assertEqual(resolver._get_normalized_product_code("ecs"), "ecs")
self.assertEqual(len(resolver.get_valid_region_ids_by_product('ecs')), 19)
self.assertIsNone(resolver.get_valid_region_ids_by_product('xxx'))
self.assertTrue(resolver.is_product_code_valid(request))
def test_resolver_with_jsonstr(self):
resolver = LocalConfigRegionalEndpointResolver("{}")
request = ResolveEndpointRequest("", "", "", "")
self.assertEqual(resolver.resolve(request), None)
self.assertEqual(resolver._make_endpoint_entry_key(
"ecs", "<KEY>"), "ecs.cn-huhehaote")
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "")
self.assertEqual(resolver.resolve(request), None)
self.assertFalse(resolver.is_region_id_valid(request))
resolver.put_endpoint_entry(
"ecs.cn-huhehaote", "my-endpoint-for-cnhuhehaote-ecs")
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "")
self.assertEqual(resolver.resolve(request),
"my-endpoint-for-cnhuhehaote-ecs")
self.assertFalse(resolver.is_region_id_valid(request))
request = ResolveEndpointRequest("cn-huhehaote", "ecs", "", "innerAPI")
self.assertEqual(resolver.resolve(request), None)
# _get_normalized_product_code
self.assertEqual(resolver._get_normalized_product_code(
"cloudapi"), "cloudapi")
self.assertEqual(resolver._get_normalized_product_code("ecs"), "ecs")
```
#### File: request/v20151215/CreateTriggerHookRequest.py
```python
from aliyunsdkcore.request import RoaRequest
from aliyunsdkcs.endpoint import endpoint_data
class CreateTriggerHookRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'CS', '2015-12-15', 'CreateTriggerHook')
self.set_uri_pattern('/hook/trigger')
self.set_method('PUT')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_cluster_id(self):
return self.get_body_params().get('cluster_id')
def set_cluster_id(self,cluster_id):
self.add_body_params('cluster_id', cluster_id)
def get_project_id(self):
return self.get_body_params().get('project_id')
def set_project_id(self,project_id):
self.add_body_params('project_id', project_id)
def get_trigger_url(self):
return self.get_body_params().get('trigger_url')
def set_trigger_url(self,trigger_url):
self.add_body_params('trigger_url', trigger_url)
def get_region_id(self):
return self.get_body_params().get('region_id')
def set_region_id(self,region_id):
self.add_body_params('region_id', region_id)
```
#### File: request/v20180129/ListServerLockRequest.py
```python
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class ListServerLockRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'ListServerLock','domain')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_LockProductId(self):
return self.get_query_params().get('LockProductId')
def set_LockProductId(self,LockProductId):
self.add_query_param('LockProductId',LockProductId)
def get_EndExpireDate(self):
return self.get_query_params().get('EndExpireDate')
def set_EndExpireDate(self,EndExpireDate):
self.add_query_param('EndExpireDate',EndExpireDate)
def get_PageNum(self):
return self.get_query_params().get('PageNum')
def set_PageNum(self,PageNum):
self.add_query_param('PageNum',PageNum)
def get_BeginStartDate(self):
return self.get_query_params().get('BeginStartDate')
def set_BeginStartDate(self,BeginStartDate):
self.add_query_param('BeginStartDate',BeginStartDate)
def get_ServerLockStatus(self):
return self.get_query_params().get('ServerLockStatus')
def set_ServerLockStatus(self,ServerLockStatus):
self.add_query_param('ServerLockStatus',ServerLockStatus)
def get_StartExpireDate(self):
return self.get_query_params().get('StartExpireDate')
def set_StartExpireDate(self,StartExpireDate):
self.add_query_param('StartExpireDate',StartExpireDate)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_EndStartDate(self):
return self.get_query_params().get('EndStartDate')
def set_EndStartDate(self,EndStartDate):
self.add_query_param('EndStartDate',EndStartDate)
def get_UserClientIp(self):
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self,UserClientIp):
self.add_query_param('UserClientIp',UserClientIp)
```
#### File: request/v20200101/ConfigureSynchronizationJobRequest.py
```python
from aliyunsdkcore.request import RpcRequest
class ConfigureSynchronizationJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dts', '2020-01-01', 'ConfigureSynchronizationJob','dts')
def get_SourceEndpointInstanceId(self):
return self.get_query_params().get('SourceEndpoint.InstanceId')
def set_SourceEndpointInstanceId(self,SourceEndpointInstanceId):
self.add_query_param('SourceEndpoint.InstanceId',SourceEndpointInstanceId)
def get_Checkpoint(self):
return self.get_query_params().get('Checkpoint')
def set_Checkpoint(self,Checkpoint):
self.add_query_param('Checkpoint',Checkpoint)
def get_DestinationEndpointInstanceId(self):
return self.get_query_params().get('DestinationEndpoint.InstanceId')
def set_DestinationEndpointInstanceId(self,DestinationEndpointInstanceId):
self.add_query_param('DestinationEndpoint.InstanceId',DestinationEndpointInstanceId)
def get_SourceEndpointIP(self):
return self.get_query_params().get('SourceEndpoint.IP')
def set_SourceEndpointIP(self,SourceEndpointIP):
self.add_query_param('SourceEndpoint.IP',SourceEndpointIP)
def get_SynchronizationObjects(self):
return self.get_query_params().get('SynchronizationObjects')
def set_SynchronizationObjects(self,SynchronizationObjects):
self.add_query_param('SynchronizationObjects',SynchronizationObjects)
def get_DestinationEndpointPassword(self):
return self.get_query_params().get('DestinationEndpoint.Password')
def set_DestinationEndpointPassword(self,DestinationEndpointPassword):
self.add_query_param('DestinationEndpoint.Password',DestinationEndpointPassword)
def get_DataInitialization(self):
return self.get_query_params().get('DataInitialization')
def set_DataInitialization(self,DataInitialization):
self.add_query_param('DataInitialization',DataInitialization)
def get_StructureInitialization(self):
return self.get_query_params().get('StructureInitialization')
def set_StructureInitialization(self,StructureInitialization):
self.add_query_param('StructureInitialization',StructureInitialization)
def get_PartitionKeyModifyTime_Minute(self):
return self.get_query_params().get('PartitionKey.ModifyTime_Minute')
def set_PartitionKeyModifyTime_Minute(self,PartitionKeyModifyTime_Minute):
self.add_query_param('PartitionKey.ModifyTime_Minute',PartitionKeyModifyTime_Minute)
def get_PartitionKeyModifyTime_Day(self):
return self.get_query_params().get('PartitionKey.ModifyTime_Day')
def set_PartitionKeyModifyTime_Day(self,PartitionKeyModifyTime_Day):
self.add_query_param('PartitionKey.ModifyTime_Day',PartitionKeyModifyTime_Day)
def get_SourceEndpointInstanceType(self):
return self.get_query_params().get('SourceEndpoint.InstanceType')
def set_SourceEndpointInstanceType(self,SourceEndpointInstanceType):
self.add_query_param('SourceEndpoint.InstanceType',SourceEndpointInstanceType)
def get_SynchronizationJobId(self):
return self.get_query_params().get('SynchronizationJobId')
def set_SynchronizationJobId(self,SynchronizationJobId):
self.add_query_param('SynchronizationJobId',SynchronizationJobId)
def get_SynchronizationJobName(self):
return self.get_query_params().get('SynchronizationJobName')
def set_SynchronizationJobName(self,SynchronizationJobName):
self.add_query_param('SynchronizationJobName',SynchronizationJobName)
def get_AccountId(self):
return self.get_query_params().get('AccountId')
def set_AccountId(self,AccountId):
self.add_query_param('AccountId',AccountId)
def get_SourceEndpointUserName(self):
return self.get_query_params().get('SourceEndpoint.UserName')
def set_SourceEndpointUserName(self,SourceEndpointUserName):
self.add_query_param('SourceEndpoint.UserName',SourceEndpointUserName)
def get_SourceEndpointDatabaseName(self):
return self.get_query_params().get('SourceEndpoint.DatabaseName')
def set_SourceEndpointDatabaseName(self,SourceEndpointDatabaseName):
self.add_query_param('SourceEndpoint.DatabaseName',SourceEndpointDatabaseName)
def get_PartitionKeyModifyTime_Month(self):
return self.get_query_params().get('PartitionKey.ModifyTime_Month')
def set_PartitionKeyModifyTime_Month(self,PartitionKeyModifyTime_Month):
self.add_query_param('PartitionKey.ModifyTime_Month',PartitionKeyModifyTime_Month)
def get_SourceEndpointPort(self):
return self.get_query_params().get('SourceEndpoint.Port')
def set_SourceEndpointPort(self,SourceEndpointPort):
self.add_query_param('SourceEndpoint.Port',SourceEndpointPort)
def get_SourceEndpointOwnerID(self):
return self.get_query_params().get('SourceEndpoint.OwnerID')
def set_SourceEndpointOwnerID(self,SourceEndpointOwnerID):
self.add_query_param('SourceEndpoint.OwnerID',SourceEndpointOwnerID)
def get_DestinationEndpointUserName(self):
return self.get_query_params().get('DestinationEndpoint.UserName')
def set_DestinationEndpointUserName(self,DestinationEndpointUserName):
self.add_query_param('DestinationEndpoint.UserName',DestinationEndpointUserName)
def get_DestinationEndpointPort(self):
return self.get_query_params().get('DestinationEndpoint.Port')
def set_DestinationEndpointPort(self,DestinationEndpointPort):
self.add_query_param('DestinationEndpoint.Port',DestinationEndpointPort)
def get_PartitionKeyModifyTime_Year(self):
return self.get_query_params().get('PartitionKey.ModifyTime_Year')
def set_PartitionKeyModifyTime_Year(self,PartitionKeyModifyTime_Year):
self.add_query_param('PartitionKey.ModifyTime_Year',PartitionKeyModifyTime_Year)
def get_SourceEndpointRole(self):
return self.get_query_params().get('SourceEndpoint.Role')
def set_SourceEndpointRole(self,SourceEndpointRole):
self.add_query_param('SourceEndpoint.Role',SourceEndpointRole)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_PartitionKeyModifyTime_Hour(self):
return self.get_query_params().get('PartitionKey.ModifyTime_Hour')
def set_PartitionKeyModifyTime_Hour(self,PartitionKeyModifyTime_Hour):
self.add_query_param('PartitionKey.ModifyTime_Hour',PartitionKeyModifyTime_Hour)
def get_DestinationEndpointDataBaseName(self):
return self.get_query_params().get('DestinationEndpoint.DataBaseName')
def set_DestinationEndpointDataBaseName(self,DestinationEndpointDataBaseName):
self.add_query_param('DestinationEndpoint.DataBaseName',DestinationEndpointDataBaseName)
def get_SourceEndpointPassword(self):
return self.get_query_params().get('SourceEndpoint.Password')
def set_SourceEndpointPassword(self,SourceEndpointPassword):
self.add_query_param('SourceEndpoint.Password',SourceEndpointPassword)
def get_MigrationReserved(self):
return self.get_query_params().get('MigrationReserved')
def set_MigrationReserved(self,MigrationReserved):
self.add_query_param('MigrationReserved',MigrationReserved)
def get_DestinationEndpointIP(self):
return self.get_query_params().get('DestinationEndpoint.IP')
def set_DestinationEndpointIP(self,DestinationEndpointIP):
self.add_query_param('DestinationEndpoint.IP',DestinationEndpointIP)
def get_DestinationEndpointInstanceType(self):
return self.get_query_params().get('DestinationEndpoint.InstanceType')
def set_DestinationEndpointInstanceType(self,DestinationEndpointInstanceType):
self.add_query_param('DestinationEndpoint.InstanceType',DestinationEndpointInstanceType)
def get_SynchronizationDirection(self):
return self.get_query_params().get('SynchronizationDirection')
def set_SynchronizationDirection(self,SynchronizationDirection):
self.add_query_param('SynchronizationDirection',SynchronizationDirection)
```
#### File: request/v20180522/ListServicesRequest.py
```python
from aliyunsdkcore.request import RoaRequest
class ListServicesRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'eas', '2018-05-22', 'ListServices')
self.set_uri_pattern('/api/services')
self.set_method('GET')
def get_filter(self):
return self.get_query_params().get('filter')
def set_filter(self,filter):
self.add_query_param('filter',filter)
def get_sort(self):
return self.get_query_params().get('sort')
def set_sort(self,sort):
self.add_query_param('sort',sort)
```
#### File: request/v20140526/DescribeDemandsRequest.py
```python
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeDemandsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeDemands','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_InstanceType(self):
return self.get_query_params().get('InstanceType')
def set_InstanceType(self,InstanceType):
self.add_query_param('InstanceType',InstanceType)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self, Tags):
for depth1 in range(len(Tags)):
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
def get_InstanceChargeType(self):
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self,InstanceChargeType):
self.add_query_param('InstanceChargeType',InstanceChargeType)
def get_DryRun(self):
return self.get_query_params().get('DryRun')
def set_DryRun(self,DryRun):
self.add_query_param('DryRun',DryRun)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_InstanceTypeFamily(self):
return self.get_query_params().get('InstanceTypeFamily')
def set_InstanceTypeFamily(self,InstanceTypeFamily):
self.add_query_param('InstanceTypeFamily',InstanceTypeFamily)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_DemandStatuss(self):
return self.get_query_params().get('DemandStatuss')
def set_DemandStatuss(self, DemandStatuss):
for depth1 in range(len(DemandStatuss)):
if DemandStatuss[depth1] is not None:
self.add_query_param('DemandStatus.' + str(depth1 + 1) , DemandStatuss[depth1])
def get_DemandId(self):
return self.get_query_params().get('DemandId')
def set_DemandId(self,DemandId):
self.add_query_param('DemandId',DemandId)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
def get_DemandType(self):
return self.get_query_params().get('DemandType')
def set_DemandType(self,DemandType):
self.add_query_param('DemandType',DemandType)
```
#### File: request/v20171110/PreCreateEnsServiceRequest.py
```python
from aliyunsdkcore.request import RpcRequest
class PreCreateEnsServiceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ens', '2017-11-10', 'PreCreateEnsService','ens')
self.set_method('POST')
def get_BandwidthType(self):
return self.get_query_params().get('BandwidthType')
def set_BandwidthType(self,BandwidthType):
self.add_query_param('BandwidthType',BandwidthType)
def get_SchedulingPriceStrategy(self):
return self.get_query_params().get('SchedulingPriceStrategy')
def set_SchedulingPriceStrategy(self,SchedulingPriceStrategy):
self.add_query_param('SchedulingPriceStrategy',SchedulingPriceStrategy)
def get_ImageId(self):
return self.get_query_params().get('ImageId')
def set_ImageId(self,ImageId):
self.add_query_param('ImageId',ImageId)
def get_InstanceSpec(self):
return self.get_query_params().get('InstanceSpec')
def set_InstanceSpec(self,InstanceSpec):
self.add_query_param('InstanceSpec',InstanceSpec)
def get_KeyPairName(self):
return self.get_query_params().get('KeyPairName')
def set_KeyPairName(self,KeyPairName):
self.add_query_param('KeyPairName',KeyPairName)
def get_UserData(self):
return self.get_query_params().get('UserData')
def set_UserData(self,UserData):
self.add_query_param('UserData',UserData)
def get_Password(self):
return self.get_query_params().get('Password')
def set_Password(self,Password):
self.add_query_param('Password',Password)
def get_BuyResourcesDetail(self):
return self.get_query_params().get('BuyResourcesDetail')
def set_BuyResourcesDetail(self,BuyResourcesDetail):
self.add_query_param('BuyResourcesDetail',BuyResourcesDetail)
def get_SystemDiskSize(self):
return self.get_query_params().get('SystemDiskSize')
def set_SystemDiskSize(self,SystemDiskSize):
self.add_query_param('SystemDiskSize',SystemDiskSize)
def get_InstanceBandwithdLimit(self):
return self.get_query_params().get('InstanceBandwithdLimit')
def set_InstanceBandwithdLimit(self,InstanceBandwithdLimit):
self.add_query_param('InstanceBandwithdLimit',InstanceBandwithdLimit)
def get_EnsServiceName(self):
return self.get_query_params().get('EnsServiceName')
def set_EnsServiceName(self,EnsServiceName):
self.add_query_param('EnsServiceName',EnsServiceName)
def get_Version(self):
return self.get_query_params().get('Version')
def set_Version(self,Version):
self.add_query_param('Version',Version)
def get_NetLevel(self):
return self.get_query_params().get('NetLevel')
def set_NetLevel(self,NetLevel):
self.add_query_param('NetLevel',NetLevel)
def get_SchedulingStrategy(self):
return self.get_query_params().get('SchedulingStrategy')
def set_SchedulingStrategy(self,SchedulingStrategy):
self.add_query_param('SchedulingStrategy',SchedulingStrategy)
def get_DataDiskSize(self):
return self.get_query_params().get('DataDiskSize')
def set_DataDiskSize(self,DataDiskSize):
self.add_query_param('DataDiskSize',DataDiskSize)
```
#### File: request/v20181212/ScanCodeNotificationRequest.py
```python
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class ScanCodeNotificationRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'ScanCodeNotification')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RealCostAmount(self):
return self.get_query_params().get('RealCostAmount')
def set_RealCostAmount(self,RealCostAmount):
self.add_query_param('RealCostAmount',RealCostAmount)
def get_SalePrice(self):
return self.get_query_params().get('SalePrice')
def set_SalePrice(self,SalePrice):
self.add_query_param('SalePrice',SalePrice)
def get_CommodityId(self):
return self.get_query_params().get('CommodityId')
def set_CommodityId(self,CommodityId):
self.add_query_param('CommodityId',CommodityId)
def get_HolderId(self):
return self.get_query_params().get('HolderId')
def set_HolderId(self,HolderId):
self.add_query_param('HolderId',HolderId)
def get_DeviceType(self):
return self.get_query_params().get('DeviceType')
def set_DeviceType(self,DeviceType):
self.add_query_param('DeviceType',DeviceType)
def get_DeviceCode(self):
return self.get_query_params().get('DeviceCode')
def set_DeviceCode(self,DeviceCode):
self.add_query_param('DeviceCode',DeviceCode)
def get_ApplyPrice(self):
return self.get_query_params().get('ApplyPrice')
def set_ApplyPrice(self,ApplyPrice):
self.add_query_param('ApplyPrice',ApplyPrice)
def get_TaskId(self):
return self.get_query_params().get('TaskId')
def set_TaskId(self,TaskId):
self.add_query_param('TaskId',TaskId)
def get_OuterCode(self):
return self.get_query_params().get('OuterCode')
def set_OuterCode(self,OuterCode):
self.add_query_param('OuterCode',OuterCode)
def get_QueryStr(self):
return self.get_query_params().get('QueryStr')
def set_QueryStr(self,QueryStr):
self.add_query_param('QueryStr',QueryStr)
def get_Phase(self):
return self.get_query_params().get('Phase')
def set_Phase(self,Phase):
self.add_query_param('Phase',Phase)
def get_BizResult(self):
return self.get_query_params().get('BizResult')
def set_BizResult(self,BizResult):
self.add_query_param('BizResult',BizResult)
def get_TaskType(self):
return self.get_query_params().get('TaskType')
def set_TaskType(self,TaskType):
self.add_query_param('TaskType',TaskType)
def get_BrandUserId(self):
return self.get_query_params().get('BrandUserId')
def set_BrandUserId(self,BrandUserId):
self.add_query_param('BrandUserId',BrandUserId)
def get_Sex(self):
return self.get_query_params().get('Sex')
def set_Sex(self,Sex):
self.add_query_param('Sex',Sex)
def get_CostDetail(self):
return self.get_query_params().get('CostDetail')
def set_CostDetail(self,CostDetail):
self.add_query_param('CostDetail',CostDetail)
def get_ProxyUserId(self):
return self.get_query_params().get('ProxyUserId')
def set_ProxyUserId(self,ProxyUserId):
self.add_query_param('ProxyUserId',ProxyUserId)
def get_AlipayOpenId(self):
return self.get_query_params().get('AlipayOpenId')
def set_AlipayOpenId(self,AlipayOpenId):
self.add_query_param('AlipayOpenId',AlipayOpenId)
def get_BizType(self):
return self.get_query_params().get('BizType')
def set_BizType(self,BizType):
self.add_query_param('BizType',BizType)
def get_BrandNick(self):
return self.get_query_params().get('BrandNick')
def set_BrandNick(self,BrandNick):
self.add_query_param('BrandNick',BrandNick)
def get_V(self):
return self.get_query_params().get('V')
def set_V(self,V):
self.add_query_param('V',V)
def get_ChargeTag(self):
return self.get_query_params().get('ChargeTag')
def set_ChargeTag(self,ChargeTag):
self.add_query_param('ChargeTag',ChargeTag)
def get_Age(self):
return self.get_query_params().get('Age')
def set_Age(self,Age):
self.add_query_param('Age',Age)
def get_ChannelId(self):
return self.get_query_params().get('ChannelId')
def set_ChannelId(self,ChannelId):
self.add_query_param('ChannelId',ChannelId)
def get_Cid(self):
return self.get_query_params().get('Cid')
def set_Cid(self,Cid):
self.add_query_param('Cid',Cid)
``` |
{
"source": "jia-jerry/cc-utils",
"score": 2
} |
#### File: cc-utils/clamav/util.py
```python
import logging
import os
import shutil
import subprocess
import tarfile
import clamd
import container.registry
logger = logging.getLogger(__name__)
# XXX hard-code for now (see Dockerfile / res/clamd.conf)
_clamd_sock = '/run/clamav/clamd.sock'
def init_daemon():
if os.path.exists(_clamd_sock):
        return # assume daemon is already running
# ensure runtime dependencies (we require clamav/clamd to be installed)
fresh_clam = shutil.which('freshclam')
if not fresh_clam:
raise RuntimeError('fresh_clam must be available from PATH')
logger.info("updating ClamAV's virus signature DB - this may take a while")
subprocess.run(
[fresh_clam],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False, # 1 is returned if clamav is outdated - ignore for now
)
logger.info('done updating virus signature DB')
clamd_executable = shutil.which('clamd')
if not clamd_executable:
raise RuntimeError('clamd must be available from PATH')
logger.info('starting clamd - this may take a while')
subprocess.run(
[clamd_executable],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=True,
)
def clamd_client():
init_daemon()
client = clamd.ClamdUnixSocket(_clamd_sock)
# smoke-test
client.ping()
return client
def scan_stream(fileobj):
c = clamd_client()
result = c.instream(fileobj)
    if len(result) != 1 or 'stream' not in result:
# expected format: {"stream": (<status>, <signature-name|None>)}
raise RuntimeError(f'result does not meet expected format: {result}')
status, signature_or_none = result['stream']
return status, signature_or_none
def scan_container_image(image_reference: str):
logger.debug(f'scanning container image {image_reference}')
with tarfile.open(
mode='r|',
fileobj=container.registry.retrieve_container_image(image_reference)
) as tf:
for ti in tf:
# we only care to scan files, obviously
if not ti.isfile():
continue
if not ti.name.endswith('layer.tar'):
continue # only layer files may contain relevant data
with tarfile.open(mode='r|', fileobj=tf.extractfile(ti)) as inner_tf:
for inner_ti in inner_tf:
if not inner_ti.isfile():
continue
status, signature = scan_stream(fileobj=inner_tf.extractfile(inner_ti))
if result_ok(status, signature):
continue
else:
# early exit on first match
return status, f'{ti.name}:{inner_ti.name}: {signature}'
logger.debug(f'{image_reference}:{ti.name} looks clean')
logger.debug(f'image looked clean: {image_reference}')
return 'OK', None # no match
def result_ok(status, signature):
if status == 'OK':
return True
return False
```
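A hedged usage sketch for the module above: it assumes clamd and freshclam are installed (see `init_daemon`), that the registry behind the placeholder image reference is reachable, and it only uses the two functions defined here.
```python
# The image reference is a placeholder for illustration.
image_ref = 'eu.gcr.io/example-project/example-image:1.0.0'
status, finding = scan_container_image(image_reference=image_ref)
if result_ok(status=status, signature=finding):
    print(f'{image_ref} looks clean')
else:
    print(f'{image_ref} matched a virus signature: {finding}')
```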
#### File: concourse/steps/scan_container_images.py
```python
import functools
import textwrap
import typing
import tabulate
import clamav.util
import mailutil
from concourse.model.traits.image_scan import Notify
from product.model import ComponentName, UploadResult
class MailRecipients(object):
def __init__(
self,
root_component_name: str,
protecode_cfg,
protecode_group_id: int,
protecode_group_url: str,
cfg_set,
result_filter=None,
recipients: typing.List[str]=[],
recipients_component: ComponentName=None,
):
self._root_component_name = root_component_name
self._result_filter = result_filter
self._protecode_results = []
self._clamav_results = []
self._cfg_set = cfg_set
if not bool(recipients) ^ bool(recipients_component):
            raise ValueError('exactly one of recipients, recipients_component must be given')
self._recipients = recipients
self._recipients_component= recipients_component
self._protecode_cfg = protecode_cfg
self._protecode_group_id = protecode_group_id
self._protecode_group_url = protecode_group_url
@functools.lru_cache()
def resolve_recipients(self):
if not self._recipients_component:
return self._recipients
# XXX it should not be necessary to pass github_cfg
return mailutil.determine_mail_recipients(
github_cfg_name=self._cfg_set.github().name(),
component_names=(self._recipients_component.name(),),
)
def add_protecode_results(self, results: typing.Iterable[typing.Tuple[UploadResult, int]]):
print(f'adding protecode results for {self}')
for result in results:
if self._result_filter:
if not self._result_filter(component=result[0].component):
print(f'did not match: {result[0].component.name()}')
continue
self._protecode_results.append(result)
def add_clamav_results(self, results):
for result in results:
self._clamav_results.append(result)
def has_results(self):
if self._protecode_results:
return True
if self._clamav_results:
return True
def mail_body(self):
parts = []
parts.append(self._mail_disclaimer())
parts.append(protecode_results_table(
protecode_cfg=self._protecode_cfg,
upload_results=self._protecode_results,
)
)
parts.append(self._clamav_report())
return ''.join(parts)
def _mail_disclaimer(self):
return textwrap.dedent(f'''
<div>
<p>
Note: you receive this E-Mail, because you were configured as a mail recipient
in repository "{self._root_component_name}" (see .ci/pipeline_definitions)
To remove yourself, search for your e-mail address in said file and remove it.
</p>
<p>
The following components in Protecode-group
<a href="{self._protecode_group_url}">{self._protecode_group_id}</a>
were found to contain critical vulnerabilities:
</p>
</div>
''')
def _clamav_report(self):
if not self._clamav_results:
return textwrap.dedent(f'''
<p>Scanned all container image(s) for matching virus signatures
without any matches (id est: all container images seem to be free of known malware)
''')
result = '<p><div>Virus Scanning Results</div>'
return result + tabulate.tabulate(
self._clamav_results,
headers=('Image-Reference', 'Scanning Result'),
tablefmt='html',
)
def __repr__(self):
if self._recipients_component:
descr = f'component {self._recipients_component.name()}'
else:
descr = 'for all results'
return 'MailRecipients: ' + descr
def mail_recipients(
notification_policy: Notify,
root_component_name:str,
protecode_cfg,
protecode_group_id: int,
protecode_group_url: str,
cfg_set,
email_recipients: typing.Iterable[str]=(),
components: typing.Iterable[ComponentName]=(),
):
mail_recps_ctor = functools.partial(
MailRecipients,
root_component_name=root_component_name,
protecode_cfg=protecode_cfg,
protecode_group_id=protecode_group_id,
protecode_group_url=protecode_group_url,
cfg_set=cfg_set,
)
notification_policy = Notify(notification_policy)
if notification_policy == Notify.EMAIL_RECIPIENTS:
if not email_recipients:
raise ValueError('at least one email_recipient must be specified')
# exactly one MailRecipients, catching all (hence no filter)
yield mail_recps_ctor(
recipients=email_recipients,
)
elif notification_policy == Notify.NOBODY:
return
elif notification_policy == Notify.COMPONENT_OWNERS:
def make_comp_filter(own_component):
def comp_filter(component):
print(f'filter: component: {own_component.name()} - other: {component.name()}')
return own_component.name() == component.name() # only care about matching results
return comp_filter
for comp in components:
yield mail_recps_ctor(
recipients_component=comp,
result_filter=make_comp_filter(own_component=comp)
)
else:
raise NotImplementedError()
def virus_scan_images(image_references: typing.Iterable[str]):
for image_reference in image_references:
status, signature = clamav.util.scan_container_image(image_reference=image_reference)
if clamav.util.result_ok(status=status, signature=signature):
continue
yield (image_reference, f'{status}: {signature}')
def protecode_results_table(protecode_cfg, upload_results: typing.Iterable[UploadResult]):
def result_to_tuple(upload_result: UploadResult):
# upload_result tuple of product.model.UploadResult and CVE Score
upload_result, greatest_cve = upload_result
# protecode.model.AnalysisResult
analysis_result = upload_result.result
name = analysis_result.display_name()
analysis_url = \
f'{protecode_cfg.api_url()}/products/{analysis_result.product_id()}/#/analysis'
link_to_analysis_url = f'<a href="{analysis_url}">{name}</a>'
custom_data = analysis_result.custom_data()
if custom_data is not None:
image_reference = custom_data.get('IMAGE_REFERENCE')
else:
image_reference = None
return [link_to_analysis_url, greatest_cve, image_reference]
table = tabulate.tabulate(
map(result_to_tuple, upload_results),
headers=('Component Name', 'Greatest CVE', 'Container Image Reference'),
tablefmt='html',
)
return table
```
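A hedged sketch of the virus-scan half of this step: the image reference, e-mail address, group id, and URLs are placeholders, and the `MailRecipients` arguments simply mirror the constructor defined above.
```python
# All concrete values here are placeholders for illustration.
findings = list(virus_scan_images(['eu.gcr.io/example-project/example-image:1.0.0']))
recipients = MailRecipients(
    root_component_name='github.example.org/org/repo',
    protecode_cfg=None,                 # not needed for the ClamAV-only part
    protecode_group_id=4711,
    protecode_group_url='https://protecode.example.org/group/4711',
    cfg_set=None,
    recipients=['dev@example.org'],
)
recipients.add_clamav_results(findings)
if recipients.has_results():
    print(recipients.mail_body())       # disclaimer, (empty) protecode table, ClamAV table
```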
#### File: cc-utils/landscape_setup/secrets_server.py
```python
from ensure import ensure_annotations
from landscape_setup import kube_ctx
from model.secrets_server import (
SecretsServerConfig,
)
from kubernetes.client import (
V1Service,
V1ObjectMeta,
V1ServiceSpec,
V1ServicePort,
V1Deployment,
V1DeploymentSpec,
V1PodTemplateSpec,
V1PodSpec,
V1Container,
V1ResourceRequirements,
V1ContainerPort,
V1Probe,
V1TCPSocketAction,
V1VolumeMount,
V1Volume,
V1SecretVolumeSource,
V1LabelSelector,
)
@ensure_annotations
def deploy_secrets_server(secrets_server_config: SecretsServerConfig):
ctx = kube_ctx
service_helper = ctx.service_helper()
deployment_helper = ctx.deployment_helper()
secrets_helper = ctx.secret_helper()
namespace_helper = ctx.namespace_helper()
namespace = secrets_server_config.namespace()
namespace_helper.create_if_absent(namespace)
secret_name = secrets_server_config.secrets().concourse_secret_name()
# Deploy an empty secret if none exists so that the secrets-server can start.
# However, if there is already a secret we should not purge its contents.
if not secrets_helper.get_secret(secret_name, namespace):
secrets_helper.put_secret(
name=secret_name,
data={},
namespace=namespace,
)
service = generate_secrets_server_service(secrets_server_config)
deployment = generate_secrets_server_deployment(secrets_server_config)
service_helper.replace_or_create_service(namespace, service)
deployment_helper.replace_or_create_deployment(namespace, deployment)
@ensure_annotations
def generate_secrets_server_service(
secrets_server_config: SecretsServerConfig,
):
# We need to ensure that the labels and selectors match between the deployment and the service,
# therefore we base them on the configured service name.
service_name = secrets_server_config.service_name()
selector = {'app':service_name}
return V1Service(
kind='Service',
metadata=V1ObjectMeta(
name=service_name,
),
spec=V1ServiceSpec(
type='ClusterIP',
ports=[
V1ServicePort(protocol='TCP', port=80, target_port=8080),
],
selector=selector,
session_affinity='None',
),
)
@ensure_annotations
def generate_secrets_server_deployment(
secrets_server_config: SecretsServerConfig,
):
service_name = secrets_server_config.service_name()
secret_name = secrets_server_config.secrets().concourse_secret_name()
# We need to ensure that the labels and selectors match for both the deployment and the service,
# therefore we base them on the configured service name.
labels={'app':service_name}
return V1Deployment(
kind='Deployment',
metadata=V1ObjectMeta(
name=service_name,
labels=labels
),
spec=V1DeploymentSpec(
replicas=1,
selector=V1LabelSelector(match_labels=labels),
template=V1PodTemplateSpec(
metadata=V1ObjectMeta(labels=labels),
spec=V1PodSpec(
containers=[
V1Container(
image='eu.gcr.io/gardener-project/cc/job-image:latest',
image_pull_policy='IfNotPresent',
name='secrets-server',
resources=V1ResourceRequirements(
requests={'cpu':'50m', 'memory': '50Mi'},
limits={'cpu':'50m', 'memory': '50Mi'},
),
command=['bash'],
args=[
'-c',
'''
# chdir to secrets dir; create if absent
mkdir -p /secrets && cd /secrets
# make Kubernetes serviceaccount secrets available by default
cp -r /var/run/secrets/kubernetes.io/serviceaccount serviceaccount
# store Kubernetes service endpoint env as file for consumer
env | grep KUBERNETES_SERVICE > serviceaccount/env
# launch secrets server serving secrets dir contents on all IFs
python3 -m http.server 8080
'''
],
ports=[
V1ContainerPort(container_port=8080),
],
liveness_probe=V1Probe(
tcp_socket=V1TCPSocketAction(port=8080),
initial_delay_seconds=10,
period_seconds=10,
),
volume_mounts=[
V1VolumeMount(
name=secret_name,
mount_path='/secrets/concourse-secrets',
read_only=True,
),
],
),
],
node_selector={
"worker.garden.sapcloud.io/group": "cc-control"
},
volumes=[
V1Volume(
name=secret_name,
secret=V1SecretVolumeSource(
secret_name=secret_name,
)
)
]
)
)
)
)
```
#### File: cc-utils/product/scanning.py
```python
from enum import Enum
from functools import partial
import tempfile
from protecode.client import ProtecodeApi
from protecode.model import (
ProcessingStatus,
AnalysisResult,
TriageScope,
)
from concourse.model.base import (
AttribSpecMixin,
AttributeSpec,
)
from util import not_none, warning, check_type, info
from container.registry import retrieve_container_image
from .model import ContainerImage, Component, UploadResult, UploadStatus
class ProcessingMode(AttribSpecMixin, Enum):
UPLOAD_IF_CHANGED = 'upload_if_changed'
RESCAN = 'rescan'
FORCE_UPLOAD = 'force_upload'
@classmethod
def _attribute_specs(cls):
return (
AttributeSpec.optional(
name=cls.UPLOAD_IF_CHANGED.value,
default=None,
doc='''
upload the container images. This will :strong:`not` upload the images if they
are already present. This will :strong:`not` cause images already present to be
rescanned.
''',
type=str,
),
AttributeSpec.optional(
name=cls.RESCAN.value,
default=None,
doc='''
trigger a scan of container images. Images will be uploaded unless they are
already present.
''',
type=str,
),
AttributeSpec.optional(
name=cls.FORCE_UPLOAD.value,
default=None,
doc='''
:strong:`always` upload the images. This will cause all images to be rescanned.
''',
type=str,
),
)
class UploadAction(Enum):
def __init__(self, upload, rescan, wait):
self.upload = upload
self.rescan = rescan
self.wait = wait
SKIP = (False, False, False)
UPLOAD = (True, False, True)
RESCAN = (False, True, True)
WAIT_FOR_RESULT = (False, False, True)
class ProtecodeUtil(object):
def __init__(
self,
protecode_api: ProtecodeApi,
processing_mode: ProcessingMode=ProcessingMode.UPLOAD_IF_CHANGED,
group_id: int=None,
reference_group_ids=(),
):
protecode_api.login()
self._processing_mode = check_type(processing_mode, ProcessingMode)
self._api = not_none(protecode_api)
self._group_id = group_id
self._reference_group_ids = reference_group_ids
def _image_ref_metadata(self, container_image, omit_version):
metadata_dict = {
'IMAGE_REFERENCE_NAME': container_image.name(),
}
if not omit_version:
metadata_dict['IMAGE_REFERENCE'] = container_image.image_reference()
return metadata_dict
def _component_metadata(self, component, omit_version=True):
metadata = {'COMPONENT_NAME': component.name()}
if not omit_version:
metadata['COMPONENT_VERSION'] = component.version()
return metadata
def _upload_name(self, container_image, component):
image_reference = container_image.image_reference()
image_path, image_tag = image_reference.split(':')
image_name = image_path.split('/')[-1]
return '{i}_{v}_{c}'.format(
i=image_name,
v=image_tag,
c=component.name(),
)
def _update_product_name(self, product_id: int, upload_name: str):
scan_result = self._api.scan_result_short(product_id=product_id)
current_name = scan_result.name()
if current_name == upload_name:
return # nothing to do
self._api.set_product_name(product_id=product_id, name=upload_name)
def _metadata(
self,
container_image: ContainerImage,
component: Component,
omit_version,
):
metadata = self._image_ref_metadata(container_image, omit_version=omit_version)
metadata.update(self._component_metadata(component=component, omit_version=omit_version))
return metadata
def retrieve_scan_result(
self,
container_image: ContainerImage,
component: Component,
group_id: int=None,
):
metadata = self._metadata(
container_image=container_image,
component=component,
omit_version=True, # omit version when searching for existing app
# (only one component version must exist per group by our chosen definition)
)
if not group_id:
group_id = self._group_id
existing_products = self._api.list_apps(
group_id=group_id,
custom_attribs=metadata
)
if len(existing_products) == 0:
return None # no result existed yet
if len(existing_products) > 1:
warning('found more than one product for image {i}'.format(i=container_image))
product_ids_to_rm = {p.product_id() for p in existing_products[1:]}
for product_id in product_ids_to_rm:
self._api.delete_product(product_id)
info(f'deleted product with product_id: {product_id}')
# use first (or only) match (we already printed a warning if we found more than one)
product = existing_products[0]
product_id = product.product_id()
# update upload name to reflect new component version (if changed)
upload_name = self._upload_name(container_image, component)
self._update_product_name(product_id, upload_name)
# retrieve existing product's details (list of products contained only subset of data)
product = self._api.scan_result(product_id=product_id)
return product
def _determine_upload_action(
self,
container_image: ContainerImage,
scan_result: AnalysisResult,
):
check_type(container_image, ContainerImage)
# take shortcut if 'force upload' is configured.
if self._processing_mode is ProcessingMode.FORCE_UPLOAD:
return UploadAction.UPLOAD
if self._processing_mode in (
ProcessingMode.UPLOAD_IF_CHANGED,
ProcessingMode.RESCAN,
):
# if no scan_result is available, we have to upload in all remaining cases
if not scan_result:
return UploadAction.UPLOAD
# determine if image to be uploaded is already present in protecode
metadata = scan_result.custom_data()
image_reference = metadata.get('IMAGE_REFERENCE')
image_changed = image_reference != container_image.image_reference()
if image_changed:
return UploadAction.UPLOAD
if self._processing_mode is ProcessingMode.UPLOAD_IF_CHANGED:
return UploadAction.SKIP
elif self._processing_mode is ProcessingMode.RESCAN:
                # Wait for the current scan to finish if there is still one pending
if scan_result.status() is ProcessingStatus.BUSY:
return UploadAction.WAIT_FOR_RESULT
short_scan_result = self._api.scan_result_short(scan_result.product_id())
if short_scan_result.is_stale():
if not short_scan_result.has_binary():
return UploadAction.UPLOAD
else:
return UploadAction.RESCAN
else:
return UploadAction.SKIP
else:
raise NotImplementedError
def upload_image(
self,
container_image: ContainerImage,
component: Component,
) -> UploadResult:
metadata = self._metadata(
container_image=container_image,
component=component,
omit_version=False,
)
upload_result = partial(UploadResult, container_image=container_image, component=component)
# check if the image has already been uploaded for this component
scan_result = self.retrieve_scan_result(
container_image=container_image,
component=component,
)
reference_results = [
self.retrieve_scan_result(
container_image=container_image,
component=component,
group_id=group_id,
) for group_id in self._reference_group_ids
]
reference_results = [r for r in reference_results if r] # remove None entries
if scan_result:
reference_results.insert(0, scan_result)
# collect old triages in order to "transport" them after new upload (may be None)
triages = self._existing_triages(
analysis_results=reference_results,
)
upload_action = self._determine_upload_action(
container_image=container_image,
scan_result=scan_result
)
if not upload_action.upload and not upload_action.rescan and not upload_action.wait:
# early exit (nothing to do)
return upload_result(
status=UploadStatus.SKIPPED,
result=scan_result,
)
if upload_action.upload:
info(f'uploading to protecode: {container_image.image_reference()}')
image_data_fh = retrieve_container_image(
container_image.image_reference(),
outfileobj=tempfile.NamedTemporaryFile(),
)
# keep old product_id (in order to delete after update)
if scan_result:
product_id = scan_result.product_id()
else:
product_id = None
try:
# Upload image and update outdated analysis result with the one triggered
# by the upload.
scan_result = self._api.upload(
application_name=self._upload_name(
container_image=container_image,
component=component
).replace('/', '_'),
group_id=self._group_id,
data=image_data_fh,
custom_attribs=metadata,
)
finally:
image_data_fh.close()
for triage in triages:
if triage.scope() is TriageScope.GROUP:
self._api.add_triage(
triage=triage,
scope=TriageScope.GROUP,
group_id=self._group_id,
)
else:
# hard-code scope for now
self._api.add_triage(
triage=triage,
scope=TriageScope.RESULT,
product_id=scan_result.product_id(),
)
# rm (now outdated) scan result
if product_id:
self._api.delete_product(product_id=product_id)
if upload_action.rescan:
self._api.rescan(scan_result.product_id())
if upload_action.wait:
result = self._api.wait_for_scan_result(product_id=scan_result.product_id())
else:
result = scan_result
if result.status() == ProcessingStatus.BUSY:
upload_status = UploadStatus.PENDING
else:
upload_status = UploadStatus.DONE
return upload_result(
status=upload_status,
result=result
)
def _existing_triages(self, analysis_results: AnalysisResult=()):
if not analysis_results:
return ()
for analysis_result in analysis_results:
for component in analysis_result.components():
for vulnerability in component.vulnerabilities():
yield from vulnerability.triages()
```
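The `_determine_upload_action` branching above is easiest to follow in isolation. The sketch below reproduces the same decision table with stand-in enums and plain booleans; the real `ProcessingMode`, `UploadAction`, and scan-result types live elsewhere in cc-utils, so every name here is a simplified assumption for illustration only.

```python
# Minimal sketch of the upload-decision logic, using stand-in enums.
from enum import Enum

class ProcessingMode(Enum):          # hypothetical simplification
    UPLOAD_IF_CHANGED = 'upload_if_changed'
    RESCAN = 'rescan'
    FORCE_UPLOAD = 'force_upload'

class UploadAction(Enum):            # hypothetical simplification
    SKIP = 'skip'
    UPLOAD = 'upload'
    RESCAN = 'rescan'
    WAIT_FOR_RESULT = 'wait'

def decide(mode, have_result, image_changed, busy, stale, has_binary):
    if mode is ProcessingMode.FORCE_UPLOAD:
        return UploadAction.UPLOAD
    if not have_result or image_changed:
        return UploadAction.UPLOAD
    if mode is ProcessingMode.UPLOAD_IF_CHANGED:
        return UploadAction.SKIP
    # RESCAN mode from here on
    if busy:
        return UploadAction.WAIT_FOR_RESULT
    if stale:
        return UploadAction.RESCAN if has_binary else UploadAction.UPLOAD
    return UploadAction.SKIP

# an unchanged, finished, non-stale scan in RESCAN mode is skipped
assert decide(ProcessingMode.RESCAN, True, False, False, False, True) is UploadAction.SKIP
```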
#### File: jia-jerry/cc-utils/setup.py
```python
import setuptools
import os
own_dir = os.path.abspath(os.path.dirname(__file__))
def requirements():
with open(os.path.join(own_dir, 'requirements.txt')) as f:
for line in f.readlines():
line = line.strip()
if not line or line.startswith('#'):
continue
yield line
def modules():
return [
os.path.basename(os.path.splitext(module)[0]) for module in
os.scandir(path=own_dir)
if module.is_file() and module.name.endswith('.py')
]
def packages():
return [
'ccc',
'clamav',
'concourse',
'container',
'github',
'kube',
'landscape_setup',
'mail',
'model',
'product',
'protecode',
'slackclient',
'whd',
]
def version():
with open(os.path.join(own_dir, 'VERSION')) as f:
return f.read().strip()
setuptools.setup(
name='cc-utils',
version=version(),
description='Gardener CI/CD Utils',
    python_requires='>=3.6',
py_modules=modules(),
packages=packages(),
install_requires=list(requirements()),
entry_points={
'console_scripts': [
'cli=cli:main',
],
},
)
```
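A quick way to sanity-check the helpers above is to exercise the same comment/blank-line filtering that `requirements()` applies against a throwaway file. The snippet below is an illustrative sketch only; the temporary path and its contents are made up and are not part of the package.

```python
# Sketch: the same filtering requirements() applies, run against a temp file.
import os
import tempfile

def read_requirements(path):
    with open(path) as f:
        for line in f.readlines():
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            yield line

with tempfile.TemporaryDirectory() as tmp:
    req = os.path.join(tmp, 'requirements.txt')
    with open(req, 'w') as f:
        f.write('# pinned deps\n\nrequests==2.22.0\nPyYAML\n')
    print(list(read_requirements(req)))  # ['requests==2.22.0', 'PyYAML']
```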
#### File: github/release_notes/default_util.py
```python
from github.release_notes.util import (
ReleaseNote,
extract_release_notes,
)
from github.release_notes.model import (
ReleaseNoteBlock,
ReferenceType,
REF_TYPE_PULL_REQUEST,
)
from github.release_notes.renderer import (
CATEGORY_IMPROVEMENT_ID,
TARGET_GROUP_USER_ID,
)
from product.model import ComponentName
CURRENT_REPO_NAME = 'github.com/madeup/current-repo'
CURRENT_REPO = ComponentName(CURRENT_REPO_NAME)
DEFAULT_CATEGORY = CATEGORY_IMPROVEMENT_ID
DEFAULT_TARGET_GROUP = TARGET_GROUP_USER_ID
DEFAULT_RELEASE_NOTE_TEXT = 'default release note text'
DEFAULT_USER = 'foo'
DEFAULT_REFERENCE_ID = '42'
DEFAULT_REFERENCE_TYPE = REF_TYPE_PULL_REQUEST
DEFAULT_REPO = CURRENT_REPO
def release_note_block_with_defaults(
category_id: str=DEFAULT_CATEGORY,
target_group_id: str=DEFAULT_TARGET_GROUP,
text: str=DEFAULT_RELEASE_NOTE_TEXT,
reference_type: ReferenceType=DEFAULT_REFERENCE_TYPE,
reference_id: str=DEFAULT_REFERENCE_ID,
user_login: str=DEFAULT_USER,
source_repo: str=CURRENT_REPO_NAME,
cn_current_repo: ComponentName=DEFAULT_REPO,
) -> ReleaseNoteBlock:
"""
unit tests can expect the default values to be stable
"""
return ReleaseNoteBlock(
category_id=category_id,
target_group_id=target_group_id,
text=text,
reference_type=reference_type,
reference_id=reference_id,
user_login=user_login,
source_repo=source_repo,
cn_current_repo=cn_current_repo
)
def extract_release_notes_with_defaults(
reference_id: str=DEFAULT_REFERENCE_ID,
reference_type: ReferenceType=DEFAULT_REFERENCE_TYPE,
text: str=DEFAULT_RELEASE_NOTE_TEXT,
user_login: str=DEFAULT_USER,
cn_current_repo: ComponentName=DEFAULT_REPO,
) -> [ReleaseNote]:
return extract_release_notes(
reference_id=reference_id,
reference_type=reference_type,
text=text,
user_login=user_login,
cn_current_repo=cn_current_repo
)
```
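Because every argument above has a stable default, a unit test only needs to pass the field it actually exercises. The call below is a sketch of that pattern; it assumes the cc-utils repository root is on `sys.path`, and it does not assert anything about the shape of the returned objects.

```python
# Sketch: override a single field, keep all other defaults
# (assumes the cc-utils repository root is importable).
from github.release_notes.default_util import release_note_block_with_defaults

block = release_note_block_with_defaults(text='improved error messages')
default_block = release_note_block_with_defaults()
print(block, default_block)
```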
#### File: cc-utils/whd/pipelines.py
```python
import concourse.enumerator
import concourse.replicator
def logger():
from flask import current_app
return current_app.logger
def update_repository_pipelines(
repo_url,
cfg_set,
whd_cfg,
):
repo_enumerator = concourse.enumerator.GithubRepositoryDefinitionEnumerator(
repository_url=repo_url,
cfg_set=cfg_set,
)
preprocessor = concourse.enumerator.DefinitionDescriptorPreprocessor()
template_retriever = concourse.enumerator.TemplateRetriever(
template_path=whd_cfg.pipeline_templates_path(),
)
renderer = concourse.replicator.Renderer(
template_retriever=template_retriever,
template_include_dir=whd_cfg.pipeline_include_path(),
cfg_set=cfg_set,
)
deployer = concourse.replicator.ConcourseDeployer(
unpause_pipelines=True,
expose_pipelines=True,
)
# no need for parallelisation
definition_descriptors = repo_enumerator.enumerate_definition_descriptors()
preprocessed_descriptors = map(
preprocessor.process_definition_descriptor,
definition_descriptors,
)
render_results = map(
renderer.render,
preprocessed_descriptors,
)
for render_result in render_results:
if not render_result.render_status == concourse.replicator.RenderStatus.SUCCEEDED:
logger().warning('failed to render pipeline - ignoring')
continue
deploy_result = deployer.deploy(render_result.definition_descriptor)
if deploy_result.deploy_status == concourse.replicator.DeployStatus.SUCCEEDED:
logger().info('successfully rendered and deployed pipeline')
else:
logger().warning('failed to deploy a pipeline')
```
#### File: cc-utils/whd/webhook.py
```python
from flask import abort, request
from flask import current_app as app
from flask_restful import (
Resource,
reqparse,
)
from model.webhook_dispatcher import WebhookDispatcherConfig
from .dispatcher import GithubWebhookDispatcher
from .model import CreateEvent, PushEvent, PullRequestEvent
class GithubWebhook(Resource):
def __init__(
self,
cfg_set,
whd_cfg: WebhookDispatcherConfig
):
self.cfg_set = cfg_set
self.whd_cfg = whd_cfg
self.parser = reqparse.RequestParser()
self.parser.add_argument('X-GitHub-Event', type=str, location='headers')
self.dispatcher = GithubWebhookDispatcher(cfg_set=cfg_set, whd_cfg=whd_cfg)
def post(self):
args = self.parser.parse_args()
event = args.get('X-GitHub-Event')
if not event:
abort(400, 'X-GitHub-Event must be set')
if event == 'push':
parsed = PushEvent(raw_dict=request.get_json())
self.dispatcher.dispatch_push_event(push_event=parsed)
return 'OK'
        if event == 'create':
            parsed = CreateEvent(raw_dict=request.get_json())
            self.dispatcher.dispatch_create_event(create_event=parsed)
            return 'OK'
elif event == 'pull_request':
parsed = PullRequestEvent(raw_dict=request.get_json())
self.dispatcher.dispatch_pullrequest_event(pr_event=parsed)
return 'OK'
else:
msg = f'event {event} ignored'
app.logger.info(msg)
return msg
``` |
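The resource above keys everything off the `X-GitHub-Event` header, so a hand-rolled delivery for local testing only needs that header plus a JSON body. The snippet below is a sketch: the URL, route, and payload shape are placeholders I made up for illustration, not values defined by this repository.

```python
# Sketch: deliver a fake 'push' event to a locally running webhook dispatcher.
import requests

payload = {
    'ref': 'refs/heads/master',
    'repository': {'clone_url': 'https://github.com/example/repo.git'},
}
resp = requests.post(
    'http://localhost:5000/github-webhook',   # placeholder route
    json=payload,
    headers={'X-GitHub-Event': 'push'},
)
print(resp.status_code, resp.text)
```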
{
"source": "jiajia0524/watermark-removal",
"score": 3
} |
#### File: jiajia0524/watermark-removal/video_water_mark_storyblocks.py
```python
import cv2 as cv
import numpy as np
import os
import time
def get_alpha_W_black():
video = "videos/storyblocks.mp4"
cap = cv.VideoCapture(video)
res, waterpic = cap.read()
W = (waterpic>25)*255
alpha_0 = 1.1* waterpic[W!=0].mean()/255
alpha = np.ones(W.shape) * alpha_0
alpha[W==0]=0
print(waterpic.max(), waterpic.min(), alpha_0)
# cv.imshow("W0", W.astype(float))
box = np.nonzero(W)
box = [min(box[0]),min(box[1]),max(box[0]),max(box[1])]
box[0]-=5
box[1]-=5
box[2]+=5
box[3]+=5
print(box)
return alpha, W, box
def blur_mask(alpha, W, box):
index = 0
sel = set(range(1,1000,50))
video = "/database/水印视频/storyblocks/多旋翼无人机/aerial-rc-helicopter_ey-9zj5ux__PM.mp4"
video = "/database/水印视频/storyblocks/多旋翼无人机/videoblocks-slow-motion-drone-takeoff_b3sv8vzub__SB_PM.mp4"
video = "/database/水印视频/storyblocks/运输机旅客机/4k-air-berlin-boeing-737-arriving-madeira-from-seascape_E1Ni7uQSe__SB_PM.mp4"
cap = cv.VideoCapture(video)
if True:
difs = []
while True:
result, frame = cap.read()
if not result: break
index += 1
# if index in sel:
I = (frame.astype(float) - alpha * W)/(1-alpha)
I[I<0] = 0
I = I.astype(np.uint8)
fI = I.copy()
fI[box[0]:box[2],box[1]:box[3],:] = cv.medianBlur(fI[box[0]:box[2],box[1]:box[3],:], 5)
oboximage = frame[box[0]:box[2],box[1]:box[3],:]
iboximage = I[box[0]:box[2],box[1]:box[3],:]
fboximage = fI[box[0]:box[2],box[1]:box[3],:]
dif = fboximage.astype(float)-iboximage.astype(float)
dif[dif<30]=0
difs.append(dif)
# cv.rectangle(I,(box[1],box[0]),(box[3],box[2]),(0, 255, 255),1,8)
# cv.imshow("diffenence", np.vstack([oboximage,iboximage,fboximage,
# dif.astype(np.uint8),np.median(np.array(difs),axis=0).astype(np.uint8)]))
# cv.waitKey(0)
# if index==100:break
np.save("med.npy", np.array(difs))
# exit()
difs = np.load("med.npy")
med = np.median(difs,axis=0).astype(np.uint8)
tmp = med.max(axis=2)
med[:,:,0] = tmp
med[:,:,1] = tmp
med[:,:,2] = tmp
bg = np.zeros(W.shape)
bg[box[0]:box[2],box[1]:box[3],:] = med
# cv.imshow("dif", med)
# cv.imshow("bg", bg)
W2 = W.copy()
W2[bg!=0] = 0
alpha2 = alpha.copy()
alpha2[bg!=0] = 0.1*alpha.max()
return alpha2,W2,bg
def process_video_demo(alpha, W, alpha2, W2, bg, box):
video = "/database/水印视频/storyblocks/多旋翼无人机/aerial-rc-helicopter_ey-9zj5ux__PM.mp4"
video = "/database/水印视频/storyblocks/多旋翼无人机/videoblocks-slow-motion-drone-takeoff_b3sv8vzub__SB_PM.mp4"
video = "/database/水印视频/storyblocks/运动飞机/4359-small-airplane-aircraft-aviation-landing-mountain-travel-trip-sunny-da_nkegz2_t__SB_PM.mp4"
cap = cv.VideoCapture(video)
sel = set(range(1,1000,50))
index = 0
while True:
result, frame = cap.read()
if not result: break
index += 1
if index in sel:
I1 = (frame.astype(float) - alpha * W)/(1-alpha)
I1[I1<0] = 0
I1 = I1.astype(np.uint8)
I2 = (frame.astype(float) - alpha2 * W2)/(1-alpha2)
I2[I2<0] = 0
I2 = I2.astype(np.uint8)
I3 = I1.copy()
fI = I1.copy()
fI[box[0]:box[2],box[1]:box[3],:] = cv.medianBlur(fI[box[0]:box[2],box[1]:box[3],:], 7)
I3[bg!=0] = fI[bg!=0]
I4 = I2.copy()
fI = I2.copy()
fI[box[0]:box[2],box[1]:box[3],:] = cv.medianBlur(fI[box[0]:box[2],box[1]:box[3],:], 7)
            I4[bg!=0] = fI[bg!=0]
cv.imwrite("imgs/storyblocks_orig.png",frame)
cv.imwrite("imgs/storyblocks_subs.png",I1)
cv.imwrite("imgs/storyblocks_res.png",I3)
cv.imshow("", np.hstack([np.vstack([I1,I2]),np.vstack([I3,I4])]))
cv.waitKey(0)
def process_video(video, out_video, alpha, W, box, med):
    # alpha, W and box come from get_alpha_W_black(); med is the box-local mask
    # of watermark remnants that still need median-blurring after subtraction.
camera = cv.VideoCapture(video)
if not camera.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(camera.get(cv.CAP_PROP_FOURCC))
video_fps = camera.get(cv.CAP_PROP_FPS)
video_size = (int(camera.get(cv.CAP_PROP_FRAME_WIDTH)),
int(camera.get(cv.CAP_PROP_FRAME_HEIGHT)))
vwriter = cv.VideoWriter(out_video, video_FourCC,
video_fps, video_size, isColor=True)
accum_time = 0
idx = -1
while True:
idx += 1
res, frame = camera.read()
if not res:
break
start = time.time()
I1 = (frame.astype(float) - alpha * W)/(1-alpha)
I1[I1<0] = 0
I1 = I1.astype(np.uint8)
fI = I1[box[0]:box[2],box[1]:box[3],:].copy()
fI = cv.medianBlur(fI, 5)
I1[box[0]:box[2],box[1]:box[3],:][med!=0] = fI[med!=0]
accum_time += time.time() - start
print("frame:", idx,"fps:", (idx+1)/accum_time)
vwriter.write(I1)
# break
camera.release()
vwriter.release()
def main():
alpha, W, box = get_alpha_W_black()
alpha2, W2, bg = blur_mask(alpha, W, box)
cv.imwrite("imgs/storyblocks_alpha.png", (alpha*255).astype(np.uint8))
cv.imwrite("imgs/storyblocks_W.png", (W).astype(np.uint8))
process_video_demo(alpha, W, alpha2, W2, bg, box)
return
dirname = "/database/水印视频/storyblocks/"
for root, dirs, names in os.walk(dirname):
if dirs==[]:
os.makedirs(root.replace("水印视频", "去水印视频"), exist_ok=True)
for name in names:
video = root+"/"+name
out_video = root.replace("水印视频", "去水印视频")+"/"+name
print(video)
                process_video(video, out_video, alpha, W, box, bg[box[0]:box[2], box[1]:box[3], :])
if __name__ == '__main__':
main()
``` |
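The script above leans on the same alpha-blend inversion throughout, I = (frame - alpha*W) / (1 - alpha). The toy example below applies that inversion to synthetic data so the arithmetic is easy to verify; it uses no video files and is purely illustrative.

```python
# Toy check of the watermark inversion I = (frame - alpha*W) / (1 - alpha)
import numpy as np

rng = np.random.default_rng(0)
clean = rng.integers(0, 256, size=(4, 4, 3)).astype(float)   # pretend original frame
W = np.full_like(clean, 255.0)                                # white watermark layer
alpha = np.full_like(clean, 0.4)                              # constant opacity

watermarked = (1 - alpha) * clean + alpha * W                 # forward blend
recovered = (watermarked - alpha * W) / (1 - alpha)           # inversion used above
print(np.allclose(recovered, clean))                          # True
```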
{
"source": "jiajia15401/pywidget",
"score": 3
} |
#### File: pywidget/AI/__init__.py
```python
__all__ = ['ai']
v = '1.0.0'
class ai():
    def make(self, path):
        """Write a minimal agent template to <path>.py and return a fresh ai()."""
        n = path + '.py'
        self.name = n
        run = '''class ai():
    name = "{0}"
    def __init__(self):
        self.list = []
        self.actions = []
        self.goal = []
    def why(self):
        pass
    def think(self):
        self.why()
    def work(self):
        now = []
        while True:
            for i in self.actions:
                i()
                now.append('%s 1' % i)
            for o in self.goal:
                if not o():
                    self.list_of_goal('%s %s 0' % (now, o))
            self.list_of_goal('%s 1' % now)
            self.think()
    def add(self, action):
        self.actions.append(action)
    def list_of_goal(self, goal):
        self.list.append(goal)
'''.format(n)
        with open(n, 'w') as s:
            s.write(run)
        return ai()
```
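A quick use of the generator above only needs an output module name. The import path below is a guess based on the file layout, and `my_agent` is a made-up name, so treat this as a sketch rather than documented usage.

```python
# Sketch: generate a template agent module; 'my_agent' is a made-up name.
from pywidget.AI import ai   # import path is an assumption based on the file layout

ai().make('my_agent')        # writes my_agent.py in the working directory
print(open('my_agent.py').read().splitlines()[0])  # class ai():
```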
#### File: pywidget/pywidget/save.py
```python
__all__ = ['save', 'use_to_decode']
class use_to_decode():
def str_to_16(self,s):
return ' '.join([hex(ord(c)).replace('0x', '') for c in s])
def _16_to_str(self,s):
return ''.join([chr(i) for i in [int(b, 16) for b in s.split(' ')]])
def str_to_2(self,s):
return ' '.join([bin(ord(c)).replace('0b', '') for c in s])
def _2_to_str(self,s):
return ''.join([chr(i) for i in [int(b, 2) for b in s.split(' ')]])
class save():
    def __init__(self, path=None, class_=None, last_name='.data'):
        self.path = path
        self.use = class_
        self.name = last_name
    def str(self, name, value, last_name=None):
        # Persist a single string, either via the user-supplied encoder class
        # or with the built-in binary/hex encodings plus a hash line.
        if last_name is None:
            last_name = self.name
        n = name + last_name
        if self.path is not None:
            n = self.path + n
        with open(n, 'w') as s:
            if self.use is not None:
                s.writelines(self.use().str(value))
            else:
                s.write(use_to_decode().str_to_2(value) + '\n')
                s.write(use_to_decode().str_to_16(value) + '\n')
                s.write(str(hash(value)))
    def list(self, name, value, last_name=None):
        # Persist a list of strings, one encoded entry per element.
        if last_name is None:
            last_name = '.list' + self.name
        n = name + last_name
        if self.path is not None:
            n = self.path + n
        with open(n, 'w') as s:
            if self.use is not None:
                s.writelines(self.use().list(value))
            else:
                for i in value:
                    s.write(use_to_decode().str_to_2(i) + '\n')
                    s.write(use_to_decode().str_to_16(i) + '\n')
                    s.write(str(hash(i)) + '\n')
``` |
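The helper class above is just a pair of reversible text encodings. The round-trip below shows the hex variant using the same expressions, with only the standard library and a made-up sample string.

```python
# Sketch: hex round-trip using the same expressions as use_to_decode.
s = 'pywidget'
encoded = ' '.join(hex(ord(c)).replace('0x', '') for c in s)     # str_to_16
decoded = ''.join(chr(int(b, 16)) for b in encoded.split(' '))   # _16_to_str
print(encoded)        # '70 79 77 69 64 67 65 74'
print(decoded == s)   # True
```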
{
"source": "JiajiaHuang/smonus",
"score": 2
} |
#### File: core/basic/basic_info.py
```python
from django.forms import model_to_dict
from backstage.models import SmAdminMenu, IndexInfo, CarouselDisplay, ColumnListsInfo, ContactInfo
from blog.models import Article
from core.basic_functions import DictMerge
from core.fields import MultilingualField
def getAdminMenuList(request):
"""
:param request:
:return:
"""
adminMenuList = {}
for parent_id in range(1, 8):
sm_menu_list = []
smAdminMenu = SmAdminMenu.objects.filter(parent_id=parent_id).order_by('menu_order')
for s_list in range(smAdminMenu.count()):
# sm_menu = serializers.serialize('json', [smAdminMenu[s_list], ])
# print(sm_menu)
di = model_to_dict(smAdminMenu[s_list])
s_dict = {'url': di['url'],
'nenu_names': di[MultilingualField.localized_field_name(
'nenu_names', request.session['_language'])]}
sm_menu_list.append(s_dict)
adminMenuList["menu_" + str(parent_id)] = sm_menu_list
return {'adminMenuList': adminMenuList}
def getIndexInfoList(request):
index_info_List = dict()
index_info = IndexInfo.objects.all()
for i_list in index_info:
di = model_to_dict(i_list)
index_info_List[i_list.page_where] = dict()
index_info_List[i_list.page_where]["page_info"] = i_list.page_info
index_info_List[i_list.page_where]["page_url"] = i_list.page_url
index_info_List[i_list.page_where]["page_name"] = di[MultilingualField.localized_field_name(
'page_name', request.session['_language'])]
print(index_info_List)
return {"index_info": index_info_List}
def getCarouselDisplay(request):
carousel_display = CarouselDisplay.objects.all().order_by('carousel_nub')
carousel_display_List = ['' for x in range(carousel_display.count())]
print(carousel_display_List, carousel_display.count())
for c_list in range(carousel_display.count()):
print(carousel_display[c_list])
ci = model_to_dict(carousel_display[c_list])
c_dict = {'carousel_img': ci['carousel_img'],
'carousel_url': ci['carousel_url'],
'carousel_info': ci[MultilingualField.localized_field_name(
'carousel_info', request.session['_language'])]}
carousel_display_List[int(ci['carousel_nub']) - 1] = c_dict
return {"carousel_display": carousel_display_List}
def getColumnLists(request):
column_lists = ColumnListsInfo.objects.all().order_by('column_lists_nub')
column_lists_List = ['' for x in range(column_lists.count())]
print(column_lists_List, column_lists.count())
for c_list in range(column_lists.count()):
print(column_lists[c_list])
ci = model_to_dict(column_lists[c_list])
c_dict = {'column_url': ci['column_url'],
'column_lists_img': ci['column_lists_img'],
'column_lists_title': ci[MultilingualField.localized_field_name(
'column_lists_title', request.session['_language'])],
'column_lists_subtitle': ci[MultilingualField.localized_field_name(
'column_lists_subtitle', request.session['_language'])],
'column_lists_alt': ci[MultilingualField.localized_field_name(
'column_lists_alt', request.session['_language'])],
}
column_lists_List[int(ci['column_lists_nub']) - 1] = c_dict
return {"column_lists_List": column_lists_List}
def getContactInfoList(request):
contact_info_List = dict()
contact_info = ContactInfo.objects.all()
for c_contact_info in contact_info:
ci = model_to_dict(c_contact_info)
print(c_contact_info.contact_nub)
contact_info_List[c_contact_info.contact_nub] = dict()
contact_info_List[c_contact_info.contact_nub]["contact_name"] = c_contact_info.contact_name
contact_info_List[c_contact_info.contact_nub]["contact_url"] = c_contact_info.contact_url
contact_info_List[c_contact_info.contact_nub]["contact_info"] = ci[MultilingualField.localized_field_name(
'contact_info', request.session['_language'])]
print(contact_info_List)
return {"contact_info_List": contact_info_List}
def getHeaderAndFooterDict(request):
header_info = {
'countryinfo': request.session['ip2country'],
'weather': request.session['weather']
}
dict_info = dict()
dict_info = DictMerge(dict_info, {'header_info': header_info})
    # adminMenuList setup
adminMenuList = getAdminMenuList(request)
dict_info = DictMerge(dict_info, adminMenuList)
    # index_info_List setup
index_info_List = getIndexInfoList(request)
dict_info = DictMerge(dict_info, index_info_List)
    # contact_info setup
contact_info_List = getContactInfoList(request)
dict_info = DictMerge(dict_info, contact_info_List)
return dict_info
def getArticleListToDict(request):
article_lists = Article.objects.all().order_by('-updated')
article_lists_List = []
for c_list in range(article_lists.count()):
ci = model_to_dict(article_lists[c_list])
article = article_lists.get(title_id=ci['title_id'])
article_dict = dict()
try:
tags_list = eval(ci['tags'])
except:
tags_list = [{"value": ""}]
if not ci['is_deleted'] and ci['process_state']:
article_dict = {
"title": ci[MultilingualField.localized_field_name(
'title', request.session['_language'])],
"channel_name": ci['channel_name'],
"title_url": ci[MultilingualField.localized_field_name(
'title_url', request.session['_language'])],
"updated": ci['created'],
"subtitle": ci[MultilingualField.localized_field_name(
'subtitle', request.session['_language'])],
"thumbnail_image": '/' + ci['thumbnail_image'].url,
# "is_deleted": ci['is_deleted'],
"is_image": ci['is_image'],
"is_show": ci['is_show'],
"tags": tags_list
}
article_lists_List.append(article_dict)
return article_lists_List
def getArticleToDict(request, article_):
ci = model_to_dict(article_)
try:
tags_list = eval(ci['tags'])
except:
tags_list = [{"value": ""}]
article_dict = None
if not ci['is_deleted'] and ci['process_state']:
article_dict = {
"title_id":ci['title_id'],
"title": ci[MultilingualField.localized_field_name('title', request.session['_language'])],
"channel_name": ci['channel_name'],
"title_url": ci[MultilingualField.localized_field_name('title_url', request.session['_language'])],
"updated": ci['created'],
"source": ci['source'],
"comment_count": ci['comment_count'],
"content": ci[MultilingualField.localized_field_name('content', request.session['_language'])],
"subtitle": ci[MultilingualField.localized_field_name('subtitle', request.session['_language'])],
"keyword": ci[MultilingualField.localized_field_name('keyword', request.session['_language'])],
"description_key": ci[MultilingualField.localized_field_name('description_key', request.session['_language'])],
"thumbnail_image": '/' + ci['thumbnail_image'].url,
# "is_deleted": ci['is_deleted'],
"is_image": ci['is_image'],
"is_show": ci['is_show'],
"tags": tags_list
}
return article_dict
```
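`MultilingualField.localized_field_name` is defined elsewhere in this repository; judging by the `*_zh_hans` / `*_en` model fields used above, it presumably maps a base field name plus a language code to the suffixed column name. The stand-in below encodes that assumption purely for illustration.

```python
# Stand-in for MultilingualField.localized_field_name, assuming it simply
# appends the language code as a suffix (an assumption based on the
# *_zh_hans / *_en field names used elsewhere in this repository).
def localized_field_name(field, language):
    return '{0}_{1}'.format(field, language.replace('-', '_'))

record = {'title_zh_hans': 'Simplified-Chinese title', 'title_en': 'English title'}
print(record[localized_field_name('title', 'en')])       # English title
print(record[localized_field_name('title', 'zh-hans')])  # Simplified-Chinese title
```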
#### File: user/articlemanage/views.py
```python
import time
from ckeditor.widgets import CKEditorWidget
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from SMONU.settings import LANGUAGES
from blog.models import Article
from core.basic_functions import set_flow
from core.fields import MultilingualField
from core.session.ManageSession import getheader_info, get_basic_info
class show_article_lists(View):
@login_required
def get(request):
header_info = getheader_info(request)
menu_order = request.GET.get("menu_order")
article_list = Article.objects.all()
return render(request,
"backstage/userauth/pagefile/page/article-management.html",
{"header_info": header_info, "article_list": article_list})
@login_required
class show_add_article(View):
@login_required
def get(request):
header_info = getheader_info(request)
basic_info = get_basic_info(request)
menu_order = request.GET.get("menu_order")
article_list = Article.objects.all().order_by('-id')[:1]
print(article_list)
if article_list.count() != 0:
print(article_list[0].id)
title_id = set_flow(article_list[0].id + 1)
else:
title_id = set_flow(1)
return render(request,
"backstage/userauth/pagefile/base/articlemanagement/addform/page/add-article.html",
{"header_info": header_info, "basic_info": basic_info, "article_list": article_list,
"title_id": title_id})
@login_required
@csrf_exempt
@require_http_methods(["POST"])
def post(request):
basic_info = get_basic_info(request)
title_id = request.GET.get("title_id", "")
account_ID = request.POST.get("account_ID", "")
owner_ID = request.POST.get("owner_ID", "")
source = request.POST.get("source", "")
allow_comments = request.POST.get("allow_comments", "")
is_show = request.POST.get("is_show", "")
process_state = request.POST.get("process_state", "")
channel_ID = 1
channel_name = request.POST.get("channel_name", "")
privacy_level = request.POST.get("privacy_level", "")
tags = request.POST.get("tags", "")
# request.session["title_id"] = title_id
# request.session.get('tags', None)
if allow_comments == "on":
allow_comments = True
else:
allow_comments = False
if is_show == "on":
is_show = True
else:
is_show = False
if process_state == "on":
process_state = True
else:
process_state = False
if privacy_level == "on":
privacy_level = True
else:
privacy_level = False
try:
thumbnail_image = request.FILES['thumbnail_image']
is_image = True
except:
thumbnail_image = None
is_image = False
article = Article.objects.filter(title_id=title_id)
if article.count() != 0:
article_list = Article.objects.all().order_by('-id')[:1]
if article_list.count() != 0:
title_id = set_flow(article_list[0].id + 1)
else:
title_id = set_flow(1)
new_article = Article.objects.create(title_id=title_id, account_ID=basic_info['user_name'], owner_ID=owner_ID,
source=source, is_image=is_image, is_deleted=False,
allow_comments=allow_comments, is_show=is_show,
process_state=process_state,
channel_ID=channel_ID, channel_name=channel_name,
privacy_level=privacy_level, tags=tags)
if is_image:
new_article.thumbnail_image.save('title' + str(time.time()) + ".jpg", thumbnail_image, save=True)
new_article.save()
return HttpResponseRedirect('/management-admin/articlemanage/edit-article.html?title_id=%s' % title_id)
@login_required
class show_edit_article(View):
@login_required
def get(request):
header_info = getheader_info(request)
basic_info = get_basic_info(request)
title_id = request.GET.get("title_id")
language = request.GET.get("language", 'zh_hans')
article = Article.objects.get(title_id=title_id)
languages_list = []
for lang in LANGUAGES:
languages_list.append(lang[0].replace("-", "_"), )
if language not in languages_list:
return HttpResponseRedirect('/management-admin/articlemanage/management-article-lists.html')
languages = []
for lang in LANGUAGES:
if lang[0].replace("-", "_") == language:
languages.append((lang[0].replace("-", "_"), lang[1], True))
else:
languages.append((lang[0].replace("-", "_"), lang[1], False))
more_text = ['title', 'subtitle', 'keyword', 'description_key', 'content', 'title_url']
multilingual_text = {}
        for mx in more_text:
            multilingual_text[mx] = getattr(article, '{0}_{1}'.format(mx, language))
multilingual_text['language'] = language
return render(request,
"backstage/userauth/pagefile/base/articlemanagement/editform/page/edit-article.html",
{"header_info": header_info, "basic_info": basic_info, "article": article,
"title_id": title_id, 'languages': languages, 'multilingual_text': multilingual_text})
@login_required
@csrf_exempt
@require_http_methods(["POST"])
def basic_post(request):
basic_info = get_basic_info(request)
title_id = request.GET.get("title_id", "")
account_ID = request.POST.get("account_ID", "")
owner_ID = request.POST.get("owner_ID", "")
source = request.POST.get("source", "")
allow_comments = request.POST.get("allow_comments", "")
is_show = request.POST.get("is_show", "")
process_state = request.POST.get("process_state", "")
channel_ID = 1
channel_name = request.POST.get("channel_name", "")
privacy_level = request.POST.get("privacy_level", "")
tags = request.POST.get("tags", "")
if allow_comments == "on":
allow_comments = True
else:
allow_comments = False
if is_show == "on":
is_show = True
else:
is_show = False
if process_state == "on":
process_state = True
else:
process_state = False
if privacy_level == "on":
privacy_level = True
else:
privacy_level = False
try:
thumbnail_image = request.FILES['thumbnail_image']
is_image = True
except:
thumbnail_image = None
is_image = False
article = Article.objects.filter(title_id=title_id)
if article.count() == 1:
article.update(title_id=title_id, account_ID=basic_info['user_name'],
owner_ID=owner_ID,
source=source, is_image=is_image, is_deleted=False,
allow_comments=allow_comments, is_show=is_show,
process_state=process_state,
channel_ID=channel_ID, channel_name=channel_name,
privacy_level=privacy_level, tags=tags)
new_article = article.get(title_id=title_id)
if is_image:
new_article.thumbnail_image.save('title' + str(time.time()) + ".jpg", thumbnail_image, save=True)
new_article.save()
return HttpResponseRedirect('/management-admin/articlemanage/edit-article.html?title_id=%s' % title_id)
else:
return HttpResponseRedirect('/management-admin/articlemanage/management-article-lists.html')
@login_required
@csrf_exempt
@require_http_methods(["POST"])
def post(request):
title_id = request.GET.get("title_id", "")
language = request.GET.get("language", "")
title = request.POST.get("title", "")
subtitle = request.POST.get("subtitle", "")
keyword = request.POST.get("keyword", "")
title_url = request.POST.get("title_url", "")
description_key = request.POST.get("description_key", "")
content = request.POST.get("content", "")
        article = Article.objects.filter(title_id=title_id)
        title_url_matches = Article.objects.filter(
            **{'title_url_{0}'.format(language): title_url})
        # fall back to "<title_id>.html" when the submitted URL is empty but already present
        if title_url_matches.exists() and title_url == "":
            title_url = "{0}.html".format(title_id)
print(LANGUAGES)
languages = []
for lang in LANGUAGES:
languages.append(lang[0].replace("-", "_"), )
if language not in languages:
return HttpResponseRedirect('/management-admin/articlemanage/management-article-lists.html')
if article.count() == 1:
new_article = article.get(title_id=title_id)
            setattr(new_article, 'title_{0}'.format(language), title)
            setattr(new_article, 'subtitle_{0}'.format(language), subtitle)
            setattr(new_article, 'keyword_{0}'.format(language), keyword)
            setattr(new_article, 'description_key_{0}'.format(language), description_key)
            setattr(new_article, 'content_{0}'.format(language), content)
            setattr(new_article, 'title_url_{0}'.format(language), title_url)
new_article.save()
# new_article.content.val(language, content)
# new_article.save()
return HttpResponseRedirect(
'/management-admin/articlemanage/edit-article.html?title_id=%s&language=%s#article-form' % (
title_id, language))
else:
return HttpResponseRedirect('/management-admin/articlemanage/management-article-lists.html')
```
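The edit view above writes per-language columns by composing the attribute name at runtime. The stand-alone sketch below shows the same setattr pattern on a plain object, with no Django involved and made-up field names.

```python
# Sketch: per-language attribute updates via setattr, mirroring the edit view
# above but on a plain object instead of a Django model.
class FakeArticle:
    title_en = ''
    title_zh_hans = ''

article = FakeArticle()
language = 'en'
setattr(article, 'title_{0}'.format(language), 'Hello world')
print(article.title_en)  # Hello world
```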
#### File: user/basicinfo/views.py
```python
import time
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from backstage.models import SmAdminMenu, IndexInfo, CarouselDisplay, NewsInfo, ContactInfo, ColumnListsInfo
from core.session.ManageSession import getheader_info
@login_required
def header_index(request):
"""
    # Admin home page (header settings)
:param request:h=success&i=v1
:return:
"""
h = request.GET.get("h", "")
i = request.GET.get("i", "")
info = {}
if h == "success" and i != "":
info["s"] = True
info["e"] = False
info["info"] = "修改成功!"
elif h == "error" and i != "":
info["s"] = False
info["e"] = True
info["info"] = "修改失敗!"
else:
info["s"] = False
info["e"] = False
header_info = getheader_info(request)
adminMenuList = {}
for parent_id in range(1, 8):
smAdminMenu = SmAdminMenu.objects.filter(parent_id=parent_id).order_by('menu_order')
adminMenuList["menu_" + str(parent_id)] = smAdminMenu
print(adminMenuList)
URL_Info = IndexInfo.objects.all() #
Carousel_Info = CarouselDisplay.objects.all().order_by('carousel_nub') #
News_Info = NewsInfo.objects.all() #
Contact_Info = ContactInfo.objects.all() #
return render(request, "backstage/userauth/pagefile/page/header-index.html",
{"header_info": header_info, "adminMenuList": adminMenuList, "info": info, "URL_Info": URL_Info,
"Carousel_Info": Carousel_Info, "News_Info": News_Info, "Contact_Info": Contact_Info})
@login_required
def footer_index(request):
"""
    # Admin home page (footer settings)
:param request:h=success&i=v1
:return:
"""
h = request.GET.get("h", "")
i = request.GET.get("i", "")
info = {}
if h == "success" and i != "":
info["s"] = True
info["e"] = False
info["info"] = "修改成功!"
elif h == "error" and i != "":
info["s"] = False
info["e"] = True
info["info"] = "修改失敗!"
else:
info["s"] = False
info["e"] = False
header_info = getheader_info(request)
adminMenuList = {}
for parent_id in range(1, 8):
smAdminMenu = SmAdminMenu.objects.filter(parent_id=parent_id).order_by('menu_order')
adminMenuList["menu_" + str(parent_id)] = smAdminMenu
print(adminMenuList)
column_lists = ColumnListsInfo.objects.all().order_by('column_lists_nub') #
return render(request, "backstage/userauth/pagefile/page/foot-index.html",
{"header_info": header_info, "adminMenuList": adminMenuList, "info": info,
"column_lists": column_lists})
@login_required
@csrf_exempt
@require_http_methods(["POST"])
def add_menu(request):
"""
:param request:
:return:
"""
menu = request.GET.get("menu")
menu_name = request.POST.get("menu_name", "")
menu_url = request.POST.get("menu_url", "")
menu_ft = request.POST.get("menu_ft", "")
menu_jt = request.POST.get("menu_jt", "")
menu_yw = request.POST.get("menu_yw", "")
menu_fw = request.POST.get("menu_fw", "")
menu_xby = request.POST.get("menu_xby", "")
menu_pty = request.POST.get("menu_pty", "")
print("POST不打開", menu, menu_name, menu_url)
sm_nub = SmAdminMenu.objects.filter(parent_id=menu).count()
SmAdminMenu.objects.create(parent_id=menu, menu_mc=menu_name, menu_order=sm_nub + 1, url=menu_url,
nenu_names_zh_hant=menu_ft, nenu_names_zh_hans=menu_jt, nenu_names_en=menu_yw,
nenu_names_fr=menu_fw, nenu_names_pt=menu_xby, nenu_names_es=menu_pty)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@login_required
def del_menu(request):
"""
:param request:
:return:
"""
menu = request.GET.get("menu")
menu_order = request.GET.get("menu_order")
menu_count = SmAdminMenu.objects.filter(parent_id=menu).count()
SmAdminMenu.objects.filter(parent_id=menu, menu_order=menu_order).delete()
print(menu_order, menu_count)
for nub in range(int(menu_order), menu_count + 1):
print(menu_order, menu_count)
SmAdminMenu.objects.filter(parent_id=menu, menu_order=nub).update(menu_order=nub - 1)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@login_required
class change_menu(View):
def get(request):
header_info = getheader_info(request)
menu = request.GET.get("menu")
menu_order = request.GET.get("menu_order")
smAdminMenu = SmAdminMenu.objects.get(parent_id=menu, menu_order=menu_order)
print(smAdminMenu)
return render(request, "backstage/userauth/pagefile/base/headerindex/addform/page/change-menu.html",
{"header_info": header_info, "smAdminMenu": smAdminMenu})
@csrf_exempt
@require_http_methods(["POST"])
def post(request):
menu = request.GET.get("menu")
menu_order = request.GET.get("menu_order")
menu_name = request.POST.get("menu_name", "")
menu_url = request.POST.get("menu_url", "")
menu_ft = request.POST.get("menu_ft", "")
menu_jt = request.POST.get("menu_jt", "")
menu_yw = request.POST.get("menu_yw", "")
menu_fw = request.POST.get("menu_fw", "")
menu_xby = request.POST.get("menu_xby", "")
menu_pty = request.POST.get("menu_pty", "")
print("POST不打開", menu, menu_order, menu_name, menu_url)
SmAdminMenu.objects.filter(parent_id=menu, menu_order=menu_order).update(menu_mc=menu_name,
url=menu_url,
nenu_names_zh_hant=menu_ft,
nenu_names_zh_hans=menu_jt,
nenu_names_en=menu_yw,
nenu_names_fr=menu_fw,
nenu_names_pt=menu_xby,
nenu_names_es=menu_pty)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@login_required
def change_menu_add(request):
menu = request.GET.get("menu")
menu_order = request.GET.get("menu_order")
sm_nub = SmAdminMenu.objects.filter(parent_id=menu).count()
if int(menu_order) >= sm_nub:
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html?h=error&i=v1")
sm_one = SmAdminMenu.objects.get(parent_id=int(menu), menu_order=int(menu_order))
sm_two = SmAdminMenu.objects.get(parent_id=int(menu), menu_order=int(menu_order) + 1)
sm_one.menu_order = int(menu_order) + 1
sm_two.menu_order = int(menu_order)
sm_one.save()
sm_two.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html?h=success&i=v1")
@login_required
def change_menu_dir(request):
menu = request.GET.get("menu")
menu_order = request.GET.get("menu_order")
sm_nub = 1
if int(menu_order) <= sm_nub:
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html?h=error&i=v1")
print(int(menu), int(menu_order))
sm_one = SmAdminMenu.objects.get(parent_id=int(menu), menu_order=int(menu_order) - 1)
sm_two = SmAdminMenu.objects.get(parent_id=int(menu), menu_order=int(menu_order))
sm_one.menu_order = int(menu_order) + 1
sm_two.menu_order = int(menu_order)
sm_one.save()
sm_two.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html?h=success&i=v1")
@login_required
class change_url_info(View):
def get(request):
header_info = getheader_info(request)
menu_order = request.GET.get("menu_order")
index_info = IndexInfo.objects.get(id=menu_order)
print(index_info)
return render(request, "backstage/userauth/pagefile/base/headerindex/addform/page/change-urlinfo.html",
{"header_info": header_info, "index_info": index_info})
@csrf_exempt
@require_http_methods(["POST"])
def post(request):
menu = request.GET.get("menu")
page_info = request.POST.get("page_info", "")
page_where = request.POST.get("page_where", "")
page_url = request.POST.get("page_url", "")
page_name_zh_hant = request.POST.get("page_name_zh_hant", "")
page_name_zh_hans = request.POST.get("page_name_zh_hans", "")
page_name_en = request.POST.get("page_name_en", "")
page_name_fr = request.POST.get("page_name_fr", "")
page_name_es = request.POST.get("page_name_es", "")
page_name_pt = request.POST.get("page_name_pt", "")
print("POST不打開", page_where, menu, page_info, page_url)
IndexInfo.objects.filter(id=menu).update(page_info=page_info, page_where=page_where, page_url=page_url,
page_name_zh_hant=page_name_zh_hant,
page_name_zh_hans=page_name_zh_hans, page_name_en=page_name_en,
page_name_fr=page_name_fr, page_name_es=page_name_es,
page_name_pt=page_name_pt)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@login_required
class add_news_list(View):
def get(request):
header_info = getheader_info(request)
return render(request, "backstage/userauth/pagefile/base/headerindex/addform/page/newslist/add-news-list.html",
{"header_info": header_info})
@csrf_exempt
@require_http_methods(["POST"])
def post(request):
news_name = request.POST.get("news_name", "")
news_url = request.POST.get("news_url", "")
news_info_zh_hant = request.POST.get("news_info_zh_hant", "")
news_info_zh_hans = request.POST.get("news_info_zh_hans", "")
news_info_en = request.POST.get("news_info_en", "")
news_info_fr = request.POST.get("news_info_fr", "")
news_info_es = request.POST.get("news_info_es", "")
news_info_pt = request.POST.get("news_info_pt", "")
NewsInfo.objects.create(news_name=news_name, news_url=news_url, news_info_zh_hant=news_info_zh_hant,
news_info_zh_hans=news_info_zh_hans, news_info_en=news_info_en,
news_info_fr=news_info_fr, news_info_es=news_info_es, news_info_pt=news_info_pt)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@login_required
class change_news_list(View):
def get(request):
header_info = getheader_info(request)
menu_order = request.GET.get("menu_order")
news_info = NewsInfo.objects.get(id=menu_order)
print(news_info)
return render(request,
"backstage/userauth/pagefile/base/headerindex/addform/page/newslist/change-news-list.html",
{"header_info": header_info, "news_info": news_info})
@csrf_exempt
@require_http_methods(["POST"])
def post(request):
menu = request.GET.get("menu")
news_name = request.POST.get("news_name", "")
news_url = request.POST.get("news_url", "")
news_info_zh_hant = request.POST.get("news_info_zh_hant", "")
news_info_zh_hans = request.POST.get("news_info_zh_hans", "")
news_info_en = request.POST.get("news_info_en", "")
news_info_fr = request.POST.get("news_info_fr", "")
news_info_es = request.POST.get("news_info_es", "")
news_info_pt = request.POST.get("news_info_pt", "")
NewsInfo.objects.filter(id=menu).update(news_name=news_name, news_url=news_url,
news_info_zh_hant=news_info_zh_hant,
news_info_zh_hans=news_info_zh_hans, news_info_en=news_info_en,
news_info_fr=news_info_fr, news_info_es=news_info_es,
news_info_pt=news_info_pt)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@login_required
def del_news_list(request):
menu = request.GET.get("menu_order", "")
NewsInfo.objects.filter(id=menu).delete()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
@login_required
class add_homecarousel(View):
def get(request):
header_info = getheader_info(request)
count = CarouselDisplay.objects.all().count() + 1
return render(request,
"backstage/userauth/pagefile/base/headerindex/addform/page/homecarousel/add-homecarousel.html",
{"header_info": header_info, 'count': count})
@csrf_exempt
@require_http_methods(["POST"])
def post(request):
carousel_nub = CarouselDisplay.objects.all().count() + 1
carousel_img = request.FILES['carousel_img']
carousel_url = request.POST.get("carousel_url", "")
carousel_info_zh_hant = request.POST.get("carousel_info_zh_hant", "")
carousel_info_zh_hans = request.POST.get("carousel_info_zh_hans", "")
carousel_info_en = request.POST.get("carousel_info_en", "")
carousel_info_fr = request.POST.get("carousel_info_fr", "")
carousel_info_es = request.POST.get("carousel_info_es", "")
carousel_info_pt = request.POST.get("carousel_info_pt", "")
# add_homecarousel
carousel_display = CarouselDisplay.objects.create(carousel_nub=carousel_nub, carousel_url=carousel_url,
carousel_info_zh_hant=carousel_info_zh_hant,
carousel_info_zh_hans=carousel_info_zh_hans,
carousel_info_en=carousel_info_en,
carousel_info_fr=carousel_info_fr,
carousel_info_es=carousel_info_es,
carousel_info_pt=carousel_info_pt)
carousel_display.carousel_img.save('card' + str(time.time())[0:9] + str(time.time())[5:0] + ".jpg",
carousel_img, save=True)
carousel_display.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html#homecarousel")
@login_required
class change_homecarousel(View):
def get(request):
header_info = getheader_info(request)
menu_order = request.GET.get("menu_order")
carousel_display = CarouselDisplay.objects.get(id=menu_order)
return render(request,
"backstage/userauth/pagefile/base/headerindex/addform/page/homecarousel/change-homecarousel.html",
{"header_info": header_info, "carousel_display": carousel_display})
@csrf_exempt
@require_http_methods(["POST"])
def post(request):
menu_order = request.GET.get("menu_order")
carousel_url = request.POST.get("carousel_url", "")
try:
carousel_img = request.FILES['carousel_img']
except:
carousel_img = None
carousel_info_zh_hant = request.POST.get("carousel_info_zh_hant", "")
carousel_info_zh_hans = request.POST.get("carousel_info_zh_hans", "")
carousel_info_en = request.POST.get("carousel_info_en", "")
carousel_info_fr = request.POST.get("carousel_info_fr", "")
carousel_info_es = request.POST.get("carousel_info_es", "")
carousel_info_pt = request.POST.get("carousel_info_pt", "")
carousel_display = CarouselDisplay.objects.get(carousel_nub=menu_order)
carousel_display.carousel_url = carousel_url
carousel_display.carousel_info_zh_hant = carousel_info_zh_hant
carousel_display.carousel_info_zh_hans = carousel_info_zh_hans
carousel_display.carousel_info_en = carousel_info_en
carousel_display.carousel_info_fr = carousel_info_fr
carousel_display.carousel_info_es = carousel_info_es
carousel_display.carousel_info_pt = carousel_info_pt
if carousel_img is not None:
carousel_display.carousel_img.save('card' + str(time.time())[0:9] + str(time.time())[5:0] + ".jpg",
carousel_img, save=True)
carousel_display.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html#homecarousel")
@login_required
def del_homecarousel(request):
menu_order = request.GET.get("menu_order")
menu_count = CarouselDisplay.objects.all().count()
CarouselDisplay.objects.get(carousel_nub=menu_order).delete()
print(menu_order, menu_count)
for nub in range(int(menu_order), menu_count + 1):
print(nub, menu_order, menu_count)
CarouselDisplay.objects.filter(carousel_nub=nub + 1).update(carousel_nub=nub)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html#homecarousel")
@login_required
def up_homecarousel(request):
menu_order = request.GET.get("menu_order")
menu_count = CarouselDisplay.objects.all().count()
if 1 < int(menu_order) <= menu_count:
c1 = CarouselDisplay.objects.get(carousel_nub=menu_order)
c2 = CarouselDisplay.objects.get(carousel_nub=int(menu_order) - 1)
c1.carousel_nub = int(menu_order) - 1
c2.carousel_nub = int(menu_order)
c1.save()
c2.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html#homecarousel")
@login_required
def down_homecarousel(request):
menu_order = request.GET.get("menu_order")
menu_count = CarouselDisplay.objects.all().count()
if 1 <= int(menu_order) < menu_count:
c1 = CarouselDisplay.objects.get(carousel_nub=menu_order)
c2 = CarouselDisplay.objects.get(carousel_nub=int(menu_order) + 1)
c1.carousel_nub = int(menu_order) + 1
c2.carousel_nub = int(menu_order)
c1.save()
c2.save()
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html#homecarousel")
@login_required
class change_enter_info(View):
def get(request):
header_info = getheader_info(request)
menu_order = request.GET.get("menu_order")
index_info = ContactInfo.objects.get(id=menu_order)
print(index_info)
return render(request, "backstage/userauth/pagefile/base/headerindex/addform/page/change-enterinfo.html",
{"header_info": header_info, "index_info": index_info})
@csrf_exempt
@require_http_methods(["POST"])
def post(request):
menu = request.GET.get("menu")
contact_nub = request.POST.get("contact_nub", "")
contact_name = request.POST.get("contact_name", "")
contact_url = request.POST.get("contact_url", "")
contact_info_zh_hant = request.POST.get("contact_info_zh_hant", "")
contact_info_zh_hans = request.POST.get("contact_info_zh_hans", "")
contact_info_en = request.POST.get("contact_info_en", "")
contact_info_fr = request.POST.get("contact_info_fr", "")
contact_info_es = request.POST.get("contact_info_es", "")
contact_info_pt = request.POST.get("contact_info_pt", "")
print("POST不打開", contact_nub, menu, contact_name, contact_url)
ContactInfo.objects.filter(id=menu).update(contact_nub=contact_nub, contact_name=contact_name,
contact_url=contact_url,
contact_info_zh_hant=contact_info_zh_hant,
contact_info_zh_hans=contact_info_zh_hans,
contact_info_en=contact_info_en,
contact_info_fr=contact_info_fr, contact_info_es=contact_info_es,
contact_info_pt=contact_info_pt)
return HttpResponseRedirect("/management-admin/basicinfo/header_index.html")
# ************************************ *******************************************#
# carousel_list
class add_carousel_list(View):
@login_required
def get(request):
header_info = getheader_info(request)
count = ColumnListsInfo.objects.all().count() + 1
return render(request,
"backstage/userauth/pagefile/base/footerindex/addform/page/homecarousel/add-homecarousel.html",
{"header_info": header_info,'count':count})
@login_required
@csrf_exempt
@require_http_methods(["POST"])
def post(request):
column_lists_nub = ColumnListsInfo.objects.all().count() + 1
column_url = request.POST.get("column_url", "")
column_lists_img = request.FILES['column_lists_img']
column_lists_title_zh_hant = request.POST.get("column_lists_title_zh_hant", "")
column_lists_title_zh_hans = request.POST.get("column_lists_title_zh_hans", "")
column_lists_title_en = request.POST.get("column_lists_title_en", "")
column_lists_title_fr = request.POST.get("column_lists_title_fr", "")
column_lists_title_es = request.POST.get("column_lists_title_es", "")
column_lists_title_pt = request.POST.get("column_lists_title_pt", "")
column_lists_subtitle_zh_hant = request.POST.get("column_lists_subtitle_zh_hant", "")
column_lists_subtitle_zh_hans = request.POST.get("column_lists_subtitle_zh_hans", "")
column_lists_subtitle_en = request.POST.get("column_lists_subtitle_en", "")
column_lists_subtitle_fr = request.POST.get("column_lists_subtitle_fr", "")
column_lists_subtitle_es = request.POST.get("column_lists_subtitle_es", "")
column_lists_subtitle_pt = request.POST.get("column_lists_subtitle_pt", "")
column_lists_alt_zh_hant = request.POST.get("column_lists_alt_zh_hant", "")
column_lists_alt_zh_hans = request.POST.get("column_lists_alt_zh_hans", "")
column_lists_alt_en = request.POST.get("column_lists_alt_en", "")
column_lists_alt_fr = request.POST.get("column_lists_alt_fr", "")
column_lists_alt_es = request.POST.get("column_lists_alt_es", "")
column_lists_alt_pt = request.POST.get("column_lists_alt_pt", "")
column_lists = ColumnListsInfo.objects.create(column_lists_nub=column_lists_nub, column_url=column_url,
column_lists_title_zh_hant=column_lists_title_zh_hant,
column_lists_title_zh_hans=column_lists_title_zh_hans,
column_lists_title_en=column_lists_title_en,
column_lists_title_fr=column_lists_title_fr,
column_lists_title_es=column_lists_title_es,
column_lists_title_pt=column_lists_title_pt,
column_lists_subtitle_zh_hant=column_lists_subtitle_zh_hant,
column_lists_subtitle_zh_hans=column_lists_subtitle_zh_hans,
column_lists_subtitle_en=column_lists_subtitle_en,
column_lists_subtitle_fr=column_lists_subtitle_fr,
column_lists_subtitle_es=column_lists_subtitle_es,
column_lists_subtitle_pt=column_lists_subtitle_pt,
column_lists_alt_zh_hant=column_lists_alt_zh_hant,
column_lists_alt_zh_hans=column_lists_alt_zh_hans,
column_lists_alt_en=column_lists_alt_en,
column_lists_alt_fr=column_lists_alt_fr,
column_lists_alt_es=column_lists_alt_es,
column_lists_alt_pt=column_lists_alt_pt,
)
column_lists.column_lists_img.save('card' + str(time.time())[0:9] + str(time.time())[5:0] + ".jpg",
column_lists_img, save=True)
column_lists.save()
return HttpResponseRedirect("/management-admin/basicinfo/footer_index.html")
class change_carousel_lists(View):
@login_required
def get(request):
header_info = getheader_info(request)
menu_order = request.GET.get("menu_order")
carousel_display = ColumnListsInfo.objects.get(id=menu_order)
return render(request,
"backstage/userauth/pagefile/base/footerindex/addform/page/homecarousel/change-homecarousel.html",
{"header_info": header_info, "carousel_display": carousel_display})
@login_required
@csrf_exempt
@require_http_methods(["POST"])
def post(request):
column_lists_nub = request.GET.get("menu_order", "")
column_url = request.POST.get("column_url", "")
try:
column_lists_img = request.FILES['column_lists_img']
except:
column_lists_img = None
print(column_lists_nub)
column_lists_title_zh_hant = request.POST.get("column_lists_title_zh_hant", "")
column_lists_title_zh_hans = request.POST.get("column_lists_title_zh_hans", "")
column_lists_title_zh_en = request.POST.get("column_lists_title_en", "")
column_lists_title_fr = request.POST.get("column_lists_title_fr", "")
column_lists_title_es = request.POST.get("column_lists_title_es", "")
column_lists_title_pt = request.POST.get("column_lists_title_pt", "")
column_lists_subtitle_zh_hant = request.POST.get("column_lists_subtitle_zh_hant", "")
column_lists_subtitle_zh_hans = request.POST.get("column_lists_subtitle_zh_hans", "")
column_lists_subtitle_zh_en = request.POST.get("column_lists_subtitle_en", "")
column_lists_subtitle_zh_fr = request.POST.get("column_lists_subtitle_fr", "")
column_lists_subtitle_zh_es = request.POST.get("column_lists_subtitle_es", "")
column_lists_subtitle_zh_pt = request.POST.get("column_lists_subtitle_pt", "")
column_lists_alt_zh_hant = request.POST.get("column_lists_alt_zh_hant", "")
column_lists_alt_zh_hans = request.POST.get("column_lists_alt_zh_hans", "")
column_lists_alt_zh_en = request.POST.get("column_lists_alt_en", "")
column_lists_alt_zh_fr = request.POST.get("column_lists_alt_fr", "")
column_lists_alt_zh_es = request.POST.get("column_lists_alt_es", "")
column_lists_alt_zh_pt = request.POST.get("column_lists_alt_pt", "")
column_lists = ColumnListsInfo.objects.get(column_lists_nub=column_lists_nub)
column_lists.column_url = column_url
column_lists.column_lists_title_zh_hant = column_lists_title_zh_hant
column_lists.column_lists_title_zh_hans = column_lists_title_zh_hans
column_lists.column_lists_title_en = column_lists_title_zh_en
column_lists.column_lists_title_fr = column_lists_title_fr
column_lists.column_lists_title_es = column_lists_title_es
column_lists.column_lists_title_pt = column_lists_title_pt
column_lists.column_lists_subtitle_zh_hant = column_lists_subtitle_zh_hant
column_lists.column_lists_subtitle_zh_hans = column_lists_subtitle_zh_hans
column_lists.column_lists_subtitle_en = column_lists_subtitle_zh_en
column_lists.column_lists_subtitle_fr = column_lists_subtitle_zh_fr
column_lists.column_lists_subtitle_es = column_lists_subtitle_zh_es
column_lists.column_lists_subtitle_pt = column_lists_subtitle_zh_pt
column_lists.column_lists_alt_zh_hant = column_lists_alt_zh_hant
column_lists.column_lists_alt_zh_hans = column_lists_alt_zh_hans
column_lists.column_lists_alt_en = column_lists_alt_zh_en
column_lists.column_lists_alt_fr = column_lists_alt_zh_fr
column_lists.column_lists_alt_es = column_lists_alt_zh_es
column_lists.column_lists_alt_pt = column_lists_alt_zh_pt
print(column_lists_img)
if column_lists_img is not None:
column_lists.column_lists_img.save('card' + str(time.time())[0:9] + str(time.time())[5:0] + ".jpg",
column_lists_img, save=True)
column_lists.save()
return HttpResponseRedirect("/management-admin/basicinfo/footer_index.html")
@login_required
def del_carousel_lists(request):
menu_order = request.GET.get("menu_order")
menu_count = CarouselDisplay.objects.all().count()
ColumnListsInfo.objects.get(column_lists_nub=menu_order).delete()
print(menu_order, menu_count)
for nub in range(int(menu_order), menu_count + 1):
print(nub, menu_order, menu_count)
ColumnListsInfo.objects.filter(column_lists_nub=nub + 1).update(column_lists_nub=nub)
return HttpResponseRedirect("/management-admin/basicinfo/footer_index.html")
@login_required
def up_carousel_lists(request):
menu_order = request.GET.get("menu_order")
menu_count = ColumnListsInfo.objects.all().count()
if 1 < int(menu_order) <= menu_count:
c1 = ColumnListsInfo.objects.get(column_lists_nub=menu_order)
c2 = ColumnListsInfo.objects.get(column_lists_nub=int(menu_order) - 1)
c1.column_lists_nub = int(menu_order) - 1
c2.column_lists_nub = int(menu_order)
c1.save()
c2.save()
return HttpResponseRedirect("/management-admin/basicinfo/footer_index.html")
@login_required
def down_carousel_lists(request):
menu_order = request.GET.get("menu_order")
menu_count = ColumnListsInfo.objects.all().count()
if 1 <= int(menu_order) < menu_count:
c1 = ColumnListsInfo.objects.get(column_lists_nub=menu_order)
c2 = ColumnListsInfo.objects.get(column_lists_nub=int(menu_order) + 1)
c1.column_lists_nub = int(menu_order) + 1
c2.column_lists_nub = int(menu_order)
c1.save()
c2.save()
return HttpResponseRedirect("/management-admin/basicinfo/footer_index.html")
# ********************************* ***********************************************#
```
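The up/down handlers above reorder rows by swapping two adjacent order numbers and saving both rows. The stand-alone sketch below shows the same swap on plain dicts, with no Django and made-up data.

```python
# Sketch: the adjacent-swap reordering used by the up_/down_ handlers,
# applied to plain dicts instead of Django model rows.
rows = [{'nub': 1, 'name': 'a'}, {'nub': 2, 'name': 'b'}, {'nub': 3, 'name': 'c'}]

def move_up(rows, nub):
    if nub <= 1 or nub > len(rows):
        return
    cur = next(r for r in rows if r['nub'] == nub)
    prev = next(r for r in rows if r['nub'] == nub - 1)
    cur['nub'], prev['nub'] = prev['nub'], cur['nub']

move_up(rows, 3)
print(sorted(rows, key=lambda r: r['nub']))  # c moves up to second, b drops to third
```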
#### File: smonus/user/models.py
```python
from django.contrib.auth.models import AbstractUser, UserManager
from django.db import models
# Create your views here.
class UserProfile(AbstractUser):
objects = UserManager()
class Meta(AbstractUser.Meta):
swappable = 'AUTH_USER_MODEL'
    # overridden to return a custom string representation
def __unicode__(self):
return self.username
def __str__(self):
return "User Class"
class PersonalProfile(UserProfile):
Clear_Password = models.CharField(max_length=16, null=False, default='<PASSWORD>')
def __str__(self):
return "Personal Profile"
class RootProfile(UserProfile):
def __str__(self):
return "RootProfile Profile"
class SmAdminAction(models.Model):
"""权限标识表"""
action_id = models.IntegerField()
parent_id = models.IntegerField()
action_names = models.CharField(max_length=512, blank=True, null=True, verbose_name='权限标识的名字')
action_code = models.CharField(max_length=512, blank=True, null=True, verbose_name='权限标识码')
is_show = models.BooleanField(default=False, verbose_name='是否显示')
is_status = models.BooleanField(default=True, verbose_name='是否启用')
action_order = models.IntegerField(blank=True, null=True, default=99, verbose_name='排序')
action_note = models.CharField(max_length=1024, blank=True, null=True, verbose_name='权限标识说明')
def __str__(self):
return self.action_names
class Meta:
verbose_name = '权限标识'
verbose_name_plural = '权限标识'
ordering = ['-action_names']
class SmAdminUser(UserProfile):
"""用户表"""
user_status_choice = (
(0, '禁用'),
(1, '正常'),
(2, '离职'),
)
user_true_name = models.CharField(verbose_name='姓名', max_length=64, blank=True, null=True)
fenji = models.CharField(verbose_name='分机号', max_length=64, blank=True, null=True)
bianhao = models.CharField(verbose_name='编号', max_length=64, blank=True, null=False)
role_id = models.IntegerField(verbose_name='角色 与ck_role表的 role_id 关联', blank=True, null=False, default=0)
group_id = models.IntegerField(verbose_name='部门 获取部门名称', blank=True, null=False, default=0)
is_manage = models.BooleanField(verbose_name='是否部门主管 0不是 1 是', blank=True, null=True, default=False)
purview_type = models.BooleanField(verbose_name='是定义权限', blank=True, null=True, default=False)
user_plus = models.BooleanField(verbose_name='附加权限', blank=True, null=True, default=False)
enty_time = models.DateTimeField(verbose_name='入职时间', null=True, auto_now_add=True)
user_status = models.SmallIntegerField(verbose_name='用户状况 0禁用 1正常 2 离职', choices=user_status_choice, default=1)
is_hs = models.IntegerField()
one_two = models.IntegerField()
is_update = models.IntegerField(verbose_name='更新用户信息', blank=True, null=True, default=1)
logintype = models.CharField(verbose_name='', blank=True, null=True, max_length=64)
pid = models.IntegerField()
depth = models.CharField(verbose_name='', blank=True, null=True, max_length=64)
sproupid = models.IntegerField()
user_level = models.IntegerField()
website_id = models.CharField(verbose_name='为空取部门 不为空取website_id', max_length=64, blank=True, null=True)
class SmGroupType(models.Model):
    """Department table"""
    group_name = models.CharField(verbose_name='Department name', max_length=64, blank=True, null=True)
    top_id = models.IntegerField(verbose_name='Top-level ID', blank=True, null=True, default=0)
    p_id = models.IntegerField(verbose_name='Parent ID', blank=True, null=True, default=0)
    g_level = models.IntegerField(verbose_name='Level of the department in the hierarchy', blank=True, null=True, default=0)
    g_list = models.IntegerField(verbose_name='', blank=True, null=True, default=0)
    is_manage = models.IntegerField(verbose_name='', blank=True, null=True, default=0)
    group_orderby = models.IntegerField(verbose_name='Sort order', blank=True, null=True, default=0)
    is_status = models.BooleanField(verbose_name='Whether enabled', blank=True, null=True, default=True)
    is_show = models.BooleanField(verbose_name='Whether to display', blank=True, null=True, default=True)
    group_list = models.CharField(verbose_name='Website permissions owned by the department', max_length=64, blank=True, null=True)
    group_list2 = models.CharField(verbose_name='Sub-departments managed by the department; each department ID is wrapped in [ ]', max_length=1024, blank=True, null=True)
    action_list = models.CharField(verbose_name='Permission action string owned by the department', max_length=64, blank=True, null=True)
    customer_site_list = models.CharField(verbose_name='Customer source, mainly used for second-line website control', max_length=64, blank=True, null=True)
    max_order_zck = models.IntegerField(verbose_name='Maximum number of temporarily stored orders for the department', blank=True, null=True, default=5)
    inter_userName = models.CharField(verbose_name='Name of the SMS interface, currently unused', max_length=64, blank=True, null=True)
    sms_id = models.IntegerField(verbose_name='ID of the SMS interface', blank=True, null=True)
    mail_user = models.CharField(verbose_name='Mailbox name, used for mailbox synchronization', max_length=64, blank=True, null=True)
    mail_psw = models.CharField(verbose_name='Mailbox password, used by the mailbox synchronization program', max_length=64, blank=True, null=True)
    company_id = models.CharField(verbose_name='Company IDs managed by the department; only read when adding a user, the actual per-company permissions live in the user table', max_length=64,
                                  blank=True, null=True)
    work_company_id = models.IntegerField(verbose_name='Company the department belongs to', blank=True, null=True)
    zhekou = models.FloatField(verbose_name='Minimum discount', blank=True, null=True, default=0)
    max_customer = models.IntegerField(verbose_name='Maximum number of customers the department may hold, used for second-line customer count control', blank=True, null=True, default=0)
    is_leaf = models.IntegerField(verbose_name='Currently unused', blank=True, null=True, default=0)
class SmRole(models.Model):
    """Role (permission) table"""
    parent_id = models.IntegerField(verbose_name='Parent ID', blank=True, null=True)
    role_name = models.CharField(verbose_name='Role name', max_length=64, blank=True, null=True)
    action_list = models.CharField(verbose_name='String of permission action identifiers', max_length=64, blank=True, null=True)
    web_list = models.CharField(verbose_name='', max_length=64, blank=True, null=True)
    role_describe = models.CharField(verbose_name='Role description', max_length=256, blank=True, null=True)
    role_orderby = models.IntegerField(verbose_name='Sort order', blank=True, null=True, default=0)
    action_code = models.CharField(verbose_name='Role code; IDs can change, so the code is more flexible', max_length=64, blank=True, null=True)
class SmMenu(models.Model):
"""Menu"""
    url = models.CharField(verbose_name='Link', max_length=64, blank=True, null=True)
path = models.CharField(verbose_name='Path', max_length=64, blank=True, null=True)
component = models.CharField(verbose_name='component', max_length=64, blank=True, null=True)
    name = models.CharField(verbose_name='Name', max_length=64, blank=True, null=True)
    iconCls = models.CharField(verbose_name='Icon', max_length=64, blank=True, null=True)
keepAlive = models.BooleanField(verbose_name='', blank=True, null=True, default=None)
requireAuth = models.BooleanField(verbose_name='', blank=True, null=True, default=None)
parentId = models.IntegerField(verbose_name='', blank=True, null=True)
enabled = models.BooleanField(verbose_name='', blank=True, null=True, default=True)
``` |
{
"source": "jiajianglong/pytorch",
"score": 2
} |
#### File: sharded_tensor/ops/test_embedding.py
```python
import sys
import torch
import torch.distributed as dist
from torch.distributed.shard import (
shard_parameter,
)
from torch.testing._internal.common_distributed import (
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
run_tests,
)
from torch.testing._internal.distributed.shard.sharded_tensor import (
TEST_GPU_NUM,
ShardedTensorTestBase,
with_comms,
)
from torch.testing._internal.distributed.shard.sharded_tensor._test_ops_common import (
generate_chunk_sharding_specs_for_test,
generate_local_weight_sharding_params_for_test,
)
if TEST_WITH_DEV_DBG_ASAN:
print(
"Skip dev-asan as torch + multiprocessing spawn have known issues",
file=sys.stderr,
)
sys.exit(0)
class TestShardedEmbedding(ShardedTensorTestBase):
def _run_sharded_embedding(
self,
spec,
input_size,
num_embeddings,
embedding_dim,
sharded_dim=None,
max_norm=None,
norm_type=2.0,
padding_idx=None,
):
# Use same seed.
torch.manual_seed(0)
local_embedding = torch.nn.Embedding(
num_embeddings,
embedding_dim,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
).cuda(self.rank)
sharded_embedding = torch.nn.Embedding(
num_embeddings,
embedding_dim,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
# Copy the weights from local embedding
sharded_embedding.weight = torch.nn.Parameter(
local_embedding.weight.detach().clone()
)
# Shard the parameter.
shard_parameter(sharded_embedding, "weight", spec)
# Run sharded computation
torch.manual_seed(self.rank) # inputs different on each rank
inp = torch.randint(0, num_embeddings, tuple(input_size)).cuda(self.rank)
sharded_output = sharded_embedding(inp)
# If max_norm is set, we need to ensure that the renorm has been applied across
# inputs from all ranks.
if max_norm is not None:
gathered_inputs = [torch.zeros_like(inp) for _ in range(TEST_GPU_NUM)]
dist.all_gather(gathered_inputs, inp)
unique_inp = torch.unique(torch.cat(gathered_inputs))
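            # Running the replicated local embedding over the union of inputs gathered from
            # all ranks applies the same in-place max_norm renorm to its weight, so the
            # weight comparison below checks against an equivalently renormalized reference.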
local_embedding(unique_inp)
# Run local computation
local_output = local_embedding(inp)
        # Compare the local weight and the sharded one to ensure the renorm
        # behaved as expected.
if max_norm is not None:
sharded_weight = sharded_embedding.weight.local_shards()[0].tensor
(start_pos, chunk_size) = generate_local_weight_sharding_params_for_test(
local_embedding.weight, sharded_dim, TEST_GPU_NUM, spec, self.rank
)
local_weight_narrowed = local_embedding.weight.narrow(
sharded_dim, start_pos, chunk_size
)
self.assertEqual(local_weight_narrowed, sharded_weight)
# Verify
self.assertEqual(local_output, sharded_output)
        # Validate the torch.nn.functional.embedding version as well.
local_output = torch.nn.functional.embedding(
inp,
local_embedding.weight,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
sharded_output = torch.nn.functional.embedding(
inp,
sharded_embedding.weight,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
self.assertEqual(local_output, sharded_output)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_embedding_colwise(self):
for spec in generate_chunk_sharding_specs_for_test(1):
self._run_sharded_embedding(spec, [5, 4], 17, 12)
self._run_sharded_embedding(spec, [6, 7, 6], 21, 11)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 23, 13)
self._run_sharded_embedding(spec, [8, 6, 5, 4, 7], 23, 16)
self._run_sharded_embedding(spec, [4], 15, 14)
self._run_sharded_embedding(spec, [34], 15, 14, padding_idx=10)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 23, 13, padding_idx=12)
self._run_sharded_embedding(
spec, [4, 5, 6], 23, 13, max_norm=2.5, sharded_dim=1
)
self._run_sharded_embedding(
spec, [12, 7, 16], 23, 13, max_norm=2.5, sharded_dim=1
)
self._run_sharded_embedding(
spec, [8, 16, 20], 12, 12, max_norm=1.25, norm_type=1.0, sharded_dim=1
)
self._run_sharded_embedding(spec, [30], 15, 14, max_norm=2.0, sharded_dim=1)
@with_comms(init_rpc=False)
@skip_if_lt_x_gpu(TEST_GPU_NUM)
@requires_nccl()
def test_sharded_embedding_rowwise(self):
for spec in generate_chunk_sharding_specs_for_test(0):
# Test even split.
self._run_sharded_embedding(spec, [5, 12], 16, 22)
self._run_sharded_embedding(spec, [5, 4], 32, 12)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11)
self._run_sharded_embedding(
spec, [5, 12], 16, 22, max_norm=2.5, sharded_dim=0
)
self._run_sharded_embedding(spec, [6, 7, 6], 64, 11, padding_idx=30)
self._run_sharded_embedding(
spec, [6, 5, 3], 26, 11, max_norm=2.0, sharded_dim=0
)
# Test uneven split.
self._run_sharded_embedding(spec, [8, 6, 5, 4], 19, 11)
self._run_sharded_embedding(spec, [6, 7, 6], 21, 11)
self._run_sharded_embedding(spec, [4], 21, 11)
self._run_sharded_embedding(spec, [8, 6, 5, 4], 21, 11, padding_idx=10)
self._run_sharded_embedding(
spec, [12, 16, 8], 27, 11, max_norm=2.0, sharded_dim=0
)
self._run_sharded_embedding(spec, [4], 14, 11, max_norm=2.5, sharded_dim=0)
if __name__ == "__main__":
run_tests()
```
#### File: jit/fixtures_srcs/generate_models.py
```python
import io
import logging
import sys
import zipfile
from pathlib import Path
from typing import Set
import torch
# Use a wildcard import so developers don't need to add imports here when they add tests for upgraders.
from test.jit.fixtures_srcs.fixtures_src import * # noqa: F403
from torch.jit.mobile import _load_for_lite_interpreter, _export_operator_list
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
"""
This file is used to generate models for testing operator changes. Please refer to
https://github.com/pytorch/rfcs/blob/master/RFC-0017-PyTorch-Operator-Versioning.md for more details.
A systematic workflow for changing an operator is needed to ensure
Backwards Compatibility (BC) / Forwards Compatibility (FC) for operator changes. For a BC-breaking operator change,
an upgrader is needed. Here is the flow to properly land a BC-breaking operator change.
1. Write an upgrader in the caffe2/torch/csrc/jit/operator_upgraders/upgraders_entry.cpp file. The softly enforced
naming format is <operator_name>_<operator_overload>_<start>_<end>. For example, the entry below means that
div.Tensor at versions 0 through 3 needs to be replaced by this upgrader.
```
/*
div_Tensor_0_3 is added for a change of operator div in pr xxxxxxx.
Create date: 12/02/2021
Expire date: 06/02/2022
*/
{"div_Tensor_0_3", R"SCRIPT(
def div_Tensor_0_3(self: Tensor, other: Tensor) -> Tensor:
if (self.is_floating_point() or other.is_floating_point()):
return self.true_divide(other)
return self.divide(other, rounding_mode='trunc')
)SCRIPT"},
```
2. In caffe2/torch/csrc/jit/operator_upgraders/version_map.h, add changes like below.
You will need to make sure that the entry is SORTED according to the version bump number.
```
{"div.Tensor",
{{4,
"div_Tensor_0_3",
"aten::div.Tensor(Tensor self, Tensor other) -> Tensor"}}},
```
3. After rebuilding PyTorch, run the following command, and it will auto-generate a change to
fbcode/caffe2/torch/csrc/jit/mobile/upgrader_mobile.cpp
```
python pytorch/tools/codegen/operator_versions/gen_mobile_upgraders.py
```
4. Generate the test to cover upgrader.
4.1 Switch to the commit before the operator change, and add a module in
`test/jit/fixtures_srcs/fixtures_src.py`. The reason for switching commits is that
an old model containing the operator before the change is needed to ensure the upgrader
works as expected. In `test/jit/fixtures_srcs/generate_models.py`, add the module and
its corresponding changed operator like the following
```
ALL_MODULES = {
TestVersionedDivTensorExampleV7(): "aten::div.Tensor",
}
```
This module should include the changed operator. If the operator isn't covered in the model,
the model export process in step 4.2 will fail.
4.2 Export the model to `test/jit/fixtures` by running
```
python /Users/chenlai/pytorch/test/jit/fixtures_src/generate_models.py
```
4.3 In `test/jit/test_save_load_for_op_version.py`, add a test to cover the old models and
ensure the result is equivalent between the current module and the old module + upgrader.
4.4 Save all changes from 4.1, 4.2 and 4.3, as well as the previous changes made in steps 1, 2 and 3.
Submit a PR.
"""
"""
A map of test modules and their corresponding changed operators
key: test module
value: changed operator
"""
ALL_MODULES = {
TestVersionedDivTensorExampleV7(): "aten::div.Tensor",
TestVersionedLinspaceV7(): "aten::linspace",
TestVersionedLinspaceOutV7(): "aten::linspace.out",
}
"""
Get the path to `test/jit/fixtures`, where all test models for operator changes
(upgrader/downgrader) are stored
"""
def get_fixtures_path() -> Path:
pytorch_dir = Path(__file__).resolve().parents[3]
fixtures_path = pytorch_dir / "test" / "jit" / "fixtures"
return fixtures_path
"""
Get the names of all models in `test/jit/fixtures`
"""
def get_all_models(model_directory_path: Path) -> Set[str]:
files_in_fixtures = model_directory_path.glob('**/*')
all_models_from_fixtures = [fixture.stem for fixture in files_in_fixtures if fixture.is_file()]
return set(all_models_from_fixtures)
"""
Check if a given model already exists in `test/jit/fixtures`
"""
def model_exist(model_file_name: str, all_models: Set[str]) -> bool:
return model_file_name in all_models
"""
Get the operator list given a module
"""
def get_operator_list(script_module: torch.jit.ScriptModule) -> Set[str]:
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
operator_list = _export_operator_list(mobile_module)
return operator_list
"""
Get the output model operator version, given a module
"""
def get_output_model_version(script_module: torch.nn.Module) -> int:
buffer = io.BytesIO()
torch.jit.save(script_module, buffer)
buffer.seek(0)
zipped_model = zipfile.ZipFile(buffer)
try:
version = int(zipped_model.read('archive/version').decode("utf-8"))
return version
except KeyError:
version = int(zipped_model.read('archive/.data/version').decode("utf-8"))
return version
"""
Loop through all test modules. If the corresponding model doesn't exist in
`test/jit/fixtures`, generate one. For the following reasons, a model won't be exported:
1. The test module doesn't cover the changed operator. For example, test_versioned_div_tensor_example_v4
is supposed to test the operator aten::div.Tensor. If the model doesn't include this operator, it will fail.
The error message includes the actual operator list from the model.
2. The output model version is not the same as the expected version. For example, test_versioned_div_tensor_example_v4
is used to test an operator change to aten::div.Tensor, and the operator version will be bumped to v5. This script is
supposed to run before the operator change (before the commit that makes the change). If the actual model version is v5,
this script is likely running on the commit that makes the change.
3. The model already exists in `test/jit/fixtures`.
"""
def generate_models(model_directory_path: Path):
all_models = get_all_models(model_directory_path)
for a_module, expect_operator in ALL_MODULES.items():
print(a_module, expect_operator)
# For example: TestVersionedDivTensorExampleV7
torch_module_name = type(a_module).__name__
        # The corresponding model name is: test_versioned_div_tensor_example_v7
model_name = ''.join([
'_' + char.lower() if char.isupper() else char for char in torch_module_name
]).lstrip('_')
# Some models may not compile anymore, so skip the ones
        # that already have a .ptl file for them.
logger.info(f"Processing {torch_module_name}")
if model_exist(model_name, all_models):
logger.info(f"Model {model_name} already exists, skipping")
continue
script_module = torch.jit.script(a_module)
actual_model_version = get_output_model_version(script_module)
current_operator_version = torch._C._get_max_operator_version()
if actual_model_version >= current_operator_version + 1:
logger.error(
f"Actual model version {actual_model_version} "
f"is equal or larger than {current_operator_version} + 1. "
f"Please run the script before the commit to change operator.")
continue
actual_operator_list = get_operator_list(script_module)
if expect_operator not in actual_operator_list:
logger.error(
f"The model includes operator: {actual_operator_list}, "
f"however it doesn't cover the operator {expect_operator}."
f"Please ensure the output model includes the tested operator.")
continue
export_model_path = str(model_directory_path / (str(model_name) + ".ptl"))
script_module._save_for_lite_interpreter(export_model_path)
logger.info(f"Generating model {model_name} and it's save to {export_model_path}")
def main() -> None:
model_directory_path = get_fixtures_path()
generate_models(model_directory_path)
if __name__ == '__main__':
main()
```
#### File: sharded_tensor/ops/init.py
```python
import torch
from torch.distributed.shard.sharded_tensor import (
sharded_op_impl,
)
def validate_param(param, param_name):
if param is None:
raise ValueError(f"param: {param_name} shouldn't be None!")
@sharded_op_impl(torch.nn.init.uniform_)
def uniform_(types, args=(), kwargs=None, pg=None):
r"""
Fills the Tensor in sharded_tensor.local_shards with values drawn from the uniform
distribution :math:`\mathcal{U}(a, b)`.
Args:
sharded_tensor: tensor sharded across devices
a: the lower bound of the uniform distribution
b: the upper bound of the uniform distribution
"""
validate_param(kwargs, "kwargs")
sharded_tensor = kwargs["tensor"]
validate_param(sharded_tensor, "sharded_tensor")
a = kwargs['a']
validate_param(a, "a")
b = kwargs['b']
validate_param(b, "b")
for shard in sharded_tensor.local_shards():
torch.nn.init.uniform_(shard.tensor, a=a, b=b)
return sharded_tensor
@sharded_op_impl(torch.nn.init.normal_)
def normal_(types, args=(), kwargs=None, pg=None):
r"""
Fills the Tensors in sharded_tensor.local_shards with values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
Args:
sharded_tensor: tensor sharded across devices
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
"""
validate_param(kwargs, "kwargs")
sharded_tensor = kwargs["tensor"]
validate_param(sharded_tensor, "sharded_tensor")
mean = kwargs['mean']
validate_param(mean, "mean")
std = kwargs['std']
validate_param(std, "std")
for shard in sharded_tensor.local_shards():
torch.nn.init.normal_(shard.tensor, mean=mean, std=std)
return sharded_tensor
@sharded_op_impl(torch.nn.init.kaiming_uniform_)
def kaiming_uniform_(types, args=(), kwargs=None, pg=None):
r"""
Fills the Tensors in sharded_tensor.local_shards with values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification` - <NAME>. et al. (2015), using a
uniform distribution. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-\text{bound}, \text{bound})` where
.. math::
\text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}
Also known as He initialization.
Args:
sharded_tensor: tensor sharded across devices
a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
"""
validate_param(kwargs, "kwargs")
sharded_tensor = kwargs["tensor"]
validate_param(sharded_tensor, "sharded_tensor")
a = kwargs['a']
validate_param(a, "a")
mode = kwargs['mode']
validate_param(mode, "mode")
nonlinearity = kwargs['nonlinearity']
validate_param(nonlinearity, "nonlinearity")
for shard in sharded_tensor.local_shards():
torch.nn.init.kaiming_uniform_(shard.tensor, a=a, mode=mode, nonlinearity=nonlinearity)
return sharded_tensor
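# Hedged usage sketch (the surrounding setup is assumed, not part of this file): once a
# parameter has been sharded into a ShardedTensor `st`, the handlers above are reached
# through the regular torch.nn.init entry points, e.g.
#
#   torch.nn.init.uniform_(tensor=st, a=0.0, b=1.0)
#   torch.nn.init.normal_(tensor=st, mean=0.0, std=0.02)
#   torch.nn.init.kaiming_uniform_(tensor=st, a=0.0, mode='fan_in', nonlinearity='leaky_relu')
#
# Each handler only touches the shards in st.local_shards(), i.e. the shards owned by the
# current rank.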
``` |
{
"source": "JiajianLu/homework",
"score": 3
} |
#### File: hw1/Code/Behavioral_Cloning.py
```python
import tensorflow as tf
import numpy as np
import pickle
import tf_util
import load_policy
def load_rollout(filename):
"""
Extract data related to rollout generated by expert policy.
:param filename - str, name of file.
    :return data - A tuple of arrays: observations and actions.
"""
with open(filename, 'rb') as f:
data = pickle.loads(f.read())
return data['observations'], data['actions']
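# Shape note (inferred from how main() indexes the data below): observations has shape
# (N, obs_dim) and actions has shape (N, 1, act_dim), which is why each action is
# unwrapped with data[1][i][0] before training.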
def tf_reset():
try:
sess.close()
except:
pass
tf.reset_default_graph()
return tf.Session()
'''
python Behavioral_Cloning.py expert_data/Humanoid-v2.pkl Humanoid-v2 --render \
--num_rollouts 20
'''
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('expert_data_file', type=str)
parser.add_argument('envname', type=str)
parser.add_argument('--render', action='store_true')
parser.add_argument("--max_timesteps", type=int)
parser.add_argument('--num_rollouts', type=int, default=20,
help='Number of expert roll outs')
args = parser.parse_args()
filename = args.expert_data_file
data = load_rollout(filename)
x = data[0]
y = []
for i in range(len(x)):
y.append(data[1][i][0])
y = np.array(y)
num_input_neuron = np.shape(x)[1]
num_output_neuron = np.shape(y)[1]
def create_model():
# create inputs
input_ph = tf.placeholder(dtype=tf.float32, shape=[None, num_input_neuron])
output_ph = tf.placeholder(dtype=tf.float32, shape=[None, num_output_neuron])
# create variables
W0 = tf.get_variable(name='W0', shape=[num_input_neuron, 128], initializer=tf.contrib.layers.xavier_initializer())
W1 = tf.get_variable(name='W1', shape=[128, 128], initializer=tf.contrib.layers.xavier_initializer())
W2 = tf.get_variable(name='W2', shape=[128, 128], initializer=tf.contrib.layers.xavier_initializer())
W3 = tf.get_variable(name='W3', shape=[128, num_output_neuron], initializer=tf.contrib.layers.xavier_initializer())
b0 = tf.get_variable(name='b0', shape=[128], initializer=tf.constant_initializer(0.))
b1 = tf.get_variable(name='b1', shape=[128], initializer=tf.constant_initializer(0.))
b2 = tf.get_variable(name='b2', shape=[128], initializer=tf.constant_initializer(0.))
b3 = tf.get_variable(name='b3', shape=[num_output_neuron], initializer=tf.constant_initializer(0.))
weights = [W0, W1, W2, W3]
biases = [b0, b1, b2, b3]
activations = [tf.nn.relu, tf.nn.relu, tf.nn.relu, None]
# create computation graph
layer = input_ph
for W, b, activation in zip(weights, biases, activations):
layer = tf.matmul(layer, W) + b
if activation is not None:
layer = activation(layer)
output_pred = layer
return input_ph, output_ph, output_pred
input_ph, output_ph, output_pred = create_model()
# create loss
mse = tf.reduce_mean(0.5 * tf.square(output_pred - output_ph))
# create optimizer
opt = tf.train.AdamOptimizer().minimize(mse)
# initialize variables
# create saver to save model variables
#saver = tf.train.Saver()
mean = []
# run training
    batch_size = 100  # number of samples passed per training step
    # number of passes over the entire training set
    #for epochs in range(20,250,30):
    epochs = 20
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
for training_step in range(len(x)//batch_size):
# get a random subset of the training data
indices = np.random.randint(low=0, high=len(x), size=batch_size)
input_batch = x[indices]
output_batch = y[indices]
# run the optimizer and get the mse
_, mse_run = sess.run([opt, mse], feed_dict={input_ph: input_batch, output_ph: output_batch})
# print the mse every so often
print('{0:04d} mse: {1:.3f}'.format(epoch, mse_run))
import gym
env = gym.make(args.envname)
max_steps = args.max_timesteps or env.spec.timestep_limit
returns = []
observations = []
actions = []
for i in range(args.num_rollouts):
print('iter', i)
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = sess.run(output_pred, feed_dict={input_ph: obs[None, :]})
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action) #r is reward
totalr += r #total reward
steps += 1
if args.render:
env.render()
if steps % 100 == 0:
print("%i/%i"%(steps, max_steps))
if steps >= max_steps:
break
returns.append(totalr)
print('returns', returns)
print('mean return', np.mean(returns))
print('std of return', np.std(returns))
if __name__ == '__main__':
main()
``` |