max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
flake8_eradicate.py | sobolevn/flake8-eradicate | 169 | 11173196 | import tokenize
from typing import Iterable, Iterator, List, Sequence, Tuple, Type
import pkg_resources
from eradicate import Eradicator
from flake8.options.manager import OptionManager
#: This is a name that we use to install this library:
pkg_name = 'flake8-eradicate'
#: We store the version number inside the `pyproject.toml`:
pkg_version = pkg_resources.get_distribution(pkg_name).version
#: Const for `stdin` mode of `flake8`:
STDIN = 'stdin'
class Checker(object):
"""Flake8 plugin to find commented out code."""
name = pkg_name
version = pkg_version
_error_template = 'E800 Found commented out code'
options = None
def __init__(
self,
tree, # that's the hack we use to trigger this check
file_tokens: List[tokenize.TokenInfo],
lines: Sequence[str],
) -> None:
"""
``flake8`` plugin constructor.
Arguments:
file_tokens: all tokens for this file.
lines: all file lines.
"""
self._file_tokens = file_tokens
self._lines = lines
self._options = {
'aggressive': self.options.eradicate_aggressive, # type: ignore
}
self._eradicator = Eradicator()
whitelist = self.options.eradicate_whitelist # type: ignore
whitelist_ext = self.options.eradicate_whitelist_extend # type: ignore
if whitelist_ext:
self._eradicator.update_whitelist(
whitelist_ext.split('#'),
extend_default=True,
)
elif whitelist:
self._eradicator.update_whitelist(
whitelist.split('#'),
extend_default=False,
)
@classmethod
def add_options(cls, parser: OptionManager) -> None:
"""
``flake8`` api method to register new plugin options.
See :class:`.Configuration` docs for detailed options reference.
Arguments:
parser: ``flake8`` option parser instance.
"""
parser.add_option(
'--eradicate-aggressive',
default=False,
help=(
'Enables aggressive mode for eradicate; '
'this may result in false positives'
),
action='store_true',
parse_from_config=True,
)
parser.add_option(
'--eradicate-whitelist',
default=False,
help=(
'String of "#" separated comment beginnings to whitelist '
'for eradicate. '
'Single parts are interpreted as regex. '
'OVERWRITING the default whitelist: {0}'
).format(Eradicator.DEFAULT_WHITELIST),
action='store',
parse_from_config=True,
)
parser.add_option(
'--eradicate-whitelist-extend',
default=False,
help=(
'String of "#" separated comment beginnings to whitelist '
'for eradicate. '
'Single parts are interpreted as regex. '
'Overwrites --eradicate-whitelist. '
'EXTENDING the default whitelist: {0} '
).format(Eradicator.DEFAULT_WHITELIST),
action='store',
parse_from_config=True,
)
@classmethod
def parse_options(cls, options) -> None:
"""Parses registered options for providing them to each visitor."""
cls.options = options
def run(self) -> Iterator[Tuple[int, int, str, Type['Checker']]]:
"""Runs on each step of flake8."""
for line_no in self._lines_with_commented_out_code():
yield line_no, 0, self._error_template, type(self)
def _lines_with_commented_out_code(self) -> Iterable[int]:
"""
Yield the physical line numbers that contain commented out code.
This check relies on the eradicate function to remove commented out
code from a physical line.
Physical lines might look like commented out code although they are
part of a multi-line docstring (e.g. a `# noqa: DAR201` comment to
suppress a flake8 warning about missing returns in the docstring).
To prevent this false positive, the file tokens are checked for a
comment first; the eradicate function is only invoked when the tokens
indicate that the file contains a comment.
"""
comment_in_file = any(
token.type == tokenize.COMMENT
for token in self._file_tokens
)
if comment_in_file:
for line_no, line in enumerate(self._lines):
filtered_source = ''.join(
self._eradicator.filter_commented_out_code(
line,
aggressive=self._options['aggressive'],
),
)
if line != filtered_source:
yield line_no + 1
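# A minimal sketch of the Eradicator call the checker relies on: the filter
# yields the physical line with commented-out code removed, so any difference
# from the input means the line would be reported as E800. Assumes the
# `eradicate` package is installed; the sample line is illustrative only.
if __name__ == '__main__':
    _eradicator = Eradicator()
    _line = '# x = compute_something(1, 2)\n'
    _filtered = ''.join(_eradicator.filter_commented_out_code(_line, aggressive=False))
    print('E800' if _filtered != _line else 'clean')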
|
models/modules/discriminators.py | NguyenHoangAn0511/gan-compression | 1,005 | 11173202 | import argparse
import functools
import numpy as np
from torch import nn
from torch.nn import functional as F
from models.modules.munit_architecture.munit_generator import Conv2dBlock
from models.modules.spade_architecture.normalization import get_nonspade_norm_layer
from models.networks import BaseNetwork
class MsImageDiscriminator(nn.Module):
def __init__(self, input_dim, opt):
super(MsImageDiscriminator, self).__init__()
self.n_layer = opt.n_layers_D
self.dim = opt.ndf
self.norm = 'none'
self.activ = 'lrelu'
self.num_scales = 3
self.pad_type = 'reflect'
self.input_dim = input_dim
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
self.cnns = nn.ModuleList()
for _ in range(self.num_scales):
self.cnns.append(self._make_net())
def _make_net(self):
dim = self.dim
cnn_x = []
cnn_x += [Conv2dBlock(self.input_dim, dim, 4, 2, 1, norm='none', activation=self.activ, pad_type=self.pad_type)]
for i in range(self.n_layer - 1):
cnn_x += [Conv2dBlock(dim, dim * 2, 4, 2, 1, norm=self.norm, activation=self.activ, pad_type=self.pad_type)]
dim *= 2
cnn_x += [nn.Conv2d(dim, 1, 1, 1, 0)]
cnn_x = nn.Sequential(*cnn_x)
return cnn_x
def forward(self, x):
outputs = []
for model in self.cnns:
outputs.append(model(x))
x = self.downsample(x)
return outputs
class NLayerDiscriminator(BaseNetwork):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [
nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
class PixelDiscriminator(BaseNetwork):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
# Defines the PatchGAN discriminator with the specified arguments.
class SPADENLayerDiscriminator(BaseNetwork):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
kw = 4
padw = int(np.ceil((kw - 1.0) / 2))
nf = opt.ndf
input_nc = self.compute_D_input_nc(opt)
norm_layer = get_nonspade_norm_layer(opt, opt.norm_D)
sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, False)]]
for n in range(1, opt.n_layers_D):
nf_prev = nf
nf = min(nf * 2, 512)
stride = 1 if n == opt.n_layers_D - 1 else 2
sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw,
stride=stride, padding=padw)),
nn.LeakyReLU(0.2, False)
]]
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
# We divide the layers into groups to extract intermediate layer outputs
for n in range(len(sequence)):
self.add_module('model' + str(n), nn.Sequential(*sequence[n]))
def compute_D_input_nc(self, opt):
input_nc = opt.semantic_nc + opt.output_nc
return input_nc
def forward(self, input):
results = [input]
for submodel in self.children():
intermediate_output = submodel(results[-1])
results.append(intermediate_output)
return results[1:]
class MultiscaleDiscriminator(nn.Module):
@staticmethod
def modify_commandline_options(parser, is_train):
assert isinstance(parser, argparse.ArgumentParser)
parser.add_argument('--num_D', type=int, default=2,
help='number of discriminators to be used in multiscale')
parser.add_argument('--norm_D', type=str, default='spectralinstance',
help='instance normalization or batch normalization')
opt, _ = parser.parse_known_args()
# define properties of each discriminator of the multiscale discriminator
subnetD = SPADENLayerDiscriminator
subnetD.modify_commandline_options(parser, is_train)
parser.set_defaults(n_layers_D=4)
return parser
def __init__(self, opt):
super().__init__()
self.opt = opt
for i in range(opt.num_D):
subnetD = SPADENLayerDiscriminator(opt)
self.add_module('discriminator_%d' % i, subnetD)
def downsample(self, input):
return F.avg_pool2d(input, kernel_size=3,
stride=2, padding=[1, 1],
count_include_pad=False)
# Returns list of lists of discriminator outputs.
# The final result is of size opt.num_D x opt.n_layers_D
def forward(self, input):
result = []
for name, D in self.named_children():
out = D(input)
result.append(out)
input = self.downsample(input)
return result
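# A minimal smoke-test sketch for NLayerDiscriminator above; it assumes
# BaseNetwork behaves like a plain nn.Module with a no-argument constructor
# and only checks the shape of the patch prediction map.
if __name__ == '__main__':
    import torch
    netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
    fake = torch.randn(1, 3, 256, 256)   # one fake RGB image
    pred = netD(fake)                    # 1-channel PatchGAN output
    print(pred.shape)                    # expected: torch.Size([1, 1, 30, 30])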
|
nndet/evaluator/__init__.py | joeranbosma/nnDetection | 242 | 11173213 | from nndet.evaluator.abstract import AbstractMetric, AbstractEvaluator, DetectionMetric
|
interleaving/interleaving_method.py | mpkato/interleaving | 107 | 11173255 | <filename>interleaving/interleaving_method.py
from collections import defaultdict
import json
import numpy as np
class InterleavingMethod(object):
'''
Abstract class for interleaving methods
Args:
lists: lists of document IDs
max_length: the maximum length of resultant interleaving.
If this is None (default), it is set to the minimum length
of the given lists.
sample_num: If this is None (default), an interleaved ranking is
generated every time when `interleave` is called.
Otherwise, `sample_num` rankings are sampled in the
initialization, one of which is returned when `interleave`
is called.
'''
def __init__(self, lists, max_length=None, sample_num=None):
'''
lists: lists of document IDs
max_length: the maximum length of resultant interleaving.
If this is None (default), it is set to the minimum length
of the given lists.
sample_num: If this is None (default), an interleaved ranking is
generated every time when `interleave` is called.
Otherwise, `sample_num` rankings are sampled in the
initialization, one of which is returned when `interleave`
is called.
'''
self.max_length = max_length
if self.max_length is None:
self.max_length = min([len(l) for l in lists])
self.sample_num = sample_num
self.lists = lists
if self.sample_num:
self._sample_rankings()
def _sample_rankings(self):
'''
Sample `sample_num` rankings
'''
distribution = defaultdict(int)
for i in range(self.sample_num):
ranking = self._sample(self.max_length, self.lists)
distribution[ranking] += 1.0 / self.sample_num
self._rankings, self._probabilities = zip(*distribution.items())
def _sample(self, max_length, lists):
'''
Sample a ranking
max_length: the maximum length of resultant interleaving
lists: lists of document IDs
Return an instance of Ranking
'''
raise NotImplementedError()
def dump_rankings(self, file):
'''
Dump the sampled rankings into a file
'''
result = {}
for rid, ranking in enumerate(self._rankings):
result[hash(ranking)] = {
'probability': self._probabilities[rid],
'ranking': ranking.dumpd(),
}
with open(file, 'w') as f:
json.dump(result, f, indent=' ')
def interleave(self):
'''
Return an instance of Ranking
'''
if self.sample_num:
i = np.argmax(np.random.multinomial(1, self._probabilities))
return self._rankings[i]
else:
return self._sample(self.max_length, self.lists)
@property
def ranking_distribution(self):
'''
Return a list of Ranking and its probability
if rankings are sampled in the initialization.
Otherwise, return None.
'''
if self.sample_num:
return zip(self._rankings, self._probabilities)
else:
return None
@classmethod
def evaluate(cls, ranking, clicks):
'''
Args:
ranking: an instance of Ranking generated by Balanced.interleave
clicks: a list of indices clicked by a user
Returns:
a list of pairs of ranker indices in which element (i, j)
indicates i won j.
e.g. a result [(1, 0), (2, 1), (2, 0)] indicates
ranker 1 won ranker 0, and ranker 2 won ranker 0 as well as ranker 1.
'''
scores = cls.compute_scores(ranking, clicks)
result = []
for i in range(len(scores)):
for j in range(i + 1, len(scores)):
if scores[i] > scores[j]:
result.append((i, j))
elif scores[i] < scores[j]:
result.append((j, i))
else: # scores[i] == scores[j]
pass
return result
@classmethod
def compute_scores(cls, ranking, clicks):
'''
ranking: an instance of Ranking
clicks: a list of indices clicked by a user
Return a list of scores of each ranker.
'''
raise NotImplementedError()
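# A usage sketch for a concrete subclass; `Balanced` (mentioned in the
# `evaluate` docstring above) is assumed here to accept the same constructor
# arguments as this base class:
#
#   method = Balanced([['d1', 'd2', 'd3'], ['d3', 'd2', 'd4']], sample_num=100)
#   ranking = method.interleave()        # one sampled Ranking
#   clicks = [0, 2]                      # ranks the user clicked
#   Balanced.evaluate(ranking, clicks)   # e.g. [(1, 0)] -> ranker 1 beat ranker 0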
|
python/run_format.py | SuperBigHui/stm32-bootloader | 681 | 11173261 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import subprocess
from common import collect_source_files
def run_format(source, style="file", executable="clang-format"):
# Normalize executable path
executable = os.path.normpath(executable)
for s in source:
clang_format_args = [executable]
clang_format_args.append("-style={}".format(style))
clang_format_args.append("-i")
clang_format_args.append(os.path.basename(s))
print("Formatting {}".format(s))
output = subprocess.check_output(clang_format_args,
cwd=os.path.dirname(s))
if len(output) > 0:
print(output)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="C/C++ formatting check using clang-format")
# Style
parser.add_argument("-s", "--style",
default="file",
help="Coding style, pass-through to clang-format's "
"-style=<string>, (default is '%(default)s').")
# Specify executable for clang-format
parser.add_argument("-e", "--executable",
default="clang-format",
help="Path of clang-format (if it's not added to PATH")
args = parser.parse_args()
file_list = collect_source_files()
run_format(file_list, style=args.style, executable=args.executable)
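# Example invocation, assuming clang-format is on PATH and common.py provides
# collect_source_files() for this repository:
#
#   python run_format.py --style file --executable clang-format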
|
static/paddlex/cv/nets/detection/ops.py | cheneyveron/PaddleX | 3,655 | 11173370 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numbers import Integral
import math
import six
import paddle
from paddle import fluid
def bbox_overlaps(boxes_1, boxes_2):
'''
bbox_overlaps
boxes_1: x1, y1, x2, y2
boxes_2: x1, y1, x2, y2
'''
assert boxes_1.shape[1] == 4 and boxes_2.shape[1] == 4
num_1 = boxes_1.shape[0]
num_2 = boxes_2.shape[0]
x1_1 = boxes_1[:, 0:1]
y1_1 = boxes_1[:, 1:2]
x2_1 = boxes_1[:, 2:3]
y2_1 = boxes_1[:, 3:4]
area_1 = (x2_1 - x1_1 + 1) * (y2_1 - y1_1 + 1)
x1_2 = boxes_2[:, 0].transpose()
y1_2 = boxes_2[:, 1].transpose()
x2_2 = boxes_2[:, 2].transpose()
y2_2 = boxes_2[:, 3].transpose()
area_2 = (x2_2 - x1_2 + 1) * (y2_2 - y1_2 + 1)
xx1 = np.maximum(x1_1, x1_2)
yy1 = np.maximum(y1_1, y1_2)
xx2 = np.minimum(x2_1, x2_2)
yy2 = np.minimum(y2_1, y2_2)
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (area_1 + area_2 - inter)
return ovr
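# A quick numeric check of bbox_overlaps with the inclusive "+ 1" convention
# used above: two 10x10 boxes offset by 5 pixels share a 5x5 patch.
#
#   >>> bbox_overlaps(np.array([[0., 0., 9., 9.]]),
#   ...               np.array([[5., 5., 14., 14.]]))
#   array([[0.14285714]])    # 25 / (100 + 100 - 25)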
def box_to_delta(ex_boxes, gt_boxes, weights):
""" box_to_delta """
ex_w = ex_boxes[:, 2] - ex_boxes[:, 0] + 1
ex_h = ex_boxes[:, 3] - ex_boxes[:, 1] + 1
ex_ctr_x = ex_boxes[:, 0] + 0.5 * ex_w
ex_ctr_y = ex_boxes[:, 1] + 0.5 * ex_h
gt_w = gt_boxes[:, 2] - gt_boxes[:, 0] + 1
gt_h = gt_boxes[:, 3] - gt_boxes[:, 1] + 1
gt_ctr_x = gt_boxes[:, 0] + 0.5 * gt_w
gt_ctr_y = gt_boxes[:, 1] + 0.5 * gt_h
dx = (gt_ctr_x - ex_ctr_x) / ex_w / weights[0]
dy = (gt_ctr_y - ex_ctr_y) / ex_h / weights[1]
dw = (np.log(gt_w / ex_w)) / weights[2]
dh = (np.log(gt_h / ex_h)) / weights[3]
targets = np.vstack([dx, dy, dw, dh]).transpose()
return targets
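# box_to_delta encodes a ground-truth box relative to a proposal as the usual
# (dx, dy, dw, dh) regression target; with unit weights and identical boxes
# every delta is zero:
#
#   >>> box = np.array([[0., 0., 9., 9.]])
#   >>> box_to_delta(ex_boxes=box, gt_boxes=box, weights=[1., 1., 1., 1.])
#   array([[0., 0., 0., 0.]])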
def DropBlock(input, block_size, keep_prob, is_test):
if is_test:
return input
def CalculateGamma(input, block_size, keep_prob):
input_shape = fluid.layers.shape(input)
feat_shape_tmp = fluid.layers.slice(input_shape, [0], [3], [4])
feat_shape_tmp = fluid.layers.cast(feat_shape_tmp, dtype="float32")
feat_shape_t = fluid.layers.reshape(feat_shape_tmp, [1, 1, 1, 1])
feat_area = fluid.layers.pow(feat_shape_t, factor=2)
block_shape_t = fluid.layers.fill_constant(
shape=[1, 1, 1, 1], value=block_size, dtype='float32')
block_area = fluid.layers.pow(block_shape_t, factor=2)
useful_shape_t = feat_shape_t - block_shape_t + 1
useful_area = fluid.layers.pow(useful_shape_t, factor=2)
upper_t = feat_area * (1 - keep_prob)
bottom_t = block_area * useful_area
output = upper_t / bottom_t
return output
gamma = CalculateGamma(input, block_size=block_size, keep_prob=keep_prob)
input_shape = fluid.layers.shape(input)
p = fluid.layers.expand_as(gamma, input)
input_shape_tmp = fluid.layers.cast(input_shape, dtype="int64")
random_matrix = fluid.layers.uniform_random(
input_shape_tmp, dtype='float32', min=0.0, max=1.0)
one_zero_m = fluid.layers.less_than(random_matrix, p)
one_zero_m.stop_gradient = True
one_zero_m = fluid.layers.cast(one_zero_m, dtype="float32")
mask_flag = fluid.layers.pool2d(
one_zero_m,
pool_size=block_size,
pool_type='max',
pool_stride=1,
pool_padding=block_size // 2)
mask = 1.0 - mask_flag
elem_numel = fluid.layers.reduce_prod(input_shape)
elem_numel_m = fluid.layers.cast(elem_numel, dtype="float32")
elem_numel_m.stop_gradient = True
elem_sum = fluid.layers.reduce_sum(mask)
elem_sum_m = fluid.layers.cast(elem_sum, dtype="float32")
elem_sum_m.stop_gradient = True
output = input * mask * elem_numel_m / elem_sum_m
return output
class MultiClassNMS(object):
def __init__(self,
score_threshold=.05,
nms_top_k=-1,
keep_top_k=100,
nms_threshold=.5,
normalized=False,
nms_eta=1.0,
background_label=0):
super(MultiClassNMS, self).__init__()
self.score_threshold = score_threshold
self.nms_top_k = nms_top_k
self.keep_top_k = keep_top_k
self.nms_threshold = nms_threshold
self.normalized = normalized
self.nms_eta = nms_eta
self.background_label = background_label
def __call__(self, bboxes, scores):
return fluid.layers.multiclass_nms(
bboxes=bboxes,
scores=scores,
score_threshold=self.score_threshold,
nms_top_k=self.nms_top_k,
keep_top_k=self.keep_top_k,
normalized=self.normalized,
nms_threshold=self.nms_threshold,
nms_eta=self.nms_eta,
background_label=self.background_label)
class MatrixNMS(object):
def __init__(self,
score_threshold=.05,
post_threshold=.05,
nms_top_k=-1,
keep_top_k=100,
use_gaussian=False,
gaussian_sigma=2.,
normalized=False,
background_label=0):
super(MatrixNMS, self).__init__()
self.score_threshold = score_threshold
self.post_threshold = post_threshold
self.nms_top_k = nms_top_k
self.keep_top_k = keep_top_k
self.normalized = normalized
self.use_gaussian = use_gaussian
self.gaussian_sigma = gaussian_sigma
self.background_label = background_label
def __call__(self, bboxes, scores):
return paddle.fluid.layers.matrix_nms(
bboxes=bboxes,
scores=scores,
score_threshold=self.score_threshold,
post_threshold=self.post_threshold,
nms_top_k=self.nms_top_k,
keep_top_k=self.keep_top_k,
normalized=self.normalized,
use_gaussian=self.use_gaussian,
gaussian_sigma=self.gaussian_sigma,
background_label=self.background_label)
class MultiClassSoftNMS(object):
def __init__(
self,
score_threshold=0.01,
keep_top_k=300,
softnms_sigma=0.5,
normalized=False,
background_label=0, ):
super(MultiClassSoftNMS, self).__init__()
self.score_threshold = score_threshold
self.keep_top_k = keep_top_k
self.softnms_sigma = softnms_sigma
self.normalized = normalized
self.background_label = background_label
def __call__(self, bboxes, scores):
def create_tmp_var(program, name, dtype, shape, lod_level):
return program.current_block().create_var(
name=name, dtype=dtype, shape=shape, lod_level=lod_level)
def _soft_nms_for_cls(dets, sigma, thres):
"""soft_nms_for_cls"""
dets_final = []
while len(dets) > 0:
maxpos = np.argmax(dets[:, 0])
dets_final.append(dets[maxpos].copy())
ts, tx1, ty1, tx2, ty2 = dets[maxpos]
scores = dets[:, 0]
# force remove bbox at maxpos
scores[maxpos] = -1
x1 = dets[:, 1]
y1 = dets[:, 2]
x2 = dets[:, 3]
y2 = dets[:, 4]
eta = 0 if self.normalized else 1
areas = (x2 - x1 + eta) * (y2 - y1 + eta)
xx1 = np.maximum(tx1, x1)
yy1 = np.maximum(ty1, y1)
xx2 = np.minimum(tx2, x2)
yy2 = np.minimum(ty2, y2)
w = np.maximum(0.0, xx2 - xx1 + eta)
h = np.maximum(0.0, yy2 - yy1 + eta)
inter = w * h
ovr = inter / (areas + areas[maxpos] - inter)
weight = np.exp(-(ovr * ovr) / sigma)
scores = scores * weight
idx_keep = np.where(scores >= thres)
dets[:, 0] = scores
dets = dets[idx_keep]
dets_final = np.array(dets_final).reshape(-1, 5)
return dets_final
def _soft_nms(bboxes, scores):
class_nums = scores.shape[-1]
softnms_thres = self.score_threshold
softnms_sigma = self.softnms_sigma
keep_top_k = self.keep_top_k
cls_boxes = [[] for _ in range(class_nums)]
cls_ids = [[] for _ in range(class_nums)]
start_idx = 1 if self.background_label == 0 else 0
for j in range(start_idx, class_nums):
inds = np.where(scores[:, j] >= softnms_thres)[0]
scores_j = scores[inds, j]
rois_j = bboxes[inds, j, :] if len(
bboxes.shape) > 2 else bboxes[inds, :]
dets_j = np.hstack((scores_j[:, np.newaxis], rois_j)).astype(
np.float32, copy=False)
cls_rank = np.argsort(-dets_j[:, 0])
dets_j = dets_j[cls_rank]
cls_boxes[j] = _soft_nms_for_cls(
dets_j, sigma=softnms_sigma, thres=softnms_thres)
cls_ids[j] = np.array([j] * cls_boxes[j].shape[0]).reshape(-1,
1)
cls_boxes = np.vstack(cls_boxes[start_idx:])
cls_ids = np.vstack(cls_ids[start_idx:])
pred_result = np.hstack([cls_ids, cls_boxes])
# Limit to max_per_image detections **over all classes**
image_scores = cls_boxes[:, 0]
if len(image_scores) > keep_top_k:
image_thresh = np.sort(image_scores)[-keep_top_k]
keep = np.where(cls_boxes[:, 0] >= image_thresh)[0]
pred_result = pred_result[keep, :]
return pred_result
def _batch_softnms(bboxes, scores):
batch_offsets = bboxes.lod()
bboxes = np.array(bboxes)
scores = np.array(scores)
out_offsets = [0]
pred_res = []
if len(batch_offsets) > 0:
batch_offset = batch_offsets[0]
for i in range(len(batch_offset) - 1):
s, e = batch_offset[i], batch_offset[i + 1]
pred = _soft_nms(bboxes[s:e], scores[s:e])
out_offsets.append(pred.shape[0] + out_offsets[-1])
pred_res.append(pred)
else:
assert len(bboxes.shape) == 3
assert len(scores.shape) == 3
for i in range(bboxes.shape[0]):
pred = _soft_nms(bboxes[i], scores[i])
out_offsets.append(pred.shape[0] + out_offsets[-1])
pred_res.append(pred)
res = fluid.LoDTensor()
res.set_lod([out_offsets])
if len(pred_res) == 0:
pred_res = np.array([[1]], dtype=np.float32)
res.set(np.vstack(pred_res).astype(np.float32), fluid.CPUPlace())
return res
pred_result = create_tmp_var(
fluid.default_main_program(),
name='softnms_pred_result',
dtype='float32',
shape=[-1, 6],
lod_level=1)
fluid.layers.py_func(
func=_batch_softnms, x=[bboxes, scores], out=pred_result)
return pred_result
class MultiClassDiouNMS(object):
def __init__(
self,
score_threshold=0.05,
keep_top_k=100,
nms_threshold=0.5,
normalized=False,
background_label=0, ):
super(MultiClassDiouNMS, self).__init__()
self.score_threshold = score_threshold
self.nms_threshold = nms_threshold
self.keep_top_k = keep_top_k
self.normalized = normalized
self.background_label = background_label
def __call__(self, bboxes, scores):
def create_tmp_var(program, name, dtype, shape, lod_level):
return program.current_block().create_var(
name=name, dtype=dtype, shape=shape, lod_level=lod_level)
def _calc_diou_term(dets1, dets2):
eps = 1.e-10
eta = 0 if self.normalized else 1
x1, y1, x2, y2 = dets1[0], dets1[1], dets1[2], dets1[3]
x1g, y1g, x2g, y2g = dets2[0], dets2[1], dets2[2], dets2[3]
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
w = x2 - x1 + eta
h = y2 - y1 + eta
cxg = (x1g + x2g) / 2
cyg = (y1g + y2g) / 2
wg = x2g - x1g + eta
hg = y2g - y1g + eta
x2 = np.maximum(x1, x2)
y2 = np.maximum(y1, y2)
# A or B
xc1 = np.minimum(x1, x1g)
yc1 = np.minimum(y1, y1g)
xc2 = np.maximum(x2, x2g)
yc2 = np.maximum(y2, y2g)
# DIOU term
dist_intersection = (cx - cxg)**2 + (cy - cyg)**2
dist_union = (xc2 - xc1)**2 + (yc2 - yc1)**2
diou_term = (dist_intersection + eps) / (dist_union + eps)
return diou_term
def _diou_nms_for_cls(dets, thres):
"""_diou_nms_for_cls"""
scores = dets[:, 0]
x1 = dets[:, 1]
y1 = dets[:, 2]
x2 = dets[:, 3]
y2 = dets[:, 4]
eta = 0 if self.normalized else 1
areas = (x2 - x1 + eta) * (y2 - y1 + eta)
dt_num = dets.shape[0]
order = np.array(range(dt_num))
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + eta)
h = np.maximum(0.0, yy2 - yy1 + eta)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
diou_term = _calc_diou_term([x1[i], y1[i], x2[i], y2[i]], [
x1[order[1:]], y1[order[1:]], x2[order[1:]], y2[order[1:]]
])
inds = np.where(ovr - diou_term <= thres)[0]
order = order[inds + 1]
dets_final = dets[keep]
return dets_final
def _diou_nms(bboxes, scores):
bboxes = np.array(bboxes)
scores = np.array(scores)
class_nums = scores.shape[-1]
score_threshold = self.score_threshold
nms_threshold = self.nms_threshold
keep_top_k = self.keep_top_k
cls_boxes = [[] for _ in range(class_nums)]
cls_ids = [[] for _ in range(class_nums)]
start_idx = 1 if self.background_label == 0 else 0
for j in range(start_idx, class_nums):
inds = np.where(scores[:, j] >= score_threshold)[0]
scores_j = scores[inds, j]
rois_j = bboxes[inds, j, :]
dets_j = np.hstack((scores_j[:, np.newaxis], rois_j)).astype(
np.float32, copy=False)
cls_rank = np.argsort(-dets_j[:, 0])
dets_j = dets_j[cls_rank]
cls_boxes[j] = _diou_nms_for_cls(dets_j, thres=nms_threshold)
cls_ids[j] = np.array([j] * cls_boxes[j].shape[0]).reshape(-1,
1)
cls_boxes = np.vstack(cls_boxes[start_idx:])
cls_ids = np.vstack(cls_ids[start_idx:])
pred_result = np.hstack([cls_ids, cls_boxes]).astype(np.float32)
# Limit to max_per_image detections **over all classes**
image_scores = cls_boxes[:, 0]
if len(image_scores) > keep_top_k:
image_thresh = np.sort(image_scores)[-keep_top_k]
keep = np.where(cls_boxes[:, 0] >= image_thresh)[0]
pred_result = pred_result[keep, :]
res = fluid.LoDTensor()
res.set_lod([[0, pred_result.shape[0]]])
if pred_result.shape[0] == 0:
pred_result = np.array([[1]], dtype=np.float32)
res.set(pred_result, fluid.CPUPlace())
return res
pred_result = create_tmp_var(
fluid.default_main_program(),
name='diou_nms_pred_result',
dtype='float32',
shape=[-1, 6],
lod_level=0)
fluid.layers.py_func(
func=_diou_nms, x=[bboxes, scores], out=pred_result)
return pred_result
class LibraBBoxAssigner(object):
def __init__(self,
batch_size_per_im=512,
fg_fraction=.25,
fg_thresh=.5,
bg_thresh_hi=.5,
bg_thresh_lo=0.,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
num_classes=81,
shuffle_before_sample=True,
is_cls_agnostic=False,
num_bins=3):
super(LibraBBoxAssigner, self).__init__()
self.batch_size_per_im = batch_size_per_im
self.fg_fraction = fg_fraction
self.fg_thresh = fg_thresh
self.bg_thresh_hi = bg_thresh_hi
self.bg_thresh_lo = bg_thresh_lo
self.bbox_reg_weights = bbox_reg_weights
self.class_nums = num_classes
self.use_random = shuffle_before_sample
self.is_cls_agnostic = is_cls_agnostic
self.num_bins = num_bins
def __call__(
self,
rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info, ):
return self.generate_proposal_label_libra(
rpn_rois=rpn_rois,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_boxes=gt_boxes,
im_info=im_info,
batch_size_per_im=self.batch_size_per_im,
fg_fraction=self.fg_fraction,
fg_thresh=self.fg_thresh,
bg_thresh_hi=self.bg_thresh_hi,
bg_thresh_lo=self.bg_thresh_lo,
bbox_reg_weights=self.bbox_reg_weights,
class_nums=self.class_nums,
use_random=self.use_random,
is_cls_agnostic=self.is_cls_agnostic,
is_cascade_rcnn=False)
def generate_proposal_label_libra(
self, rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
batch_size_per_im, fg_fraction, fg_thresh, bg_thresh_hi,
bg_thresh_lo, bbox_reg_weights, class_nums, use_random,
is_cls_agnostic, is_cascade_rcnn):
num_bins = self.num_bins
def create_tmp_var(program, name, dtype, shape, lod_level=None):
return program.current_block().create_var(
name=name, dtype=dtype, shape=shape, lod_level=lod_level)
def _sample_pos(max_overlaps, max_classes, pos_inds, num_expected):
if len(pos_inds) <= num_expected:
return pos_inds
else:
unique_gt_inds = np.unique(max_classes[pos_inds])
num_gts = len(unique_gt_inds)
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
sampled_inds = []
for i in unique_gt_inds:
inds = np.nonzero(max_classes == i)[0]
before_len = len(inds)
inds = list(set(inds) & set(pos_inds))
after_len = len(inds)
if len(inds) > num_per_gt:
inds = np.random.choice(
inds, size=num_per_gt, replace=False)
sampled_inds.extend(list(inds)) # combine as a new sampler
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(
list(set(pos_inds) - set(sampled_inds)))
assert len(sampled_inds)+len(extra_inds) == len(pos_inds), \
"sum of sampled_inds({}) and extra_inds({}) length must be equal with pos_inds({})!".format(
len(sampled_inds), len(extra_inds), len(pos_inds))
if len(extra_inds) > num_extra:
extra_inds = np.random.choice(
extra_inds, size=num_extra, replace=False)
sampled_inds.extend(extra_inds.tolist())
elif len(sampled_inds) > num_expected:
sampled_inds = np.random.choice(
sampled_inds, size=num_expected, replace=False)
return sampled_inds
def sample_via_interval(max_overlaps, full_set, num_expected,
floor_thr, num_bins, bg_thresh_hi):
max_iou = max_overlaps.max()
iou_interval = (max_iou - floor_thr) / num_bins
per_num_expected = int(num_expected / num_bins)
sampled_inds = []
for i in range(num_bins):
start_iou = floor_thr + i * iou_interval
end_iou = floor_thr + (i + 1) * iou_interval
tmp_set = set(
np.where(
np.logical_and(max_overlaps >= start_iou, max_overlaps
< end_iou))[0])
tmp_inds = list(tmp_set & full_set)
if len(tmp_inds) > per_num_expected:
tmp_sampled_set = np.random.choice(
tmp_inds, size=per_num_expected, replace=False)
else:
tmp_sampled_set = np.array(tmp_inds, dtype=np.int)
sampled_inds.append(tmp_sampled_set)
sampled_inds = np.concatenate(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(full_set - set(sampled_inds)))
assert len(sampled_inds)+len(extra_inds) == len(full_set), \
"sum of sampled_inds({}) and extra_inds({}) length must be equal with full_set({})!".format(
len(sampled_inds), len(extra_inds), len(full_set))
if len(extra_inds) > num_extra:
extra_inds = np.random.choice(
extra_inds, num_extra, replace=False)
sampled_inds = np.concatenate([sampled_inds, extra_inds])
return sampled_inds
def _sample_neg(max_overlaps,
max_classes,
neg_inds,
num_expected,
floor_thr=-1,
floor_fraction=0,
num_bins=3,
bg_thresh_hi=0.5):
if len(neg_inds) <= num_expected:
return neg_inds
else:
# balance sampling for negative samples
neg_set = set(neg_inds)
if floor_thr > 0:
floor_set = set(
np.where(
np.logical_and(max_overlaps >= 0, max_overlaps <
floor_thr))[0])
iou_sampling_set = set(
np.where(max_overlaps >= floor_thr)[0])
elif floor_thr == 0:
floor_set = set(np.where(max_overlaps == 0)[0])
iou_sampling_set = set(
np.where(max_overlaps > floor_thr)[0])
else:
floor_set = set()
iou_sampling_set = set(
np.where(max_overlaps > floor_thr)[0])
floor_thr = 0
floor_neg_inds = list(floor_set & neg_set)
iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
num_expected_iou_sampling = int(num_expected *
(1 - floor_fraction))
if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
if num_bins >= 2:
iou_sampled_inds = sample_via_interval(
max_overlaps,
set(iou_sampling_neg_inds),
num_expected_iou_sampling, floor_thr, num_bins,
bg_thresh_hi)
else:
iou_sampled_inds = np.random.choice(
iou_sampling_neg_inds,
size=num_expected_iou_sampling,
replace=False)
else:
iou_sampled_inds = np.array(
iou_sampling_neg_inds, dtype=np.int)
num_expected_floor = num_expected - len(iou_sampled_inds)
if len(floor_neg_inds) > num_expected_floor:
sampled_floor_inds = np.random.choice(
floor_neg_inds, size=num_expected_floor, replace=False)
else:
sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int)
sampled_inds = np.concatenate(
(sampled_floor_inds, iou_sampled_inds))
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(neg_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = np.random.choice(
extra_inds, size=num_extra, replace=False)
sampled_inds = np.concatenate((sampled_inds, extra_inds))
return sampled_inds
def _sample_rois(rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
batch_size_per_im, fg_fraction, fg_thresh,
bg_thresh_hi, bg_thresh_lo, bbox_reg_weights,
class_nums, use_random, is_cls_agnostic,
is_cascade_rcnn):
rois_per_image = int(batch_size_per_im)
fg_rois_per_im = int(np.round(fg_fraction * rois_per_image))
# Roidb
im_scale = im_info[2]
inv_im_scale = 1. / im_scale
rpn_rois = rpn_rois * inv_im_scale
if is_cascade_rcnn:
rpn_rois = rpn_rois[gt_boxes.shape[0]:, :]
boxes = np.vstack([gt_boxes, rpn_rois])
gt_overlaps = np.zeros((boxes.shape[0], class_nums))
box_to_gt_ind_map = np.zeros((boxes.shape[0]), dtype=np.int32)
if len(gt_boxes) > 0:
proposal_to_gt_overlaps = bbox_overlaps(boxes, gt_boxes)
overlaps_argmax = proposal_to_gt_overlaps.argmax(axis=1)
overlaps_max = proposal_to_gt_overlaps.max(axis=1)
# Boxes which with non-zero overlap with gt boxes
overlapped_boxes_ind = np.where(overlaps_max > 0)[0]
overlapped_boxes_gt_classes = gt_classes[overlaps_argmax[
overlapped_boxes_ind]]
for idx in range(len(overlapped_boxes_ind)):
gt_overlaps[overlapped_boxes_ind[
idx], overlapped_boxes_gt_classes[idx]] = overlaps_max[
overlapped_boxes_ind[idx]]
box_to_gt_ind_map[overlapped_boxes_ind[
idx]] = overlaps_argmax[overlapped_boxes_ind[idx]]
crowd_ind = np.where(is_crowd)[0]
gt_overlaps[crowd_ind] = -1
max_overlaps = gt_overlaps.max(axis=1)
max_classes = gt_overlaps.argmax(axis=1)
# Cascade RCNN Decode Filter
if is_cascade_rcnn:
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws > 0) & (hs > 0))[0]
boxes = boxes[keep]
max_overlaps = max_overlaps[keep]
fg_inds = np.where(max_overlaps >= fg_thresh)[0]
bg_inds = np.where((max_overlaps < bg_thresh_hi) & (
max_overlaps >= bg_thresh_lo))[0]
fg_rois_per_this_image = fg_inds.shape[0]
bg_rois_per_this_image = bg_inds.shape[0]
else:
# Foreground
fg_inds = np.where(max_overlaps >= fg_thresh)[0]
fg_rois_per_this_image = np.minimum(fg_rois_per_im,
fg_inds.shape[0])
# Sample foreground if there are too many
if fg_inds.shape[0] > fg_rois_per_this_image:
if use_random:
fg_inds = _sample_pos(max_overlaps, max_classes,
fg_inds, fg_rois_per_this_image)
fg_inds = fg_inds[:fg_rois_per_this_image]
# Background
bg_inds = np.where((max_overlaps < bg_thresh_hi) & (
max_overlaps >= bg_thresh_lo))[0]
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.shape[0])
assert bg_rois_per_this_image >= 0, "bg_rois_per_this_image must be >= 0 but got {}".format(
bg_rois_per_this_image)
# Sample background if there are too many
if bg_inds.shape[0] > bg_rois_per_this_image:
if use_random:
# libra neg sample
bg_inds = _sample_neg(
max_overlaps,
max_classes,
bg_inds,
bg_rois_per_this_image,
num_bins=num_bins,
bg_thresh_hi=bg_thresh_hi)
bg_inds = bg_inds[:bg_rois_per_this_image]
keep_inds = np.append(fg_inds, bg_inds)
sampled_labels = max_classes[keep_inds] # N x 1
sampled_labels[fg_rois_per_this_image:] = 0
sampled_boxes = boxes[keep_inds] # N x 324
sampled_gts = gt_boxes[box_to_gt_ind_map[keep_inds]]
sampled_gts[fg_rois_per_this_image:, :] = gt_boxes[0]
bbox_label_targets = _compute_targets(
sampled_boxes, sampled_gts, sampled_labels, bbox_reg_weights)
bbox_targets, bbox_inside_weights = _expand_bbox_targets(
bbox_label_targets, class_nums, is_cls_agnostic)
bbox_outside_weights = np.array(
bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype)
# Scale rois
sampled_rois = sampled_boxes * im_scale
# Faster RCNN blobs
frcn_blobs = dict(
rois=sampled_rois,
labels_int32=sampled_labels,
bbox_targets=bbox_targets,
bbox_inside_weights=bbox_inside_weights,
bbox_outside_weights=bbox_outside_weights)
return frcn_blobs
def _compute_targets(roi_boxes, gt_boxes, labels, bbox_reg_weights):
assert roi_boxes.shape[0] == gt_boxes.shape[0]
assert roi_boxes.shape[1] == 4
assert gt_boxes.shape[1] == 4
targets = np.zeros(roi_boxes.shape)
bbox_reg_weights = np.asarray(bbox_reg_weights)
targets = box_to_delta(
ex_boxes=roi_boxes,
gt_boxes=gt_boxes,
weights=bbox_reg_weights)
return np.hstack([labels[:, np.newaxis], targets]).astype(
np.float32, copy=False)
def _expand_bbox_targets(bbox_targets_input, class_nums,
is_cls_agnostic):
class_labels = bbox_targets_input[:, 0]
fg_inds = np.where(class_labels > 0)[0]
bbox_targets = np.zeros((class_labels.shape[0], 4 * class_nums
if not is_cls_agnostic else 4 * 2))
bbox_inside_weights = np.zeros(bbox_targets.shape)
for ind in fg_inds:
class_label = int(class_labels[
ind]) if not is_cls_agnostic else 1
start_ind = class_label * 4
end_ind = class_label * 4 + 4
bbox_targets[ind, start_ind:end_ind] = bbox_targets_input[ind,
1:]
bbox_inside_weights[ind, start_ind:end_ind] = (1.0, 1.0, 1.0,
1.0)
return bbox_targets, bbox_inside_weights
def generate_func(
rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info, ):
rpn_rois_lod = rpn_rois.lod()[0]
gt_classes_lod = gt_classes.lod()[0]
# convert
rpn_rois = np.array(rpn_rois)
gt_classes = np.array(gt_classes)
is_crowd = np.array(is_crowd)
gt_boxes = np.array(gt_boxes)
im_info = np.array(im_info)
rois = []
labels_int32 = []
bbox_targets = []
bbox_inside_weights = []
bbox_outside_weights = []
lod = [0]
for idx in range(len(rpn_rois_lod) - 1):
rois_si = rpn_rois_lod[idx]
rois_ei = rpn_rois_lod[idx + 1]
gt_si = gt_classes_lod[idx]
gt_ei = gt_classes_lod[idx + 1]
frcn_blobs = _sample_rois(
rpn_rois[rois_si:rois_ei], gt_classes[gt_si:gt_ei],
is_crowd[gt_si:gt_ei], gt_boxes[gt_si:gt_ei], im_info[idx],
batch_size_per_im, fg_fraction, fg_thresh, bg_thresh_hi,
bg_thresh_lo, bbox_reg_weights, class_nums, use_random,
is_cls_agnostic, is_cascade_rcnn)
lod.append(frcn_blobs['rois'].shape[0] + lod[-1])
rois.append(frcn_blobs['rois'])
labels_int32.append(frcn_blobs['labels_int32'].reshape(-1, 1))
bbox_targets.append(frcn_blobs['bbox_targets'])
bbox_inside_weights.append(frcn_blobs['bbox_inside_weights'])
bbox_outside_weights.append(frcn_blobs['bbox_outside_weights'])
rois = np.vstack(rois)
labels_int32 = np.vstack(labels_int32)
bbox_targets = np.vstack(bbox_targets)
bbox_inside_weights = np.vstack(bbox_inside_weights)
bbox_outside_weights = np.vstack(bbox_outside_weights)
# create lod-tensor for return
# notice that the func create_lod_tensor does not work well here
ret_rois = fluid.LoDTensor()
ret_rois.set_lod([lod])
ret_rois.set(rois.astype("float32"), fluid.CPUPlace())
ret_labels_int32 = fluid.LoDTensor()
ret_labels_int32.set_lod([lod])
ret_labels_int32.set(
labels_int32.astype("int32"), fluid.CPUPlace())
ret_bbox_targets = fluid.LoDTensor()
ret_bbox_targets.set_lod([lod])
ret_bbox_targets.set(
bbox_targets.astype("float32"), fluid.CPUPlace())
ret_bbox_inside_weights = fluid.LoDTensor()
ret_bbox_inside_weights.set_lod([lod])
ret_bbox_inside_weights.set(
bbox_inside_weights.astype("float32"), fluid.CPUPlace())
ret_bbox_outside_weights = fluid.LoDTensor()
ret_bbox_outside_weights.set_lod([lod])
ret_bbox_outside_weights.set(
bbox_outside_weights.astype("float32"), fluid.CPUPlace())
return ret_rois, ret_labels_int32, ret_bbox_targets, ret_bbox_inside_weights, ret_bbox_outside_weights
rois = create_tmp_var(
fluid.default_main_program(),
name=None, #'rois',
dtype='float32',
shape=[-1, 4], )
bbox_inside_weights = create_tmp_var(
fluid.default_main_program(),
name=None, #'bbox_inside_weights',
dtype='float32',
shape=[-1, 8 if self.is_cls_agnostic else self.class_nums * 4], )
bbox_outside_weights = create_tmp_var(
fluid.default_main_program(),
name=None, #'bbox_outside_weights',
dtype='float32',
shape=[-1, 8 if self.is_cls_agnostic else self.class_nums * 4], )
bbox_targets = create_tmp_var(
fluid.default_main_program(),
name=None, #'bbox_targets',
dtype='float32',
shape=[-1, 8 if self.is_cls_agnostic else self.class_nums * 4], )
labels_int32 = create_tmp_var(
fluid.default_main_program(),
name=None, #'labels_int32',
dtype='int32',
shape=[-1, 1], )
outs = [
rois, labels_int32, bbox_targets, bbox_inside_weights,
bbox_outside_weights
]
fluid.layers.py_func(
func=generate_func,
x=[rpn_rois, gt_classes, is_crowd, gt_boxes, im_info],
out=outs)
return outs
class BBoxAssigner(object):
def __init__(self,
batch_size_per_im=512,
fg_fraction=.25,
fg_thresh=.5,
bg_thresh_hi=.5,
bg_thresh_lo=0.,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
num_classes=81,
shuffle_before_sample=True):
super(BBoxAssigner, self).__init__()
self.batch_size_per_im = batch_size_per_im
self.fg_fraction = fg_fraction
self.fg_thresh = fg_thresh
self.bg_thresh_hi = bg_thresh_hi
self.bg_thresh_lo = bg_thresh_lo
self.bbox_reg_weights = bbox_reg_weights
self.class_nums = num_classes
self.use_random = shuffle_before_sample
def __call__(self, rpn_rois, gt_classes, is_crowd, gt_boxes, im_info):
return fluid.layers.generate_proposal_labels(
rpn_rois=rpn_rois,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_boxes=gt_boxes,
im_info=im_info,
batch_size_per_im=self.batch_size_per_im,
fg_fraction=self.fg_fraction,
fg_thresh=self.fg_thresh,
bg_thresh_hi=self.bg_thresh_hi,
bg_thresh_lo=self.bg_thresh_lo,
bbox_reg_weights=self.bbox_reg_weights,
class_nums=self.class_nums,
use_random=self.use_random)
|
tests/unit/confidant/authnz/rbac_test.py | chadwhitacre/confidant | 1,820 | 11173421 | <gh_stars>1000+
from confidant.app import create_app
from confidant.authnz import rbac
def test_default_acl(mocker):
mocker.patch('confidant.settings.USE_AUTH', True)
app = create_app()
with app.test_request_context('/fake'):
g_mock = mocker.patch('confidant.authnz.g')
# Test for user type is user
g_mock.user_type = 'user'
assert rbac.default_acl(resource_type='service') is True
assert rbac.default_acl(resource_type='certificate') is False
# Test for user type is service, but not an allowed resource type
g_mock.user_type = 'service'
g_mock.username = 'test-service'
assert rbac.default_acl(
resource_type='service',
action='update',
resource_id='test-service'
) is False
# Test for user type is service, and an allowed resource, with metadata
# action, but service name doesn't match
g_mock.username = 'bad-service'
assert rbac.default_acl(
resource_type='service',
action='metadata',
resource_id='test-service',
) is False
# Test for user type is service, and an allowed resource, with metadata
# action
g_mock.username = 'test-service'
assert rbac.default_acl(
resource_type='service',
action='metadata',
resource_id='test-service',
) is True
# Test for user type is service, and an allowed resource, with get
# action
assert rbac.default_acl(
resource_type='service',
action='get',
resource_id='test-service',
) is True
# Test for user type is service, with certificate resource and get
# action, with a CN that doesn't match the name pattern
assert rbac.default_acl(
resource_type='certificate',
action='get',
# missing domain name...
resource_id='test-service',
kwargs={'ca': 'development'},
) is False
# Test for user type is service, with certificate resource and get
# action, with a valid CN
assert rbac.default_acl(
resource_type='certificate',
action='get',
resource_id='test-service.example.com',
kwargs={'ca': 'development'},
) is True
# Test for user type is service, with certificate resource and get
# action, with a valid CN, and valid SAN values
assert rbac.default_acl(
resource_type='certificate',
action='get',
resource_id='test-service.example.com',
kwargs={
'ca': 'development',
'san': [
'test-service.internal.example.com',
'test-service.external.example.com',
],
},
) is True
# Test for user type is service, with certificate resource and get
# action, with an invalid CN
assert rbac.default_acl(
resource_type='certificate',
action='get',
resource_id='bad-service.example.com',
kwargs={'ca': 'development'},
) is False
# Test for user type is service, with certificate resource and get
# action, with a valid CN, but an invalid SAN
assert rbac.default_acl(
resource_type='certificate',
action='get',
resource_id='test-service.example.com',
kwargs={
'ca': 'development',
'san': ['bad-service.example.com'],
},
) is False
# Test for user type is service, with certificate resource and get
# action, with a valid CN, but a mix of valid and invalid SAN values
assert rbac.default_acl(
resource_type='certificate',
action='get',
resource_id='test-service.example.com',
kwargs={
'ca': 'development',
'san': [
'bad-service.example.com',
'test-service.example.com',
],
},
) is False
# Test for user type is service, and an allowed resource, with
# disallowed fake action
assert rbac.default_acl(resource_type='service', action='fake') is False
# Test for bad user type
g_mock.user_type = 'badtype'
assert rbac.default_acl(resource_type='service', action='get') is False
def test_no_acl():
app = create_app()
with app.test_request_context('/fake'):
assert rbac.no_acl(resource_type='service', action='update') is True
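# These tests drive rbac.default_acl and rbac.no_acl directly through a Flask
# test request context; the `mocker` fixture is provided by the pytest-mock
# plugin. A typical local run would be something like:
#
#   pytest tests/unit/confidant/authnz/rbac_test.py -v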
|
test/low_rank_data.py | iskandr/matrix-completion | 840 | 11173440 | <gh_stars>100-1000
import numpy as np
def create_rank_k_dataset(
n_rows=5,
n_cols=5,
k=3,
fraction_missing=0.1,
symmetric=False,
random_seed=0):
np.random.seed(random_seed)
x = np.random.randn(n_rows, k)
y = np.random.randn(k, n_cols)
XY = np.dot(x, y)
if symmetric:
assert n_rows == n_cols
XY = 0.5 * XY + 0.5 * XY.T
missing_raw_values = np.random.uniform(0, 1, (n_rows, n_cols))
missing_mask = missing_raw_values < fraction_missing
XY_incomplete = XY.copy()
# fill missing entries with NaN
XY_incomplete[missing_mask] = np.nan
return XY, XY_incomplete, missing_mask
# create some default data to be shared across tests
XY, XY_incomplete, missing_mask = create_rank_k_dataset(
n_rows=500,
n_cols=10,
k=3,
fraction_missing=0.25)
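# A small sketch of the generator above: a 20x8 rank-2 matrix with roughly 30%
# of its entries replaced by NaN.
if __name__ == "__main__":
    full, incomplete, mask = create_rank_k_dataset(
        n_rows=20, n_cols=8, k=2, fraction_missing=0.3, random_seed=1)
    print(full.shape, int(mask.sum()), "entries masked")
    assert np.isnan(incomplete[mask]).all()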
|
src/torchphysics/problem/samplers/plot_samplers.py | uwe-iben/torchphysics | 203 | 11173462 | """Samplers for plotting and animations of model outputs.
"""
import numpy as np
import torch
from ..domains.domain import BoundaryDomain
from ..domains import Interval
from .sampler_base import PointSampler
from .grid_samplers import GridSampler
from ..spaces.points import Points
class PlotSampler(PointSampler):
"""A sampler that creates a point grid over a domain
(including the boundary). Only used for plotting.
Parameters
----------
plot_domain : Domain
The domain over which the model/function should later be plotted.
Will create points inside and at the boundary of the domain.
n_points : int, optional
The number of points that should be used for the plot.
density : float, optional
The desired density of the created points.
device : str or torch device, optional
The device of the model/function.
data_for_other_variables : dict or torchphysics.spaces.Points, optional
Since the plot will only evaluate the model at a specific point,
the values for all other variables are needed.
E.g. {'t' : 1, 'D' : [1,2], ...}
Notes
-----
Can also be used to create your own PlotSampler. By either changing the
used sampler after the initialization (self.sampler=...) or by creating
your own class that inherits from PlotSampler.
"""
def __init__(self, plot_domain, n_points=None, density=None, device='cpu',
data_for_other_variables={}):
assert not isinstance(plot_domain, BoundaryDomain), \
"Plotting for boundaries is not implemented"""
super().__init__(n_points=n_points, density=density)
self.device = device
self.set_data_for_other_variables(data_for_other_variables)
self.domain = plot_domain(**self.data_for_other_variables.coordinates)
self.sampler = self.construct_sampler()
def set_data_for_other_variables(self, data_for_other_variables):
"""Sets the data for all other variables. Essentially copies the
values into a correct tensor.
"""
if isinstance(data_for_other_variables, Points):
self.data_for_other_variables = data_for_other_variables
elif len(data_for_other_variables) == 0:
self.data_for_other_variables = Points.empty()
else:
torch_data = self.transform_data_to_torch(data_for_other_variables)
self.data_for_other_variables = Points.from_coordinates(torch_data)
def transform_data_to_torch(self, data_for_other_variables):
"""Transforms all inputs to a torch.tensor.
"""
torch_data = {}
for vname, data in data_for_other_variables.items():
# transform data to torch
if not isinstance(data, torch.Tensor):
data = torch.tensor(data)
# check correct shape of data
if len(data.shape) == 0:
torch_data[vname] = data.reshape(-1, 1)
elif len(data.shape) == 1:
torch_data[vname] = data.reshape(-1, len(data))
else:
torch_data[vname] = data
return torch_data
def construct_sampler(self):
"""Construct the sampler which is used in the plot.
Can be overwritten to include your own points structure.
"""
if self.n_points:
return self._plot_sampler_with_n_points()
else: # density is used
return self._plot_sampler_with_density()
def _plot_sampler_with_n_points(self):
if isinstance(self.domain, Interval):
return self._construct_sampler_for_Interval(self.domain, n=self.n_points)
inner_n_points = self._compute_inner_number_of_points()
inner_sampler = GridSampler(self.domain, inner_n_points)
outer_sampler = GridSampler(self.domain.boundary, len(self)-inner_n_points)
return inner_sampler + outer_sampler
def _plot_sampler_with_density(self):
if isinstance(self.domain, Interval):
return self._construct_sampler_for_Interval(self.domain, d=self.density)
inner_sampler = GridSampler(self.domain, density=self.density)
outer_sampler = GridSampler(self.domain.boundary, density=self.density)
return inner_sampler + outer_sampler
def _construct_sampler_for_Interval(self, domain, n=None, d=None):
left_sampler = GridSampler(domain.boundary_left, 1)
inner_sampler = GridSampler(domain, n_points=n, density=d)
right_sampler = GridSampler(domain.boundary_right, 1)
return left_sampler + inner_sampler + right_sampler
def _compute_inner_number_of_points(self):
n_root = int(np.ceil(len(self)**(1/self.domain.dim)))
n_root -= 2
return n_root**self.domain.dim
def sample_points(self):
"""Creates the points for the plot. Does not need additional arguments, since
they were set in the init.
"""
plot_points = self.sampler.sample_points()
self.set_length(len(plot_points))
other_data = self._repeat_params(self.data_for_other_variables, len(self))
plot_points = plot_points.join(other_data)
self._set_device_and_grad_true(plot_points)
return plot_points
def _set_device_and_grad_true(self, plot_points):
plot_points._t.requires_grad = True
plot_points._t.to(self.device)
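# A construction sketch for PlotSampler; `my_domain` stands in for any
# torchphysics domain object (e.g. an Interval or a 2D shape) and 't' for a
# variable the plot domain does not depend on:
#
#   sampler = PlotSampler(plot_domain=my_domain, n_points=600, device='cpu',
#                         data_for_other_variables={'t': 1.0})
#   points = sampler.sample_points()   # grid over the domain and its boundary,
#                                      # with 't' repeated for every point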
class AnimationSampler(PlotSampler):
"""A sampler that creates points for an animation.
Parameters
----------
plot_domain : Domain
The domain over which the model/function should later be plotted.
Will create points inside and at the boundary of the domain.
animation_domain : Interval
The variable over which the animation should be created, e.g. a
time-interval.
frame_number : int
The number of frames that should be used for the animation. This
equals the number of points that will be created in the
animation_domain.
n_points : int, optional
The number of points that should be used for the plot domain.
density : float, optional
The desired density of the created points in the plot domain.
device : str or torch device, optional
The device of the model/function.
data_for_other_variables : dict, optional
Since the animation will only evaluate the model at specific points,
the values for all other variables are needed.
E.g. {'D' : [1,2], ...}
"""
def __init__(self, plot_domain, animation_domain, frame_number,
n_points=None, density=None, device='cpu',
data_for_other_variables={}):
super().__init__(plot_domain=plot_domain, n_points=n_points,
density=density, device=device,
data_for_other_variables=data_for_other_variables)
self._check_correct_types(animation_domain)
self.frame_number = frame_number
self.animation_domain = animation_domain(**data_for_other_variables)
self.animation_sampler = \
self._construct_sampler_for_Interval(self.animation_domain, n=frame_number)
def _check_correct_types(self, animation_domain):
assert isinstance(animation_domain, Interval), \
"The animation domain has to be a interval"
@property
def plot_domain_constant(self):
"""Returns if the plot domain is a constant domain or changes
with respect to other variables.
"""
dependent = any(vname in self.domain.necessary_variables \
for vname in self.animation_domain.space)
return not dependent
@property
def animation_key(self):
"""Retunrs the name of the animation variable
"""
ani_key = list(self.animation_domain.space.keys())[0]
return ani_key
def sample_animation_points(self):
"""Samples points out of the animation domain, e.g. time interval.
"""
ani_points = self.animation_sampler.sample_points()
num_of_points = len(ani_points)
self.frame_number = num_of_points
self._set_device_and_grad_true(ani_points)
return ani_points
def sample_plot_domain_points(self, animation_points):
"""Samples points in the plot domain, e.g. space.
"""
if self.plot_domain_constant:
plot_points = self.sampler.sample_points()
num_of_points = len(plot_points)
self.set_length(num_of_points)
self._set_device_and_grad_true(plot_points)
return plot_points
return self._sample_params_dependent(animation_points)
def _sample_params_dependent(self, params):
output_list = []
for i in range(self.frame_number):
ith_ani_points = params[i, ]
plot_points = self.sampler.sample_points(ith_ani_points)
plot_points._t.to(self.device)
output_list.append(plot_points)
return output_list
|
examples/structured/setup/setup.py | flupke/py2app | 193 | 11173471 | <gh_stars>100-1000
"""
Script for building the example.
Usage:
python setup.py py2app
"""
from setuptools import setup
setup(
app = ['../python/myapp.py'],
data_files = ['../data'],
setup_requires=["py2app"],
)
|
metadrive/component/vehicle_module/mini_map.py | liuzuxin/metadrive | 125 | 11173497 | from panda3d.core import Vec3
from metadrive.component.vehicle_module.base_camera import BaseCamera
from metadrive.constants import CamMask
from metadrive.engine.engine_utils import get_global_config, engine_initialized
class MiniMap(BaseCamera):
CAM_MASK = CamMask.MiniMap
display_region_size = [0., 1 / 3, BaseCamera.display_bottom, BaseCamera.display_top]
def __init__(self):
assert engine_initialized(), "You should initialize engine before adding camera to vehicle"
config = get_global_config()["vehicle_config"]["mini_map"]
self.BUFFER_W, self.BUFFER_H = config[0], config[1]
height = config[2]
super(MiniMap, self).__init__()
cam = self.get_cam()
lens = self.get_lens()
cam.setZ(height)
cam.lookAt(Vec3(0, 20, 0))
lens.setAspectRatio(2.0)
|
models/sklearn_OCSVM_explicit_model.py | chihyunsong/oc-nn | 203 | 11173546 | <reponame>chihyunsong/oc-nn
import numpy as np
import pandas as pd
from sklearn import utils
import matplotlib
from scipy.optimize import minimize
dataPath = './data/'
# Create empty dicts to hold the decision scores for each dataset.
df_usps_scores = {}
df_fake_news_scores = {}
df_spam_vs_ham_scores = {}
df_cifar_10_scores = {}
nu = 0.04
def relu(x):
y = x
y[y < 0] = 0
return y
def dRelu(x):
y = x
y[x <= 0] = 0
y[x > 0] = np.ones((len(x[x > 0]),))
return y
def svmScore(X, w,g):
return g(X.dot(w))
def ocsvm_obj(theta, X, nu, D,g,dG):
    # theta packs the weight vector w (first D entries) and the margin offset r (last entry).
    w = theta[:D]
    r = theta[D:]
    # One-class SVM objective: 0.5*||w||^2 + 1/nu * mean(max(0, r - g(X.w))) - r
    term1 = 0.5 * np.sum(w**2)
    term2 = 1/nu * np.mean(relu(r - svmScore(X, w, g)))
    term3 = -r
    return term1 + term2 + term3
def ocsvm_grad(theta, X, nu, D,g,dG):
w = theta[:D]
r = theta[D:]
deriv = dRelu(r - svmScore(X, w,g))
term1 = np.append(w, 0)
term2 = np.append(1/nu * np.mean(deriv[:,np.newaxis] * (-X), axis = 0),
1/nu * np.mean(deriv))
term3 = np.append(0*w, -1)
grad = term1 + term2 + term3
return grad
def sklearn_OCSVM_explicit_linear(data_train,data_test):
X = data_train
D = X.shape[1]
g = lambda x : x
dG = lambda x : np.ones(x.shape)
np.random.seed(42);
theta0 = np.random.normal(0, 1, D + 1);
from scipy.optimize import check_grad
print('Gradient error: %s' % check_grad(ocsvm_obj, ocsvm_grad, theta0, X, nu, D, g,dG));
res = minimize(ocsvm_obj, theta0, method = 'L-BFGS-B', jac = ocsvm_grad, args = (X, nu, D, g, dG),
options = {'gtol': 1e-8, 'disp': True, 'maxiter' : 50000, 'maxfun' : 10000});
pos_decisionScore = svmScore(data_train, res.x[0:-1],g) - res.x[-1];
neg_decisionScore = svmScore(data_test, res.x[0:-1],g) - res.x[-1];
return [pos_decisionScore,neg_decisionScore]
def sklearn_OCSVM_explicit_sigmoid(data_train,data_test):
X = data_train
D = X.shape[1]
g = lambda x : 1/(1 + np.exp(-x))
dG = lambda x : 1/(1 + np.exp(-x)) * 1/(1 + np.exp(+x))
np.random.seed(42);
theta0 = np.random.normal(0, 1, D + 1);
print("Inside sklearn_OCSVM_explicit_sigmoid.....")
from scipy.optimize import check_grad
print('Gradient error: %s' % check_grad(ocsvm_obj, ocsvm_grad, theta0, X, nu, D, g, dG));
res = minimize(ocsvm_obj, theta0, method = 'L-BFGS-B', jac = ocsvm_grad, args = (X, nu, D, g, dG),
options = {'gtol': 1e-8, 'disp': True, 'maxiter' : 50000, 'maxfun' : 10000});
pos_decisionScore = svmScore(data_train, res.x[0:-1],g) - res.x[-1];
neg_decisionScore = svmScore(data_test, res.x[0:-1],g) - res.x[-1];
return [pos_decisionScore,neg_decisionScore]
def func_getDecision_Scores_sklearn_OCSVM_explicit(dataset,data_train,data_test):
# print "Decision_Scores_sklearn_OCSVM Using Linear and RBF Kernels....."
if(dataset=="USPS" ):
result = sklearn_OCSVM_explicit_linear(data_train,data_test)
df_usps_scores["sklearn-OCSVM-explicit-Linear-Train"] = result[0]
df_usps_scores["sklearn-OCSVM-explicit-Linear-Test"] = result[1]
result = sklearn_OCSVM_explicit_sigmoid(data_train,data_test)
df_usps_scores["sklearn-OCSVM-explicit-Sigmoid-Train"] = result[0]
df_usps_scores["sklearn-OCSVM-explicit-Sigmoid-Test"] = result[1]
if(dataset=="FAKE_NEWS" ):
result = sklearn_OCSVM_explicit_linear(data_train,data_test)
df_fake_news_scores["sklearn-OCSVM-explicit-Linear-Train"] = result[0]
df_fake_news_scores["sklearn-OCSVM-explicit-Linear-Test"] = result[1]
result = sklearn_OCSVM_explicit_sigmoid(data_train,data_test)
df_fake_news_scores["sklearn-OCSVM-explicit-Sigmoid-Train"] = result[0]
df_fake_news_scores["sklearn-OCSVM-explicit-Sigmoid-Test"] = result[1]
# if(dataset=="SPAM_Vs_HAM" ):
# result = sklearn_OCSVM_explicit_linear(data_train,data_test)
# df_spam_vs_ham_scores["sklearn-OCSVM-explicit-Linear-Train"] = result[0]
# df_spam_vs_ham_scores["sklearn-OCSVM-explicit-Linear-Test"] = result[1]
# result = sklearn_OCSVM_explicit_sigmoid(data_train,data_test)
# df_spam_vs_ham_scores["sklearn-OCSVM-explicit-Sigmoid-Train"] = result[0]
# df_spam_vs_ham_scores["sklearn-OCSVM-explicit-Sigmoid-Test"] = result[1]
if(dataset=="CIFAR-10" ):
result = sklearn_OCSVM_explicit_linear(data_train,data_test)
df_cifar_10_scores["sklearn-OCSVM-explicit-Linear-Train"] = result[0]
df_cifar_10_scores["sklearn-OCSVM-explicit-Linear-Test"] = result[1]
result = sklearn_OCSVM_explicit_sigmoid(data_train,data_test)
df_cifar_10_scores["sklearn-OCSVM-explicit-Sigmoid-Train"] = result[0]
df_cifar_10_scores["sklearn-OCSVM-explicit-Sigmoid-Test"] = result[1]
return [df_usps_scores,df_fake_news_scores,df_spam_vs_ham_scores,df_cifar_10_scores]
|
test/sagemaker_tests/mxnet/inference/resources/default_handlers/model/code/eia_module.py | Yixiao99/deep-learning-containers | 383 | 11173750 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import mxnet as mx
import eimx
import logging
import os
def model_fn(model_dir):
logging.info('Invoking user-defined model_fn')
    # Load the checkpoint saved with the 'model' prefix and optimize the symbol for EIA
sym, arg_params, aux_params = mx.model.load_checkpoint(os.path.join(model_dir, 'model'), 0)
sym = sym.optimize_for('EIA')
mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)
exe = mod.bind(for_training=False,
data_shapes=[('data', (1,2))],
label_shapes=mod._label_shapes)
mod.set_params(arg_params, aux_params, allow_missing=True)
return mod |
blender-spritesheets/panels/spritePanel.py | geoffsutcliffe/blender-spritesheets | 140 | 11173756 | import bpy
from properties.SpriteSheetPropertyGroup import SpriteSheetPropertyGroup
from properties.ProgressPropertyGroup import ProgressPropertyGroup
class UI_PT_SpritePanel(bpy.types.Panel):
"""Panel for configuring and rendering sprite sheets"""
bl_idname = "UI_PT_SpritePanel"
bl_label = "Create Sprite Sheet"
bl_category = "Sprite Sheet"
bl_space_type = "VIEW_3D"
bl_region_type = "UI"
def draw(self, context):
"""Draw method that initializes the UI for the panel"""
layout = self.layout
props = context.scene.SpriteSheetPropertyGroup
row = layout.row()
row.label(text="Selection", icon="CURSOR")
row = layout.row()
row.prop(props, "binPath")
row = layout.row()
row.prop_search(props, "target", bpy.data, "objects")
row = layout.row()
row.label(text="This target will have all Actions in the Scene applied to it when rendering animations")
layout.separator()
row = layout.row()
row.label(text="Rendering", icon="VIEW_CAMERA")
row = layout.row()
row.prop(props, "tileSize")
row = layout.row()
row.prop(props, "fps")
row = layout.row()
row.prop(props, "onlyRenderMarkedFrames")
layout.separator()
row = layout.row()
row.label(text="Output", icon="FILE_FOLDER")
row = layout.row()
row.prop(props, "outputPath")
layout.separator()
row = layout.row()
row.operator("spritesheets.render", text="Render Sprite Sheet")
|
src/main/python/smart/smartdata_run.py | cday97/beam | 123 | 11173822 | import smartdata_setup
def createUrl(foldername):
return "https://beam-outputs.s3.amazonaws.com/output/sfbay/"+foldername
# Main
baseline_2010 = (1, 2010, 15, "base", "Base", "baseline", createUrl("sfbay-smart-base-2010__2019-10-28_20-14-32"))
base_2030lt_2025 = (2, 2025, 15, "base", "2030 Low Tech", "base_fleet_2030_lt", createUrl("sfbay-smart-base-2030-lt-2025__2019-10-29_22-54-03"))
base_2030ht_2025 = (3, 2025, 15, "base", "2030 High Tech", "base_fleet_2030_ht", createUrl("sfbay-smart-base-2030-ht-2025__2019-10-29_22-54-02"))
base_2045lt_2040 = (4, 2040, 15, "base", "2045 Low Tech", "base_fleet_2045_lt", createUrl("sfbay-smart-base-2045-lt-2040__2019-10-29_22-54-02"))
base_2045ht_2040 = (5, 2040, 15, "base", "2045 High Tech", "base_fleet_2045_ht", createUrl("sfbay-smart-base-2045-ht-2040__2019-10-29_22-54-19"))
a_lt_2025 = (6, 2025, 15, "a", "Low Tech", "a_lt", createUrl("sfbay-smart-a-lt-2025__2019-10-28_20-14-33"))
a_ht_2025 = (7, 2025, 15, "a", "High Tech", "a_ht", createUrl("sfbay-smart-a-ht-2025__2019-10-28_20-14-32"))
b_lt_2040 = (8, 2040, 15, "b", "Low Tech", "b_lt", createUrl("sfbay-smart-b-lt-2040__2019-10-28_20-15-46"))
b_ht_2040 = (9, 2040, 15, "b", "High Tech", "b_ht", createUrl("sfbay-smart-b-ht-2040__2019-10-28_20-17-54"))
c_lt_2040 = (10, 2040, 15, "c", "Low Tech", "c_lt", createUrl("sfbay-smart-c-lt-2040__2019-10-28_20-18-25"))
c_ht_2040 = (11, 2040, 15, "c", "High Tech", "c_ht", createUrl("sfbay-smart-c-ht-2040__2019-10-28_20-18-30"))
scenarios_28_10_2019 = [baseline_2010,
base_2030lt_2025, base_2030ht_2025, base_2045lt_2040, base_2045ht_2040,
a_lt_2025, a_ht_2025, b_lt_2040, b_ht_2040, c_lt_2040, c_ht_2040]
setup_config_dict = {
"run_name": "28thOct2019",
"home_dir": "/home/ubuntu/git/jupyter/data",
"scenarios": scenarios_28_10_2019
}
smartdata_setup.make_plots(setup_config_dict)
print("END") |
downstream/votenet_det_new/lib/train.py | mbanani/PointContrast | 244 | 11173840 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
""" Training routine for 3D object detection with SUN RGB-D or ScanNet.
Sample usage:
python train.py --dataset sunrgbd --log_dir log_sunrgbd
To use Tensorboard:
At server:
python -m tensorboard.main --logdir=<log_dir_name> --port=6006
At local machine:
ssh -L 1237:localhost:6006 <server_name>
Then go to local browser and type:
localhost:1237
"""
import os
import sys
import numpy as np
from datetime import datetime
import argparse
import importlib
import logging
from omegaconf import OmegaConf
from models.loss_helper import get_loss as criterion
from tensorboardX import SummaryWriter
import torch
import torch.optim as optim
from torch.optim import lr_scheduler
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from models.backbone.pointnet2.pytorch_utils import BNMomentumScheduler
from models.dump_helper import dump_results
from models.ap_helper import APCalculator, parse_predictions, parse_groundtruths
def get_current_lr(epoch, config):
lr = config.optimizer.learning_rate
for i,lr_decay_epoch in enumerate(config.optimizer.lr_decay_steps):
if epoch >= lr_decay_epoch:
lr *= config.optimizer.lr_decay_rates[i]
return lr
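# Worked example (illustrative values, not from the original config): with
# learning_rate=0.001, lr_decay_steps=[80, 120] and lr_decay_rates=[0.1, 0.1],
# epoch 100 gives 0.001 * 0.1 = 0.0001 and epoch 130 gives 0.001 * 0.1 * 0.1 = 1e-5.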
def adjust_learning_rate(optimizer, epoch, config):
lr = get_current_lr(epoch, config)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def train_one_epoch(net, train_dataloader, optimizer, bnm_scheduler, epoch_cnt, dataset_config, writer, config):
stat_dict = {} # collect statistics
adjust_learning_rate(optimizer, epoch_cnt, config)
bnm_scheduler.step() # decay BN momentum
net.train() # set model to training mode
for batch_idx, batch_data_label in enumerate(train_dataloader):
for key in batch_data_label:
batch_data_label[key] = batch_data_label[key].cuda()
# Forward pass
optimizer.zero_grad()
inputs = {'point_clouds': batch_data_label['point_clouds']}
if 'voxel_coords' in batch_data_label:
inputs.update({
'voxel_coords': batch_data_label['voxel_coords'],
'voxel_inds': batch_data_label['voxel_inds'],
'voxel_feats': batch_data_label['voxel_feats']})
end_points = net(inputs)
# Compute loss and gradients, update parameters.
for key in batch_data_label:
assert(key not in end_points)
end_points[key] = batch_data_label[key]
loss, end_points = criterion(end_points, dataset_config)
loss.backward()
optimizer.step()
# Accumulate statistics and print out
for key in end_points:
if 'loss' in key or 'acc' in key or 'ratio' in key:
if key not in stat_dict: stat_dict[key] = 0
stat_dict[key] += end_points[key].item()
batch_interval = 10
if (batch_idx+1) % batch_interval == 0:
logging.info(' ---- batch: %03d ----' % (batch_idx+1))
for key in stat_dict:
writer.add_scalar('training/{}'.format(key), stat_dict[key]/batch_interval,
(epoch_cnt*len(train_dataloader)+batch_idx)*config.data.batch_size)
for key in sorted(stat_dict.keys()):
logging.info('mean %s: %f'%(key, stat_dict[key]/batch_interval))
stat_dict[key] = 0
def evaluate_one_epoch(net, train_dataloader, test_dataloader, config, epoch_cnt, CONFIG_DICT, writer):
stat_dict = {} # collect statistics
ap_calculator = APCalculator(ap_iou_thresh=0.5, class2type_map=CONFIG_DICT['dataset_config'].class2type)
net.eval() # set model to eval mode (for bn and dp)
for batch_idx, batch_data_label in enumerate(test_dataloader):
if batch_idx % 10 == 0:
logging.info('Eval batch: %d'%(batch_idx))
for key in batch_data_label:
batch_data_label[key] = batch_data_label[key].cuda()
# Forward pass
inputs = {'point_clouds': batch_data_label['point_clouds']}
if 'voxel_coords' in batch_data_label:
inputs.update({
'voxel_coords': batch_data_label['voxel_coords'],
'voxel_inds': batch_data_label['voxel_inds'],
'voxel_feats': batch_data_label['voxel_feats']})
with torch.no_grad():
end_points = net(inputs)
# Compute loss
for key in batch_data_label:
assert(key not in end_points)
end_points[key] = batch_data_label[key]
loss, end_points = criterion(end_points, CONFIG_DICT['dataset_config'])
# Accumulate statistics and print out
for key in end_points:
if 'loss' in key or 'acc' in key or 'ratio' in key:
if key not in stat_dict: stat_dict[key] = 0
stat_dict[key] += end_points[key].item()
batch_pred_map_cls = parse_predictions(end_points, CONFIG_DICT)
batch_gt_map_cls = parse_groundtruths(end_points, CONFIG_DICT)
ap_calculator.step(batch_pred_map_cls, batch_gt_map_cls)
# Dump evaluation results for visualization
if config.data.dump_results and batch_idx == 0 and epoch_cnt %10 == 0:
dump_results(end_points, 'results', CONFIG_DICT['dataset_config'])
# Log statistics
for key in sorted(stat_dict.keys()):
writer.add_scalar('validation/{}'.format(key), stat_dict[key]/float(batch_idx+1),
(epoch_cnt+1)*len(train_dataloader)*config.data.batch_size)
logging.info('eval mean %s: %f'%(key, stat_dict[key]/(float(batch_idx+1))))
# Evaluate average precision
metrics_dict = ap_calculator.compute_metrics()
for key in metrics_dict:
logging.info('eval %s: %f'%(key, metrics_dict[key]))
writer.add_scalar('validation/[email protected]', metrics_dict['mAP'], (epoch_cnt+1)*len(train_dataloader)*config.data.batch_size)
mean_loss = stat_dict['loss']/float(batch_idx+1)
return mean_loss
def train(net, train_dataloader, test_dataloader, dataset_config, config):
# Used for AP calculation
CONFIG_DICT = {'remove_empty_box':False, 'use_3d_nms':True,
'nms_iou':0.25, 'use_old_type_nms':False, 'cls_nms':True,
'per_class_proposal': True, 'conf_thresh':0.05,
'dataset_config': dataset_config}
# Load the Adam optimizer
optimizer = optim.Adam(net.parameters(), lr=config.optimizer.learning_rate, weight_decay=config.optimizer.weight_decay)
# writer
writer = SummaryWriter(log_dir='tensorboard')
# Load checkpoint if there is any
start_epoch = 0
CHECKPOINT_PATH = os.path.join('checkpoint.tar')
if os.path.isfile(CHECKPOINT_PATH):
checkpoint = torch.load(CHECKPOINT_PATH)
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch']
logging.info("-> loaded checkpoint %s (epoch: %d)"%(CHECKPOINT_PATH, start_epoch))
# Decay Batchnorm momentum from 0.5 to 0.999
# note: pytorch's BN momentum (default 0.1)= 1 - tensorflow's BN momentum
BN_MOMENTUM_INIT = 0.5
BN_MOMENTUM_MAX = 0.001
BN_DECAY_STEP = config.optimizer.bn_decay_step
BN_DECAY_RATE = config.optimizer.bn_decay_rate
bn_lbmd = lambda it: max(BN_MOMENTUM_INIT * BN_DECAY_RATE**(int(it / BN_DECAY_STEP)), BN_MOMENTUM_MAX)
bnm_scheduler = BNMomentumScheduler(net, bn_lambda=bn_lbmd, last_epoch=start_epoch-1)
loss = 0
for epoch in range(start_epoch, config.optimizer.max_epoch):
logging.info('**** EPOCH %03d ****' % (epoch))
logging.info('Current learning rate: %f'%(get_current_lr(epoch, config)))
logging.info('Current BN decay momentum: %f'%(bnm_scheduler.lmbd(bnm_scheduler.last_epoch)))
logging.info(str(datetime.now()))
# Reset numpy seed.
# REF: https://github.com/pytorch/pytorch/issues/5059
np.random.seed()
train_one_epoch(net=net, train_dataloader=train_dataloader, optimizer=optimizer,
bnm_scheduler=bnm_scheduler, epoch_cnt=epoch, dataset_config=dataset_config,
writer=writer, config=config)
if epoch == 0 or epoch % 5 == 4: # Eval every 5 epochs
loss = evaluate_one_epoch(net=net, train_dataloader=train_dataloader, test_dataloader=test_dataloader,
config=config, epoch_cnt=epoch, CONFIG_DICT=CONFIG_DICT, writer=writer)
# Save checkpoint
save_dict = {'epoch': epoch+1, # after training one epoch, the start_epoch should be epoch+1
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss,
}
try: # with nn.DataParallel() the net is added as a submodule of DataParallel
save_dict['state_dict'] = net.module.state_dict()
except:
save_dict['state_dict'] = net.state_dict()
torch.save(save_dict, 'checkpoint.tar')
OmegaConf.save(config, 'config.yaml')
|
hex2str/hex2str.py | DazEB2/SimplePyScripts | 117 | 11173855 | <reponame>DazEB2/SimplePyScripts
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
def hex2str(hex_string: str, encoding='utf-8') -> str:
data = bytes.fromhex(hex_string)
return str(data, encoding)
def str2hex(text: str, encoding='utf-8', upper=True) -> str:
hex_text = bytes(text, encoding).hex()
if upper:
hex_text = hex_text.upper()
return hex_text
if __name__ == '__main__':
assert hex2str("504F53542068747470733A") == "POST https:"
assert str2hex(hex2str("504F53542068747470733A")) == "504F53542068747470733A"
assert str2hex(hex2str("504F53542068747470733A"), upper=False) == "504f53542068747470733a"
assert str2hex("POST https:") == "504F53542068747470733A"
assert hex2str(str2hex("POST https:")) == "POST https:"
assert hex2str(str2hex("Привет мир!")) == "Привет мир!"
assert hex2str(str2hex("⌚⏰☀☁☔☺")) == "⌚⏰☀☁☔☺"
hex_text = "504F53542068747470733A"
text = hex2str(hex_text)
print('"{}"'.format(text))
text = "POST https:"
hex_text = str2hex(text)
print(hex_text)
|
alipay/aop/api/domain/AlipayDataDataserviceAdDataQueryModel.py | antopen/alipay-sdk-python-all | 213 | 11173903 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayDataDataserviceAdDataQueryModel(object):
def __init__(self):
self._ad_level = None
self._biz_token = None
self._charge_type = None
self._end_date = None
self._outer_id_list = None
self._query_type = None
self._start_date = None
@property
def ad_level(self):
return self._ad_level
@ad_level.setter
def ad_level(self, value):
self._ad_level = value
@property
def biz_token(self):
return self._biz_token
@biz_token.setter
def biz_token(self, value):
self._biz_token = value
@property
def charge_type(self):
return self._charge_type
@charge_type.setter
def charge_type(self, value):
self._charge_type = value
@property
def end_date(self):
return self._end_date
@end_date.setter
def end_date(self, value):
self._end_date = value
@property
def outer_id_list(self):
return self._outer_id_list
@outer_id_list.setter
def outer_id_list(self, value):
if isinstance(value, list):
self._outer_id_list = list()
for i in value:
self._outer_id_list.append(i)
@property
def query_type(self):
return self._query_type
@query_type.setter
def query_type(self, value):
self._query_type = value
@property
def start_date(self):
return self._start_date
@start_date.setter
def start_date(self, value):
self._start_date = value
def to_alipay_dict(self):
params = dict()
if self.ad_level:
if hasattr(self.ad_level, 'to_alipay_dict'):
params['ad_level'] = self.ad_level.to_alipay_dict()
else:
params['ad_level'] = self.ad_level
if self.biz_token:
if hasattr(self.biz_token, 'to_alipay_dict'):
params['biz_token'] = self.biz_token.to_alipay_dict()
else:
params['biz_token'] = self.biz_token
if self.charge_type:
if hasattr(self.charge_type, 'to_alipay_dict'):
params['charge_type'] = self.charge_type.to_alipay_dict()
else:
params['charge_type'] = self.charge_type
if self.end_date:
if hasattr(self.end_date, 'to_alipay_dict'):
params['end_date'] = self.end_date.to_alipay_dict()
else:
params['end_date'] = self.end_date
if self.outer_id_list:
if isinstance(self.outer_id_list, list):
for i in range(0, len(self.outer_id_list)):
element = self.outer_id_list[i]
if hasattr(element, 'to_alipay_dict'):
self.outer_id_list[i] = element.to_alipay_dict()
if hasattr(self.outer_id_list, 'to_alipay_dict'):
params['outer_id_list'] = self.outer_id_list.to_alipay_dict()
else:
params['outer_id_list'] = self.outer_id_list
if self.query_type:
if hasattr(self.query_type, 'to_alipay_dict'):
params['query_type'] = self.query_type.to_alipay_dict()
else:
params['query_type'] = self.query_type
if self.start_date:
if hasattr(self.start_date, 'to_alipay_dict'):
params['start_date'] = self.start_date.to_alipay_dict()
else:
params['start_date'] = self.start_date
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayDataDataserviceAdDataQueryModel()
if 'ad_level' in d:
o.ad_level = d['ad_level']
if 'biz_token' in d:
o.biz_token = d['biz_token']
if 'charge_type' in d:
o.charge_type = d['charge_type']
if 'end_date' in d:
o.end_date = d['end_date']
if 'outer_id_list' in d:
o.outer_id_list = d['outer_id_list']
if 'query_type' in d:
o.query_type = d['query_type']
if 'start_date' in d:
o.start_date = d['start_date']
return o
|
moto/awslambda/utils.py | oakbramble/moto | 5,460 | 11173919 | <reponame>oakbramble/moto
from collections import namedtuple
from functools import partial
ARN = namedtuple("ARN", ["region", "account", "function_name", "version"])
LAYER_ARN = namedtuple("LAYER_ARN", ["region", "account", "layer_name", "version"])
def make_arn(resource_type, region, account, name):
return "arn:aws:lambda:{0}:{1}:{2}:{3}".format(region, account, resource_type, name)
make_function_arn = partial(make_arn, "function")
make_layer_arn = partial(make_arn, "layer")
def make_ver_arn(resource_type, region, account, name, version="1"):
arn = make_arn(resource_type, region, account, name)
return "{0}:{1}".format(arn, version)
make_function_ver_arn = partial(make_ver_arn, "function")
make_layer_ver_arn = partial(make_ver_arn, "layer")
def split_arn(arn_type, arn):
arn = arn.replace("arn:aws:lambda:", "")
region, account, _, name, version = arn.split(":")
return arn_type(region, account, name, version)
split_function_arn = partial(split_arn, ARN)
split_layer_arn = partial(split_arn, LAYER_ARN)
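# Hypothetical quick check (not part of the original module) showing the round
# trip between the helpers above; the region/account values are made up.
if __name__ == "__main__":
    arn = make_function_ver_arn("us-east-1", "123456789012", "my-func")
    assert arn == "arn:aws:lambda:us-east-1:123456789012:function:my-func:1"
    parsed = split_function_arn(arn)
    assert parsed == ARN("us-east-1", "123456789012", "my-func", "1")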
|
safety_gym/random_agent.py | zhangdongkun98/safety-gym | 327 | 11173926 | <reponame>zhangdongkun98/safety-gym
#!/usr/bin/env python
import argparse
import gym
import safety_gym # noqa
import numpy as np # noqa
def run_random(env_name):
env = gym.make(env_name)
obs = env.reset()
done = False
ep_ret = 0
ep_cost = 0
while True:
if done:
print('Episode Return: %.3f \t Episode Cost: %.3f'%(ep_ret, ep_cost))
ep_ret, ep_cost = 0, 0
obs = env.reset()
assert env.observation_space.contains(obs)
act = env.action_space.sample()
assert env.action_space.contains(act)
obs, reward, done, info = env.step(act)
# print('reward', reward)
ep_ret += reward
ep_cost += info.get('cost', 0)
env.render()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--env', default='Safexp-PointGoal1-v0')
args = parser.parse_args()
run_random(args.env)
|
tests/apps/courses/test_templatetags_extra_tags_get_placeholder_plugins.py | leduong/richie | 174 | 11173997 | """Test suite for the GetPlaceholderPlugins template tag."""
from django.contrib.auth.models import AnonymousUser
from django.db import transaction
from django.template.exceptions import TemplateSyntaxError
from django.test import RequestFactory
from cms.api import add_plugin, create_page
from cms.test_utils.testcases import CMSTestCase
from richie.plugins.simple_text_ckeditor.cms_plugins import CKEditorPlugin
class GetPlaceholderPluginsTemplateTagsTestCase(CMSTestCase):
"""
Integration tests to validate the behavior of the `get_placeholder_plugins` template tag.
"""
@transaction.atomic
def test_templatetags_get_placeholder_plugins_current_page(self):
"""
The "get_placeholder_plugins" template tag should inject in the context, the plugins
of the targeted placeholder on the current page.
"""
page = create_page("Test", "richie/single_column.html", "en", published=True)
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, CKEditorPlugin, "en", body="<b>Test 1</b>")
add_plugin(placeholder, CKEditorPlugin, "en", body="<b>Test 2</b>")
request = RequestFactory().get("/")
request.current_page = page
request.user = AnonymousUser()
template = (
"{% load cms_tags extra_tags %}"
'{% get_placeholder_plugins "maincontent" as plugins %}'
"{% for plugin in plugins %}{% render_plugin plugin %}{% endfor %}"
)
output = self.render_template_obj(template, {}, request)
self.assertEqual(output, "<b>Test 1</b>\n<b>Test 2</b>\n")
@transaction.atomic
def test_templatetags_get_placeholder_plugins_empty(self):
"""
The "get_placeholder_plugins" template tag should render its node content if it has
        no plugins and the "or" keyword is passed.
"""
page = create_page("Test", "richie/single_column.html", "en", published=True)
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, CKEditorPlugin, "en", body="<b>Test</b>")
request = RequestFactory().get("/")
request.current_page = create_page(
"current", "richie/single_column.html", "en", published=True
)
request.user = AnonymousUser()
template = (
"{% load cms_tags extra_tags %}"
'{% get_placeholder_plugins "maincontent" as plugins or %}'
"<i>empty content</i>{% endget_placeholder_plugins %}"
"{% for plugin in plugins %}{% render_plugin plugin %}{% endfor %}"
)
output = self.render_template_obj(template, {}, request)
self.assertEqual("<i>empty content</i>", output)
@transaction.atomic
def test_templatetags_get_placeholder_plugins_empty_no_or(self):
"""
The "get_placeholder_plugins" template tag should raise an error if it has block
        content but the "or" keyword was forgotten.
"""
request = RequestFactory().get("/")
request.current_page = create_page(
"current", "richie/single_column.html", "en", published=True
)
request.user = AnonymousUser()
template_without_or = (
"{% load cms_tags extra_tags %}"
'{% get_placeholder_plugins "maincontent" as plugins %}'
"<i>empty content</i>{% endget_placeholder_plugins %}"
)
with self.assertRaises(TemplateSyntaxError):
self.render_template_obj(template_without_or, {}, request)
@transaction.atomic
def test_templatetags_get_placeholder_plugins_unknown_placeholder(self):
"""
When a new placeholder is added to the code, it does not exist on pages that were
pre-existing. The `get_placeholder_plugins` should not fail in this case.
"""
page = create_page("Test", "richie/single_column.html", "en", published=True)
request = RequestFactory().get("/")
request.current_page = page
request.user = AnonymousUser()
template = (
"{% load cms_tags extra_tags %}"
'{% get_placeholder_plugins "unknown" as plugins %}'
"{% for plugin in plugins %}{% render_plugin plugin %}{% endfor %}"
)
output = self.render_template_obj(template, {}, request)
self.assertEqual(output, "")
|
Chapter05/Custom_Modules/library/custom_module_2.py | stavsta/Mastering-Python-Networking-Second-Edition | 107 | 11174018 | #!/usr/bin/env python2
import requests
import json
def main():
module = AnsibleModule(
argument_spec = dict(
host = dict(required=True),
username = dict(required=True),
password = dict(required=True)
)
)
device = module.params.get('host')
username = module.params.get('username')
password = module.params.get('password')
    url='http://' + device + '/ins'
switchuser=username
switchpassword=password
myheaders={'content-type':'application/json-rpc'}
payload=[
{
"jsonrpc": "2.0",
"method": "cli",
"params": {
"cmd": "show version",
"version": 1.2
},
"id": 1
}
]
response = requests.post(url,data=json.dumps(payload), headers=myheaders,auth=(switchuser,switchpassword)).json()
version = response['result']['body']['sys_ver_str']
data = json.dumps({"version": version})
module.exit_json(changed=False, msg=str(data))
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
tests/sparseml/tensorflow_v1/utils/test_variable.py | clementpoiret/sparseml | 922 | 11174045 | <reponame>clementpoiret/sparseml<filename>tests/sparseml/tensorflow_v1/utils/test_variable.py
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List
import pytest
from sparseml.tensorflow_v1.utils import (
clean_tensor_name,
get_op_input_var,
get_ops_and_inputs_by_name_or_regex,
get_prunable_ops,
tf_compat,
)
from tests.sparseml.tensorflow_v1.helpers import conv_net, mlp_net
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_TENSORFLOW_TESTS", False),
reason="Skipping tensorflow_v1 tests",
)
def test_op_var_name():
graph = tf_compat.Graph()
with graph.as_default():
var = tf_compat.Variable(
tf_compat.random_normal([64]), dtype=tf_compat.float32, name="test_var_name"
)
name = clean_tensor_name(var)
assert name == "test_var_name"
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_TENSORFLOW_TESTS", False),
reason="Skipping tensorflow_v1 tests",
)
def test_op_input_var():
with tf_compat.Graph().as_default() as graph:
mlp_net()
ops = get_prunable_ops(graph)
for op in ops:
inp = get_op_input_var(op[1])
assert inp is not None
assert isinstance(inp, tf_compat.Tensor)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_TENSORFLOW_TESTS", False),
reason="Skipping tensorflow_v1 tests",
)
@pytest.mark.parametrize(
"net_const,expected_ops",
[
(mlp_net, ["mlp_net/fc1/matmul", "mlp_net/fc2/matmul", "mlp_net/fc3/matmul"]),
(
conv_net,
["conv_net/conv1/conv", "conv_net/conv2/conv", "conv_net/mlp/matmul"],
),
],
)
def test_get_prunable_ops(net_const, expected_ops: List[str]):
with tf_compat.Graph().as_default():
net_const()
ops = get_prunable_ops()
assert len(ops) == len(expected_ops)
for op in ops:
assert op[0] in expected_ops
@pytest.mark.parametrize(
"net_const,var_names,expected_ops,expected_tens",
[
(
mlp_net,
["mlp_net/fc1/weight", "mlp_net/fc2/weight", "mlp_net/fc3/weight"],
["mlp_net/fc1/matmul", "mlp_net/fc2/matmul", "mlp_net/fc3/matmul"],
["mlp_net/fc1/weight", "mlp_net/fc2/weight", "mlp_net/fc3/weight"],
),
(
mlp_net,
["mlp_net/fc1/weight"],
["mlp_net/fc1/matmul"],
["mlp_net/fc1/weight"],
),
(
conv_net,
["re:conv_net/.*/weight"],
["conv_net/conv1/conv", "conv_net/conv2/conv", "conv_net/mlp/matmul"],
["conv_net/conv1/weight", "conv_net/conv2/weight", "conv_net/mlp/weight"],
),
],
)
def test_get_ops_and_inputs_by_name_or_regex(
net_const,
var_names,
expected_ops,
expected_tens,
):
with tf_compat.Graph().as_default() as graph:
net_const()
ops_and_inputs = get_ops_and_inputs_by_name_or_regex(var_names, graph)
assert len(ops_and_inputs) == len(expected_ops)
for op, inp in ops_and_inputs:
assert op.name in expected_ops
assert clean_tensor_name(inp.name) in expected_tens
|
django_th/urls.py | Leopere/django-th | 1,069 | 11174057 | from django.conf.urls import include, url
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path
from django_th.forms.wizard import DummyForm, ProviderForm, ConsumerForm, ServicesDescriptionForm
from django_th.views import TriggerListView, TriggerDeleteView, TriggerUpdateView, TriggerEditedTemplateView, MeUpdate
from django_th.views import TriggerDeletedTemplateView, me
from django_th.views_fbv import trigger_switch_all_to, trigger_edit, trigger_on_off, fire_trigger
from django_th.views_fbv import service_related_triggers_switch_to
from django_th.views_userservices import UserServiceListView, UserServiceCreateView, UserServiceUpdateView
from django_th.views_userservices import UserServiceDeleteView, renew_service
from django_th.views_wizard import UserServiceWizard, finalcallback
from django_js_reverse.views import urls_js
urlpatterns = [
path('jsreverse/', urls_js, name='js_reverse'),
# ****************************************
# admin module
# ****************************************
path('admin/', admin.site.urls),
# ****************************************
# profil
# ****************************************
path(r'me/', me, name='me'),
path(r'me/edit/', MeUpdate.as_view(), name='edit_me'),
# ****************************************
# auth module
# ****************************************
path(
'auth/password_change/',
auth_views.PasswordChangeView.as_view(template_name='auth/change_password.html'),
),
path(
'auth/password_change/done/',
auth_views.PasswordChangeDoneView.as_view(template_name='auth/password_change_done.html'),
),
path('auth/', include('django.contrib.auth.urls')),
# ****************************************
# trigger happy module
# ****************************************
path('th/', TriggerListView.as_view(), name='base'),
url(r'^th/trigger/filter_by/(?P<trigger_filtered_by>[a-zA-Z]+)$', TriggerListView.as_view(),
name='trigger_filter_by'),
url(r'^th/trigger/order_by/(?P<trigger_ordered_by>[a-zA-Z_]+)$', TriggerListView.as_view(),
name='trigger_order_by'),
path('th/trigger/', TriggerListView.as_view(), name='home'),
# ****************************************
# * trigger
# ****************************************
url(r'^th/trigger/delete/(?P<pk>\d+)$', TriggerDeleteView.as_view(), name='delete_trigger'),
url(r'^th/trigger/edit/(?P<pk>\d+)$', TriggerUpdateView.as_view(), name='edit_trigger'),
path('th/trigger/editprovider/<int:trigger_id>', trigger_edit, {'edit_what': 'Provider'}, name='edit_provider'),
path('th/trigger/editconsumer/<int:trigger_id>', trigger_edit, {'edit_what': 'Consumer'}, name='edit_consumer'),
path('th/trigger/edit/thanks', TriggerEditedTemplateView.as_view(), name="trigger_edit_thanks"),
path('th/trigger/delete/thanks', TriggerDeletedTemplateView.as_view(), name="trigger_delete_thanks"),
path('th/trigger/onoff/<int:trigger_id>', trigger_on_off, name="trigger_on_off"),
url(r'^th/trigger/all/(?P<switch>(on|off))$', trigger_switch_all_to, name="trigger_switch_all_to"),
# ****************************************
# * service
# ****************************************
path('th/services/', UserServiceListView.as_view(), name='user_services'),
url(r'^th/service/add/(?P<service_name>\w+)$', UserServiceCreateView.as_view(), name='add_service'),
url(r'^th/service/edit/(?P<pk>\d+)$', UserServiceUpdateView.as_view(), name='edit_service'),
url(r'^th/service/delete/(?P<pk>\d+)$', UserServiceDeleteView.as_view(), name='delete_service'),
url(r'^th/service/renew/(?P<pk>\d+)$', renew_service, name="renew_service"),
path('th/service/delete/', UserServiceDeleteView.as_view(), name='delete_service'),
url(r'^th/service/onoff/(?P<user_service_id>\d+)/(?P<switch>(on|off))$', service_related_triggers_switch_to,
name="service_related_triggers_switch_to"),
# ****************************************
# wizard
# ****************************************
path('th/service/create/',
UserServiceWizard.as_view([ProviderForm,
DummyForm,
ConsumerForm,
DummyForm,
ServicesDescriptionForm]),
name='create_service'),
# every service will use django_th.views.finalcallback
# and give the service_name value to use to
# trigger the real callback
path("th/callbackevernote/", finalcallback, {'service_name': 'ServiceEvernote', }, name="evernote_callback",),
path("th/callbackgithub/", finalcallback, {'service_name': 'ServiceGithub', }, name="github_callback",),
path("th/callbackpocket/", finalcallback, {'service_name': 'ServicePocket', }, name="pocket_callback",),
path("th/callbackpushbullet/", finalcallback, {'service_name': 'ServicePushbullet', }, name="pushbullet_callback",),
path("th/callbackreddit/", finalcallback, {'service_name': 'ServiceReddit', }, name="reddit_callback",),
path("th/callbacktodoist/", finalcallback, {'service_name': 'ServiceTodoist', }, name="todoist_callback",),
path("th/callbacktrello/", finalcallback, {'service_name': 'ServiceTrello', }, name="trello_callback",),
path("th/callbacktumblr/", finalcallback, {'service_name': 'ServiceTumblr', }, name="tumblr_callback",),
path("th/callbacktwitter/", finalcallback, {'service_name': 'ServiceTwitter', }, name="twitter_callback",),
path("th/callbackwallabag/", finalcallback, {'service_name': 'ServiceWallabag', }, name="wallabag_callback",),
path("th/callbackmastodon/", finalcallback, {'service_name': 'ServiceMastodon', }, name="mastodon_callback",),
path('th/myfeeds/', include('th_rss.urls')),
path('th/api/taiga/webhook/', include('th_taiga.urls')),
path('th/api/slack/webhook/', include('th_slack.urls'))
]
if settings.DJANGO_TH.get('fire'):
urlpatterns += path('th/trigger/fire/<int:trigger_id>', fire_trigger, name="fire_trigger"),
|
lib/util.py | Rehzende/project-dev-kpis | 113 | 11174175 | <filename>lib/util.py<gh_stars>100-1000
import logging
import sys
import time
import urllib
import json
from itertools import islice
from dateutil.rrule import *
from datetime import tzinfo, timedelta, datetime
from dateutil.parser import parse as parse_date
from dateutil import tz
import pytz
import re
##
# logging
#
logging.Formatter.converter = time.gmtime
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s %(message)s',
datefmt='%Y-%m-%dT%H:%M:%SZ',
level=logging.INFO
)
logger = logging.getLogger()
##
# language
#
def merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
def flatten(l):
return [item for sublist in l for item in sublist]
def recursive_get(d, keys):
if len(keys) > 0 and d is not None:
next = keys[0]
rest = keys[1:]
return recursive_get(d[next], rest) if next in d else None
else:
return d
def json_pprint(js):
print(json.dumps(js, sort_keys=True, indent=4, separators=(',', ': ')))
def window(seq, n):
"""
Returns a sliding window (of width n) over data from the iterable
s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ...
"""
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
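# Illustrative example (not part of the original module):
#   list(window([1, 2, 3, 4], 2)) -> [(1, 2), (2, 3), (3, 4)]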
##
# time
#
local_tz = pytz.timezone('UTC')
def to_utc(d):
if d.tzinfo is not None and d.tzinfo.utcoffset(d) == timedelta(0):
dutc = d
elif d.tzinfo is None:
dutc = local_tz.localize(d)
else:
dutc = d.astimezone(tz.gettz('UTC'))
return dutc
def to_epoch(dt):
return (
to_utc(dt) - parse_date('1970-01-01T00:00:00Z')
).total_seconds()
def closest_biz_day(dt, forward=True):
adjust = -1 * timedelta(seconds=to_epoch(dt) % 86400)
if forward:
delta = timedelta(days=1)
else:
delta = timedelta(days=-1)
new_dt = dt
while new_dt.weekday() in [5, 6]:
new_dt = new_dt + delta
if new_dt != dt:
return new_dt + adjust
else:
return new_dt
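# Illustrative behaviour (not part of the original module): a Saturday noon
# input such as 2020-01-11 12:00 rolls forward to Monday 2020-01-13, and the
# `adjust` offset strips the time of day, so the result is Monday at 00:00;
# a weekday input is returned unchanged.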
def weekdays_between(d1, d2):
seconds_in_a_day = 86400
if d1 > d2:
return weekdays_between(d2, d1)
start = closest_biz_day(d1)
end = closest_biz_day(d2)
num_weekend_days = rrule(
WEEKLY,
byweekday=(SA, SU),
dtstart=start,
until=end
).count()
return (
(end - start).total_seconds() -
num_weekend_days * seconds_in_a_day
) / float(seconds_in_a_day)
##
# format
#
def percent_encode(s):
return urllib.quote(str(s), safe='')
def listify(lst):
return ','.join(['"' + str(s) + '"' for s in lst])
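# Hypothetical quick check (not part of the original module): Monday 2020-01-06
# through Monday 2020-01-13 spans one Saturday and one Sunday, so there are
# exactly five weekdays in between.
if __name__ == '__main__':
    print(weekdays_between(parse_date('2020-01-06'), parse_date('2020-01-13')))  # 5.0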
|
scratch/test.py | potassco/gringo | 423 | 11174178 | <filename>scratch/test.py
# {{{ Val
class FunVal:
def __init__(self, name, args):
self.name = name
self.args = args
def match(self, node, other, subst):
node.matchFun(self, other, subst)
def sig(self):
return (self.name, len(self.args))
def __eq__(self, other):
return isinstance(other, FunVal) and self.name == other.name and self.args == other.args
def __repr__(self):
return self.name + "("+ ",".join([str(arg) for arg in self.args]) + ")"
class NumVal:
def __init__(self, num):
self.num = num
def match(self, node, other, subst):
node.matchNum(self, other, subst)
def __eq__(self, other):
return isinstance(other, NumVal) and self.num == other.num
def __repr__(self):
return str(self.num)
# }}}
# {{{ Term
class FunTerm:
def __init__(self, name, args):
self.name = name
self.args = args
def sig(self):
return (self.name, len(self.args))
def add(self, node, other, leaf):
node.addFun(self, other, leaf)
def unify(self, node, other, subst):
node.unifyFun(self, other, subst)
def occurs(self, subst, var):
for arg in self.args:
if arg.occurs(subst, var): return True
return False
def __repr__(self):
return self.name + "("+ ",".join([str(arg) for arg in self.args]) + ")"
class VarTerm:
def __init__(self, name):
self.name = name
def add(self, node, other, leaf):
node.addVar(self, other, leaf)
def unify(self, node, other, subst):
node.unifyVar(self, other, subst)
def occurs(self, subst, var):
if self.name == var:
return True
else:
t = subst.get(self.name)
return t != None and t.occurs(subst, var)
def __repr__(self):
return self.name
# class ValTerm:
# ...
# }}}
class Node:
def __init__(self):
self.fun = {}
self.var = {}
self.leaf = None
def addFun(self, fun, other, leaf):
x = self.fun.setdefault(fun.sig(), Node())
n = fun.args + other
n[0].add(x, n[1:], leaf)
def addVar(self, var, other, leaf):
x = self.var.setdefault(var.name, Node())
if len(other) > 0:
other[0].add(x, other[1:], leaf)
else:
x.leaf = leaf
def __matchVar(self, val, other, subst):
for var, node in self.var.items():
match = True
if var in subst: match = subst[var] == val
else:
subst = dict(subst)
subst[var] = val
if match:
if len(other) > 0:
other[0].match(node, other[1:], subst)
else:
print(" matched: " + str(node.leaf) + " with: " + str(subst))
def matchFun(self, fun, other, subst):
node = self.fun.get(fun.sig())
if node != None:
n = fun.args + other
n[0].match(node, n[1:], subst)
self.__matchVar(fun, other, subst)
def matchNum(self, num, other, subst):
self.__matchVar(num, other, subst)
def unifyFun(self, fun, other, subst):
node = self.fun.get(fun.sig())
if node != None:
n = fun.args + other
n[0].unify(node, n[1:], subst)
for var, node in self.var.items():
t = subst.get(var)
match = True
if t != None:
print(" TODO: unify " + str(fun) + " with " + str(t))
match = False
else:
if not fun.occurs(subst, var):
subst = dict(subst)
subst[var] = fun
else: match = False
if match:
if len(other) > 0:
other[0].unify(node, other[1:], subst)
else:
print(" matched: " + str(node.leaf) + " with: " + str(subst))
def getFun(self, name, n, args):
# the data structures don't go together very well :(
if n == 0:
return [(self, FunTerm(name, args))]
else:
ret = []
for (nameB, nB), nodeB in self.fun.items():
funs = nodeB.getFun(nameB, nB, [])
for (node, fun) in funs:
ret.extend(node.getFun(name, n-1, args + [fun]))
for varB, nodeB in self.var.items():
ret.extend(nodeB.getFun(name, n-1, args + [VarTerm(varB)]))
return ret
def unifyVar(self, var, other, subst):
t = subst.get(var.name)
if t != None:
t.unify(self, other, subst)
else:
for (name, n), node in self.fun.items():
for nodeB, fun in node.getFun(name, n, []):
if not fun.occurs(subst, var.name):
substB = dict(subst)
substB[var] = fun
if len(other) > 0:
other[0].unify(nodeB, other[1:], substB)
else:
print(" matched: " + str(nodeB.leaf) + " with: " + str(substB))
for varB, node in self.var.items():
t = subst.get(varB)
match = True
if t != None:
print(" TODO: unify " + str(var) + " with " + str(t))
match = False
else:
if var.name != varB:
subst = dict(subst)
subst[var] = VarTerm(varB)
if match:
if len(other) > 0:
other[0].unify(node, other[1:], subst)
else:
print(" matched: " + str(node.leaf) + " with: " + str(subst))
def toString(self, ident):
s = ""
for x, y in self.fun.items():
s+= ident + str(x) + "\n"
s+= y.toString(ident + " ")
for x, y in self.var.items():
s+= ident + str(x) + "\n"
s+= y.toString(ident + " ")
if self.leaf != None:
s+= ident + "*" + str(self.leaf) + "*\n"
return s
class Lookup:
def __init__(self):
self.root = Node()
def add(self, x):
print("adding: " + str(x))
x.add(self.root, [], x)
def match(self, x):
print("matching: " + str(x))
x.match(self.root, [], {})
def unify(self, x):
print("unifying: " + str(x))
x.unify(self.root, [], {})
def __repr__(self):
return "root:\n" + self.root.toString(" ")
l = Lookup()
l.add(FunTerm("p", [FunTerm("f", [VarTerm("X"), VarTerm("Y")]), VarTerm("Z")]))
l.add(FunTerm("p", [FunTerm("g", [VarTerm("X"), VarTerm("Y")]), VarTerm("Z")]))
l.add(FunTerm("p", [VarTerm("X"), VarTerm("Y")]))
l.add(FunTerm("p", [VarTerm("X"), VarTerm("X")]))
print(l)
# next match tuples
l.match(FunVal("p", [FunVal("f", [NumVal(1), NumVal(2)]), NumVal(3)]))
l.match(FunVal("p", [FunVal("f", [NumVal(1), NumVal(2)]), FunVal("f", [NumVal(1), NumVal(2)])]))
# next unify terms
l.unify(FunTerm("p", [FunTerm("g", [VarTerm("A"), VarTerm("B")]), VarTerm("C")]))
l.unify(FunTerm("p", [VarTerm("A"), VarTerm("B")]))
"""
subst:
String -> Term
cases to consider:
a:VarTerm - b:FunTerm
if a in subst:
# moves completely to terms ...
return unify(subst[a], b, subst)
else:
# easily implemented ...
if b.occur(subst, a.name): # applies the substitution on the fly
return False
subst[a.name] = b
return True
a:FunTerm - b:FunTerm
# easiliy implemented ...
if a.sig() != b.sig():
return False
for x, y in zip(a.args(), b.args()):
if not unify(x, y, subst):
return False
return True
a:FunTerm - b:VarTerm
# needs extraction of FunTerm
return unify(b, a, subst)
a:VarTerm - b:VarTerm
if a in subst:
a = subst[a.name]
return unify(a, b, subst)
elif b in subst:
b = subst[b.name]
return unify(a, b, subst)
elif a.name != b.name:
# occurs check???
subst[a.name] = b
return True
else:
return True
the trick is to always let unify apply a substitution until a fixpoint
implement this similar to match in Lookup
afterwards adding these should be straightforward:
next add ValTerms
next add LinearTerm
"""
|
test/test_clai_plugins_howdoi.py | cohmoti/clai | 391 | 11174184 | #
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
import os
import unittest
from builtins import classmethod
from clai.server.command_message import State
from clai.server.plugins.howdoi.howdoi import HowDoIAgent
OS_NAME: str = os.uname().sysname.upper()
@unittest.skip("Only for local testing")
class SearchAgentTest(unittest.TestCase):
@classmethod
    def setUpClass(cls):
_agent = HowDoIAgent()
cls.agent = _agent
def print_and_verify(self, question, answer):
state = State(user_name='tester', command_id='0', command=question)
action = self.agent.get_next_action(state=state)
print(f"Input: {state.command}")
print("===========================")
print(f"Response: {action.suggested_command}")
print("===========================")
print(f"Explanation: {action.description}")
self.assertEqual(answer, action.suggested_command)
@unittest.skip("Only for local testing")
def test_get_next_action_pwd_without_question(self):
self.agent.init_agent()
if OS_NAME in ("OS/390", "Z/OS"):
self.print_and_verify("pds", "pds")
else:
self.print_and_verify("pds", None)
@unittest.skip("Only for local testing")
def test_get_next_action_pwd_with_question(self):
self.agent.init_agent()
if OS_NAME in ("OS/390", "Z/OS"):
self.print_and_verify("What is a pds?", "man readlink")
else:
self.print_and_verify("What is pwd?", "man pwd")
@unittest.skip("Only for local testing")
def test_get_next_action_sudo(self):
self.agent.init_agent()
self.print_and_verify("when to use sudo vs su?", "man su")
@unittest.skip("Only for local testing")
def test_get_next_action_disk(self):
self.agent.init_agent()
question: str = "find out disk usage per user?"
if OS_NAME in ("OS/390", "Z/OS"):
self.print_and_verify(question, "man du")
else:
self.print_and_verify(question, "man df")
@unittest.skip("Only for local testing")
def test_get_next_action_zip(self):
self.agent.init_agent()
question: str = "How to process gz files?"
if OS_NAME in ("OS/390", "Z/OS"):
self.print_and_verify(question, "man dnctl")
else:
self.print_and_verify(question, "man gzip")
@unittest.skip("Only for local testing")
def test_get_next_action_pds(self):
self.agent.init_agent()
question: str = "copy a PDS member?"
if OS_NAME in ("OS/390", "Z/OS"):
self.print_and_verify(question, "man tcsh")
else:
self.print_and_verify(question, "man cmp")
|
dev_nb/nb_004a.py | discdiver/fastai_docs | 3,266 | 11174200 | <gh_stars>1000+
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/004a_discriminative_lr.ipynb
from nb_004 import *
ModuleList = Collection[nn.Module]
ParamList = Collection[nn.Parameter]
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
def requires_grad(l:nn.Module, b:Optional[bool]=None)->Optional[bool]:
"If b is not set requires_grad on all params in l, else return requires_grad of first param"
ps = list(l.parameters())
if not ps: return None
if b is None: return ps[0].requires_grad
for p in ps: p.requires_grad=b
def trainable_params(m:nn.Module)->ParamList:
"Return list of trainable params in `m`"
res = filter(lambda p: p.requires_grad, m.parameters())
return res
def split_bn_bias(layer_groups:ModuleList)->ModuleList:
"Sort each layer in `layer_groups` into batchnorm (`bn_types`) and non-batchnorm groups"
split_groups = []
for l in layer_groups:
l1,l2 = [],[]
for c in l.children():
if isinstance(c, bn_types): l2.append(c)
else: l1.append(c)
split_groups += [nn.Sequential(*l1), nn.Sequential(*l2)]
return split_groups
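# Illustrative example (not from the original notebook export): for
# layer_groups = [nn.Sequential(conv, bn, relu)] this returns
# [nn.Sequential(conv, relu), nn.Sequential(bn)], i.e. one non-bn and one bn
# group per input group, so the optimizer can treat them differently.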
class OptimWrapper():
"Basic wrapper around an optimizer to simplify HP changes"
def __init__(self, opt:optim.Optimizer, wd:Floats=0., true_wd:bool=False, bn_wd:bool=True)->None:
self.opt,self.true_wd,self.bn_wd = opt,true_wd,bn_wd
self.opt_keys = list(self.opt.param_groups[0].keys())
self.opt_keys.remove('params')
self.read_defaults()
self.wd = wd
@classmethod
def create(cls, opt_fn:Union[type,Callable], lr:Union[float,Tuple,List],
layer_groups:ModuleList, **kwargs:Any)->optim.Optimizer:
"Create an optim.Optimizer from `opt_fn` with `lr`. Set lr on `layer_groups``"
split_groups = split_bn_bias(layer_groups)
opt = opt_fn([{'params': trainable_params(l), 'lr':0} for l in split_groups])
opt = cls(opt, **kwargs)
opt.lr = listify(lr, layer_groups)
return opt
def __repr__(self)->str:
return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'
#Pytorch optimizer methods
def step(self)->None:
"Set weight decay and step optimizer"
# weight decay outside of optimizer step (AdamW)
if self.true_wd:
for lr,wd,pg1,pg2 in zip(self._lr,self._wd,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
for p in pg1['params']: p.data.mul_(1 - wd*lr)
if self.bn_wd:
for p in pg2['params']: p.data.mul_(1 - wd*lr)
self.set_val('weight_decay', listify(0, self._wd))
self.opt.step()
def zero_grad(self)->None:
"Clear optimizer gradients"
self.opt.zero_grad()
#Hyperparameters as properties
@property
def lr(self)->float:
"Get learning rate"
return self._lr[-1]
@lr.setter
def lr(self, val:float)->None:
"Set learning rate"
self._lr = self.set_val('lr', listify(val, self._lr))
@property
def mom(self)->float:
"Get momentum"
return self._mom[-1]
@mom.setter
def mom(self, val:float)->None:
"Set momentum"
if 'momentum' in self.opt_keys: self.set_val('momentum', listify(val, self._mom))
elif 'betas' in self.opt_keys: self.set_val('betas', (listify(val, self._mom), self._beta))
self._mom = listify(val, self._mom)
@property
def beta(self)->float:
"get beta"
return None if self._beta is None else self._beta[-1]
@beta.setter
def beta(self, val:float)->None:
"Set beta (or alpha as makes sense for give optimizer)"
if val is None: return
if 'betas' in self.opt_keys: self.set_val('betas', (self._mom, listify(val, self._beta)))
elif 'alpha' in self.opt_keys: self.set_val('alpha', listify(val, self._beta))
self._beta = listify(val, self._beta)
@property
def wd(self)->float:
"Get weight decay"
return self._wd[-1]
@wd.setter
def wd(self, val:float)->None:
"Set weight decay"
if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
self._wd = listify(val, self._wd)
#Helper functions
def read_defaults(self)->None:
"Read the values inside the optimizer for the hyper-parameters"
self._beta = None
if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
if 'betas' in self.opt_keys: self._mom,self._beta = self.read_val('betas')
if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
def set_val(self, key:str, val:Any, bn_groups:bool=True)->Any:
"Set the values inside the optimizer dictionary at the key"
if is_tuple(val): val = [(v1,v2) for v1,v2 in zip(*val)]
for v,pg1,pg2 in zip(val,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
pg1[key] = v
if bn_groups: pg2[key] = v
return val
def read_val(self, key:str) -> Union[List[float],Tuple[List[float],List[float]]]:
"Read a hyper-parameter key in the optimizer dictionary."
val = [pg[key] for pg in self.opt.param_groups[::2]]
if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
return val
def children(m:nn.Module)->ModuleList:
"Get children of module"
return list(m.children())
def num_children(m:nn.Module)->int:
"Get number of child modules in module"
return len(children(m))
def range_children(m:nn.Module)->Iterator[int]:
"Return iterator of len of children of m"
return range(num_children(m))
flatten_model=lambda l: sum(map(flatten_model,l.children()),[]) if num_children(l) else [l]
def first_layer(m:nn.Module)->nn.Module:
"Retrieve first layer in a module"
return flatten_model(m)[0]
def split_model_idx(model:nn.Module, idxs:Collection[int])->ModuleList:
"Split the model according to the indices in [idxs]"
layers = flatten_model(model)
if idxs[0] != 0: idxs = [0] + idxs
if idxs[-1] != len(layers): idxs.append(len(layers))
return [nn.Sequential(*layers[i:j]) for i,j in zip(idxs[:-1],idxs[1:])]
def split_model(model:nn.Module, splits:Collection[ModuleList], want_idxs:bool=False):
"Split the model according to the layers in [splits]"
layers = flatten_model(model)
idxs = [layers.index(first_layer(s)) for s in listify(splits)]
res = split_model_idx(model, idxs)
return (res,idxs) if want_idxs else res
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
def set_bn_eval(m:nn.Module)->None:
"Set bn layers in eval mode for all recursive children of m"
for l in m.children():
if isinstance(l, bn_types) and not next(l.parameters()).requires_grad:
l.eval()
set_bn_eval(l)
@dataclass
class BnFreeze(Callback):
"Set all bntypes layers in `learn` to eval() on_epoch_begin"
learn:Learner
def on_epoch_begin(self, **kwargs:Any)->None:
"Put bn layers in eval mode on epoch_begin"
set_bn_eval(self.learn.model)
def even_mults(start:float, stop:float, n:int)->np.ndarray:
"Build evenly stepped schedule from start to stop in n steps"
mult = stop/start
step = mult**(1/(n-1))
return np.array([start*(step**i) for i in range(n)])
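# Illustrative example (not from the original notebook export): even_mults(1e-4, 1e-2, 3)
# returns array([1e-4, 1e-3, 1e-2]), i.e. geometrically spaced values, one per layer group.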
default_lr = slice(3e-3)
default_wd = 1e-2
SplitFuncOrIdxList = Union[Callable, Collection[ModuleList]]
@dataclass
class Learner():
"Object that wraps together some data, a model, a loss function and an optimizer"
data:DataBunch
model:nn.Module
opt_fn:Callable=AdamW
loss_fn:Callable=F.cross_entropy
metrics:Collection[Callable]=None
true_wd:bool=True
bn_wd:bool=True
wd:Floats=default_wd
train_bn:bool=True
path:str = None
model_dir:str = 'models'
callback_fns:Collection[Callable]=None
callbacks:Collection[Callback]=field(default_factory=list)
layer_groups:Collection[nn.Module]=None
def __post_init__(self)->None:
"Setup path,metrics, callbacks and ensure model directory exists"
self.path = Path(ifnone(self.path, self.data.path))
(self.path/self.model_dir).mkdir(parents=True, exist_ok=True)
self.model = self.model.to(self.data.device)
self.metrics=listify(self.metrics)
if not self.layer_groups: self.layer_groups = [nn.Sequential(*flatten_model(self.model))]
self.callbacks = listify(self.callbacks)
self.callback_fns = [Recorder] + listify(self.callback_fns)
def lr_range(self, lr:Union[float,slice])->np.ndarray:
"Build learning rate schedule"
if not isinstance(lr,slice): return lr
if lr.start: res = even_mults(lr.start, lr.stop, len(self.layer_groups))
else: res = [lr.stop/3]*(len(self.layer_groups)-1) + [lr.stop]
return np.array(res)
def fit(self, epochs:int, lr:Union[Floats,slice]=default_lr,
wd:Floats=None, callbacks:Collection[Callback]=None)->None:
"fit the model on this learner with `lr` learning rate, `wd` weight decay for `epochs` with `callbacks`"
lr = self.lr_range(lr)
if wd is None: wd = self.wd
self.create_opt(lr, wd)
callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
fit(epochs, self.model, self.loss_fn, opt=self.opt, data=self.data, metrics=self.metrics,
callbacks=self.callbacks+callbacks)
def create_opt(self, lr:Floats, wd:Floats=0.)->None:
"create optimizer with `lr` learning rate and `wd` weight decay"
self.opt = OptimWrapper.create(self.opt_fn, lr, self.layer_groups, wd=wd, true_wd=self.true_wd, bn_wd=self.bn_wd)
def split(self, split_on:SplitFuncOrIdxList)->None:
"split the model at `split_on`"
if isinstance(split_on,Callable): self.layer_groups = split_on(self.model)
else: self.layer_groups = split_model(self.model, split_on)
def freeze_to(self, n:int)->None:
"freeze layers up to layer `n`"
for g in self.layer_groups[:n]:
for l in g:
if not self.train_bn or not isinstance(l, bn_types): requires_grad(l, False)
for g in self.layer_groups[n:]: requires_grad(g, True)
def freeze(self)->None:
"freeze up to last layer"
assert(len(self.layer_groups)>1)
self.freeze_to(-1)
def unfreeze(self):
"unfreeze entire model"
self.freeze_to(0)
def __del__(self): del(self.model, self.data)
def save(self, name:PathOrStr):
"save model with `name` to `self.model_dir`"
torch.save(self.model.state_dict(), self.path/self.model_dir/f'{name}.pth')
def load(self, name:PathOrStr):
"load model `name` from `self.model_dir"
self.model.load_state_dict(torch.load(self.path/self.model_dir/f'{name}.pth'))
def fit_one_cycle(learn:Learner, cyc_len:int,
max_lr:Union[Floats,slice]=default_lr, moms:Tuple[float,float]=(0.95,0.85),
div_factor:float=25., pct_start:float=0.3, wd:float=None, **kwargs)->None:
"Fits a model following the 1cycle policy"
max_lr = learn.lr_range(max_lr)
cbs = [OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor,
pct_start=pct_start, **kwargs)]
learn.fit(cyc_len, max_lr, wd=wd, callbacks=cbs)
Learner.fit_one_cycle = fit_one_cycle
Learner.lr_find = lr_find |
tools/ops/script_runner/lib/url_util.py | yetsun/hue | 5,079 | 11174267 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2011-2013 Cloudera, Inc. All rights reserved.
import httplib
import logging
import socket
import sys
import time
import urllib2
from urllib2_kerberos import HTTPKerberosAuthHandler
from M2Crypto import httpslib
from M2Crypto import SSL
from M2Crypto import m2
logging.basicConfig()
LOG = logging.getLogger(__name__)
# urlopen_with_timeout.
#
# The optional secure_http_service_name parameter allows callers to connect to
# secure HTTP servers via the urllib2_kerberos library. We have a modified
# version of the HTTPKerberosAuthHandler code which takes the Kerberos service
# name rather than construct the name using the HTTP request host. We always add
# the HTTPKerberosAuthHandler to urllib2 opener handlers because it has no effect
# if security is not actually enabled.
#
# The optional username and password parameters similarly handle setting up HTTP
# digest authentication. Again, this has no effect if HTTP digest authentication
# is not in use on the connection.
#
# The cafile, capath and max_cert_depth control the SSL certificate verification
# behavior. https://www.openssl.org/docs/ssl/SSL_CTX_load_verify_locations.html
# explains the semantics of the parameters. Passing none for both means that
# no verification of the server certification (including the server's hostname)
# will be performed.
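# Minimal usage sketch (host, port and paths below are illustrative, not taken from callers
# of this module):
#   response = urlopen_with_timeout("https://cm-host.example.com:7183/api/version",
#                                   timeout=30,
#                                   secure_http_service_name="HTTP",
#                                   cafile="/path/to/ca_cert.pem")
#   body = response.read()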
def urlopen_with_timeout(url,
data=None,
timeout=None,
secure_http_service_name=None,
username=None,
password=<PASSWORD>,
cafile=None,
capath=None,
max_cert_depth=9):
openers = []
openers.append(_make_https_handler(cafile,
capath,
max_cert_depth))
openers.append(HTTPKerberosAuthHandler(secure_http_service_name))
full_url = url
if isinstance(url, urllib2.Request):
full_url = url.get_full_url()
openers.append(_make_http_digest_auth_handler(full_url, username, password))
LOG.info("url_util: urlopen_with_timeout: full_url: %s" % full_url)
if sys.version_info < (2, 6):
# The timeout parameter to urlopen was introduced in Python 2.6.
# To workaround it in older versions of python, we copy, with
# minor modification, httplib.HTTPConnection, and hook it all
# up.
openers.append(_make_timeout_handler(timeout))
opener = urllib2.build_opener(*openers)
LOG.info("url_util: urlopen_with_timeout: sys.version_inf < (2, 6): opener: %s" % opener)
return opener.open(url, data)
else:
openers.append(_make_timeout_handler(timeout))
opener = urllib2.build_opener(*openers)
LOG.info("url_util: urlopen_with_timeout: sys.version_inf > (2, 6): opener: %s" % opener)
return opener.open(url, data, timeout)
def head_request_with_timeout(url,
data=None,
timeout=None,
secure_http_service_name=None,
username=None,
password=<PASSWORD>,
cafile=None,
capath=None,
max_cert_depth=9):
class HeadRequest(urllib2.Request):
def get_method(self):
return "HEAD"
if isinstance(url, urllib2.Request):
raise Exception("Unsupported url type: urllib2.Request.")
LOG.info("url_util: head_request_with_timeout: url: %s: timeout: %s" % (url, timeout))
return urlopen_with_timeout(HeadRequest(url),
data,
timeout,
secure_http_service_name,
username,
password,
cafile,
capath,
max_cert_depth)
def _make_timeout_handler(timeout):
# Create these two helper classes fresh each time, since
# timeout needs to be in the closure.
class TimeoutHTTPConnection(httplib.HTTPConnection):
def connect(self):
"""Connect to the host and port specified in __init__."""
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
if timeout is not None:
self.sock.settimeout(timeout)
if self.debuglevel > 0:
LOG.info("connect: (%s, %s)" % (self.host, self.port))
self.sock.connect(sa)
except socket.error, msg:
if self.debuglevel > 0:
LOG.info('connect fail:', (self.host, self.port))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class TimeoutHTTPHandler(urllib2.HTTPHandler):
http_request = urllib2.AbstractHTTPHandler.do_request_
def http_open(self, req):
return self.do_open(TimeoutHTTPConnection, req)
return TimeoutHTTPHandler
def _make_http_digest_auth_handler(url, username, password):
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, # realm
url,
username,
password)
return urllib2.HTTPDigestAuthHandler(password_manager)
def _make_https_handler(cafile=None,
capath=None,
max_cert_depth=9):
class HTTPSConnection(httpslib.HTTPSConnection):
"""
A class that extends the default HTTPSConnection to ensure two things:
1) Enforce tlsv1 protocol for all ssl connection. Some older pythons
(e.g., sles11, probably all versions <= 2.6) attempt SSLv23 handshake
that is rejected by newer web servers. See OPSAPS-32192 for an example.
2) Force validation if cafile/capath is supplied.
"""
def __init__(self, host, port=None, **ssl):
# Specifying sslv23 enables the following ssl versions:
# SSLv3, SSLv23, TLSv1, TLSv1.1, and TLSv1.2. We will explicitly exclude
# SSLv3 and SSLv2 below. This mimics what is done by create_default_context
# on newer python versions (python >= 2.7).
ctx = SSL.Context('sslv23')
# SSL_OP_ALL turns on all workarounds for known bugs. See
# https://www.openssl.org/docs/manmaster/ssl/SSL_CTX_set_options.html for
# a full list of these workarounds. I believe that we don't really need
# any of these workarounds, but, this is default in later pythons and is
# future looking.
ctx.set_options(m2.SSL_OP_ALL | m2.SSL_OP_NO_SSLv2 | m2.SSL_OP_NO_SSLv3)
if cafile is not None or capath is not None:
ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert,
max_cert_depth)
ctx.load_verify_info(cafile=cafile, capath=capath)
self._postConnectionCheck = True
else:
ctx.set_verify(SSL.verify_none, max_cert_depth)
self._postConnectionCheck = False
httpslib.HTTPSConnection.__init__(self, host, port, ssl_context=ctx)
def connect(self):
# This is a bit ugly but we need to override the connect method in order
# to disable hostname verification. This is buried deep inside M2Crypto
# and the only way to disable it is to disable post connection checks on
# the socket itself.
self.sock = SSL.Connection(self.ssl_ctx)
if self.session:
self.sock.set_session(self.session)
if not self._postConnectionCheck:
self.sock.postConnectionCheck = None
self.sock.connect((self.host, self.port))
class HTTPSHandler(urllib2.HTTPSHandler):
def https_open(self, req):
return self.do_open(HTTPSConnection, req)
return HTTPSHandler()
def urlopen_with_retry_on_authentication_errors(function,
retries,
sleeptime):
# See OPSAPS-28469: we retry on 401 errors on the presumption that we
# are hitting a race with the kinit from the kt_renewer.
attempt = 1
while True:
try:
return function()
except urllib2.HTTPError, err:
if err.code == 401 and attempt <= retries:
LOG.exception("Autentication error on attempt %d. Retrying after "
"sleeping %f seconds." % (attempt, sleeptime))
time.sleep(sleeptime)
attempt += 1
else:
raise
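# Usage sketch (illustrative only): wrap the actual request in a callable so it can be retried.
#   opener = lambda: urlopen_with_timeout(url, timeout=30)
#   response = urlopen_with_retry_on_authentication_errors(opener, retries=3, sleeptime=5)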
|
george/solvers/basic.py | rychallener/george | 379 | 11174326 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["BasicSolver"]
import numpy as np
from scipy.linalg import cholesky, cho_solve
class BasicSolver(object):
"""
    This is the most basic solver built using :func:`scipy.linalg.cholesky`.
    Args:
        kernel (george.kernels.Kernel): A subclass of :class:`Kernel` specifying
            the kernel function.
"""
def __init__(self, kernel):
self.kernel = kernel
self._computed = False
self._log_det = None
@property
def computed(self):
"""
A flag indicating whether or not the covariance matrix was computed
and factorized (using the :func:`compute` method).
"""
return self._computed
@computed.setter
def computed(self, v):
self._computed = v
@property
def log_determinant(self):
"""
The log-determinant of the covariance matrix. This will only be
non-``None`` after calling the :func:`compute` method.
"""
return self._log_det
@log_determinant.setter
def log_determinant(self, v):
self._log_det = v
def compute(self, x, yerr):
"""
Compute and factorize the covariance matrix.
Args:
x (ndarray[nsamples, ndim]): The independent coordinates of the
data points.
yerr (ndarray[nsamples] or float): The Gaussian uncertainties on
the data points at coordinates ``x``. These values will be
added in quadrature to the diagonal of the covariance matrix.
"""
# Compute the kernel matrix.
K = self.kernel.get_value(x)
K[np.diag_indices_from(K)] += yerr ** 2
# Factor the matrix and compute the log-determinant.
self._factor = (cholesky(K, overwrite_a=True, lower=False), False)
self.log_determinant = 2 * np.sum(np.log(np.diag(self._factor[0])))
self.computed = True
def apply_inverse(self, y, in_place=False):
r"""
Apply the inverse of the covariance matrix to the input by solving
.. math::
K\,x = y
Args:
            y (ndarray[nsamples] or ndarray[nsamples, nrhs]): The vector or
matrix :math:`y`.
in_place (Optional[bool]): Should the data in ``y`` be overwritten
with the result :math:`x`? (default: ``False``)
"""
return cho_solve(self._factor, y, overwrite_b=in_place)
def dot_solve(self, y):
r"""
Compute the inner product of a vector with the inverse of the
covariance matrix applied to itself:
.. math::
y\,K^{-1}\,y
Args:
y (ndarray[nsamples]): The vector :math:`y`.
"""
return np.dot(y.T, cho_solve(self._factor, y))
def apply_sqrt(self, r):
"""
Apply the Cholesky square root of the covariance matrix to the input
vector or matrix.
Args:
            r (ndarray[nsamples] or ndarray[nsamples, nrhs]): The input vector
or matrix.
"""
return np.dot(r, self._factor[0])
def get_inverse(self):
"""
Get the dense inverse covariance matrix. This is used for computing
gradients, but it is not recommended in general.
"""
return self.apply_inverse(np.eye(len(self._factor[0])), in_place=True)
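# Usage sketch (assumes a george kernel instance, e.g. george.kernels.ExpSquaredKernel):
#   solver = BasicSolver(kernel)
#   solver.compute(x, yerr)            # factorize K(x, x) + diag(yerr ** 2)
#   alpha = solver.apply_inverse(y)    # solves K alpha = y
#   chi2 = solver.dot_solve(y)         # y^T K^{-1} y
#   logdet = solver.log_determinant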
|
rls/algorithms/single/offline/__init__.py | StepNeverStop/RLs | 371 | 11174328 | #!/usr/bin/env python3
# encoding: utf-8
from rls.algorithms.register import register
# logo: font-size: 12, foreground character: 'O', font: 幼圆 (YouYuan)
# http://life.chacuo.net/convertfont2char
register(
name='cql_dqn',
path='single.offline.cql_dqn',
is_multi=False,
class_name='CQL_DQN',
logo="""
OOO OOO OOO
OOOOOO OOOOO OOO OOOOOOOO OOOOO OOOO OOO
OOO OO OO OOO O OO OOO OO OOO OOOO O
OO O OOO OOO O OO O OO OO OOO OOO OOO OOOO O
OOO OOO OOO O OOO OOO OOO OO OO OOO OOO O OO O
OOO OOO OOO O O OO OOO OOO OO OO OOO OOO O OOOO
OO OO OO O OO OO OOO OO OO O OOO
OOOO OO OOOOOOO OOOOOO OOOOOOO OOOOOOO OO OO
OOOOO OOOOO OOOOOOO OOOOO OOO O
OOOO OOOO
"""
)
register(
name='bcq',
path='single.offline.bcq',
is_multi=False,
class_name='BCQ',
logo="""
OOOOOOO OOOOOOO OOOOOO
OO OOOO OOOO OOO OOO OOOO
OO OOO OOOO O OOO OOOO
OO OOO OOO O OOO OOO
OOOOOO OOO OO OOO
OO OOOO OOO OOO OOO
OO OOO OOO OOO OOO
OO OO OOO O OOO OOO
OO OOOO OOOOOOOO OOOOOOOO
OOOOOOOO OOOOO OOOOO
OOOO
OOO
"""
)
|
src/bepasty/bepasty_xstatic.py | Emojigit/bepasty-server | 123 | 11174420 | from xstatic.main import XStatic
# names below must be package names
mod_names = [
'asciinema_player',
'bootbox',
'bootstrap',
'font_awesome',
'jquery',
'jquery_ui',
'jquery_file_upload',
'pygments',
]
pkg = __import__('xstatic.pkg', fromlist=mod_names)
serve_files = {}
for mod_name in mod_names:
mod = getattr(pkg, mod_name)
xs = XStatic(mod, root_url='/static', provider='local', protocol='http')
serve_files[xs.name] = xs.base_dir
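# At this point serve_files maps each XStatic package name to its on-disk base directory,
# e.g. {'jquery': '<site-packages>/xstatic/pkg/jquery/data', ...} (illustrative path); the
# web app can mount each directory under the /static root chosen above.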
|
mountaincar/maxent/train.py | amy12xx/lets-do-irl | 408 | 11174438 | import gym
import pylab
import numpy as np
from maxent import *
n_states = 400 # position - 20, velocity - 20
n_actions = 3
one_feature = 20 # number of bins per state feature
q_table = np.zeros((n_states, n_actions)) # (400, 3)
feature_matrix = np.eye((n_states)) # (400, 400)
gamma = 0.99
q_learning_rate = 0.03
theta_learning_rate = 0.05
np.random.seed(1)
def idx_demo(env, one_feature):
env_low = env.observation_space.low
env_high = env.observation_space.high
env_distance = (env_high - env_low) / one_feature
raw_demo = np.load(file="expert_demo/expert_demo.npy")
demonstrations = np.zeros((len(raw_demo), len(raw_demo[0]), 3))
for x in range(len(raw_demo)):
for y in range(len(raw_demo[0])):
position_idx = int((raw_demo[x][y][0] - env_low[0]) / env_distance[0])
velocity_idx = int((raw_demo[x][y][1] - env_low[1]) / env_distance[1])
state_idx = position_idx + velocity_idx * one_feature
demonstrations[x][y][0] = state_idx
demonstrations[x][y][1] = raw_demo[x][y][2]
return demonstrations
def idx_state(env, state):
env_low = env.observation_space.low
env_high = env.observation_space.high
env_distance = (env_high - env_low) / one_feature
position_idx = int((state[0] - env_low[0]) / env_distance[0])
velocity_idx = int((state[1] - env_low[1]) / env_distance[1])
state_idx = position_idx + velocity_idx * one_feature
return state_idx
def update_q_table(state, action, reward, next_state):
q_1 = q_table[state][action]
q_2 = reward + gamma * max(q_table[next_state])
q_table[state][action] += q_learning_rate * (q_2 - q_1)
def main():
env = gym.make('MountainCar-v0')
demonstrations = idx_demo(env, one_feature)
expert = expert_feature_expectations(feature_matrix, demonstrations)
learner_feature_expectations = np.zeros(n_states)
theta = -(np.random.uniform(size=(n_states,)))
episodes, scores = [], []
for episode in range(30000):
state = env.reset()
score = 0
if (episode != 0 and episode == 10000) or (episode > 10000 and episode % 5000 == 0):
learner = learner_feature_expectations / episode
maxent_irl(expert, learner, theta, theta_learning_rate)
while True:
state_idx = idx_state(env, state)
action = np.argmax(q_table[state_idx])
next_state, reward, done, _ = env.step(action)
irl_reward = get_reward(feature_matrix, theta, n_states, state_idx)
next_state_idx = idx_state(env, next_state)
update_q_table(state_idx, action, irl_reward, next_state_idx)
learner_feature_expectations += feature_matrix[int(state_idx)]
score += reward
state = next_state
if done:
scores.append(score)
episodes.append(episode)
break
if episode % 1000 == 0:
score_avg = np.mean(scores)
print('{} episode score is {:.2f}'.format(episode, score_avg))
pylab.plot(episodes, scores, 'b')
pylab.savefig("./learning_curves/maxent_30000.png")
np.save("./results/maxent_q_table", arr=q_table)
if __name__ == '__main__':
main() |
demo/cookie/ops/hello.py | marco-souza/falsy | 127 | 11174468 | def get_it(name, id):
return {
'get1': name,
'get2': id
}
|
sdk/python/kfp_tekton/_client.py | jppgks/kfp-tekton | 102 | 11174474 | # Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from datetime import datetime
from typing import Mapping, Callable
import kfp
from .compiler import TektonCompiler
class TektonClient(kfp.Client):
"""Tekton API Client for Kubeflow Pipelines."""
def create_run_from_pipeline_func(self,
pipeline_func: Callable,
arguments: Mapping[str, str],
run_name=None,
experiment_name=None,
pipeline_conf: kfp.dsl.PipelineConf = None,
namespace=None):
"""Runs pipeline on Kubernetes cluster with Kubeflow Pipelines Tekton backend.
This command compiles the pipeline function, creates or gets an experiment and
submits the pipeline for execution.
:param pipeline_func: A function that describes a pipeline by calling components
and composing them into execution graph.
:param arguments: Arguments to the pipeline function provided as a dict.
:param run_name: Optional. Name of the run to be shown in the UI.
:param experiment_name: Optional. Name of the experiment to add the run to.
:param pipeline_conf: Optional. Pipeline configuration.
:param namespace: kubernetes namespace where the pipeline runs are created.
For single user deployment, leave it as None;
For multi user, input a namespace where the user is authorized
:return: RunPipelineResult
"""
# TODO: Check arguments against the pipeline function
pipeline_name = pipeline_func.__name__
run_name = run_name or pipeline_name + ' ' + datetime.now().strftime('%Y-%m-%d %H-%M-%S')
try:
(_, pipeline_package_path) = tempfile.mkstemp(suffix='.zip')
TektonCompiler().compile(pipeline_func, pipeline_package_path, pipeline_conf=pipeline_conf)
return self.create_run_from_pipeline_package(pipeline_package_path, arguments,
run_name, experiment_name, namespace)
finally:
os.remove(pipeline_package_path)
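# Usage sketch (host and pipeline function are illustrative, not part of this module):
#   client = TektonClient(host='http://localhost:8888/pipeline')
#   client.create_run_from_pipeline_func(my_pipeline_func, arguments={'param': 'value'})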
|
mmhuman3d/utils/path_utils.py | ykk648/mmhuman3d | 472 | 11174482 | import os
import warnings
from enum import Enum
from pathlib import Path
from typing import List, Union
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
def check_path_suffix(path_str: str,
allowed_suffix: Union[str, List[str]] = '') -> bool:
"""Check whether the suffix of the path is allowed.
Args:
path_str (str):
Path to check.
allowed_suffix (List[str], optional):
What extension names are allowed.
            Offer a list like ['.jpg', '.jpeg'].
            When it's [], any suffix is accepted.
            Use [''] to also allow directories.
Defaults to [].
Returns:
bool:
True: suffix test passed
False: suffix test failed
"""
if isinstance(allowed_suffix, str):
allowed_suffix = [allowed_suffix]
pathinfo = Path(path_str)
suffix = pathinfo.suffix.lower()
if len(allowed_suffix) == 0:
return True
if pathinfo.is_dir():
if '' in allowed_suffix:
return True
else:
return False
else:
for index, tmp_suffix in enumerate(allowed_suffix):
if not tmp_suffix.startswith('.'):
tmp_suffix = '.' + tmp_suffix
allowed_suffix[index] = tmp_suffix.lower()
if suffix in allowed_suffix:
return True
else:
return False
class Existence(Enum):
"""State of file existence."""
FileExist = 0
DirectoryExistEmpty = 1
DirectoryExistNotEmpty = 2
MissingParent = 3
DirectoryNotExist = 4
FileNotExist = 5
def check_path_existence(
path_str: str,
path_type: Literal['file', 'dir', 'auto'] = 'auto',
) -> Existence:
"""Check whether a file or a directory exists at the expected path.
Args:
path_str (str):
Path to check.
        path_type (Literal['file', 'dir', 'auto'], optional):
            What kind of file do we expect at the path.
            Choose among `file`, `dir`, `auto`.
            Defaults to 'auto'.
Raises:
KeyError: if `path_type` conflicts with `path_str`
Returns:
Existence:
0. FileExist: file at path_str exists.
            1. DirectoryExistEmpty: folder at path_str exists and is empty.
2. DirectoryExistNotEmpty: folder at path_str exists and not empty.
3. MissingParent: its parent doesn't exist.
4. DirectoryNotExist: expect a folder at path_str, but not found.
5. FileNotExist: expect a file at path_str, but not found.
"""
path_type = path_type.lower()
assert path_type in {'file', 'dir', 'auto'}
pathinfo = Path(path_str)
if not pathinfo.parent.is_dir():
return Existence.MissingParent
suffix = pathinfo.suffix.lower()
if path_type == 'dir' or\
path_type == 'auto' and suffix == '':
if pathinfo.is_dir():
if len(os.listdir(path_str)) == 0:
return Existence.DirectoryExistEmpty
else:
return Existence.DirectoryExistNotEmpty
else:
return Existence.DirectoryNotExist
elif path_type == 'file' or\
path_type == 'auto' and suffix != '':
if pathinfo.is_file():
return Existence.FileExist
elif pathinfo.is_dir():
if len(os.listdir(path_str)) == 0:
return Existence.DirectoryExistEmpty
else:
return Existence.DirectoryExistNotEmpty
if path_str.endswith('/'):
return Existence.DirectoryNotExist
else:
return Existence.FileNotExist
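# Illustrative behaviour (paths are made up):
#   check_path_existence('/tmp/result.npz', 'file') -> Existence.FileNotExist if absent
#   check_path_existence('/tmp', 'dir')             -> Existence.DirectoryExistNotEmpty (typically)
#   check_path_existence('/no/such/parent/x')       -> Existence.MissingParent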
def prepare_output_path(output_path: str,
allowed_suffix: List[str] = [],
tag: str = 'output file',
path_type: Literal['file', 'dir', 'auto'] = 'auto',
overwrite: bool = True) -> None:
"""Check output folder or file.
Args:
output_path (str): could be folder or file.
allowed_suffix (List[str], optional):
Check the suffix of `output_path`. If folder, should be [] or [''].
            If it could be either a folder or a file, should be [suffixes..., ''].
Defaults to [].
tag (str, optional): The `string` tag to specify the output type.
Defaults to 'output file'.
        path_type (Literal['file', 'dir', 'auto'], optional):
Choose `file` for file and `dir` for folder.
Choose `auto` if allowed to be both.
Defaults to 'auto'.
overwrite (bool, optional):
Whether overwrite the existing file or folder.
Defaults to True.
Raises:
FileNotFoundError: suffix does not match.
FileExistsError: file or folder already exists and `overwrite` is
False.
Returns:
None
"""
if path_type.lower() == 'dir':
allowed_suffix = []
exist_result = check_path_existence(output_path, path_type=path_type)
if exist_result == Existence.MissingParent:
warnings.warn(
f'The parent folder of {tag} does not exist: {output_path},' +
f' will make dir {Path(output_path).parent.absolute().__str__()}')
os.makedirs(
Path(output_path).parent.absolute().__str__(), exist_ok=True)
elif exist_result == Existence.DirectoryNotExist:
os.mkdir(output_path)
print(f'Making directory {output_path} for saving results.')
elif exist_result == Existence.FileNotExist:
suffix_matched = \
check_path_suffix(output_path, allowed_suffix=allowed_suffix)
if not suffix_matched:
raise FileNotFoundError(
f'The {tag} should be {", ".join(allowed_suffix)}: '
f'{output_path}.')
elif exist_result == Existence.FileExist:
if not overwrite:
raise FileExistsError(
f'{output_path} exists (set overwrite = True to overwrite).')
else:
print(f'Overwriting {output_path}.')
elif exist_result == Existence.DirectoryExistEmpty:
pass
elif exist_result == Existence.DirectoryExistNotEmpty:
if not overwrite:
raise FileExistsError(
f'{output_path} is not empty (set overwrite = '
'True to overwrite the files).')
else:
print(f'Overwriting {output_path} and its files.')
else:
raise FileNotFoundError(f'No Existence type for {output_path}.')
def check_input_path(
input_path: str,
allowed_suffix: List[str] = [],
tag: str = 'input file',
path_type: Literal['file', 'dir', 'auto'] = 'auto',
):
"""Check input folder or file.
Args:
input_path (str): input folder or file path.
allowed_suffix (List[str], optional):
Check the suffix of `input_path`. If folder, should be [] or [''].
            If it could be either a folder or a file, should be [suffixes..., ''].
Defaults to [].
        tag (str, optional): The `string` tag to specify the input type.
            Defaults to 'input file'.
        path_type (Literal['file', 'dir', 'auto'], optional):
            Choose `file` for file and `dir` for folder.
Choose `auto` if allowed to be both.
Defaults to 'auto'.
Raises:
FileNotFoundError: file does not exists or suffix does not match.
Returns:
None
"""
if path_type.lower() == 'dir':
allowed_suffix = []
exist_result = check_path_existence(input_path, path_type=path_type)
if exist_result in [
Existence.FileExist, Existence.DirectoryExistEmpty,
Existence.DirectoryExistNotEmpty
]:
suffix_matched = \
check_path_suffix(input_path, allowed_suffix=allowed_suffix)
if not suffix_matched:
raise FileNotFoundError(
f'The {tag} should be {", ".join(allowed_suffix)}:' +
f'{input_path}.')
else:
raise FileNotFoundError(f'The {tag} does not exist: {input_path}.')
|
mpld3/test_plots/test_nan.py | odidev/mpld3 | 1,101 | 11174502 | """Plot to test NaN handling"""
import matplotlib.pyplot as plt
import numpy as np
import mpld3
def create_plot():
fig, ax = plt.subplots()
np.random.seed(0)
numPoints = 10
xx = np.arange(numPoints, dtype=float)
xx[6] = np.nan
yy = np.random.normal(size=numPoints)
yy[3] = np.nan
ax.plot(xx, yy, 'ks-', ms=10, mec='w', mew=3)
ax.set_xlabel('x has uniform spacing')
ax.set_ylabel('y includes a nan')
ax.set_title('NaN test', size=14)
return fig
def test_nan():
fig = create_plot()
html = mpld3.fig_to_html(fig)
plt.close(fig)
if __name__ == "__main__":
mpld3.show(create_plot())
|
ctpn/utils/text_proposal_connector.py | tainenko/keras-ctpn | 118 | 11174515 | # -*- coding: utf-8 -*-
"""
File Name: text_proposal_connector
Description : connect text proposals and build text lines
Author : mick.yi
date: 2019/3/13
"""
import numpy as np
from .text_proposal_graph_builder import TextProposalGraphBuilder
from .np_utils import clip_boxes
class TextProposalConnector:
"""
    Connect text proposals to build text lines.
"""
def __init__(self):
self.graph_builder = TextProposalGraphBuilder()
def group_text_proposals(self, text_proposals, scores, im_size):
"""
        Connect the text proposals and group them into text lines.
        :param text_proposals: text proposals, [n,(y1,x1,y2,x2)]
        :param scores: text proposal scores, [n]
        :param im_size: image size, tuple(H,W,C)
        :return: list of list; list of text lines, each text line is a list of proposal indices
"""
graph = self.graph_builder.build_graph(text_proposals, scores, im_size)
return graph.sub_graphs_connected()
def fit_y(self, X, Y, x1, x2):
"""
        Fit a linear function to X, Y and return its values at x1 and x2.
"""
        assert len(X) != 0
        # if all points share the same x, return y=Y[0]
if np.sum(X == X[0]) == len(X):
return Y[0], Y[0]
p = np.poly1d(np.polyfit(X, Y, 1))
return p(x1), p(x2)
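    # Example with made-up numbers: fit_y([0, 10, 20], [5, 7, 9], -5, 25) fits y = 0.2*x + 5
    # and returns approximately (4.0, 10.0), the extrapolated values at the two x positions.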
def get_text_lines(self, text_proposals, scores, im_size):
"""
        Get the text lines.
        :param text_proposals: text proposals, [n,(y1,x1,y2,x2)]
        :param scores: text proposal scores, [n]
        :param im_size: image size, tuple(H,W,C)
        :return: text lines with quadrilateral boxes and scores, numpy array [m,(x1,y1,x2,y2,x3,y3,x4,y4,score)]
"""
tp_groups = self.group_text_proposals(text_proposals, scores, im_size)
text_lines = np.zeros((len(tp_groups), 9), np.float32)
# print("len(tp_groups):{}".format(len(tp_groups)))
        # process each text line in turn
for index, tp_indices in enumerate(tp_groups):
text_line_boxes = text_proposals[list(tp_indices)]
            # min and max along the width (x) direction
x_min = np.min(text_line_boxes[:, 1])
x_max = np.max(text_line_boxes[:, 3])
            # half the width of a single text proposal
offset = (text_line_boxes[0, 3] - text_line_boxes[0, 1]) * 0.5
            # fit linear functions to get the top and bottom boundaries at the left and right ends
lt_y, rt_y = self.fit_y(text_line_boxes[:, 1], text_line_boxes[:, 0], x_min - offset, x_max + offset)
lb_y, rb_y = self.fit_y(text_line_boxes[:, 1], text_line_boxes[:, 2], x_min - offset, x_max + offset)
            # the text line score is the mean of its proposals' scores
score = scores[list(tp_indices)].sum() / float(len(tp_indices))
            # text line coordinates
text_lines[index, 0] = x_min
text_lines[index, 1] = lt_y
text_lines[index, 2] = x_max
text_lines[index, 3] = rt_y
text_lines[index, 4] = x_max
text_lines[index, 5] = rb_y
text_lines[index, 6] = x_min
text_lines[index, 7] = lb_y
text_lines[index, 8] = score
        # clip to the image boundaries
text_lines = clip_boxes(text_lines, im_size)
return text_lines
|
main.py | logicguy1/The-all-in-one-discord-tool | 105 | 11174601 | LICENCE = """
Copyright © 2021 Drillenissen#4268
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import time
import os
print(LICENCE)
time.sleep(1)
os.system('cls' if os.name == 'nt' else 'clear')
try:
import time
import os
from colored import fg, bg, attr
import modules.massReport as massReport
import modules.credits as credits
import modules.tokenGrabber as grabber
import modules.tokenRape as tokenRape
import modules.historyClear as historyClear
import modules.tokenWebhookChecker as checkers
import modules.webhookSpammer as spammer
import modules.autoBump as bumper
import modules.dankMemer as memer
import modules.serverLookup as serverLookup
except ImportError as ex:
input(f"Module {ex.name} not installed, to install run '{'python' if os.name == 'nt' else 'python3.8'} -m pip install {ex.name}'\nPress enter to exit")
exit()
r = fg(241) # Setup color variables
r2 = fg(255)
b = fg(31)
w = fg(15)
y = fg(3) + attr(1)
d = r2 + attr(21)
class Client:
def __init__(self):
modules = {
"1" : {"function" : tokenRape.rape, "name" : "TokenRape"},
"2" : {"function" : spammer.spammer, "name" : "WebhookSpammer"},
"3" : {"function" : checkers.token, "name" : "TokenChecker"},
"4" : {"function" : checkers.webhook, "name" : "WebhookChecker"},
"5" : {"function" : checkers.webhook_deleter, "name" : "Webhook Deleter"},
"6" : {"function" : historyClear.clear, "name" : "HistoryClear"},
"7" : {"function" : bumper.bumper, "name" : "AutoBump"},
"8" : {"function" : grabber.create_grabber, "name" : "TokenGrabber"},
"9" : {"function" : memer.start, "name" : "Dank memer grinder"},
"10" : {"function" : serverLookup.fetch_data, "name" : "Server Lookup"},
"11" : {"function" : massReport.start, "name" : "Mass Report"},
"12" : {"function" : credits.show_credits, "name" : "Credits"},
"13" : {"function" : exit, "name" : "Exit"}
}
self.modules = modules
def main(self):
os.system('cls' if os.name == 'nt' else 'clear')
print(f""" {r2} █████{b}╗{r2} ███{b}╗{r2} ██{b}╗{r2} ██████{b}╗{r2} ███{b}╗{r2} ██{b}╗{r2}██{b}╗{r2}██{b}╗{r2} ██{b}╗{r2}
██{b}╔══{r2}██{b}╗{r2}████{b}╗ {r2}██{b}║{r2}██{b}╔═══{r2}██{b}╗{r2}████{b}╗ {r2}██{b}║{r2}██{b}║╚{r2}██{b}╗{r2}██{b}╔╝{r2}
███████{b}║{r2}██{b}╔{r2}██{b}╗ {r2}██{b}║{r2}██{b}║ {r2}██{b}║{r2}██{b}╔{r2}██{b}╗ {r2}██{b}║{r2}██{b}║ ╚{r2}███{b}╔╝{r2}
██{b}╔══{r2}██{b}║{r2}██{b}║╚{r2}██{b}╗{r2}██{b}║{r2}██{b}║ {r2}██{b}║{r2}██{b}║╚{r2}██{b}╗{r2}██{b}║{r2}██{b}║ {r2}██{b}╔{r2}██{b}╗{r2}
██{b}║ {r2}██{b}║{r2}██{b}║ ╚{r2}████{b}║╚{r2}██████{b}╔╝{r2}██{b}║ ╚{r2}████{b}║{r2}██{b}║{r2}██{b}╔╝ {r2}██{b}╗{r2}
{b}╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝╚═╝ ╚═╝
    {r2}       *  DISCLAIMER: This script is made for    *
               * educational purposes and the developers  *
               * assume no liability and are not responsible *
* for any misuse or damages caused by the *
* script *
""")
indx = 0
for key, val in self.modules.items():
num = f"{r2}[{b}{key}{r2}]"
print(
f" {num:<6} {val['name']:<{20 if int(key) < 10 else 19}}",
end = "" if indx % 2 == 0 else "\n"
)
indx += 1
if indx % 2 == 1:
print("")
option = input(f"\n {r2}[{b}?{r2}] Option: ")
data = self.modules[option]
try:
data["function"]()
except KeyboardInterrupt:
input(f"\n {r2}[{b}!{r2}] Keyboard interupt")
else:
input(f"\n {r2}[{b}!{r2}] Done! Press enter to continue")
self.main()
if __name__ == '__main__':
client = Client()
client.main()
|
third_party/blink/tools/run_webgpu_cts.py | zealoussnow/chromium | 14,668 | 11174620 | #!/usr/bin/env vpython
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from blinkpy.common import multiprocessing_bootstrap
multiprocessing_bootstrap.run('..', '..', 'webgpu-cts', 'scripts',
'run_webgpu_cts.py')
|
src/blockdiag/noderenderer/__init__.py | flying-foozy/blockdiag | 155 | 11174624 | # -*- coding: utf-8 -*-
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import pkg_resources
renderers = {}
searchpath = []
def init_renderers():
for plugin in pkg_resources.iter_entry_points('blockdiag_noderenderer'):
module = plugin.load()
if hasattr(module, 'setup'):
module.setup(module)
def install_renderer(name, renderer):
renderers[name] = renderer
def set_default_namespace(path):
searchpath[:] = []
for path in path.split(','):
searchpath.append(path)
def get(shape):
if not renderers:
init_renderers()
for path in searchpath:
name = "%s.%s" % (path, shape)
if name in renderers:
return renderers[name]
return renderers.get(shape)
|
test/modulepath.py | mhils/HoneyProxy | 116 | 11174633 | <reponame>mhils/HoneyProxy
import inspect, os
print __file__
print os.path.abspath(__file__)
print os.path.abspath(inspect.getfile(inspect.currentframe()))
print "==="
print inspect.getfile(inspect.currentframe())
print os.path.split(inspect.getfile( inspect.currentframe() ))[0]
print os.path.split(inspect.getfile( inspect.currentframe() ))[0] + "/mitmproxy"
print "==="
print os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0])
print os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0] + "/mitmproxy")
print os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]) + "/mitmproxy"
print "==="
print os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
print os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0] + "/mitmproxy"))
print os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]) + "/mitmproxy") |
hack/lib/images.py | fabianofranz/release | 185 | 11174646 | import json, sys, yaml, os;
base = sys.argv[1]
target_branch = sys.argv[2] if len(sys.argv) > 2 else "master"
for root, dirs, files in os.walk(base):
rel = root[len(base):]
parts = rel.split("/")
repo_prefix = "-".join(parts[:len(parts)]) + "-"
if len(parts) > 1:
org, repo = parts[0], parts[1]
last = parts[len(parts)-1]
for name in files:
filename, ext = os.path.splitext(name)
if ext != ".yaml":
continue
if not filename.startswith(repo_prefix):
continue
branch_modifier = filename[len(repo_prefix):]
parts = branch_modifier.split("_")
if len(parts) > 1:
branch, variant = parts
else:
branch, variant = branch_modifier, ""
if branch != target_branch:
continue
if variant != "":
continue
cfg = yaml.load(open(os.path.join(root, name)))
spec = cfg.get("tag_specification", {})
if spec.get("name", "") != "origin-v4.0":
continue
for image in cfg.get("images", []):
if image.get("optional", False):
continue
print("github.com/%s/%s: name=%s context=%s path=%s" % (org, repo, image["to"], image.get("context_dir", ""), image.get("dockerfile_path", "Dockerfile")))
|
chapter-13-out-of-memory/vcf2sqlite.py | cjvillar/Greenbook | 486 | 11174647 | import sys
import gzip
from collections import OrderedDict
import sqlite3
import pdb
pops = "EAS SAS AFR EUR AMR".split()
pop_freqs = [p + "_AF" for p in pops]
cols = "CHROM POS RSID REF ALT QUAL FILTER INFO FORMAT".lower().split()
db_filename = sys.argv[1]
db_tablename = sys.argv[2]
vcf_filename = sys.argv[3]
reader = gzip.open if vcf_filename.endswith('.gz') else open
TBL_COLS = ["chrom", "pos", "rsid", "ref", "alt"] + [s.lower() for s in pop_freqs]
TBL_TYPES = ["text", "integer", "text", "text", "text"] + ["text"] * len(pop_freqs)
TBL = OrderedDict(zip(TBL_COLS, TBL_TYPES))
TBL_TYPESTR = ",\n".join(["%s %s" % (k, v) for k, v in TBL.items()])
conn = sqlite3.connect(db_filename)
conn.text_factory = str
c = conn.cursor()
TBL_SCHEMA = "CREATE TABLE %s(\nid integer PRIMARY KEY NOT NULL,\n%s)" % (db_tablename, TBL_TYPESTR)
c.execute(TBL_SCHEMA)
for line in reader(vcf_filename):
if line.startswith("#"):
continue
fields = line.strip().split("\t")
fields = dict(zip(cols, fields[:len(cols)]))
if not fields['rsid'].startswith('rs'):
continue
# parse INFO block, extract pop freqs
info_chunks = [x.partition('=') for x in fields['info'].split(';')]
info = dict([(k, v) for k, _, v in info_chunks])
for pop_freq in pop_freqs:
fields[pop_freq.lower()] = info[pop_freq]
# insert into table
placeholders = ["?"] * len(TBL_COLS)
query = "INSERT INTO %s (%s) VALUES (%s);" % (db_tablename, ", ".join(TBL_COLS), ", ".join(placeholders))
c.execute(query, [fields[k] for k in TBL_COLS])
conn.commit() # commit these inserts
c = conn.cursor()
|
protobuf_inspector/__main__.py | jmendeth/protobuf-parser | 355 | 11174654 | from sys import stdin, argv
from os.path import ismount, exists, join
from runpy import run_path
from .types import StandardParser
def main():
# Parse arguments
root_type = "root"
if len(argv) >= 2: root_type = argv[1]
# Load the config
config = {}
directory = "."
while not ismount(directory):
filename = join(directory, "protobuf_config.py")
if exists(filename):
config = run_path(filename)
break
directory = join(directory, "..")
# Create and initialize parser with config
parser = StandardParser()
if "types" in config:
for type, value in config["types"].items():
assert(type not in parser.types)
parser.types[type] = value
if "native_types" in config:
for type, value in config["native_types"].items():
parser.native_types[type] = value
# Make sure root type is defined and not compactable
if root_type not in parser.types: parser.types[root_type] = {}
parser.types[root_type]["compact"] = False
# PARSE!
print(parser.safe_call(parser.match_handler("message"), stdin.buffer, root_type) + "\n")
return 1 if len(parser.errors_produced) else 0
if __name__ == "__main__":
exit(main())
|
data_structures/binary_indexed_tree/Python/FenwickTree.py | avi-pal/al-go-rithms | 1,253 | 11174753 | # Binary indexed tree or fenwick tree
# Space Complexity: O(N) for declaring another array of N=size num_of_elements
# Time Complexity: O(logN) for each operation(update and query as well)
# original array for storing values for later lookup
# Part of Cosmos by OpenGenus Foundation
array=[]
# array to store cumulative sum
bit=[]
'''
index i in the bit[] array stores the cumulative sum from the index i to i - (1<<r) + 1 (both inclusive),
where r represents the last set bit in the index i
'''
class FenwickTree:
# To intialize list of num_of_elements+1 size
def initialize(self,num_of_elements):
for i in range(num_of_elements+1):
array.append(0)
bit.append(0)
def update(self,x,delta):
while x<=num_of_elements:
bit[x]=bit[x]+delta
# x&(-x) gives the last set bit in a number x
x=x+(x&-x)
def query(self,x):
range_sum=0
while x>0:
range_sum=range_sum+bit[x]
# x&(-x) gives the last set bit in a number x
x=x-(x&-x)
return range_sum
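# Quick reference with hypothetical updates: after update(1, 5) and update(3, 2),
# query(3) returns 7 (prefix sum of positions 1..3) and query(3) - query(1) returns 2
# (range sum of positions 2..3).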
fenwick_tree=FenwickTree()
num_of_elements=int(input("Enter the size of list: "))
fenwick_tree.initialize(num_of_elements)
for i in range(num_of_elements):
    # store the value in the original list for later lookup
    element=int(input("Enter the list element: "))
    array[i+1]=element
# updating the BIT array
fenwick_tree.update(i+1,element)
number_of_queries=int(input("Enter number of queries: "))
for i in range(number_of_queries):
left_index=int(input("Enter left index (1 indexing): "))
right_index=int(input("Enter right index (1 indexing): "))
if right_index < left_index:
print("Invalid range ")
continue
print("Sum in range[%d,%d]: "%(left_index,right_index))
print(fenwick_tree.query(right_index)-fenwick_tree.query(left_index-1))
|
tests/test_db/test_backends/test_exceptions.py | Jyrno42/django-test-migrations | 294 | 11174774 | from django_test_migrations.db.backends import exceptions
def test_database_configuration_not_found():
"""Ensure exception returns proper string representation."""
vendor = 'ms_sql'
exception = exceptions.DatabaseConfigurationNotFound(vendor)
assert vendor in str(exception)
def test_database_configuration_setting_not_found():
"""Ensure exception returns proper string representation."""
vendor = 'ms_sql'
setting_name = 'fake_setting'
exception = exceptions.DatabaseConfigurationSettingNotFound(
vendor,
setting_name,
)
assert vendor in str(exception)
assert setting_name in str(exception)
|
pybrain/rl/agents/optimization.py | sveilleux1/pybrain | 2,208 | 11174778 | __author__ = '<NAME>, <EMAIL>'
from pybrain.rl.agents.agent import Agent
class OptimizationAgent(Agent):
""" A simple wrapper to allow optimizers to conform to the RL interface.
Works only in conjunction with EpisodicExperiment.
"""
def __init__(self, module, learner):
self.module = module
self.learner = learner
|
stylegan_runner.py | markriedl/dragnet | 171 | 11174789 | import os
import pdb
import sys
import pickle
import random
import math
import argparse
import numpy as np
from PIL import Image
from tqdm import tqdm_notebook as tqdm
def easygen_train(model_path, images_path, dataset_path, start_kimg=7000, max_kimg=25000, schedule='', seed=1000):
#import stylegan
#from stylegan import config
##from stylegan import dnnlib
#from stylegan.dnnlib import EasyDict
#images_dir = '/content/raw'
#max_kimg = 25000
#start_kimg = 7000
#schedule = ''
#model_in = '/content/karras2019stylegan-cats-256x256.pkl'
#dataset_dir = '/content/stylegan_dataset' #os.path.join(cwd, 'cache', 'stylegan_dataset')
import config
config.data_dir = '/content/datasets'
    config.result_dir = '/content/results'
    config.cache_dir = '/content/cache'
    run_dir_ignore = ['/content/results', '/content/datasets', '/content/cache']
import copy
import dnnlib
from dnnlib import EasyDict
from metrics import metric_base
# Prep dataset
import dataset_tool
print("prepping dataset...")
dataset_tool.create_from_images(tfrecord_dir=dataset_path, image_dir=images_path, shuffle=False)
# Set up training parameters
desc = 'sgan' # Description string included in result subdir name.
train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop.
G = EasyDict(func_name='training.networks_stylegan.G_style') # Options for generator network.
D = EasyDict(func_name='training.networks_stylegan.D_basic') # Options for discriminator network.
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer.
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer.
G_loss = EasyDict(func_name='training.loss.G_logistic_nonsaturating') # Options for generator loss.
D_loss = EasyDict(func_name='training.loss.D_logistic_simplegp', r1_gamma=10.0) # Options for discriminator loss.
dataset = EasyDict() # Options for load_dataset().
sched = EasyDict() # Options for TrainingSchedule.
grid = EasyDict(size='1080p', layout='random') # Options for setup_snapshot_image_grid().
#metrics = [metric_base.fid50k] # Options for MetricGroup.
submit_config = dnnlib.SubmitConfig() # Options for dnnlib.submit_run().
tf_config = {'rnd.np_random_seed': seed} # Options for tflib.init_tf().
# Dataset
desc += '-custom'
dataset = EasyDict(tfrecord_dir=dataset_path)
train.mirror_augment = False
# Number of GPUs.
desc += '-1gpu'
submit_config.num_gpus = 1
sched.minibatch_base = 4
sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4} #{4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 16}
# Default options.
train.total_kimg = max_kimg
sched.lod_initial_resolution = 8
sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
sched.D_lrate_dict = EasyDict(sched.G_lrate_dict)
# schedule
schedule_dict = {4: 160, 8:140, 16:120, 32:100, 64:80, 128:60, 256:40, 512:30, 1024:20} #{4: 2, 8:2, 16:2, 32:2, 64:2, 128:2, 256:2, 512:2, 1024:2} # Runs faster for small datasets
if len(schedule) >=5 and schedule[0] == '{' and schedule[-1] == '}' and ':' in schedule:
# is schedule a string of a dict?
try:
temp = eval(schedule)
schedule_dict = dict(temp)
# assert: it is a dict
except:
pass
elif len(schedule) > 0:
# is schedule an int?
try:
schedule_int = int(schedule)
#assert: schedule is an int
schedule_dict = {}
for i in range(1, 10):
schedule_dict[int(math.pow(2, i+1))] = schedule_int
except:
pass
print('schedule:', str(schedule_dict))
sched.tick_kimg_dict = schedule_dict
# resume kimg
resume_kimg = start_kimg
# path to model
resume_run_id = model_path
# tick snapshots
image_snapshot_ticks = 1
network_snapshot_ticks = 1
# Submit run
kwargs = EasyDict(train)
kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
kwargs.update(dataset_args=dataset, sched_args=sched, grid_args=grid, tf_config=tf_config)
kwargs.update(resume_kimg=resume_kimg, resume_run_id=resume_run_id)
kwargs.update(image_snapshot_ticks=image_snapshot_ticks, network_snapshot_ticks=network_snapshot_ticks)
kwargs.submit_config = copy.deepcopy(submit_config)
kwargs.submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir)
kwargs.submit_config.run_dir_ignore += config.run_dir_ignore
kwargs.submit_config.run_desc = desc
dnnlib.submit_run(**kwargs)
def easygen_run(model_path, images_path, num=1):
# from https://github.com/ak9250/stylegan-art/blob/master/styleganportraits.ipynb
truncation = 0.7 # hard coding because everyone uses this value
import dnnlib
import dnnlib.tflib as tflib
import config
tflib.init_tf()
#num = 10
#model = '/content/karras2019stylegan-cats-256x256.pkl'
#images_dir = '/content/cache/run_out'
#truncation = 0.7
_G = None
_D = None
Gs = None
with open(model_path, 'rb') as f:
_G, _D, Gs = pickle.load(f)
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=8)
latents = np.random.RandomState(int(1000*random.random())).randn(num, *Gs.input_shapes[0][1:])
labels = np.zeros([latents.shape[0]] + Gs.input_shapes[1][1:])
images = Gs.run(latents, None, truncation_psi=truncation, randomize_noise=False, output_transform=fmt)
for n, image in enumerate(images):
# img = Image.fromarray(images[0])
img = Image.fromarray(image)
img.save(os.path.join(images_path, str(n) + '.jpg'), "JPEG")
def get_latent_interpolation(endpoints, num_frames_per, mode = 'linear', shuffle = False):
if shuffle:
random.shuffle(endpoints)
num_endpoints, dim = len(endpoints), len(endpoints[0])
num_frames = num_frames_per * num_endpoints
endpoints = np.array(endpoints)
latents = np.zeros((num_frames, dim))
for e in range(num_endpoints):
e1, e2 = e, (e+1)%num_endpoints
for t in range(num_frames_per):
frame = e * num_frames_per + t
r = 0.5 - 0.5 * np.cos(np.pi*t/(num_frames_per-1)) if mode == 'ease' else float(t) / num_frames_per
latents[frame, :] = (1.0-r) * endpoints[e1,:] + r * endpoints[e2,:]
return latents
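# Sketch of what this produces (shapes are illustrative): with 4 endpoint latents of
# dimension 512 and num_frames_per=30, the result is a (120, 512) array that walks
# endpoint 0 -> 1 -> 2 -> 3 -> 0; mode='ease' uses cosine easing instead of linear steps.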
def easygen_movie(model_path, movie_path, num=10, interp=10, duration=10):
# from https://github.com/ak9250/stylegan-art/blob/master/styleganportraits.ipynb
import dnnlib
import dnnlib.tflib as tflib
import config
tflib.init_tf()
truncation = 0.7 # what everyone uses
# Get model
_G = None
_D = None
Gs = None
with open(model_path, 'rb') as f:
_G, _D, Gs = pickle.load(f)
# Make waypoints
#fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
#synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=8)
waypoint_latents = np.random.RandomState(int(1000*random.random())).randn(num, *Gs.input_shapes[0][1:])
#waypoint_labels = np.zeros([waypoint_latents.shape[0]] + Gs.input_shapes[1][1:])
#waypoint_images = Gs.run(latents, None, truncation_psi=truncation, randomize_noise=False, output_transform=fmt)
# interpolate
interp_latents = get_latent_interpolation(waypoint_latents, interp)
interp_labels = np.zeros([interp_latents.shape[0]] + Gs.input_shapes[1][1:])
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True), minibatch_size=8)
batch_size = 8
num_frames = interp_latents.shape[0]
num_batches = int(np.ceil(num_frames/batch_size))
images = []
for b in tqdm(range(num_batches)):
new_images = Gs.run(interp_latents[b*batch_size:min((b+1)*batch_size, num_frames-1), :], None, truncation_psi=truncation, randomize_noise=False, output_transform=fmt)
for img in new_images:
images.append(Image.fromarray(img)) # convert to PIL.Image
images[0].save(movie_path, "GIF",
save_all=True,
append_images=images[1:],
duration=duration,
loop=0)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process runner commands.')
parser.add_argument('--train', action="store_true", default=False)
parser.add_argument('--run', action="store_true", default=False)
parser.add_argument('--movie', action="store_true", default=False)
parser.add_argument("--model", help="model to load", default="")
parser.add_argument("--images_in", help="directory containing training images", default="")
parser.add_argument("--images_out", help="diretory to store generated images", default="")
parser.add_argument("--movie_out", help="directory to save movie", default="")
parser.add_argument("--dataset_temp", help="where to store prepared image data", default="")
parser.add_argument("--schedule", help="training schedule", default="")
parser.add_argument("--max_kimg", help="iteration to stop training at", type=int, default=25000)
parser.add_argument("--start_kimg", help="iteration to start training at", type=int, default=7000)
parser.add_argument("--num", help="number of images to generate", type=int, default=1)
parser.add_argument("--interp", help="number of images to interpolate", type=int, default=10)
parser.add_argument("--duration", help="how long for each image in movie", type=int, default=10)
parser.add_argument("--seed", help="seed number", type=int, default=1000)
args = parser.parse_args()
if args.train:
easygen_train(model_path=args.model,
images_path=args.images_in,
dataset_path=args.dataset_temp,
start_kimg=args.start_kimg,
max_kimg=args.max_kimg,
schedule=args.schedule,
seed=args.seed)
elif args.run:
easygen_run(model_path=args.model,
images_path=args.images_out,
num=args.num)
elif args.movie:
easygen_movie(model_path=args.model,
movie_path=args.movie_out,
num=args.num,
interp=args.interp,
duration=args.duration)
|
qa/rpc-tests/electrum_shutdownonerror.py | MONIMAKER365/BitcoinUnlimited | 535 | 11174797 | #!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Unlimited developers
"""
Tests for shutting down Bitcoin Unlimited on electrum server failure
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import waitFor, is_bitcoind_running
import os
import random
import subprocess
# Create a program that exits after 10 seconds.
def create_exiting_program():
import tempfile
tmpfh = tempfile.NamedTemporaryFile(suffix = '.c', mode="w", delete=False)
tmpfh.write("#include <unistd.h>\n")
tmpfh.write("int main(int argc, char** argv) { sleep(10); return 0; }\n")
tmpfh.close()
path_in = tmpfh.name
path_out = tmpfh.name + ".out"
try:
subprocess.check_call(["gcc", "-o", path_out, path_in])
finally:
os.unlink(path_in)
return path_out
class ElectrumShutdownTests(BitcoinTestFramework):
skip = False
dummy_electrum_path = None
def __init__(self):
super().__init__()
try:
self.dummy_electrum_path = create_exiting_program()
except Exception as e:
print("SKIPPING TEST - failed to create dummy electrum program: " + str(e))
self.skip = True
self.setup_clean_chain = True
self.num_nodes = 2
if not self.dummy_electrum_path:
return
common_args = ["-electrum=1", "-electrum.exec=%s" % self.dummy_electrum_path]
self.extra_args = [
common_args,
common_args + ["-electrum.shutdownonerror=1"]]
def run_test(self):
if self.skip:
return
n = self.nodes[0]
# bitcoind #1 should shutdown when "electrs" does
waitFor(30, lambda: not is_bitcoind_running(1))
# bitcoind #0 should not have exited, even though "electrs" has
assert(is_bitcoind_running(0))
# del so the test framework doesn't try to stop the stopped node
del self.nodes[1]
if self.dummy_electrum_path:
os.unlink(self.dummy_electrum_path)
def setup_network(self, dummy = None):
self.nodes = self.setup_nodes()
if __name__ == '__main__':
ElectrumShutdownTests().main()
|
find_cube_root.py | nicetone/Python | 28,321 | 11174798 |
# This method is called exhaustive numeration!
# I am checking every possible value
# that can be root of given x systematically
# Kinda brute forcing
def cubeRoot():
x = int(input("Enter an integer: "))
for ans in range(0, abs(x) + 1):
if ans ** 3 == abs(x):
break
if ans ** 3 != abs(x):
print(x, 'is not a perfect cube!')
else:
if x < 0:
ans = -ans
print('Cube root of ' + str(x) + ' is ' + str(ans))
cubeRoot()
cont = str(input("Would you like to continue: "))
while cont == "yes":
cubeRoot()
cont = str(input("Would you like to continue: "))
if cont == "no":
exit()
else:
print("Enter a correct answer(yes or no)")
cont = str(input("Would you like to continue: "))
|
tools/deep_memory_profiler/visualizer/app_unittest.py | kjthegod/chromium | 231 | 11174801 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is expected to be used under another directory to use,
# so we disable checking import path of GAE tools from this directory.
# pylint: disable=F0401,E0611
import json
import unittest
from google.appengine.api import files
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from google.appengine.ext.blobstore import BlobInfo
import services
class ServicesTest(unittest.TestCase):
@staticmethod
def CreateBlob(path):
# Initialize blob dictionary to return.
blob = {}
# Read sample file.
blob['json_str'] = open(path, 'r').read()
# Create file in blobstore according to sample file.
file_name = files.blobstore.create(mime_type='text/plain')
with files.open(file_name, 'a') as f:
f.write(blob['json_str'])
files.finalize(file_name)
# Get BlobInfo of sample file.
blob['blob_info'] = BlobInfo.get(files.blobstore.get_blob_key(file_name))
return blob
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_all_stubs()
# Read sample file.
self.correct_blob = ServicesTest.CreateBlob('testdata/sample.json')
self.error_blob = ServicesTest.CreateBlob('testdata/error_sample.json')
def tearDown(self):
self.testbed.deactivate()
def testProfiler(self):
correct_blob = self.correct_blob
# Call services function to create Profiler entity.
run_id = services.CreateProfiler(correct_blob['blob_info'])
# Test GetProfiler
self.assertEqual(services.GetProfiler(run_id), correct_blob['json_str'])
# Create Profiler entity with the same file again and check uniqueness.
services.CreateProfiler(correct_blob['blob_info'])
self.assertEqual(services.Profiler.query().count(), 1)
def testTemplate(self):
correct_blob = self.correct_blob
# Call services function to create template entities.
services.CreateTemplates(correct_blob['blob_info'])
# Test templates being stored in database correctly.
json_obj = json.loads(correct_blob['json_str'])
for content in json_obj['templates'].values():
template_entity = ndb.Key('Template', json.dumps(content)).get()
self.assertEqual(template_entity.content, content)
# Create template entities with the same file again and check uniqueness.
services.CreateTemplates(correct_blob['blob_info'])
self.assertEqual(services.Template.query().count(), 2)
def testErrorBlob(self):
error_blob = self.error_blob
# Test None when default template not indicated or found in templates.
dflt_tmpl = services.CreateTemplates(error_blob['blob_info'])
self.assertIsNone(dflt_tmpl)
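# Editor's addition (assumption): the original file relies on an external GAE
# test runner; a standard unittest entry point is sketched here for convenience.
if __name__ == '__main__':
  unittest.main()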
|
modules/dbnd-airflow-monitor/test_dbnd_airflow_monitor/test_integration/conftest.py | busunkim96/dbnd | 224 | 11174806 | # conftest.py
import pytest
try:
from dbnd_web.utils.testing.utils import WebAppTest
pytest_plugins = [
"dbnd.testing.pytest_dbnd_plugin",
"dbnd.testing.pytest_dbnd_markers_plugin",
"dbnd.testing.pytest_dbnd_home_plugin",
"dbnd_web.utils.testing.pytest_web_plugin",
]
except ModuleNotFoundError:
pytest_plugins = []
    class WebAppTest(object):
        pass
    @pytest.fixture(autouse=True, scope="module")
    def check_dbnd_web():
        pytest.skip("skipped due to missing dbnd_web")
|
sty/lib.py | technikian/sty | 170 | 11174893 |
from .primitive import Register
def mute(*objects: Register) -> None:
"""
Use this function to mute multiple register-objects at once.
:param objects: Pass multiple register-objects to the function.
"""
err = ValueError(
"The mute() method can only be used with objects that inherit "
"from the 'Register class'."
)
for obj in objects:
if not isinstance(obj, Register):
raise err
obj.mute()
def unmute(*objects: Register) -> None:
"""
Use this function to unmute multiple register-objects at once.
:param objects: Pass multiple register-objects to the function.
"""
err = ValueError(
"The unmute() method can only be used with objects that inherit "
"from the 'Register class'."
)
for obj in objects:
if not isinstance(obj, Register):
raise err
obj.unmute()
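# Illustrative usage (editor's sketch, not part of the original module).
# Assuming a package-level register such as `fg` that inherits from Register:
#
#     from sty import fg
#     mute(fg)                         # fg.red, fg.rs, ... now render as ''
#     print(fg.red + "plain text" + fg.rs)
#     unmute(fg)                       # styling restored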
|
script/common.py | Henny20/skija | 2,466 | 11174904 |
#! /usr/bin/env python3
import argparse, contextlib, os, pathlib, platform, re, shutil, subprocess, sys, time, urllib.request, zipfile
arch = {'AMD64': 'x64', 'x86_64': 'x64', 'arm64': 'arm64'}[platform.machine()]
parser = argparse.ArgumentParser()
parser.add_argument('--arch', default=arch)
(args, _) = parser.parse_known_args()
arch = args.arch
system = {'Darwin': 'macos', 'Linux': 'linux', 'Windows': 'windows'}[platform.system()]
classpath_separator = ';' if system == 'windows' else ':'
mvn = "mvn.cmd" if system == "windows" else "mvn"
space_skija = 'https://packages.jetbrains.team/maven/p/skija/maven'
classifier = ('macos-' + arch if system == 'macos' else system)
module = 'org.jetbrains.skija.' + ('macos.' + arch if system == 'macos' else system)
verbose = '--verbose' in sys.argv
root = os.path.abspath(os.path.dirname(__file__) + '/..')
def check_call(args, **kwargs):
t0 = time.time()
res = subprocess.check_call(args, **kwargs)
if verbose:
print('[', round((time.time() - t0) * 1000), 'ms', ']', ' '.join(args))
return res
def check_output(args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
return check_call(args, **kwargs).stdout
def fetch(url, file):
if not os.path.exists(file):
print('Downloading', url)
if os.path.dirname(file):
os.makedirs(os.path.dirname(file), exist_ok = True)
# if url.startswith('https://packages.jetbrains.team/'):
# check_call(["curl", "--fail", "--location", '--show-error', url, '--output', file])
# else:
with open(file, 'wb') as f:
f.write(urllib.request.urlopen(url).read())
def fetch_maven(group, name, version, classifier=None, repo='https://repo1.maven.org/maven2'):
path = '/'.join([group.replace('.', '/'), name, version, name + '-' + version + ('-' + classifier if classifier else '') + '.jar'])
file = os.path.join(os.path.expanduser('~'), '.m2', 'repository', path)
fetch(repo + '/' + path, file)
return file
def deps():
return [
fetch_maven('org.projectlombok', 'lombok', '1.18.20'),
fetch_maven('org.jetbrains', 'annotations', '20.1.0'),
]
def javac(sources, target, classpath = [], modulepath = [], add_modules = [], release = '11', opts = []):
classes = {path.stem: path.stat().st_mtime for path in pathlib.Path(target).rglob('*.class') if '$' not in path.stem}
newer = lambda path: path.stem not in classes or path.stat().st_mtime > classes.get(path.stem)
new_sources = [path for path in sources if newer(pathlib.Path(path))]
if new_sources:
print('Compiling', len(new_sources), 'java files to', target)
check_call([
'javac',
'-encoding', 'UTF8',
'--release', release] + opts + [
# '-J--illegal-access=permit',
# '-Xlint:deprecation',
# '-Xlint:unchecked',
'--class-path', classpath_separator.join(classpath + [target])] +
(['--module-path', classpath_separator.join(modulepath)] if modulepath else []) +
(['--add-modules', ','.join(add_modules)] if add_modules else []) +
['-d', target] + new_sources)
def glob(dir, pattern):
return [str(x) for x in pathlib.Path(dir).rglob(pattern)]
@contextlib.contextmanager
def replaced(filename, replacements):
with open(filename, 'r') as f:
original = f.read()
try:
updated = original
for key, value in replacements.items():
updated = updated.replace(key, value)
with open(filename, 'w') as f:
f.write(updated)
yield f
finally:
with open(filename, 'w') as f:
f.write(original)
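# Editor's sketch of how `replaced` is meant to be used (illustrative only;
# the file name and placeholder below are assumptions):
#
#     with replaced('deploy/pom.xml', {'${version}': '0.0.0-SNAPSHOT'}):
#         check_call([mvn, 'package'])   # runs against the patched file
#     # on exit, the original file contents are restored even on failure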
def copy_newer(src, dst):
if not os.path.exists(dst) or os.path.getmtime(src) > os.path.getmtime(dst):
if os.path.exists(dst):
os.remove(dst)
shutil.copy2(src, dst) |
py/rest_tests/test_html.py | ahmedengu/h2o-3 | 6,098 | 11174920 |
import requests
import h2o
import h2o_test_utils
def test(a_node, pp):
####################################
# test HTML pages GET
url_prefix = 'http://' + a_node.http_addr + ':' + str(a_node.port)
urls = {
'': 'Analytics',
'/': 'Analytics',
'/index.html': 'Analytics',
'/flow/index.html': 'modal',
'/LATEST/Cloud.html': 'Ready',
}
for (suffix, expected_word) in urls.iteritems():
url = url_prefix + suffix
h2o.H2O.verboseprint('Testing ' + url + '. . .')
r = requests.get(url)
        assert r.text.find(expected_word) != -1, "FAIL: didn't find '" + expected_word + "' in: " + url
|
tools/perf/benchmarks/power_mobile.py | zealoussnow/chromium | 14,668 | 11174959 | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from core import platforms
import page_sets
from page_sets.system_health import story_tags
from telemetry import benchmark
from telemetry import story
from telemetry.web_perf import timeline_based_measurement
@benchmark.Info(
emails=['<EMAIL>'],
documentation_url='https://goto.google.com/power-mobile-benchmark')
class PowerMobile(perf_benchmark.PerfBenchmark):
"""A benchmark for power measurements using on-device power monitor (ODPM).
"""
SUPPORTED_PLATFORMS = [story.expectations.ALL_ANDROID]
SUPPORTED_PLATFORM_TAGS = [platforms.ANDROID]
def CreateStorySet(self, options):
return page_sets.SystemHealthStorySet(platform='mobile',
tag=story_tags.INFINITE_SCROLL)
def CreateCoreTimelineBasedMeasurementOptions(self):
options = timeline_based_measurement.Options()
options.config.enable_experimental_system_tracing = True
options.config.system_trace_config.EnableChrome(
chrome_trace_config=options.config.chrome_trace_config)
options.config.system_trace_config.EnablePower()
options.config.system_trace_config.EnableFtraceCpu()
options.config.system_trace_config.EnableFtraceSched()
options.SetTimelineBasedMetrics(
['tbmv3:power_rails_metric', 'tbmv3:power_cpu_estimate'])
return options
@classmethod
def Name(cls):
return 'power.mobile'
|
oslo/torch/nn/parallel/tensor_parallel/_parallel_1d/_ops.py | lipovsek/oslo | 249 | 11174960 |
import torch
from oslo.torch.distributed import ParallelMode
from oslo.torch.distributed.nn.functional import all_gather, all_reduce, scatter
class _Broadcast1D(torch.autograd.Function):
def forward(ctx, inputs, parallel_context):
ctx.parallel_context = parallel_context
return inputs
def backward(ctx, grad):
parallel_context = ctx.parallel_context
return (
all_reduce(
grad,
parallel_mode=ParallelMode.TENSOR_1D,
parallel_context=parallel_context,
on_cpu=str(grad.device) == "cpu",
async_op=False,
),
None,
)
class _AllReduce1D(torch.autograd.Function):
def forward(ctx, inputs, parallel_context):
return all_reduce(
inputs,
parallel_mode=ParallelMode.TENSOR_1D,
parallel_context=parallel_context,
on_cpu=str(inputs.device) == "cpu",
async_op=False,
)
def backward(ctx, grad):
return grad, None
class _AllGather1D(torch.autograd.Function):
def forward(ctx, inputs, parallel_context):
ctx.parallel_context = parallel_context
return all_gather(
inputs,
dim=-1,
parallel_mode=ParallelMode.TENSOR_1D,
parallel_context=parallel_context,
on_cpu=str(inputs.device) == "cpu",
async_op=False,
)
def backward(ctx, grad):
parallel_context = ctx.parallel_context
return scatter(grad, parallel_context), None
def broadcast_1d(inputs, parallel_context):
return _Broadcast1D.apply(inputs, parallel_context)
def all_reduce_1d(inputs, parallel_context):
return _AllReduce1D.apply(inputs, parallel_context)
def all_gather_1d(inputs, parallel_context):
return _AllGather1D.apply(inputs, parallel_context)
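# Editor's note (illustrative, based on the definitions above): the ops form
# conjugate pairs -- broadcast_1d is an identity in forward and an all-reduce
# in backward, while all_reduce_1d reduces in forward and passes gradients
# through untouched. A column-parallel linear layer would therefore broadcast
# its input and all-gather its sharded output, e.g. (names are assumptions):
#
#     x = broadcast_1d(x, parallel_context)
#     y = torch.nn.functional.linear(x, weight_shard, bias_shard)
#     y = all_gather_1d(y, parallel_context)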
|
external/mask-rcnn-detection/detection.py | vision4j/vision4j-collection | 154 | 11174978 |
import detection_pb2
import cv2
import numpy as np
from PIL import Image
from io import BytesIO
import tensorflow as tf
import keras
import mrcnn.model as modellib
from mrcnn.config import Config
# source: https://github.com/matterport/Mask_RCNN/commit/cbff80f3e3f653a9eeee43d0d383a0385aba546b
class CocoConfig(Config):
"""Configuration for training on MS COCO.
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "coco"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Uncomment to train on 8 GPUs (default is 1)
# GPU_COUNT = 8
# Number of classes (including background)
NUM_CLASSES = 1 + 80 # COCO has 80 classes
class InferenceConfig(CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
def deserialize(request):
data = request.image_data
shape = (request.width, request.height, request.channels)
    nparr = np.frombuffer(data, np.uint8)  # np.fromstring is deprecated for binary input
img = cv2.cvtColor(cv2.imdecode(nparr, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
return img.astype(np.uint8)
def serialize(result):
return detection_pb2.DetectionBoundingBoxes(categoriesToBoundingBoxes=result)
class MaskRCNNDetection(object):
def __init__(self, weights_path):
config = InferenceConfig()
self.model = modellib.MaskRCNN(mode="inference", model_dir='.', config=config)
self.model.load_weights(weights_path, by_name=True)
self.model.keras_model._make_predict_function()
def detect(self, img):
results = self.model.detect([img])
return results[0]
def detect_on_deserialized(self, request, deserialized):
img = deserialized
r = self.detect(img)
masks = r['rois']
class_ids = r['class_ids']
res = {}
        n = len(masks)  # masks and class_ids have the same length, so either works
for i in range(n):
mask = masks[i]
class_id = class_ids[i]
if not class_id in res:
res[class_id] = detection_pb2.BoundingBoxes()
bounding_box = res[class_id].boundingBoxes.add()
bounding_box.left = mask[0]
bounding_box.top = mask[1]
bounding_box.right = mask[2]
bounding_box.bottom = mask[3]
return res
def detect_request(self, request):
deserialized = deserialize(request)
result = self.detect_on_deserialized(request, deserialized)
return serialize(result)
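# Editor's usage sketch (the weights path and the surrounding gRPC wiring are
# assumptions, not part of this module):
#
#     detector = MaskRCNNDetection('mask_rcnn_coco.h5')
#     response = detector.detect_request(request)   # detection_pb2 message in/out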
|
snippod_boilerplate/settings/dev.py | Musbell/snippod-boilerplate | 140 | 11174986 | """
Django settings for snippod boilerplate project.
This is a base starter for snippod.
For more information on this file, see
https://github.com/shalomeir/snippod-boilerplate
"""
from snippod_boilerplate.settings.common import *
# from snippod_boilerplate.settings.config_dev import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$6(x*g_2g9l_*g8peb-@anl5^*8q!1w)k&e&2!i)t6$s8kia93'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', True)
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS += (
'debug_toolbar',
)
# MIDDLEWARE_CLASSES += (
# )
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
DATABASE_OPTIONS = {'charset': 'utf8'}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
# os.path.join(BASE_DIR, 'snippod_webapp/.tmp'), # grunt serve
os.path.join(BASE_DIR, 'snippod_webapp/dist/client'), #grunt
# os.path.join(BASE_DIR, 'static'),
)
COMPRESS_ENABLED = os.environ.get('COMPRESS_ENABLED', False)
#MEDIA FILE (user uploaded files)
# TEMPLATE_DIRS = (
# os.path.join(BASE_DIR, 'djangoapps/templates'),
# )
|
FeatureFlagsCo.Experiments/redismq/redis_foo_sender.py | ZhenhangTung/feature-flags-co | 681 | 11175021 | import logging
from redismq.send_consume import RedisSender
TOPIC_NAME = 'ds'
Q1_START = {
"ExptId": 'FF__38__48__103__PayButton_exp1',
"IterationId": "2",
"EnvId": "103",
"FlagId": "FF__38__48__103__PayButton",
"BaselineVariation": "1",
"Variations": ["1", "2", "3"],
"EventName": "ButtonPayTrack",
"StartExptTime": "2021-09-20T21:00:00.123456",
"EndExptTime": ""
}
if __name__ == '__main__':
logging.basicConfig(level=logging.ERROR,
format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%m-%d %H:%M')
sender = RedisSender()
sender.send(*[Q1_START for _ in range(10)], topic=TOPIC_NAME)
|
local/tf/ze_utils.py | Alicegaz/x-vector-kaldi-tf | 117 | 11175040 |
import argparse
import inspect
import logging
import math
import os
import re
import shutil
import subprocess
import threading
import thread
import traceback
import datetime
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
cuda_command = 'nvidia-smi --query-gpu=memory.free,memory.total --format=csv | tail -n+2 | ' \
'awk \'BEGIN{FS=" "}{if ($1/$3 > 0.98) print NR-1}\''
cuda_command2 = 'nvidia-smi -q | grep "Minor\|Processes" | grep "None" -B1 | tr -d " " | cut -d ":" -f2 | sed -n "1p"'
gpu_used_pid = 'nvidia-smi -q | grep "Process ID" | tr -d " " | cut -d ":" -f2'
def set_cuda_visible_devices(use_gpu=True, logger=None):
try:
if use_gpu:
free_gpu = subprocess.check_output(cuda_command2, shell=True)
if len(free_gpu) == 0:
create_log_on_gpu_error()
if logger is not None:
logger.info("No GPU seems to be available and I cannot continue without GPU.")
raise Exception("No GPU seems to be available and I cannot continue without GPU.")
else:
os.environ["CUDA_VISIBLE_DEVICES"] = free_gpu.decode().strip()
if logger is not None:
logger.info("CUDA_VISIBLE_DEVICES " + os.environ["CUDA_VISIBLE_DEVICES"])
else:
os.environ["CUDA_VISIBLE_DEVICES"] = ''
except subprocess.CalledProcessError:
if logger is not None:
create_log_on_gpu_error()
logger.info("No GPU seems to be available and I cannot continue without GPU.")
# os.environ["CUDA_VISIBLE_DEVICES"] = ''
if use_gpu:
raise
def print_function_args_values(frame):
args, _, _, values = inspect.getargvalues(frame)
print('Function name "%s"' % inspect.getframeinfo(frame)[2])
for arg in args:
print(" %s = %s" % (arg, values[arg]))
def verify_egs_dir(egs_dir):
try:
egs_feat_dim = int(open('{0}/info/feat_dim'.format(egs_dir)).readline())
num_archives = int(open('{0}/info/num_archives'.format(egs_dir)).readline())
archives_minibatch_count = {}
with open('{0}/temp/archive_minibatch_count'.format(egs_dir), 'rt') as fid:
for line in fid:
if len(line.strip()) == 0:
continue
parts = line.split()
archives_minibatch_count[int(parts[0])] = int(parts[1])
return [num_archives, egs_feat_dim, archives_minibatch_count]
except (IOError, ValueError):
logger.error("The egs dir {0} has missing or malformed files.".format(egs_dir))
raise
def get_model_combine_iters(num_iters, num_archives, max_models_combine, num_jobs_final):
""" Figures out the list of iterations for which we'll use those models
in the final model-averaging phase. (note: it's a weighted average
where the weights are worked out from a subset of training data.)"""
approx_iters_per_epoch_final = num_archives / num_jobs_final
# Note: it used to be that we would combine over an entire epoch,
# but in practice we very rarely would use any weights from towards
# the end of that range, so we are changing it to use not
# approx_iters_per_epoch_final, but instead:
# approx_iters_per_epoch_final/2 + 1,
# dividing by 2 to use half an epoch, and adding 1 just to make sure
# it's not zero.
# First work out how many iterations we want to combine over in the final
# nnet3-combine-fast invocation.
# The number we use is:
# min(max(max_models_combine, approx_iters_per_epoch_final/2+1), iters/2)
# But if this value is > max_models_combine, then the models
# are sub-sampled to get these many models to combine.
num_iters_combine_initial = min(approx_iters_per_epoch_final / 2 + 1, num_iters / 2)
if num_iters_combine_initial > max_models_combine:
subsample_model_factor = int(float(num_iters_combine_initial) / max_models_combine)
models_to_combine = set(range(num_iters - num_iters_combine_initial + 1,
num_iters + 1, subsample_model_factor))
models_to_combine.add(num_iters)
else:
num_iters_combine = min(max_models_combine, num_iters / 2)
models_to_combine = set(range(num_iters - num_iters_combine + 1, num_iters + 1))
return models_to_combine
def get_learning_rate(_iter, num_jobs, num_iters, num_archives_processed, num_archives_to_process,
initial_effective_lrate, final_effective_lrate):
if _iter + 1 >= num_iters:
effective_learning_rate = final_effective_lrate
else:
effective_learning_rate = (initial_effective_lrate *
math.exp(num_archives_processed *
math.log(final_effective_lrate / initial_effective_lrate)
/ num_archives_to_process))
return num_jobs * effective_learning_rate
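# Editor's worked example (illustrative numbers): with
# initial_effective_lrate=1e-3, final_effective_lrate=1e-4 and half of the
# archives processed, the interpolation above gives
# 1e-3 * exp(0.5 * ln(0.1)) ~= 3.16e-4 per job, which is then multiplied by
# num_jobs to compensate for averaging models across parallel jobs.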
def get_successful_models(num_models, log_file_pattern, difference_threshold=1.0):
assert num_models > 0
parse_regex = re.compile(
"INFO .* Overall average objective function is ([0-9e.\-+= ]+) over ([0-9e.\-+]+) segments")
objectives = []
for i in range(num_models):
model_num = i + 1
logfile = re.sub('%', str(model_num), log_file_pattern)
lines = open(logfile, 'r').readlines()
this_objective = -100000.0
for line_num in range(1, len(lines) + 1):
# we search from the end as this would result in
# lesser number of regex searches. Python regex is slow !
mat_obj = parse_regex.search(lines[-1 * line_num])
if mat_obj is not None:
this_objective = float(mat_obj.groups()[0].split()[-1])
break
objectives.append(this_objective)
max_index = objectives.index(max(objectives))
accepted_models = []
for i in range(num_models):
if (objectives[max_index] - objectives[i]) <= difference_threshold:
accepted_models.append(i + 1)
if len(accepted_models) != num_models:
logger.warn("Only {0}/{1} of the models have been accepted "
"for averaging, based on log files {2}.".format(
len(accepted_models),
num_models, log_file_pattern))
return [accepted_models, max_index + 1]
def copy_best_nnet_dir(_dir, _iter, best_model_index):
best_model_dir = "{dir}/model_{next_iter}.{best_model_index}".format(
dir=_dir, next_iter=_iter + 1, best_model_index=best_model_index)
out_model_dir = "{dir}/model_{next_iter}".format(dir=_dir, next_iter=_iter + 1)
shutil.copytree(best_model_dir, out_model_dir)
def get_average_nnet_model(dir, iter, nnets_list, run_opts,
get_raw_nnet_from_am=True):
next_iter = iter + 1
if get_raw_nnet_from_am:
out_model = ("""- \| nnet3-am-copy --set-raw-nnet=- \
{dir}/{iter}.mdl {dir}/{next_iter}.mdl""".format(
dir=dir, iter=iter,
next_iter=next_iter))
else:
out_model = "{dir}/{next_iter}.raw".format(
dir=dir, next_iter=next_iter)
# common_lib.execute_command(
# """{command} {dir}/log/average.{iter}.log \
# nnet3-average {nnets_list} \
# {out_model}""".format(command=run_opts.command,
# dir=dir,
# iter=iter,
# nnets_list=nnets_list,
# out_model=out_model))
def remove_model(nnet_dir, _iter, models_to_combine=None, preserve_model_interval=100):
if _iter % preserve_model_interval == 0:
return
if models_to_combine is not None and _iter in models_to_combine:
return
model_dir = '{0}/model_{1}'.format(nnet_dir, _iter)
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
def background_command_waiter(command, popen_object, require_zero_status):
""" This is the function that is called from background_command, in
a separate thread."""
popen_object.communicate()
    if popen_object.returncode != 0:
_str = "Command exited with status {0}: {1}".format(popen_object.returncode, command)
if require_zero_status:
logger.error(_str)
# thread.interrupt_main() sends a KeyboardInterrupt to the main
# thread, which will generally terminate the program.
thread.interrupt_main()
else:
logger.warning(_str)
def background_command(command, require_zero_status=False):
"""Executes a command in a separate thread, like running with '&' in the shell.
If you want the program to die if the command eventually returns with
nonzero status, then set require_zero_status to True. 'command' will be
executed in 'shell' mode, so it's OK for it to contain pipes and other
shell constructs.
This function returns the Thread object created, just in case you want
to wait for that specific command to finish. For example, you could do:
thread = background_command('foo | bar')
# do something else while waiting for it to finish
thread.join()
See also:
- wait_for_background_commands(), which can be used
at the end of the program to wait for all these commands to terminate.
- execute_command() and get_command_stdout(), which allow you to
execute commands in the foreground.
"""
p = subprocess.Popen(command, shell=True)
thread = threading.Thread(target=background_command_waiter, args=(command, p, require_zero_status))
thread.daemon = True # make sure it exits if main thread is terminated abnormally.
thread.start()
return thread
def wait_for_background_commands():
""" This waits for all threads to exit. You will often want to
run this at the end of programs that have launched background
threads, so that the program will wait for its child processes
to terminate before it dies."""
for t in threading.enumerate():
if not t == threading.current_thread():
t.join()
def force_symlink(file1, file2):
import errno
try:
os.symlink(file1, file2)
except OSError as e:
if e.errno == errno.EEXIST:
os.unlink(file2)
os.symlink(file1, file2)
def str_to_bool(value):
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
else:
raise ValueError
class StrToBoolAction(argparse.Action):
""" A custom action to convert booleans from shell format i.e., true/false
to python format i.e., True/False """
def __call__(self, parser, namespace, values, option_string=None):
try:
setattr(namespace, self.dest, str_to_bool(values))
except ValueError:
raise Exception("Unknown value {0} for --{1}".format(values, self.dest))
class NullStrToNoneAction(argparse.Action):
""" A custom action to convert empty strings passed by shell to None in
python. This is necessary as shell scripts print null strings when a
variable is not specified. We could use the more apt None in python. """
def __call__(self, parser, namespace, values, option_string=None):
if values.strip() == "":
setattr(namespace, self.dest, None)
else:
setattr(namespace, self.dest, values)
class RunOpts(object):
"""A structure to store run options.
Run options like queue.pl and run.pl, along with their memory
and parallel training options for various types of commands such
as the ones for training, parallel-training, running on GPU etc.
"""
def __init__(self):
self.command = None
self.train_queue_opt = None
self.combine_gpu_opt = None
self.combine_queue_opt = None
self.prior_gpu_opt = None
self.prior_queue_opt = None
self.parallel_train_opts = None
def _get_component_dropout(dropout_schedule, data_fraction):
"""Retrieve dropout proportion from schedule when data_fraction
proportion of data is seen. This value is obtained by using a
piecewise linear function on the dropout schedule.
This is a module-internal function called by _get_dropout_proportions().
See help for --trainer.dropout-schedule for how the dropout value
is obtained from the options.
Arguments:
dropout_schedule: A list of (data_fraction, dropout_proportion) values
sorted in descending order of data_fraction.
data_fraction: The fraction of data seen until this stage of
training.
"""
if data_fraction == 0:
# Dropout at start of the iteration is in the last index of
# dropout_schedule
assert dropout_schedule[-1][0] == 0
return dropout_schedule[-1][1]
try:
# Find lower bound of the data_fraction. This is the
# lower end of the piecewise linear function.
(dropout_schedule_index, initial_data_fraction,
initial_dropout) = next((i, tup[0], tup[1])
for i, tup in enumerate(dropout_schedule)
if tup[0] <= data_fraction)
except StopIteration:
raise RuntimeError(
"Could not find data_fraction in dropout schedule "
"corresponding to data_fraction {0}.\n"
"Maybe something wrong with the parsed "
"dropout schedule {1}.".format(data_fraction, dropout_schedule))
if dropout_schedule_index == 0:
assert dropout_schedule[0][0] == 1 and data_fraction == 1
return dropout_schedule[0][1]
# The upper bound of data_fraction is at the index before the
# lower bound.
final_data_fraction, final_dropout = dropout_schedule[
dropout_schedule_index - 1]
if final_data_fraction == initial_data_fraction:
assert data_fraction == initial_data_fraction
return initial_dropout
assert (initial_data_fraction <= data_fraction < final_data_fraction)
return ((data_fraction - initial_data_fraction)
* (final_dropout - initial_dropout)
/ (final_data_fraction - initial_data_fraction)
+ initial_dropout)
def _parse_dropout_string(dropout_str):
"""Parses the dropout schedule from the string corresponding to a
single component in --trainer.dropout-schedule.
This is a module-internal function called by parse_dropout_function().
Arguments:
dropout_str: Specifies dropout schedule for a particular component
name pattern.
See help for the option --trainer.dropout-schedule.
Returns a list of (data_fraction_processed, dropout_proportion) tuples
sorted in descending order of num_archives_processed.
A data fraction of 1 corresponds to all data.
"""
dropout_values = []
parts = dropout_str.strip().split(',')
try:
if len(parts) < 2:
raise Exception("dropout proportion string must specify "
"at least the start and end dropouts")
# Starting dropout proportion
dropout_values.append((0, float(parts[0])))
for i in range(1, len(parts) - 1):
value_x_pair = parts[i].split('@')
if len(value_x_pair) == 1:
# Dropout proportion at half of training
dropout_proportion = float(value_x_pair[0])
data_fraction = 0.5
else:
assert len(value_x_pair) == 2
dropout_proportion = float(value_x_pair[0])
data_fraction = float(value_x_pair[1])
if (data_fraction < dropout_values[-1][0]
or data_fraction > 1.0):
logger.error(
"Failed while parsing value %s in dropout-schedule. "
"dropout-schedule must be in incresing "
"order of data fractions.", value_x_pair)
raise ValueError
dropout_values.append((data_fraction, float(dropout_proportion)))
dropout_values.append((1.0, float(parts[-1])))
except Exception:
logger.error("Unable to parse dropout proportion string %s. "
"See help for option "
"--trainer.dropout-schedule.", dropout_str)
raise
# reverse sort so that its easy to retrieve the dropout proportion
# for a particular data fraction
dropout_values.reverse()
for data_fraction, proportion in dropout_values:
assert 0.0 <= data_fraction <= 1.0
assert 0.0 <= proportion <= 1.0
return dropout_values
def get_dropout_edit_string(dropout_schedule, data_fraction):
"""Returns dropout proportion based on the dropout_schedule for the
fraction of data seen at this stage of training.
Returns None if dropout_schedule is None.
Arguments:
dropout_schedule: Value for the --dropout-schedule option.
See help for --dropout-schedule.
data_fraction: The fraction of data seen until this stage of
training.
"""
if dropout_schedule is None:
return None
dropout_schedule = _parse_dropout_string(dropout_schedule)
dropout_proportion = _get_component_dropout(dropout_schedule, data_fraction)
return dropout_proportion
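# Editor's worked example: the schedule string '0,0.5,0' parses to the
# (data_fraction, proportion) pairs [(1.0, 0.0), (0.5, 0.5), (0.0, 0.0)]
# (stored in reverse order), so get_dropout_edit_string('0,0.5,0', 0.25)
# interpolates linearly to a dropout proportion of 0.25.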
def get_command_stdout(command, require_zero_status=True):
""" Executes a command and returns its stdout output as a string. The
command is executed with shell=True, so it may contain pipes and
other shell constructs.
If require_zero_stats is True, this function will raise an exception if
the command has nonzero exit status. If False, it just prints a warning
if the exit status is nonzero.
"""
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
stdout = p.communicate()[0]
    if p.returncode != 0:
output = "Command exited with status {0}: {1}".format(p.returncode, command)
if require_zero_status:
raise Exception(output)
else:
logger.warning(output)
return stdout if type(stdout) is str else stdout.decode()
def get_train_times(exp_dir):
train_log_files = "%s/log/" % (exp_dir)
train_log_names = "train.*.log"
command = 'find {0} -name "{1}" | xargs grep -H -e Accounting'.format(train_log_files,train_log_names)
train_log_lines = get_command_stdout(command, require_zero_status=False)
parse_regex = re.compile(".*train\.([0-9]+)\.([0-9]+)\.log:# Accounting: time=([0-9]+) thread.*")
train_times = dict()
for line in train_log_lines.split('\n'):
mat_obj = parse_regex.search(line)
if mat_obj is not None:
groups = mat_obj.groups()
try:
train_times[int(groups[0])][int(groups[1])] = float(groups[2])
except KeyError:
train_times[int(groups[0])] = {}
train_times[int(groups[0])][int(groups[1])] = float(groups[2])
iters = train_times.keys()
for _iter in iters:
values = train_times[_iter].values()
train_times[_iter] = max(values)
return train_times
def parse_prob_logs(exp_dir, key='accuracy'):
train_prob_files = "%s/log/compute_prob_train_subset.*.log" % exp_dir
valid_prob_files = "%s/log/compute_prob_valid.*.log" % exp_dir
train_prob_strings = get_command_stdout('grep -e {0} {1}'.format(key, train_prob_files))
valid_prob_strings = get_command_stdout('grep -e {0} {1}'.format(key, valid_prob_files))
# Overall average loss is 0.6923 over 1536 segments. Also, the overall average accuracy is 0.8548.
parse_regex = re.compile(".*compute_prob_.*\.([0-9]+).log.*Overall average ([a-zA-Z\-]+) is ([0-9.\-e]+) "
".*overall average ([a-zA-Z\-]+) is ([0-9.\-e]+)\.")
train_objf = {}
valid_objf = {}
for line in train_prob_strings.split('\n'):
mat_obj = parse_regex.search(line)
if mat_obj is not None:
groups = mat_obj.groups()
if groups[3] == key:
train_objf[int(groups[0])] = (groups[2], groups[4])
if not train_objf:
raise Exception("Could not find any lines with {key} in {log}".format(key=key, log=train_prob_files))
for line in valid_prob_strings.split('\n'):
mat_obj = parse_regex.search(line)
if mat_obj is not None:
groups = mat_obj.groups()
if groups[3] == key:
valid_objf[int(groups[0])] = (groups[2], groups[4])
if not valid_objf:
raise Exception("Could not find any lines with {key} in {log}".format(key=key, log=valid_prob_files))
iters = list(set(valid_objf.keys()).intersection(train_objf.keys()))
if not iters:
raise Exception("Could not any common iterations with key {k} in both {tl} and {vl}".format(
k=key, tl=train_prob_files, vl=valid_prob_files))
iters.sort()
return list(map(lambda x: (int(x), float(train_objf[x][0]), float(train_objf[x][1]),
float(valid_objf[x][0]), float(valid_objf[x][1])), iters))
def generate_report(exp_dir, key="accuracy"):
try:
times = get_train_times(exp_dir)
except Exception:
tb = traceback.format_exc()
logger.warning("Error getting info from logs, exception was: " + tb)
times = {}
report = ["%Iter\tduration\ttrain_loss\tvalid_loss\tdifference\ttrain_acc\tvalid_acc\tdifference"]
try:
data = list(parse_prob_logs(exp_dir, key))
except Exception:
tb = traceback.format_exc()
logger.warning("Error getting info from logs, exception was: " + tb)
data = []
for x in data:
try:
report.append("%d\t%s\t%g\t%g\t%g\t%g\t%g\t%g" %
(x[0], str(times[x[0]]), x[1], x[3], x[3]-x[1], x[2], x[4], x[2]-x[4]))
except KeyError:
continue
total_time = 0
for _iter in times.keys():
total_time += times[_iter]
report.append("Total training time is {0}\n".format(
str(datetime.timedelta(seconds=total_time))))
return ["\n".join(report), times, data]
def is_correct_model_dir(model_dir):
model_file = "{0}/model.meta".format(model_dir)
done_file = "{0}/done".format(model_dir)
if os.path.isfile(model_file) and os.stat(model_file).st_size > 0 and \
os.path.isfile(done_file) and os.stat(done_file).st_size > 0:
return True
return False
def create_log_on_gpu_error():
try:
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import tostring
import numpy as np
print(os.uname()[1])
uname = os.uname()[1]
used_pid = subprocess.check_output(gpu_used_pid, shell=True).decode().strip().split('\n')
command = 'ps -o user='
for pid in used_pid:
command += ' -p ' + pid
users = subprocess.check_output(command, shell=True).decode().strip().split('\n')
pid_user = {}
for user, pid in zip(users, used_pid):
pid_user[pid] = user
s = os.popen("qstat -r -s r -xml").read()
root = ET.fromstring(s)
# users = [x.text for x in root.findall('.//JB_owner')]
users = np.unique(users)
user_gpu = {}
for u in users:
q = [jl for jl in root.findall(".//*[JB_owner='%s']" % u)]
p = filter(lambda x: x.find('master').text == 'MASTER', q)
p1 = filter(lambda x: x.find('queue_name').text.endswith(uname), p)
hrs = [n.findall('hard_request') for n in p1]
a = [x for subl in hrs for x in subl]
v = [(x.get('name'), x.text) for x in a]
for (n, h) in v:
if n == 'gpu':
if u in user_gpu:
user_gpu[u] += int(h)
else:
user_gpu[u] = int(h)
for pid, user in pid_user.iteritems():
if user in user_gpu:
if user_gpu[user] > 0:
print("%-12s%-30s OK" % (pid, user))
else:
print("%-12s%-30s Get GPU more than request" % (pid, user))
user_gpu[user] -= 1
else:
print("%-12s%-30s No Request for GPU" % (pid, user))
except Exception as exp:
print(exp)
if __name__ == '__main__':
create_log_on_gpu_error()
|
spafe/__init__.py | SuperKogito/cautious-palm-tree | 205 | 11175041 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Top-level module for spafe
"""
__version__ = '0.1.0'
import sys
import warnings
# Throw a deprecation warning if we're on legacy python
if sys.version_info < (3,):
warnings.warn('You are using spafe with Python 2.'
'Please note that spafe requires Python 3 or later.',
FutureWarning)
|
mmrazor/models/mutators/base.py | hunto/mmrazor | 553 | 11175057 | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta
from mmcv.runner import BaseModule
from mmrazor.models.architectures import Placeholder
from mmrazor.models.builder import MUTABLES, MUTATORS
from mmrazor.models.mutables import MutableModule
@MUTATORS.register_module()
class BaseMutator(BaseModule, metaclass=ABCMeta):
"""Base class for mutators."""
def __init__(self, placeholder_mapping=None, init_cfg=None):
super(BaseMutator, self).__init__(init_cfg=init_cfg)
self.placeholder_mapping = placeholder_mapping
def prepare_from_supernet(self, supernet):
"""Implement some preparatory work based on supernet, including
``convert_placeholder`` and ``build_search_spaces``.
Args:
supernet (:obj:`torch.nn.Module`): The architecture to be used
in your algorithm.
"""
if self.placeholder_mapping is not None:
self.convert_placeholder(supernet, self.placeholder_mapping)
self.search_spaces = self.build_search_spaces(supernet)
def build_search_spaces(self, supernet):
"""Build a search space from the supernet.
Args:
supernet (:obj:`torch.nn.Module`): The architecture to be used
in your algorithm.
Returns:
dict: To collect some information about ``MutableModule`` in the
supernet.
"""
search_spaces = dict()
def traverse(module):
for child in module.children():
if isinstance(child, MutableModule):
if child.space_id not in search_spaces.keys():
search_spaces[child.space_id] = dict(
modules=[child],
choice_names=child.choice_names,
num_chosen=child.num_chosen,
space_mask=child.build_space_mask())
else:
search_spaces[child.space_id]['modules'].append(child)
traverse(child)
traverse(supernet)
return search_spaces
def convert_placeholder(self, supernet, placeholder_mapping):
"""Replace all placeholders in the model.
Args:
supernet (:obj:`torch.nn.Module`): The architecture to be used in
your algorithm.
placeholder_mapping (dict): Record which placeholders need to be
replaced by which ops,
its keys are the properties ``placeholder_group`` of
placeholders used in the searchable architecture,
its values are the registered ``OPS``.
"""
def traverse(module):
for name, child in module.named_children():
if isinstance(child, Placeholder):
mutable_cfg = placeholder_mapping[
child.placeholder_group].copy()
assert 'type' in mutable_cfg, f'{mutable_cfg}'
mutable_type = mutable_cfg.pop('type')
assert mutable_type in MUTABLES, \
f'{mutable_type} not in MUTABLES.'
mutable_constructor = MUTABLES.get(mutable_type)
mutable_kwargs = child.placeholder_kwargs
mutable_kwargs.update(mutable_cfg)
mutable_module = mutable_constructor(**mutable_kwargs)
setattr(module, name, mutable_module)
# setattr(module, name, choice_module)
# If the new MUTABLE is MutableEdge, it may have MutableOP,
# so here we need to traverse the new MUTABLES.
traverse(mutable_module)
else:
traverse(child)
traverse(supernet)
def deploy_subnet(self, supernet, subnet_dict):
"""Export the subnet from the supernet based on the specified
subnet_dict.
Args:
supernet (:obj:`torch.nn.Module`): The architecture to be used in
your algorithm.
subnet_dict (dict): Record the information to build the subnet from
the supernet,
its keys are the properties ``space_id`` of placeholders in the
mutator's search spaces,
its values are dicts: {'chosen': ['chosen name1',
'chosen name2', ...]}
"""
def traverse(module):
for name, child in module.named_children():
if isinstance(child, MutableModule):
space_id = child.space_id
chosen = subnet_dict[space_id]['chosen']
child.export(chosen)
traverse(child)
traverse(supernet)
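# Editor's sketch of the expected subnet_dict shape (space ids and choice
# names are placeholders, not values from any real config):
#
#     mutator.deploy_subnet(supernet, {
#         'backbone.layer1': {'chosen': ['shuffle_3x3']},
#         'backbone.layer2': {'chosen': ['shuffle_7x7']},
#     })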
|
base/site-packages/news/signals.py | edisonlz/fastor | 285 | 11175076 |
from django.contrib.comments.signals import comment_will_be_posted
from django.contrib.comments.models import Comment
from django.http import HttpResponseRedirect
def unapprove_comment(sender, **kwargs):
the_comment = kwargs['comment']
the_comment.is_public = False
return True
comment_will_be_posted.connect(unapprove_comment) |
javascript.py | aronwoost/sublime-expand-region | 205 | 11175091 | try:
import expand_to_word
import expand_to_subword
import expand_to_word_with_dots
import expand_to_symbols
import expand_to_quotes
import expand_to_semantic_unit
import utils
except:
from . import expand_to_word
from . import expand_to_subword
from . import expand_to_word_with_dots
from . import expand_to_symbols
from . import expand_to_quotes
from . import expand_to_semantic_unit
from . import utils
def expand(string, start, end):
selection_is_in_string = expand_to_quotes.expand_to_quotes(string, start, end)
if selection_is_in_string:
string_result = expand_agains_string(selection_is_in_string["string"], start - selection_is_in_string["start"], end - selection_is_in_string["start"])
if string_result:
string_result["start"] = string_result["start"] + selection_is_in_string["start"]
string_result["end"] = string_result["end"] + selection_is_in_string["start"]
            string_result["string"] = string[string_result["start"]:string_result["end"]]
return string_result
if utils.selection_contain_linebreaks(string, start, end) == False:
line = utils.get_line(string, start, end)
line_string = string[line["start"]:line["end"]]
line_result = expand_agains_line(line_string, start - line["start"], end - line["start"])
if line_result:
line_result["start"] = line_result["start"] + line["start"]
line_result["end"] = line_result["end"] + line["start"]
            line_result["string"] = string[line_result["start"]:line_result["end"]]
return line_result
expand_stack = ["semantic_unit"]
result = expand_to_semantic_unit.expand_to_semantic_unit(string, start, end)
if result:
result["expand_stack"] = expand_stack
return result
expand_stack.append("symbols")
result = expand_to_symbols.expand_to_symbols(string, start, end)
if result:
result["expand_stack"] = expand_stack
return result
if utils.is_debug_enabled:
print("ExpandRegion, javascript.py, None")
def expand_agains_line(string, start, end):
expand_stack = []
expand_stack.append("subword")
result = expand_to_subword.expand_to_subword(string, start, end)
if result:
result["expand_stack"] = expand_stack
return result
expand_stack.append("word")
result = expand_to_word.expand_to_word(string, start, end)
if result:
result["expand_stack"] = expand_stack
return result
expand_stack.append("quotes")
result = expand_to_quotes.expand_to_quotes(string, start, end)
if result:
result["expand_stack"] = expand_stack
return result
expand_stack.append("semantic_unit")
result = expand_to_semantic_unit.expand_to_semantic_unit(string, start, end)
if result:
result["expand_stack"] = expand_stack
return result
expand_stack.append("symbols")
result = expand_to_symbols.expand_to_symbols(string, start, end)
if result:
result["expand_stack"] = expand_stack
return result
# expand_stack.append("line")
# result = expand_to_line.expand_to_line(string, start, end)
# if result:
# result["expand_stack"] = expand_stack
# return result
# return None
def expand_agains_string(string, start, end):
expand_stack = []
expand_stack.append("semantic_unit")
result = expand_to_semantic_unit.expand_to_semantic_unit(string, start, end)
if result:
result["expand_stack"] = expand_stack
return result
expand_stack.append("symbols")
result = expand_to_symbols.expand_to_symbols(string, start, end)
if result:
result["expand_stack"] = expand_stack
return result
|
gammapy/modeling/covariance.py | JohannesBuchner/gammapy | 155 | 11175114 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Covariance class"""
import numpy as np
import scipy
from .parameter import Parameters
__all__ = ["Covariance"]
class Covariance:
"""Parameter covariance class
Parameters
----------
parameters : `~gammapy.modeling.Parameters`
Parameter list
data : `~numpy.ndarray`
Covariance data array
"""
def __init__(self, parameters, data=None):
self.parameters = parameters
if data is None:
data = np.diag([p.error ** 2 for p in self.parameters])
self._data = np.asanyarray(data, dtype=float)
@property
def shape(self):
"""Covariance shape"""
npars = len(self.parameters)
return npars, npars
@property
def data(self):
"""Covariance data (`~numpy.ndarray`)"""
return self._data
@data.setter
def data(self, value):
value = np.asanyarray(value)
npars = len(self.parameters)
shape = (npars, npars)
if value.shape != shape:
raise ValueError(
f"Invalid covariance shape: {value.shape}, expected {shape}"
)
self._data = value
@staticmethod
def _expand_factor_matrix(matrix, parameters):
"""Expand covariance matrix with zeros for frozen parameters"""
npars = len(parameters)
matrix_expanded = np.zeros((npars, npars))
mask_frozen = [par.frozen for par in parameters]
pars_index = [np.where(np.array(parameters) == p)[0][0] for p in parameters]
mask_duplicate = [pars_idx != idx for idx, pars_idx in enumerate(pars_index)]
mask = np.array(mask_frozen) | np.array(mask_duplicate)
free_parameters = ~(mask | mask[:, np.newaxis])
matrix_expanded[free_parameters] = matrix.ravel()
return matrix_expanded
@classmethod
def from_factor_matrix(cls, parameters, matrix):
"""Set covariance from factor covariance matrix.
Used in the optimizer interface.
"""
npars = len(parameters)
if not matrix.shape == (npars, npars):
matrix = cls._expand_factor_matrix(matrix, parameters)
scales = [par.scale for par in parameters]
scale_matrix = np.outer(scales, scales)
data = scale_matrix * matrix
return cls(parameters, data=data)
@classmethod
def from_stack(cls, covar_list):
"""Stack sub-covariance matrices from list
Parameters
----------
covar_list : list of `Covariance`
List of sub-covariances
Returns
-------
covar : `Covariance`
Stacked covariance
"""
parameters = Parameters.from_stack([_.parameters for _ in covar_list])
covar = cls(parameters)
for subcovar in covar_list:
covar.set_subcovariance(subcovar)
return covar
def get_subcovariance(self, parameters):
"""Get sub-covariance matrix
Parameters
----------
parameters : `Parameters`
Sub list of parameters.
Returns
-------
covariance : `~numpy.ndarray`
Sub-covariance.
"""
idx = [self.parameters.index(par) for par in parameters]
data = self._data[np.ix_(idx, idx)]
return self.__class__(parameters=parameters, data=data)
def set_subcovariance(self, covar):
"""Set sub-covariance matrix
Parameters
----------
parameters : `Parameters`
Sub list of parameters.
"""
idx = [self.parameters.index(par) for par in covar.parameters]
if not np.allclose(self.data[np.ix_(idx, idx)], covar.data):
self.data[idx, :] = 0
self.data[:, idx] = 0
self._data[np.ix_(idx, idx)] = covar.data
def plot_correlation(self, ax=None, **kwargs):
"""Plot correlation matrix.
Parameters
----------
ax : `~matplotlib.axes.Axes`, optional
Axis to plot on.
**kwargs : dict
Keyword arguments passed to `~gammapy.visualisation.plot_heatmap`
Returns
-------
ax : `~matplotlib.axes.Axes`, optional
Axis
"""
import matplotlib.pyplot as plt
from gammapy.visualization import plot_heatmap, annotate_heatmap
npars = len(self.parameters)
figsize = (npars * 0.8, npars * 0.65)
plt.figure(figsize=figsize)
ax = plt.gca() if ax is None else ax
kwargs.setdefault("cmap", "coolwarm")
names = self.parameters.names
im, cbar = plot_heatmap(
data=self.correlation,
col_labels=names,
row_labels=names,
ax=ax,
vmin=-1,
vmax=1,
cbarlabel="Correlation",
**kwargs,
)
annotate_heatmap(im=im)
return ax
@property
def correlation(self):
r"""Correlation matrix (`numpy.ndarray`).
Correlation :math:`C` is related to covariance :math:`\Sigma` via:
.. math::
C_{ij} = \frac{ \Sigma_{ij} }{ \sqrt{\Sigma_{ii} \Sigma_{jj}} }
"""
err = np.sqrt(np.diag(self.data))
with np.errstate(invalid="ignore", divide="ignore"):
correlation = self.data / np.outer(err, err)
return np.nan_to_num(correlation)
@property
def scipy_mvn(self):
# TODO: use this, as in https://github.com/cdeil/multinorm/blob/master/multinorm.py
return scipy.stats.multivariate_normal(
self.parameters.value, self.data, allow_singular=True
)
def __str__(self):
return str(self.data)
def __array__(self):
return self.data
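# Editor's usage sketch (illustrative values, not taken from any gammapy example):
#
#     from gammapy.modeling import Parameter, Parameters
#     pars = Parameters([Parameter("index", 2.3), Parameter("amplitude", 1e-12)])
#     covar = Covariance(pars, data=np.diag([0.1 ** 2, (2e-13) ** 2]))
#     sub = covar.get_subcovariance(Parameters([pars["index"]]))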
|
tests/providers/telegram/hooks/test_telegram.py | ChaseKnowlden/airflow | 15,947 | 11175117 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import pytest
import telegram
import airflow
from airflow.models import Connection
from airflow.providers.telegram.hooks.telegram import TelegramHook
from airflow.utils import db
TELEGRAM_TOKEN = "<PASSWORD>"
class TestTelegramHook(unittest.TestCase):
def setUp(self):
db.merge_conn(
Connection(
conn_id='telegram-webhook-without-token',
conn_type='http',
)
)
db.merge_conn(
Connection(
conn_id='telegram_default',
conn_type='http',
password=TELEGRAM_TOKEN,
)
)
db.merge_conn(
Connection(
conn_id='telegram-webhook-with-chat_id',
conn_type='http',
password=TELEGRAM_TOKEN,
host="-420913222",
)
)
def test_should_raise_exception_if_both_connection_or_token_is_not_provided(self):
with pytest.raises(airflow.exceptions.AirflowException) as ctx:
TelegramHook()
assert "Cannot get token: No valid Telegram connection supplied." == str(ctx.value)
def test_should_raise_exception_if_conn_id_doesnt_exist(self):
with pytest.raises(airflow.exceptions.AirflowNotFoundException) as ctx:
TelegramHook(telegram_conn_id='telegram-webhook-non-existent')
assert "The conn_id `telegram-webhook-non-existent` isn't defined" == str(ctx.value)
def test_should_raise_exception_if_conn_id_doesnt_contain_token(self):
with pytest.raises(airflow.exceptions.AirflowException) as ctx:
TelegramHook(telegram_conn_id='telegram-webhook-without-token')
assert "Missing token(password) in Telegram connection" == str(ctx.value)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_raise_exception_if_chat_id_is_not_provided_anywhere(self, mock_get_conn):
with pytest.raises(airflow.exceptions.AirflowException) as ctx:
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"text": "test telegram message"})
assert "'chat_id' must be provided for telegram message" == str(ctx.value)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_raise_exception_if_message_text_is_not_provided(self, mock_get_conn):
with pytest.raises(airflow.exceptions.AirflowException) as ctx:
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"chat_id": -420913222})
assert "'text' must be provided for telegram message" == str(ctx.value)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_all_parameters_are_correctly_provided(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="<PASSWORD>")
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"chat_id": -420913222, "text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_chat_id_is_provided_through_constructor(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="<PASSWORD>")
hook = TelegramHook(telegram_conn_id='telegram_default', chat_id=-420913222)
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_chat_id_is_provided_in_connection(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="<PASSWORD>")
hook = TelegramHook(telegram_conn_id='telegram-webhook-with-chat_id')
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': "-420913222",
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_retry_when_any_telegram_error_is_encountered(self, mock_get_conn):
excepted_retry_count = 5
mock_get_conn.return_value = mock.Mock(password="<PASSWORD>")
def side_effect(*args, **kwargs):
raise telegram.error.TelegramError("cosmic rays caused bit flips")
mock_get_conn.return_value.send_message.side_effect = side_effect
with pytest.raises(Exception) as ctx:
hook = TelegramHook(telegram_conn_id='telegram-webhook-with-chat_id')
hook.send_message({"text": "test telegram message"})
assert "RetryError" in str(ctx.value)
assert "state=finished raised TelegramError" in str(ctx.value)
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_with(
**{
'chat_id': "-420913222",
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
assert excepted_retry_count == mock_get_conn.return_value.send_message.call_count
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_token_is_provided(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="<PASSWORD>")
hook = TelegramHook(token=TELEGRAM_TOKEN, chat_id=-420913222)
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
|
mayan/apps/cabinets/migrations/0006_auto_20210525_0604.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 11175139 | from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cabinets', '0005_auto_20210525_0500'),
]
operations = [
migrations.AlterModelOptions(
name='cabinet',
options={
'verbose_name': 'Cabinet', 'verbose_name_plural': 'Cabinets'
},
),
]
|
tests/becy/design1_proof_of_concepts.py | thautwarm/restrain-jit | 116 | 11175154 |
from restrain_jit.becython.cy_loader import setup_pyx_for_cpp
from pyximport import pyximport
setup_pyx_for_cpp()
pyximport.install()
import restrain_jit.becython.cython_rts.hotspot
from restrain_jit.becython.cy_loader import compile_module
mod = """
cimport restrain_jit.becython.cython_rts.RestrainJIT as RestrainJIT
from restrain_jit.becython.cython_rts.hotspot cimport inttopy, pytoint, JITCounter
from libc.stdint cimport int64_t, int32_t, int16_t, int8_t
from libcpp.map cimport map as std_map
from libcpp.vector cimport vector as std_vector
from cython cimport typeof, final
cdef fused Arg1:
object
cdef fused Arg2:
object
cdef fused Arg3:
object
cdef JITCounter counter
cdef object recompile_handler
cdef object global_abs
cpdef f(Arg1 x, Arg2 y, Arg3 z):
if typeof(x) == typeof(object) or typeof(x) == typeof(object) or typeof(z) == typeof(object):
counter[(type(x), type(y), type(z))] += 1
if counter.times % 100 == 0:
recompile_handler()
return x + y + z
cpdef init(dict globs, dict _counter, _handler):
global global_abs, counter, recompile_handler
global_abs = globs['abs']
counter = JITCounter(_counter)
recompile_handler = _handler
"""
mod = compile_module('m', mod)
mod.init(dict(abs=abs), {}, lambda : print("jit started!"))
print(mod.f(14514, 2, 3))
|
tests/test_shap.py | paultimothymooney/docker-python-2 | 2,030 | 11175173 |
import unittest
import shap
class TestShap(unittest.TestCase):
def test_init(self):
shap.initjs()
|
atlas/foundations_authentication/src/test/__init__.py | DeepLearnI/atlas | 296 | 11175200 | from test.test_authentication_client import TestAuthenticationClient |
pic_locate.py | circlestarzero/GenshinMapAutoMarkTools | 167 | 11175242 | import numpy
import cv2 as cv
from matplotlib import pyplot as plt
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QImage
import win32gui
import sys
import time
import win32api
import win32print
import win32con
import os
import keyboard
import win32com.client
import pythoncom
base_dir = os.path.dirname(os.path.abspath(__file__))
app = QApplication(sys.argv)
def second_key_sort(k):
return k[1]
def cutscreen(hwnd):
pix = QApplication.primaryScreen().grabWindow(hwnd).toImage().convertToFormat(QImage.Format.Format_RGBA8888)
width = pix.width()
height = pix.height()
ptr = pix.bits()
ptr.setsize(height * width * 4)
img = numpy.frombuffer(ptr, numpy.uint8).reshape((height, width, 4))
img1 = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
return img1
def GetWindowCorner(hwnd):
screen = QApplication.primaryScreen()
pix = screen.grabWindow(hwnd).toImage().convertToFormat(QImage.Format.Format_RGBA8888)
rect = win32gui.GetWindowRect(hwnd)
hDC = win32gui.GetDC(0)
w = win32print.GetDeviceCaps(hDC, win32con.DESKTOPHORZRES)
pscale = w/win32api.GetSystemMetrics(0)
xf = int(rect[2]/pscale)-pix.width()
yf = int(rect[3]/pscale)-pix.height()
return [xf,yf]
def clk(pos,hwnd):
time.sleep(0.1)
off_set=GetWindowCorner(hwnd)
pos=(pos[0]+off_set[0],pos[1]+off_set[1])
win32api.SetCursorPos(pos)
time.sleep(0.1)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
time.sleep(0.1)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
global window_height
window_height=0
def GetWindowHeight():
global window_height
if window_height!=0:
return window_height
hwnd = win32gui.FindWindow('UnityWndClass', None)
screen = QApplication.primaryScreen()
pix = screen.grabWindow(hwnd).toImage().convertToFormat(QImage.Format.Format_RGBA8888)
window_height=pix.height()
return window_height
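# Added note (not in the original script): LocatePic() below does grayscale template
# matching (cv.TM_SQDIFF_NORMED) of a pre-captured reference image against the current
# screenshot, keeps every match whose normalized distance is below 0.01, de-duplicates
# hits within a ~5-pixel radius, and returns the match centre points sorted top-to-bottom.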
def LocatePic(target,picname):
matchResult=[]
template = cv.imread(r"{0}\pic_{1}p\{2}.png".format(base_dir,GetWindowHeight(),picname),0)
theight, twidth = template.shape[:2]
result = cv.matchTemplate(target,template,cv.TM_SQDIFF_NORMED)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
temp_loc = min_loc
if min_val<0.01:
matchResult.append([int(min_loc[0]+twidth/2),int(min_loc[1]+theight/2)])
loc = numpy.where(result<0.01)
for other_loc in zip(*loc[::-1]):
if (temp_loc[0]-other_loc[0])**2>25 or(temp_loc[1]-other_loc[1])**2>25 :
if (other_loc[0]-min_loc[0])**2>25 or(other_loc[1]-min_loc[1])**2>25 :
temp_loc = other_loc
matchResult.append([int(other_loc[0]+twidth/2),int(other_loc[1]+theight/2)])
matchResult.sort(key=second_key_sort)
return matchResult
def OpenMap(hwnd):
if len(LocatePic(cutscreen(hwnd),'close_btn'))==0:
keyboard.send('m')
return 0
return 1
def SetBtnON(hwnd):
pos=LocatePic(cutscreen(hwnd),'off_btn')
if len(pos):
clk(pos[0],hwnd)
return 0
return 1
def SetBtnOFF(hwnd):
pos=LocatePic(cutscreen(hwnd),'on_btn')
print(pos)
if len(pos):
clk(pos[0],hwnd)
return 0
return 1
def ClickBtn(hwnd):
pos=LocatePic(cutscreen(hwnd),'confirm_btn')
if len(pos):
clk(pos[0],hwnd)
return 0
return 1
def ClickDel(hwnd):
pos=LocatePic(cutscreen(hwnd),'del')
if len(pos):
print(pos)
clk(pos[0],hwnd)
return 0
return 1
def ClickMarkList(hwnd):
pos=LocatePic(cutscreen(hwnd),'marklist0')
pos1=LocatePic(cutscreen(hwnd),'marklist1')
if len(pos):
clk(pos[0],hwnd)
print(pos)
return 0
if(len(pos1)):
clk(pos1[0],hwnd)
print(pos1)
return 0
return 1
def DeleteAllMark(hwnd,name):
poslist=LocatePic(cutscreen(hwnd),name)
    t1=time.perf_counter()  # time.clock() was removed in Python 3.8
while len(poslist):
time.sleep(0.2)
clk(poslist[0],hwnd)
time.sleep(0.1)
flag=0
while flag==0:
for i in range(5):
if ClickDel(hwnd)==0:
flag=1
break
if flag==0:
for i in range(5):
if ClickMarkList(hwnd)==0:break
time.sleep(0.2)
poslist=LocatePic(cutscreen(hwnd),name)
        t2=time.perf_counter()
if(t2-t1>10): break
return
def DeleteAllMarks(hwnd):
for i in range(7):
DeleteAllMark(hwnd, 'mark{0}'.format(i))
return
if __name__ == '__main__':
hwnd = win32gui.FindWindow('UnityWndClass', None)
pythoncom.CoInitialize()
shell = win32com.client.Dispatch("WScript.Shell")
shell.SendKeys('%')
win32gui.SetForegroundWindow(hwnd)
time.sleep(0.2)
DeleteAllMarks(hwnd)
#for i in range(7):
#DeleteAllMark(hwnd, 'mark{0}'.format(i))
#t1=time.clock()
#print(LocatePic(cutscreen(hwnd),'marklist0'))
#t2=time.clock()
#print(t2-t1)
#OpenMap(hwnd) |
test-data/unit/plugins/method_in_decorator.py | cibinmathew/mypy | 12,496 | 11175247 | from mypy.types import CallableType, Type
from typing import Callable, Optional
from mypy.plugin import MethodContext, Plugin
class MethodDecoratorPlugin(Plugin):
def get_method_hook(self, fullname: str) -> Optional[Callable[[MethodContext], Type]]:
if 'Foo.a' in fullname:
return method_decorator_callback
return None
def method_decorator_callback(ctx: MethodContext) -> Type:
if isinstance(ctx.default_return_type, CallableType):
str_type = ctx.api.named_generic_type('builtins.str', [])
return ctx.default_return_type.copy_modified(ret_type=str_type)
return ctx.default_return_type
def plugin(version):
return MethodDecoratorPlugin
|
tests/test_data/packages/small_fake_with_unpinned_deps/setup.py | m-mead/pip-tools | 4,085 | 11175254 | from setuptools import setup
setup(
name="small_fake_with_unpinned_deps",
version=0.1,
install_requires=["small-fake-a", "small-fake-b"],
)
|
data/transcoder_evaluation_gfg/python/CHECK_LINE_PASSES_ORIGIN.py | mxl1n/CodeGen | 241 | 11175264 | <reponame>mxl1n/CodeGen
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( x1 , y1 , x2 , y2 ) :
return ( x1 * ( y2 - y1 ) == y1 * ( x2 - x1 ) )
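# Added note (not in the original task file): the check above is the 2-D cross product
# (x1, y1) x (x2 - x1, y2 - y1) == 0, i.e. the origin, (x1, y1) and (x2, y2) are
# collinear, so the line through the two points passes through (0, 0).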
#TOFILL
if __name__ == '__main__':
param = [
(1,28,2,56,),
(10,0,20,0,),
(0,1,0,17,),
(1,1,10,10,),
(82,86,19,4,),
(78,86,11,6,),
(13,46,33,33,),
(18,29,95,12,),
(42,35,25,36,),
(29,17,45,35,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |
scattertext/test/test_fourSquareAxes.py | shettyprithvi/scattertext | 1,823 | 11175332 | from unittest import TestCase
import pandas as pd
from scattertext.CorpusFromPandas import CorpusFromPandas
from scattertext.WhitespaceNLP import whitespace_nlp
from scattertext.semioticsquare.FourSquareAxis import FourSquareAxes
def get_docs_categories_four():
documents = [u"What art thou that usurp'st this time of night,",
u'Together with that fair and warlike form',
u'In which the majesty of buried Denmark',
u'Did sometimes march? by heaven I charge thee, speak!',
u'Halt! Who goes there?',
u'[Intro]',
u'It is I sire Tone from Brooklyn.',
u'Well, speak up man what is it?',
u'News from the East sire! THE BEST OF BOTH WORLDS HAS RETURNED!',
u'I think it therefore manifest, from what I have here advanced,',
u'that the main Point of Skill and Address, is to furnish Employment',
u'for this Redundancy of Vapour, and prudently to adjust the Season 1',
u'of it ; by which ,means it may certainly become of Cardinal',
u"Ain't it just like the night to play tricks when you're tryin' to be so quiet?",
u"We sit here stranded, though we're all doin' our best to deny it",
u"And Louise holds a handful of rain, temptin' you to defy it",
u'Lights flicker from the opposite loft',
u'In this room the heat pipes just cough',
u'The country music station plays soft']
categories = ['hamlet'] * 4 + ['jay-z/r. kelly'] * 5 + ['swift'] * 4 + ['dylan'] * 6
return categories, documents
class TestFourSquareAxes(TestCase):
def test_build(self):
corpus = self._get_test_corpus()
with self.assertRaises(AssertionError):
fs = FourSquareAxes(corpus, 'hamlet', ['jay-z/r. kelly'], ['swift'], ['dylan'])
with self.assertRaises(AssertionError):
fs = FourSquareAxes(corpus, ['hamlet'], 'jay-z/r. kelly', ['swift'], ['dylan'])
with self.assertRaises(AssertionError):
fs = FourSquareAxes(corpus, ['hamlet'], ['jay-z/r. kelly'], 'swift', ['dylan'])
with self.assertRaises(AssertionError):
fs = FourSquareAxes(corpus, ['hamlet'], ['jay-z/r. kelly'], ['swift'], 'dylan')
fs = FourSquareAxes(corpus, ['hamlet'], ['jay-z/r. kelly'], ['swift'], ['dylan'])
self.assertEqual(fs.get_labels(),
{'a_and_b_label': 'swift',
'a_and_not_b_label': 'hamlet',
'a_label': '',
'b_and_not_a_label': 'jay-z/r. kelly',
'b_label': '',
'not_a_and_not_b_label': 'dylan',
'not_a_label': '',
'not_b_label': ''})
fs = FourSquareAxes(corpus, ['hamlet'], ['jay-z/r. kelly'], ['swift'], ['dylan'],
labels={'a': 'swiftham', 'b': 'swiftj'})
self.assertEqual(fs.get_labels(),
{'a_and_b_label': 'swift',
'a_and_not_b_label': 'hamlet',
'a_label': 'swiftham',
'b_and_not_a_label': 'jay-z/r. kelly',
'b_label': 'swiftj',
'not_a_and_not_b_label': 'dylan',
'not_a_label': '',
'not_b_label': ''})
axes = fs.get_axes()
self.assertEqual(len(axes), len(corpus.get_terms()))
self.assertEqual(set(axes.columns), {'x', 'y', 'counts'})
fs.lexicons
def _get_test_corpus(self):
cats, docs = get_docs_categories_four()
df = pd.DataFrame({'category': cats, 'text': docs})
corpus = CorpusFromPandas(df, 'category', 'text', nlp=whitespace_nlp).build()
return corpus
def _get_test_semiotic_square(self):
corpus = self._get_test_corpus()
semsq = FourSquareAxes(corpus, ['hamlet'], ['jay-z/r. kelly'], ['swift'], ['dylan'])
return semsq
|
src/sage/tests/books/judson-abstract-algebra/crypt-sage.py | bopopescu/sage | 1,742 | 11175342 | <reponame>bopopescu/sage<gh_stars>1000+
## -*- coding: utf-8 -*- ##
## Sage Doctest File ##
#**************************************#
#* Generated from PreTeXt source *#
#* on 2017-08-24T11:43:34-07:00 *#
#* *#
#* http://mathbook.pugetsound.edu *#
#* *#
#**************************************#
##
"""
Please contact <NAME> (<EMAIL>) with
any test failures here that need to be changed
as a result of changes accepted into Sage. You
may edit/change this file in any sensible way, so
that development work may procede. Your changes
may later be replaced by the authors of "Abstract
Algebra: Theory and Applications" when the text is
updated, and a replacement of this file is proposed
for review.
"""
##
## To execute doctests in these files, run
## $ $SAGE_ROOT/sage -t <directory-of-these-files>
## or
## $ $SAGE_ROOT/sage -t <a-single-file>
##
## Replace -t by "-tp n" for parallel testing,
## "-tp 0" will use a sensible number of threads
##
## See: http://www.sagemath.org/doc/developer/doctesting.html
## or run $ $SAGE_ROOT/sage --advanced for brief help
##
## Generated at 2017-08-24T11:43:34-07:00
## From "Abstract Algebra"
## At commit 26d3cac0b4047f4b8d6f737542be455606e2c4b4
##
## Section 7.6 Sage
##
r"""
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: p_a = next_prime(10^10)
sage: q_a = next_prime(p_a)
sage: p_b = next_prime((3/2)*10^10)
sage: q_b = next_prime(p_b)
sage: n_a = p_a * q_a
sage: n_b = p_b * q_b
sage: n_a, n_b
(100000000520000000627, 225000000300000000091)
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: m_a = euler_phi(n_a)
sage: m_b = euler_phi(n_b)
sage: m_a, m_b
(100000000500000000576, 225000000270000000072)
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: factor(m_a)
2^6 * 3 * 11 * 17 * 131 * 521 * 73259 * 557041
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: E_a = 5*23
sage: D_a = inverse_mod(E_a, m_a)
sage: D_a
20869565321739130555
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: factor(m_b)
2^3 * 3^4 * 107 * 1298027 * 2500000001
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: E_b = 7*29
sage: D_b = inverse_mod(E_b, m_b)
sage: D_b
24384236482463054195
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: print("Alice's public key, n:", n_a, "E:", E_a)
Alice's public key, n: 100000000520000000627 E: 115
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: print("Alice's private key, D:", D_a)
Alice's private key, D: 20869565321739130555
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: print("Bob's public key, n:", n_b, "E:", E_b)
Bob's public key, n: 225000000300000000091 E: 203
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: print("Bob's private key, D:", D_b)
Bob's private key, D: 24384236482463054195
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: word = 'Sage'
sage: digits = [ord(letter) for letter in word]
sage: digits
[83, 97, 103, 101]
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: message = ZZ(digits, 128)
sage: message
213512403
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: signed = power_mod(message, D_a, n_a)
sage: signed
47838774644892618423
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: encrypted = power_mod(signed, E_b, n_b)
sage: encrypted
111866209291209840488
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: decrypted = power_mod(encrypted, D_b, n_b)
sage: decrypted
47838774644892618423
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: received = power_mod(decrypted, E_a, n_a)
sage: received
213512403
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: digits = received.digits(base=128)
sage: letters = [chr(ascii) for ascii in digits]
sage: letters
['S', 'a', 'g', 'e']
~~~~~~~~~~~~~~~~~~~~~~ ::
sage: ''.join(letters)
'Sage'
"""
|
autoremovetorrents/clientstatus.py | stargz/autoremove-torrents | 437 | 11175442 | <gh_stars>100-1000
from .util.convertbytes import convert_bytes
from .util.convertspeed import convert_speed
class ClientStatus(object):
def __init__(self):
# Proper attributes:
# free_space, total_download_speed, total_upload_speed, etc.
#
# Note:
        # The type of free_space is a function because we need to specify a
# directory to check its free space.
pass
# Format client status info
def __str__(self):
        # Attribute formatter
        def disp(prop, converter = None):
            if hasattr(self, prop):
                attr = getattr(self, prop)
                if converter is not None:
                    return converter(attr)
                return attr
            else:
                return '(Not Provided)'
return ('Status reported by the client: \n' +
'\tDownload Speed: %s\tTotal: %s\n' +
'\tUpload Speed: %s\tTotal: %s\n' +
'\tOutgoing Port Status: %s') % \
(
disp('download_speed', convert_speed),
disp('total_downloaded', convert_bytes),
disp('upload_speed', convert_speed),
disp('total_uploaded', convert_bytes),
disp('port_status', lambda s: s.name),
)
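    # Added illustration (not part of the original module): a minimal sketch of how a
    # caller might fill in a ClientStatus and rely on __str__; the attribute values
    # below are made-up placeholders.
    #
    #   status = ClientStatus()
    #   status.download_speed = 1048576          # bytes/s
    #   status.total_downloaded = 10 * 2 ** 30
    #   print(status)                            # missing fields show '(Not Provided)'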
|
doc/conf.py | spatialaudio/jackclient-python | 120 | 11175443 | <reponame>spatialaudio/jackclient-python<filename>doc/conf.py
# Configuration file for Sphinx,
# see https://www.sphinx-doc.org/en/master/usage/configuration.html
import sys
import os
from subprocess import check_output
sys.path.insert(0, os.path.abspath('../src'))
sys.path.insert(0, os.path.abspath('.'))
# Fake import to avoid actually loading CFFI and the JACK library
import fake__jack
sys.modules['_jack'] = sys.modules['fake__jack']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3' # for sphinx.ext.napoleon
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon', # support for NumPy-style docstrings
'sphinx_last_updated_by_git',
]
autoclass_content = 'init'
autodoc_member_order = 'bysource'
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = False
authors = '<NAME>'
project = 'JACK Audio Connection Kit (JACK) Client for Python'
copyright = '2020, ' + authors
nitpicky = True
try:
release = check_output(['git', 'describe', '--tags', '--always'])
release = release.decode().strip()
except Exception:
release = '<unknown>'
try:
today = check_output(['git', 'show', '-s', '--format=%ad', '--date=short'])
today = today.decode().strip()
except Exception:
today = '<unknown date>'
default_role = 'any'
# -- Options for HTML output ----------------------------------------------
html_theme = 'insipid'
html_title = 'JACK Client for Python, version ' + release
html_domain_indices = False
html_show_copyright = False
html_permalinks_icon = '§'
html_favicon = 'favicon.svg'
html_copy_source = False
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
'printindex': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [('index', 'JACK-Client.tex', project, authors, 'howto')]
latex_show_urls = 'footnote'
latex_domain_indices = False
# -- Options for epub output ----------------------------------------------
epub_author = authors
epub_use_index = False
|
tests/file_io/data_range_io.py | dfjxs/dfvfs | 176 | 11175451 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the data range file-like object."""
import unittest
from dfvfs.file_io import data_range_io
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from tests.file_io import test_lib
class DataRangeTest(test_lib.SylogTestCase):
"""Tests for the data range file-like object."""
# pylint: disable=protected-access
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_path = self._GetTestFilePath(['syslog'])
self._SkipIfPathNotExists(test_path)
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=test_path)
self._data_range_path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_DATA_RANGE, parent=test_os_path_spec,
range_offset=167, range_size=1080)
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._resolver_context.Empty()
def testOpenCloseFileObject(self):
"""Test the open and close functionality using a file-like object."""
file_object = data_range_io.DataRange(
self._resolver_context, self._data_range_path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 1080)
def testSetRange(self):
"""Test the _SetRange function."""
file_object = data_range_io.DataRange(
self._resolver_context, self._data_range_path_spec)
self.assertEqual(file_object._range_offset, -1)
self.assertEqual(file_object._range_size, -1)
file_object._SetRange(167, 1080)
self.assertEqual(file_object._range_offset, 167)
self.assertEqual(file_object._range_size, 1080)
with self.assertRaises(ValueError):
file_object._SetRange(-1, 1080)
with self.assertRaises(ValueError):
file_object._SetRange(167, -1)
def testOpenClosePathSpec(self):
"""Test the open and close functionality using a path specification."""
file_object = data_range_io.DataRange(
self._resolver_context, self._data_range_path_spec)
file_object.Open()
self.assertEqual(file_object.get_size(), 1080)
def testSeek(self):
"""Test the seek functionality."""
file_object = data_range_io.DataRange(
self._resolver_context, self._data_range_path_spec)
file_object.Open()
self._TestSeekFileObject(file_object, base_offset=0)
def testRead(self):
"""Test the read functionality."""
file_object = data_range_io.DataRange(
self._resolver_context, self._data_range_path_spec)
file_object.Open()
self._TestReadFileObject(file_object, base_offset=0)
if __name__ == '__main__':
unittest.main()
|
payment/apps.py | skyydq/GreaterWMS | 1,063 | 11175465 | <filename>payment/apps.py
from django.apps import AppConfig
from django.db.models.signals import post_migrate
class PaymentConfig(AppConfig):
name = 'payment'
# def ready(self):
# post_migrate.connect(do_init_data, sender=self)
#
# def do_init_data(sender, **kwargs):
# init_category()
#
# def init_category():
# """
# :return:None
# """
# try:
# from .models import TransportationFeeListModel as transporationfee
# if transporationfee.objects.filter(openid__iexact='init_data').exists():
# pass
# else:
# init_data = [
# transporationfee(openid='init_data', send_city='上海市', receiver_city='杭州市',
# weight_fee=0.4, volume_fee=30, transportation_supplier='WanKe Logistic',
# min_payment=250, creater='GreaterWMS'),
# transporationfee(openid='init_data', send_city='上海市', receiver_city='北京市',
# weight_fee=0.8, volume_fee=220, transportation_supplier='WanKe Logistic',
# min_payment=250, creater='GreaterWMS'),
# ]
# transporationfee.objects.bulk_create(init_data, batch_size=100)
# except:
# pass
#
# def init_datas():
# init_category()
|
examples/codes/mosn-extensions/plugin/filter/python/plugin.py | inkhare/mosn | 2,106 | 11175477 | <gh_stars>1000+
from concurrent import futures
import sys
import time
import argparse
import grpc
import logging
import json
import plugin_pb2
import plugin_pb2_grpc
class PluginServicer(plugin_pb2_grpc.PluginServicer):
def Call(self, request, context):
logging.info("begin do plugin something..")
for item in checker.config:
if request.header[item]!=checker.config[item]:
return plugin_pb2.Response(status=-1)
return plugin_pb2.Response(status=1)
class Checker:
def __init__(self,config):
self.config=config
def serve(checker):
# Start the server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#checker
plugin_pb2_grpc.add_PluginServicer_to_server(PluginServicer(), server)
server.add_insecure_port('127.0.0.1:1234')
server.start()
    # Output the handshake line read by the plugin host: core-version|plugin-version|network|address|protocol
print("1|1|tcp|127.0.0.1:1234|grpc")
sys.stdout.flush()
try:
while True:
time.sleep(60 * 60 * 24)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
logging.basicConfig(filename='./plugin-py.log',format='[%(asctime)s-%(filename)s-%(levelname)s:%(message)s]', level = logging.DEBUG,filemode='a',datefmt='%Y-%m-%d%I:%M:%S %p')
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('-c',dest="config",default="checkconf.json",
help='-c checkconf.json')
args = parser.parse_args()
f = open(args.config, encoding='utf-8')
setting = json.load(f)
checker = Checker(setting)
serve(checker)
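# Added note (not part of the original example): checkconf.json is assumed to be a flat
# JSON object mapping required request-header names to their expected values, e.g.
# {"User": "admin", "Token": "123456"}; Call() returns status=-1 when any header differs.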
|
datawig-js/server.py | tirkarthi/datawig | 374 | 11175668 | import os
from blueprints import datawig
from flask import Flask
app = Flask(__name__)
app.register_blueprint(datawig.datawig)
# same secret key causes sessions to carry over from one app execution to the next
app.secret_key = os.urandom(32)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8081, debug=True)
|
inventory/admin.py | nathandarnell/sal | 215 | 11175676 | <reponame>nathandarnell/sal
from django.contrib import admin
from inventory.models import Application, Inventory, InventoryItem
class ApplicationAdmin(admin.ModelAdmin):
list_display = ('name', 'bundleid', 'bundlename')
search_fields = ('name', 'bundleid', 'bundlename')
class InventoryAdmin(admin.ModelAdmin):
list_display = ('machine', 'datestamp', 'sha256hash')
list_filter = ('datestamp',)
date_hierarchy = 'datestamp'
search_fields = ('machine__hostname',)
class InventoryItemAdmin(admin.ModelAdmin):
list_display = ('application', 'version', 'path', 'machine')
search_fields = ('application__name', 'version', 'machine__hostname')
admin.site.register(Application, ApplicationAdmin)
admin.site.register(Inventory, InventoryAdmin)
admin.site.register(InventoryItem, InventoryItemAdmin)
|
keep/commands/cmd_update.py | nfsergiu/keep | 533 | 11175681 | <gh_stars>100-1000
import click
from keep import cli, utils, about
@click.command('update', short_help='Check for an update of Keep.')
@cli.pass_context
def cli(ctx):
"""Check for an update of Keep."""
utils.check_update(ctx, forced=True)
click.secho("Keep is at its latest version v{}".format(about.__version__), fg='green')
|
examples/quickstart/first.py | romeojulietthotel/Flask-NotSuperAdmin | 414 | 11175700 | from flask import Flask
from flask.ext.superadmin import Admin
app = Flask(__name__)
admin = Admin(app)
app.run()
|
jirafs/ticketfolder.py | coddingtonbear/jirafs | 119 | 11175726 | <gh_stars>100-1000
import codecs
import fnmatch
import logging
import logging.handlers
import io
import json
import os
import re
import subprocess
from urllib import parse
from jira.resources import Issue
from . import constants
from . import exceptions
from . import migrations
from . import utils
from .jiralinkmanager import JiraLinkManager
from .jirafieldmanager import JiraFieldManager
from .plugin import MacroPlugin, PluginValidationError
from .exceptions import MacroError
class TicketFolderLoggerAdapter(logging.LoggerAdapter):
def process(self, msg, kwargs):
return (
"{{{issue_id}}} {msg}".format(issue_id=self.extra["issue_id"], msg=msg,),
kwargs,
)
class TicketFolder(object):
def __init__(self, path, jira, migrate=True, quiet=False):
self.path = os.path.realpath(os.path.expanduser(path))
self.quiet = quiet
self.issue_url = self.get_ticket_url()
self.get_jira = jira
if not os.path.isdir(self.metadata_dir):
raise exceptions.NotTicketFolderException(
"%s is not a synchronizable ticket folder" % (path)
)
self._formatter = logging.Formatter(
fmt="%(asctime)s\t%(levelname)s\t%(module)s\t%(message)s"
)
self._handler = logging.handlers.RotatingFileHandler(
self.get_metadata_path(constants.TICKET_OPERATION_LOG),
maxBytes=2 ** 20,
backupCount=2,
encoding="utf-8",
)
self._handler.setFormatter(self._formatter)
self._logger = logging.getLogger(
".".join([__name__, self.ticket_number.replace("-", "_")])
)
self._logger.addHandler(self._handler)
self._logger_adapter = TicketFolderLoggerAdapter(
self._logger, {"issue_id": self.ticket_number},
)
self.plugins = self.load_plugins()
if migrate:
self.run_migrations()
# If no `new_comment.jira.txt` file exists, let's create one
comment_path = self.get_local_path(constants.TICKET_NEW_COMMENT)
if not os.path.exists(comment_path):
with io.open(comment_path, "w", encoding="utf-8") as out:
out.write("")
# Let's update the ignore file while we're here.
self.build_ignore_files()
@property
def logger(self):
return self._logger_adapter
def __repr__(self):
value = self.__str__()
return "<%s>" % value
def __str__(self):
return "[%s] at %s" % (self.ticket_number, self.path)
@property
def subtasks(self):
if hasattr(self, "_subtasks"):
return self._subtasks
self._subtasks = []
subtasks_path = self.get_metadata_path("subtasks")
if not os.path.exists(subtasks_path):
return self._subtasks
with open(subtasks_path, "r") as in_:
for line in in_:
ticket_number = line.strip()
folder = self.__class__(
self.get_path(ticket_number,), utils.lazy_get_jira()
)
self._subtasks.append(folder)
return self._subtasks
def load_plugins(self):
config = self.get_config()
plugins = []
if not config.has_section(constants.CONFIG_PLUGINS):
return plugins
installed_plugins = utils.get_installed_plugins()
for name, status in config.items(constants.CONFIG_PLUGINS):
if not utils.convert_to_boolean(status):
# This plugin is not turned on.
continue
if name not in installed_plugins:
# This plugin is not installed.
self.log(
"Plugin '%s' is not available.", (name,),
)
continue
plugin = installed_plugins[name](self, name)
try:
plugin.validate()
except PluginValidationError as e:
self.log(
"Plugin '%s' did not pass validation; not loading: %s.", (name, e,)
                )
                continue
plugins.append(plugin)
return plugins
def get_config(self):
local_config_file = self.get_metadata_path("config")
additional_configs = []
if os.path.exists(local_config_file):
additional_configs.append(local_config_file)
return utils.get_config(additional_configs)
def set_config_value(self, section, key, value):
with utils.stash_local_changes(self):
local_config_file = self.get_metadata_path("config")
config = utils.get_config(
additional_configs=[local_config_file], include_global=False,
)
if not config.has_section(section):
config.add_section(section)
config.set(section, key, value)
with open(local_config_file, "w") as out:
config.write(out)
self.run_git_command("add", ".jirafs/config")
self.run_git_command("commit", "-m", "Config change", failure_ok=True)
@property
def jira_base(self):
match = re.match(r"(.*)\/browse\/.*", self.issue_url)
if not match:
raise ValueError(
"Could not infer Jira server URL from issue URL %s" % (self.issue_url,)
)
return match.group(1)
@property
def ticket_number(self):
parts = parse.urlparse(self.issue_url)
match = re.match(r".*\/browse\/(\w+-\d+)\/?.*", parts.path)
if not match:
raise ValueError(
"Could not infer ticket number from URL %s" % self.issue_url
)
return match.group(1)
@property
def jira(self):
if not hasattr(self, "_jira"):
self._jira = self.get_jira(self.jira_base, config=self.get_config())
return self._jira
@property
def issue(self):
if not hasattr(self, "_issue"):
self._issue = self.jira.issue(self.ticket_number)
return self._issue
def clear_cache(self):
if hasattr(self, "_issue"):
del self._issue
if hasattr(self, "_jira"):
del self._jira
def store_cached_issue(self, shadow=True):
storable = {"options": self.issue._options, "raw": self.issue.raw}
with io.open(
self.get_path(".jirafs/issue.json", shadow=shadow), "w", encoding="utf-8",
) as out:
out.write(
json.dumps(storable, indent=4, sort_keys=True, ensure_ascii=False,)
)
@property
def cached_issue(self):
if not hasattr(self, "_cached_issue"):
try:
issue_path = self.get_metadata_path("issue.json")
with io.open(issue_path, "r", encoding="utf-8") as _in:
storable = json.loads(_in.read())
self._cached_issue = Issue(
storable["options"], None, storable["raw"],
)
except IOError:
self.log(
"Error encountered while loading cached issue!",
level=logging.ERROR,
)
self._cached_issue = self.issue
return self._cached_issue
@property
def metadata_dir(self) -> str:
return os.path.join(self.path, constants.METADATA_DIR,)
@property
def git_master(self) -> str:
return self.run_git_command("rev-parse", "master")
@property
def git_merge_base(self) -> str:
return self.run_git_command("merge-base", "master", "jira",)
@property
def git_branch(self) -> str:
return self.run_git_command("rev-parse", "--abbrev-ref", "HEAD")
@property
def on_master(self) -> bool:
return self.git_branch == "master"
def get_ticket_url(self):
try:
with io.open(
self.get_metadata_path("issue_url"), "r", encoding="utf-8"
) as in_:
return in_.read().strip()
except (IOError, OSError):
return None
def get_metadata_path(self, *args) -> str:
return os.path.join(self.metadata_dir, *args)
def get_remote_file_metadata(self, shadow=True):
remote_files = self.get_path(".jirafs/remote_files.json", shadow=shadow)
try:
with io.open(remote_files, "r", encoding="utf-8") as _in:
data = json.loads(_in.read())
except IOError:
data = {}
return data
def set_remote_file_metadata(self, data, shadow=True):
remote_files = self.get_path(".jirafs/remote_files.json", shadow=shadow)
with io.open(remote_files, "w", encoding="utf-8") as out:
out.write(json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False,))
def get_local_path(self, *args):
return os.path.join(self.path, *args)
def get_shadow_path(self, *args):
return os.path.join(self.get_metadata_path("shadow"), *args)
def get_path(self, *args, **kwargs):
shadow = kwargs.get("shadow", False)
if shadow:
return self.get_shadow_path(*args)
return self.get_local_path(*args)
@property
def version(self):
try:
with io.open(
self.get_metadata_path("version"), "r", encoding="utf-8"
) as _in:
return int(_in.read().strip())
except IOError:
return 1
@property
def log_path(self):
return self.get_metadata_path(constants.TICKET_OPERATION_LOG)
@classmethod
def initialize_ticket_folder(cls, ticket_url, path, jira):
path = os.path.realpath(path)
metadata_path = os.path.join(path, constants.METADATA_DIR,)
os.mkdir(metadata_path)
with io.open(
os.path.join(metadata_path, "issue_url"), "w", encoding="utf-8"
) as out:
out.write(ticket_url)
# Create bare git repository so we can easily detect changes.
subprocess.check_call(
("git", "--bare", "init", os.path.join(metadata_path, "git",)),
stdout=subprocess.PIPE,
)
subprocess.check_call(
(
"git",
"config",
"--file=%s" % os.path.join(metadata_path, "git", "config"),
"core.excludesfile",
constants.GIT_IGNORE_FILE,
)
)
excludes_path = os.path.join(metadata_path, "git", "info", "exclude")
with io.open(excludes_path, "w", encoding="utf-8") as gitignore:
gitignore.write(
"\n".join(
[
"%s/git" % constants.METADATA_DIR,
"%s/shadow" % constants.METADATA_DIR,
"%s/operation.log" % constants.METADATA_DIR,
]
)
)
instance = cls(path, jira, migrate=False)
instance.log(
"Ticket folder for issue %s created at %s",
(instance.ticket_number, instance.path,),
)
instance.run_git_command("add", "-A")
instance.run_git_command("commit", "--allow-empty", "-m", "Initialized")
instance.run_migrations(init=True)
comment_path = instance.get_local_path(constants.TICKET_NEW_COMMENT)
with io.open(comment_path, "w", encoding="utf-8") as out:
out.write("")
return instance
def run_git_command(self, command, *args, **kwargs):
failure_ok = kwargs.get("failure_ok", False)
shadow = kwargs.get("shadow", False)
binary = kwargs.get("binary", False)
stdin = kwargs.get("stdin", "")
args = list(args)
if not shadow:
work_tree = (self.path,)
git_dir = self.get_metadata_path("git")
cwd = self.path
else:
work_tree = self.get_metadata_path("shadow")
git_dir = self.get_metadata_path("shadow/.git")
cwd = self.get_metadata_path("shadow")
cmd = [
"git",
"--work-tree=%s" % work_tree,
"--git-dir=%s" % git_dir,
]
cmd.append(command)
if command == "commit":
args.append("--author='%s'" % constants.GIT_AUTHOR)
cmd.extend(args)
self.log("Executing git command `%s`", (" ".join(cmd),), logging.DEBUG)
handle = subprocess.Popen(
cmd,
cwd=cwd,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
)
result, _ = handle.communicate(stdin)
if handle.returncode != 0 and not failure_ok:
command = " ".join(cmd)
raise exceptions.GitCommandError(
"Error running command `%s`" % command,
returncode=handle.returncode,
stdout=result,
cmd=command,
)
if not binary:
return result.decode("utf-8").strip()
return result
def get_local_file_at_revision(self, path, revision, failure_ok=True, binary=False):
return self.run_git_command(
"show", "%s:%s" % (revision, path,), failure_ok=failure_ok, binary=binary,
)
def get_ignore_globs(self, which=constants.LOCAL_ONLY_FILE):
all_globs = [
constants.TICKET_DETAILS,
constants.TICKET_COMMENTS,
constants.TICKET_NEW_COMMENT,
constants.TICKET_LINKS,
]
for field in constants.FILE_FIELDS:
all_globs.append(
constants.TICKET_FILE_FIELD_TEMPLATE.format(field_name=field)
)
def get_globs_from_file(input_file):
globs = []
for line in input_file.readlines():
if line.startswith("#") or not line.strip():
continue
globs.append(line.strip())
return globs
try:
with io.open(
self.get_local_path(which), "r", encoding="utf-8"
) as local_ign:
all_globs.extend(get_globs_from_file(local_ign))
except IOError:
pass
try:
with io.open(
os.path.expanduser("~/%s" % which), "r", encoding="utf-8"
) as global_ignores:
all_globs.extend(get_globs_from_file(global_ignores))
except IOError:
pass
return all_globs
def file_matches_globs(self, filename, ignore_globs):
for glob in ignore_globs:
if fnmatch.fnmatch(filename, glob):
return True
return False
def get_conflicts(self):
conflicts = {}
conflicted_files = self.run_git_command(
"diff", "--name-only", "--diff-filter=U",
).strip()
if conflicted_files:
conflicts["files"] = conflicted_files.split("\n")
return conflicts
def get_ready_changes(self):
ready = {
"fields": (self.get_fields("HEAD") - self.get_fields(self.git_merge_base)),
"links": (self.get_links("HEAD") - self.get_links(self.git_merge_base)),
"new_comment": self.get_new_comment(ready=True),
}
current_hash = self.run_git_command("rev-parse", "master")
committed_files = set(
self.run_git_command("ls-tree", "--name-only", "-r", current_hash).split(
"\n"
)
)
merge_base_files = set(
self.run_git_command(
"ls-tree", "--name-only", "-r", self.git_merge_base
).split("\n")
)
ready["deleted"] = self.filter_ignored_files(
list(merge_base_files - committed_files),
constants.LOCAL_ONLY_FILE,
constants.GIT_IGNORE_FILE,
constants.GIT_EXCLUDE_FILE,
allow_nonfile=True,
)
changed_files = self.filter_ignored_files(
self.run_git_command(
"diff", "--name-only", "%s..master" % self.git_merge_base,
).split("\n"),
constants.LOCAL_ONLY_FILE,
)
ready["files"] = [
filename for filename in changed_files if filename not in ready["deleted"]
]
return ready
def get_uncommitted_changes(self):
uncommitted = {
"fields": self.get_fields() - self.get_fields("HEAD"),
"new_comment": self.get_new_comment(ready=False),
"links": self.get_links() - self.get_links("HEAD"),
}
new_files = self.run_git_command("ls-files", "-o", failure_ok=True).split("\n")
modified_files = self.run_git_command("ls-files", "-m", failure_ok=True).split(
"\n"
)
deleted_files = self.run_git_command("ls-files", "-d", failure_ok=True).split(
"\n"
)
uncommitted["files"] = self.filter_ignored_files(
[filename for filename in new_files + modified_files if filename],
constants.LOCAL_ONLY_FILE,
constants.GIT_IGNORE_FILE,
constants.GIT_EXCLUDE_FILE,
)
uncommitted["deleted"] = self.filter_ignored_files(
[filename for filename in deleted_files if filename],
constants.LOCAL_ONLY_FILE,
constants.GIT_IGNORE_FILE,
constants.GIT_EXCLUDE_FILE,
allow_nonfile=True, # They're deleted, after all
)
return uncommitted
def get_local_uncommitted_changes(self):
new_files = self.run_git_command("ls-files", "-o", failure_ok=True).split("\n")
modified_files = self.run_git_command("ls-files", "-m", failure_ok=True).split(
"\n"
)
committable = self.filter_ignored_files(
[filename for filename in new_files + modified_files if filename],
constants.LOCAL_ONLY_FILE,
)
uncommitted = self.filter_ignored_files(
[
filename
for filename in modified_files + new_files
if filename not in committable
],
constants.GIT_IGNORE_FILE,
constants.GIT_EXCLUDE_FILE,
)
return {"files": uncommitted}
def get_remotely_changed(self):
metadata = self.get_remote_file_metadata(shadow=True)
assets = []
attachments = self.filter_ignored_files(
getattr(self.issue.fields, "attachment", []), constants.REMOTE_IGNORE_FILE
)
for attachment in attachments:
changed = metadata.get(attachment.filename) != attachment.created
if changed:
assets.append(attachment.filename)
return assets
def filter_ignored_files(self, files, *which, allow_nonfile=False):
if len(which) < 1:
which = [constants.LOCAL_ONLY_FILE]
if not isinstance(which, (list, tuple)):
which = [which]
for list_path in which:
ignore_globs = self.get_ignore_globs(list_path)
assets = []
for fileish in files:
# Get the actual filename; this is a little gross -- apologies.
filename = fileish
attachment = False
if not isinstance(fileish, str):
filename = fileish.filename
attachment = True
if self.file_matches_globs(filename, ignore_globs):
continue
if (
not attachment
and not allow_nonfile
and not os.path.isfile(os.path.join(self.path, filename))
):
continue
if filename.startswith("."):
continue
assets.append(fileish)
files = assets
return assets
def get_macro_plugins(self):
if not hasattr(self, "_macro_plugins"):
config = self.get_config()
plugins = []
if not config.has_section(constants.CONFIG_PLUGINS):
return plugins
installed_plugins = utils.get_installed_plugins(MacroPlugin)
for entrypoint_name, status in config.items(constants.CONFIG_PLUGINS):
if not utils.convert_to_boolean(status):
# This plugin is not turned on.
continue
if entrypoint_name not in installed_plugins:
# This plugin is not installed.
self.log(
"Macro plugin '%s' is not available; "
"this is probably because this plugin is not a "
"macro.",
(entrypoint_name,),
level=logging.DEBUG,
)
continue
plugin = installed_plugins[entrypoint_name](self, entrypoint_name)
try:
plugin.validate()
except PluginValidationError as e:
self.log(
"Plugin '%s' did not pass validation; " "not loading: %s.",
(entrypoint_name, e,),
                    )
                    continue
plugins.append(plugin)
self._macro_plugins = plugins
return self._macro_plugins
def process_macros_for_all_fields(self):
# Now let each plugin run its cleanup if necessary
for plugin in self.get_macro_plugins():
try:
plugin.cleanup_pre_process()
except NotImplementedError:
pass
# This is run just in case these macros are writing
# files as part of their operation, and we need to have
# those files written in advance of certain operations
# like listing changes or committing
fields = self.get_fields()
for field_name in fields:
fields.get_transformed(field_name)
self.get_new_comment()
with open(self.get_path(constants.TICKET_COMMENTS), "r") as inf:
self.process_macros(inf.read())
# Now let each plugin run its cleanup if necessary
for plugin in self.get_macro_plugins():
try:
plugin.cleanup_post_process()
except NotImplementedError:
pass
def process_macros(self, data, path=None):
macro_plugins = self.get_macro_plugins()
for plugin in macro_plugins:
try:
if isinstance(data, str):
data = plugin.process_text_data(data, path)
else:
continue
except MacroError as e:
# Annotate the MacroError with information about what
# macro caused the error
e.macro_name = plugin.entrypoint_name
raise e from e
unprocessed = re.compile(r"(<jirafs:.*>)", re.MULTILINE | re.DOTALL).findall(
data
)
if unprocessed:
raise exceptions.UnknownMacroError(unprocessed)
return data
def process_macro_reversals(self, data):
macro_plugins = self.get_macro_plugins()
for plugin in macro_plugins:
try:
if isinstance(data, str):
data = plugin.process_text_data_reversal(data)
else:
continue
except MacroError as e:
# Annotate the MacroError with information about what
# macro caused the error
e.macro_name = plugin.entrypoint_name
raise e from e
return data
def get_links(self, revision=None, path=None):
kwargs = {}
if not revision:
kwargs["path"] = path if path else self.path
else:
kwargs["revision"] = revision
return JiraLinkManager.create(self, **kwargs)
def get_fields(self, revision=None, path=None):
kwargs = {}
if not revision:
kwargs["path"] = path if path else self.path
else:
kwargs["revision"] = revision
return JiraFieldManager.create(self, **kwargs)
def get_new_comment(self, clear=False, staged=False, ready=False):
try:
with io.open(
self.get_local_path(constants.TICKET_NEW_COMMENT),
"r+",
encoding="utf-8",
) as c:
local_contents = c.read().strip()
if ready:
contents = self.get_local_file_at_revision(
constants.TICKET_NEW_COMMENT, "HEAD"
)
if contents:
contents = contents.strip()
else:
contents = ""
else:
contents = local_contents
if not ready and contents == self.get_new_comment(ready=True):
contents = ""
if contents == local_contents and clear:
with io.open(
self.get_local_path(constants.TICKET_NEW_COMMENT),
"r+",
encoding="utf-8",
) as c:
c.truncate()
except IOError:
contents = ""
# Apply macro plugins
return self.process_macros(contents)
def get_field_value_by_dotpath(self, field_name, raw=False, **kwargs):
fields = self.get_fields()
key_dotpath = None
if "." in field_name:
field_name, key_dotpath = field_name.split(".", 1)
if field_name not in fields:
raise exceptions.JirafsError("Field '%s' does not exist." % field_name)
if raw:
data = fields[field_name]
else:
data = fields.get_transformed(field_name)
if key_dotpath:
try:
for component in key_dotpath.split("."):
if not isinstance(data, dict):
raise exceptions.JirafsError(
"Key '%s' (of dotpath '%s') is not an object "
"in field '%s'." % (component, key_dotpath, field_name,)
)
elif component not in data:
if "default" in kwargs:
data = kwargs["default"]
else:
raise exceptions.JirafsError(
f"Keypath {key_dotpath} does not exist"
)
break
else:
data = data[component]
except (ValueError, TypeError):
raise exceptions.JirafsError(
"Field '%s' could not be parsed as JSON for retrieving "
"dotpath '%s'." % (field_name, key_dotpath,)
)
return data
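    # Added note (not in the original source): a dotpath addresses nested JSON inside a
    # field value, e.g. get_field_value_by_dotpath("status.statusCategory.name") resolves
    # the "status" field and then walks the "statusCategory" -> "name" keys, returning
    # `default` (when supplied) if a key along the way is missing.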
def is_up_to_date(self):
jira_commit = self.run_git_command("rev-parse", "jira")
master_commit = self.run_git_command("rev-parse", "master")
try:
self.run_git_command(
"merge-base", "--is-ancestor", jira_commit, master_commit,
)
except exceptions.GitCommandError:
return False
return True
def status(self):
self.process_macros_for_all_fields()
return {
"ready": self.get_ready_changes(),
"conflicts": self.get_conflicts(),
"local_uncommitted": self.get_local_uncommitted_changes(),
"uncommitted": self.get_uncommitted_changes(),
"up_to_date": self.is_up_to_date(),
}
def run_migrations(self, init=False):
loglevel = logging.INFO
if init:
loglevel = logging.DEBUG
else:
if not os.path.exists(self.get_metadata_path("git")):
raise exceptions.JirafsError(
"{path} is not a valid ticket folder!".format(path=self.path)
)
if self.version < constants.CURRENT_REPO_VERSION:
print(
"Your ticket folder at {path} is out-of-date "
"and is being automatically updated.".format(path=self.path)
)
while self.version < constants.CURRENT_REPO_VERSION:
migrator = getattr(
migrations, "migration_%s" % str(self.version + 1).zfill(4)
)
self.migrate(migrator, loglevel=loglevel, init=init)
def migrate(self, migrator, loglevel=logging.INFO, init=False):
with utils.stash_local_changes(self):
self.log("%s: Migration started", (migrator.__name__,), loglevel)
migrator(self, init=init)
self.log("%s: Migration finished", (migrator.__name__,), loglevel)
def build_ignore_files(self):
metadata_excludes = [
"git",
"shadow",
"operation.log",
"subtasks",
"temp-generated",
"plugin_meta",
]
with codecs.open(
self.get_local_path(constants.GIT_EXCLUDE_FILE), "w", "utf-8"
) as out:
for line in metadata_excludes:
out.write("%s/%s\n" % (constants.METADATA_DIR, line,))
subtask_list_path = self.get_metadata_path("subtasks")
if os.path.exists(subtask_list_path):
with open(subtask_list_path, "r") as in_:
for line in in_:
out.write("%s/*\n" % line.strip())
with codecs.open(
self.get_metadata_path("combined_ignore"), "w", "utf-8"
) as out:
try:
out.write("# ~/%s\n" % constants.GIT_IGNORE_FILE_PARTIAL)
with codecs.open(
os.path.expanduser("~/%s" % constants.GIT_IGNORE_FILE_PARTIAL),
"r",
"utf-8",
) as in_:
for line in in_:
out.write("%s\n" % line.strip())
except Exception:
pass
try:
out.write("# %s\n" % (self.get_path(constants.GIT_IGNORE_FILE_PARTIAL)))
with codecs.open(
self.get_path(constants.GIT_IGNORE_FILE_PARTIAL), "r", "utf-8"
) as in_:
for line in in_:
out.write("%s\n" % line.strip())
except Exception:
pass
def log(self, message, args=None, level=logging.INFO):
if args is None:
args = []
self.logger.log(level, message, *args)
def get_log(self):
with io.open(self.log_path, "r", encoding="utf-8") as log_file:
return log_file.read()
|
tests/unit/test_advanced_conf.py | edditler/archivy | 2,061 | 11175755 | <gh_stars>1000+
from textwrap import dedent
import pytest
from tinydb import Query
from archivy.helpers import get_db, load_hooks, load_scraper
from archivy import data
@pytest.fixture()
def hooks_cli_runner(test_app, cli_runner, click_cli):
"""
Saves hooks to user config directory for tests.
All of the hooks except `before_dataobj_create` store some form of message in
the db, whose existence is then checked in the tests.
"""
hookfile = """\
from archivy.config import BaseHooks
from archivy.helpers import get_db
class Hooks(BaseHooks):
def on_edit(self, dataobj):
get_db().insert({"type": "edit_message", "content": f"Changes made to content of {dataobj.title}."})
def on_user_create(self, user):
get_db().insert({"type": "user_creation_message", "content": f"New user {user.username} created."})
def on_dataobj_create(self, dataobj):
get_db().insert({"type": "dataobj_creation_message", "content": f"New dataobj on {dataobj.title} with tags: {dataobj.tags}"})
def before_dataobj_create(self, dataobj):
dataobj.content += "Dataobj made for test." """
with cli_runner.isolated_filesystem():
cli_runner.invoke(click_cli, ["init"], input="\nn\nn\n\n")
with open("hooks.py", "w") as f:
f.write(dedent(hookfile))
with test_app.app_context():
test_app.config["HOOKS"] = load_hooks()
yield cli_runner
@pytest.fixture()
def custom_scraping_setup(test_app, cli_runner, click_cli):
scraping_file = """\
def test_pattern(data):
data.title = "Overridden note"
data.content = "this note was not processed by default archivy bookmarking, but a user-specified function"
data.tags = ["test"]
PATTERNS = {
"https://example.com/": test_pattern,
"https://example2.com/": ".nested"
}"""
with cli_runner.isolated_filesystem():
cli_runner.invoke(click_cli, ["init"], input="\nn\nn\n\n")
with open("scraping.py", "w") as f:
f.write(dedent(scraping_file))
with test_app.app_context():
test_app.config["SCRAPING_PATTERNS"] = load_scraper()
yield cli_runner
def test_dataobj_creation_hook(test_app, hooks_cli_runner, note_fixture):
creation_message = get_db().search(Query().type == "dataobj_creation_message")[0]
assert (
creation_message["content"]
== f"New dataobj on {note_fixture.title} with tags: {note_fixture.tags}"
)
def test_before_dataobj_creation_hook(
test_app, hooks_cli_runner, note_fixture, bookmark_fixture
):
# check hook that added content at the end of body succeeded.
message = "Dataobj made for test."
assert message in note_fixture.content
assert message in bookmark_fixture.content
def test_dataobj_edit_hook(test_app, hooks_cli_runner, note_fixture, client):
client.put(
f"/api/dataobjs/{note_fixture.id}", json={"content": "Updated note content"}
)
edit_message = get_db().search(Query().type == "edit_message")[0]
assert (
f"Changes made to content of {note_fixture.title}." == edit_message["content"]
)
def test_user_creation_hook(test_app, hooks_cli_runner, user_fixture):
creation_message = get_db().search(Query().type == "user_creation_message")[1]
assert f"New user {user_fixture.username} created." == creation_message["content"]
def test_custom_scraping_patterns(
custom_scraping_setup, test_app, bookmark_fixture, different_bookmark_fixture
):
pattern = "example.com"
assert pattern in bookmark_fixture.url
assert bookmark_fixture.title == "Overridden note"
assert bookmark_fixture.tags == ["test"]
pattern = "example2.com"
assert pattern in different_bookmark_fixture.url
# check that the CSS selector was parsed and other parts of the document were not selected
assert different_bookmark_fixture.content.startswith("aaa")
test_app.config["SCRAPING_PATTERNS"] = {}
|
regtests/calling/keyword.py | bpmbank/PythonJS | 319 | 11175826 | <filename>regtests/calling/keyword.py
"""keywords"""
def f(a, b=None, c=None):
return (a+b) * c
def main():
TestError( f(1, b=2, c=3) == 9) ## inorder works in javascript mode
TestError( f(1, c=3, b=2) == 9) ## out of order fails in javascript mode
|
reddit_detective/karma.py | oleitao/reddit-detective | 173 | 11175839 | <gh_stars>100-1000
"""
Let's assume that you get the subreddits they belong for 2 comments
Terminology: "stuff like karma" includes
comment_karma, link_karma, score, upvote_ratio and subscribers
If those 2 comments belong to the same subreddit, the code will be like the following:
MERGE (:Subreddit ...)
MERGE (:Subreddit ...)
If we include stuff like karma in properties at creation time,
then constraint UniqueSubreddit will fail.
Why?
Because Reddit does not give the exact number when it comes to
karmas/upvotes/subscribers. This leads to having different karma numbers
in 2 MERGE statements for the same Subreddit.
What is our solution?
- Do not include stuff like karma in properties at creation time
- After creation, get each node/rel's stuff like karma and add to their props
What if the user adds more stuff to their database?
- Delete each node/rel's stuff like karma
- Then get each node/rel's stuff like karma and add to their props
What if the user does not want to deal with stuff like karma?
- Make it optional
"""
import praw
from neo4j import BoltDriver
def _set_subreddit_subscribers(api: praw.Reddit, name):
sub = api.subreddit(name)
return """
MATCH (n {id: "%s"})
WITH n
SET n.subscribers = %s;
""" % (sub.id, sub.subscribers)
def _set_submission_upvotes(api: praw.Reddit, id_):
sub = api.submission(id_)
return """
MATCH (n {id: "%s"})
WITH n
SET n.score = %s, n.upvote_ratio = %s;
""" % (sub.id, sub.score, sub.upvote_ratio)
def _set_redditor_karma(api: praw.Reddit, name):
red = api.redditor(name)
return """
MATCH (n {id: "%s"})
WITH n
SET n.comment_karma = %s, n.link_karma = %s
""" % (red.id, red.comment_karma, red.link_karma)
def _set_comment_score(api: praw.Reddit, id_):
comm = api.comment(id_)
return """
MATCH (n:Comment {id: "%s"})
WITH n
SET n.score = %s;
""" % (comm.id, comm.score)
remove_stuff_subreddit = """
MATCH (n:Subreddit)
WITH n
REMOVE n.subscribers;
"""
remove_stuff_submission = """
MATCH (n:Submission)
WITH n
REMOVE n.score, n.upvote_ratio;
"""
remove_stuff_redditor = """
MATCH (n:Redditor)
WITH n
REMOVE n.comment_karma, n.link_karma;
"""
remove_stuff_comment = """
MATCH ()-[n:Comment]-()
WITH n
REMOVE n.score;
"""
def _set_karma_subreddits(api, names):
return [_set_subreddit_subscribers(api, name[0]) for name in list(names)]
def _set_karma_submissions(api, ids):
return [_set_submission_upvotes(api, id_[0]) for id_ in list(ids)]
def _set_karma_redditors(api, names):
return [_set_redditor_karma(api, name[0]) for name in list(names)]
def _set_karma_comments(api, ids):
return [_set_comment_score(api, id_[0]) for id_ in list(ids)]
def _remove_karma():
return [
remove_stuff_subreddit,
remove_stuff_submission,
remove_stuff_redditor,
remove_stuff_comment
]
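# Added illustration (not part of the original module): a minimal sketch of how the
# helpers above could be combined, assuming a praw.Reddit instance and a neo4j driver
# created elsewhere. The helper name and two-phase flow are illustrative, not the
# library's public API; each *_rows argument is expected to be an iterable of 1-tuples
# (e.g. rows returned by a Cypher query), since the helpers index into each row.
def _refresh_karma_example(api: praw.Reddit, driver: BoltDriver,
                           subreddit_name_rows, submission_id_rows):
    with driver.session() as session:
        # Phase 1: drop any stale karma-like properties.
        for query in _remove_karma():
            session.run(query.strip().rstrip(";"))
        # Phase 2: re-fetch current values and write them back.
        for query in _set_karma_subreddits(api, subreddit_name_rows):
            session.run(query.strip().rstrip(";"))
        for query in _set_karma_submissions(api, submission_id_rows):
            session.run(query.strip().rstrip(";"))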
|
samr/data.py | yuntuowang/sentiment-analysis-on-movie-reviews | 129 | 11175861 | <filename>samr/data.py
from collections import namedtuple
Datapoint = namedtuple("Datapoint", "phraseid sentenceid phrase sentiment")
|
plugin.video.yatp/site-packages/hachoir_parser/program/__init__.py | mesabib/kodi.yatp | 194 | 11175874 | from hachoir_parser.program.elf import ElfFile
from hachoir_parser.program.exe import ExeFile
from hachoir_parser.program.macho import MachoFile, MachoFatFile
from hachoir_parser.program.python import PythonCompiledFile
from hachoir_parser.program.java import JavaCompiledClassFile
from hachoir_parser.program.prc import PRCFile
from hachoir_parser.program.nds import NdsFile
from hachoir_parser.program.dex import DexFile
from hachoir_parser.program.java_serialized import JavaSerializedFile
|
examples/customer_churn/code/train/trainer.py | NunoEdgarGFlowHub/MLOps | 1,068 | 11175890 | import math
import numpy as np
import torch
import gpytorch
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import MultiStepLR
from sklearn.metrics import roc_auc_score,accuracy_score
from svdkl import (NeuralNetLayer,
GaussianProcessLayer,
DKLModel)
"""
Trainer class to train and evaluate the SV-DKL model.
"""
class SvDklTrainer:
"""Train SV_DKL model"""
def __init__(self, hyper_params, aml_run):
"""initialize SV-DKL model
Args:
hyper_params(dict):contains model hyperparameters
aml_run(run):AzureML run
"""
self.device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.hyper_params = hyper_params
print(self.hyper_params)
        # Bernoulli likelihood
self.likelihood = gpytorch.likelihoods.BernoulliLikelihood().to(self.device)
nnet_layer = NeuralNetLayer(data_dim=self.hyper_params['input_dim'],
output_dim=self.hyper_params['latent_dim']
).to(self.device)
self.model = DKLModel(nnet_layer,
num_dim=self.hyper_params['latent_dim'],
grid_bounds=self.hyper_params['grid_bounds'],
grid_size=self.hyper_params['grid_size'],
num_mixtures = self.hyper_params['num_mixtures']
).to(self.device)
        # Stochastic variational optimizer
self.optimizer=Adam([
{'params': self.model.nnet_layer.parameters(),'lr':self.hyper_params['nn_lr'], 'betas':(0.9, 0.999)},
{'params': self.model.gp_layer.hyperparameters(), 'lr': self.hyper_params['lh_lr'] * 0.01},
            {'params': self.model.gp_layer.variational_parameters()},
{'params': self.likelihood.parameters()}], lr=self.hyper_params['lh_lr'])
#,momentum=0.9, nesterov=True, weight_decay=0)
self.aml_run = aml_run
def fit(self, data_loader):
"""Train SV-DKL model
Args:
            data_loader(pytorch DataLoader): data loader wrapping the training dataset (X, y)
"""
scheduler = MultiStepLR(self.optimizer,
gamma=0.1,
milestones=[0.5 * self.hyper_params['epochs'], 0.75 * self.hyper_params['epochs']])
for epoch in range(1, self.hyper_params['epochs'] + 1):
self.model.train()
self.likelihood.train()
mll = gpytorch.mlls.VariationalELBO(self.likelihood,
self.model.gp_layer,
num_data=len(data_loader.dataset))
train_loss = 0.
for i, (data, target) in enumerate(data_loader):
data, target = data.to(self.device), target.to(self.device)
self.optimizer.zero_grad()
output = self.model(data)
loss = -mll(output, target)
loss.backward()
self.optimizer.step()
if (i+ 1) % 2 == 0:
print('Train Epoch: %d [%03d/%03d], Loss: %.6f' % (epoch, i + 1, len(data_loader), loss.item()))
if self.aml_run is not None:
self.aml_run.log("loss",loss.item())
def eval(self, dataloader):
"""Evaluate SV-DKL model on test dataset
Args:
            dataloader(pytorch DataLoader): data loader wrapping the test dataset (X, y)
"""
y_pred_lst = []
y_truth_lst = []
with torch.no_grad():
for i, (X, y) in enumerate(dataloader):
output = self.likelihood(self.model(X.to(self.device)))
y_pred = output.mean.ge(0.5).float().cpu().numpy()
y_pred_lst.append(y_pred)
y_truth_lst.append(y.numpy())
truth = np.concatenate(y_truth_lst)
pred = np.concatenate(y_pred_lst)
auc = roc_auc_score(truth,pred)
accuracy = accuracy_score(truth,pred)
print("AUC score: ",round(auc,2))
print("Accuracy score: ",round(accuracy,2))
if self.aml_run is not None:
self.aml_run.log('auc',round(auc,2))
self.aml_run.log('Accuracy',round(accuracy,2))
|
Remoting/Application/Testing/Python/BackgroundColorBackwardsCompatibilityTest.py | xj361685640/ParaView | 815 | 11175916 | <reponame>xj361685640/ParaView<filename>Remoting/Application/Testing/Python/BackgroundColorBackwardsCompatibilityTest.py
# state file generated using paraview version 5.9.0-RC4
import paraview
from paraview.simple import *
renderView1 = CreateView('RenderView')
renderView1.ViewSize = [844, 539]
renderView1.Background2 = [0.5, 0, 0]
try:
renderView1.UseGradientBackground = 1
except paraview.NotSupportedException:
pass
else:
raise RuntimeError("NotSupportedException not thrown")
try:
renderView1.UseTexturedBackground = 1
except paraview.NotSupportedException:
pass
else:
raise RuntimeError("NotSupportedException not thrown")
try:
renderView1.UseSkyboxBackground = 1
except paraview.NotSupportedException:
pass
else:
raise RuntimeError("NotSupportedException not thrown")
# Now force older version and try the same thing again
paraview.compatibility.major = 5
paraview.compatibility.minor = 9
renderView1.UseGradientBackground = 1
assert(renderView1.BackgroundColorMode == "Gradient")
assert(renderView1.UseGradientBackground == 1)
renderView1.UseTexturedBackground = 1
assert(renderView1.BackgroundColorMode == "Texture")
assert(renderView1.UseTexturedBackground == 1)
renderView1.UseTexturedBackground = 0
assert(renderView1.BackgroundColorMode == "Single Color")
assert(renderView1.UseTexturedBackground == 0)
renderView1.UseSkyboxBackground = 1
assert(renderView1.BackgroundColorMode == "Skybox")
assert(renderView1.UseSkyboxBackground == 1)
renderView1.UseSkyboxBackground = 0
assert(renderView1.BackgroundColorMode == "Single Color")
assert(renderView1.UseSkyboxBackground == 0)
|
tests/chainer_tests/functions_tests/array_tests/test_get_item.py | zaltoprofen/chainer | 3,705 | 11175934 | import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
_backend_params = (
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ [{'use_cuda': True}]
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@testing.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
[{'axes': [1, 2], 'offsets': 0},
{'axes': [1, 2], 'offsets': [0, 1, 1]},
{'axes': 1, 'offsets': 1},
{'axes': 1, 'offsets': [0, 1, 1]},
{'axes': [], 'offsets': 0, 'new_axes': 0},
{'axes': [], 'offsets': 0, 'new_axes': 2},
{'axes': [], 'offsets': 0, 'new_axes': 3},
{'slices': (1, -1, 0)},
{'slices': (1, -1)},
{'slices': (1, Ellipsis, -1)},
{'slices': (1, None, Ellipsis, None, -1)},
]
))
class TestGetItem(testing.FunctionTestCase):
def setUp(self):
shape = (4, 2, 1)
if not hasattr(self, 'slices'):
axes = self.axes
offsets = self.offsets
# Convert axes, offsets and shape to slices
if isinstance(offsets, int):
offsets = tuple([offsets] * len(shape))
if isinstance(axes, int):
axes = tuple([axes])
slices = [slice(None)] * len(shape)
for axis in axes:
slices[axis] = slice(
offsets[axis], offsets[axis] + shape[axis])
if hasattr(self, 'new_axes'):
slices.insert(self.new_axes, None)
self.axes = axes
self.offsets = offsets
self.slices = tuple(slices)
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
self.check_double_backward_options.update({'atol': 1e-3, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.get_item(x, self.slices)
return y,
def forward_expected(self, inputs):
x, = inputs
y = x[self.slices]
return numpy.asarray(y),
@testing.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
[{'slices': []},
{'slices': ([],)},
{'slices': ([[]],)},
{'slices': numpy.array([], dtype=numpy.bool)},
{'slices': (1, [1])},
{'slices': ([1], slice(1, 2))},
{'slices': [1, 0]},
{'slices': ([1, 0],)},
{'slices': numpy.array([[1, 0], [2, 3]])},
{'slices': ([1, 0], [1, 1])},
{'slices': ([1, 0], slice(None), [[1, 1], [1, 1]])},
{'slices': ([1, 0], slice(1, 2), [0, 0])},
{'slices': ([[1, 1], [1, 0]], slice(1, 2), 1)},
{'slices': numpy.array([True] * 18 + [False] * 6).reshape(4, 3, 2)},
{'slices': numpy.array([True, False, False, True])},
{'slices': (slice(None), numpy.array([True, False, True]))},
{'slices': numpy.array([False, False, False, False])},
{'slices': (3, 2, Ellipsis, 1)},
{'slices': (numpy.array(False)), 'input_shape': ()},
{'slices': (numpy.array(True)), 'input_shape': ()},
]
))
class TestGetItemAdvanced(testing.FunctionTestCase):
input_shape = (4, 3, 2)
def setUp(self):
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
self.check_double_backward_options.update({'atol': 1e-3, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.input_shape).astype(self.dtype)
return x,
def _convert_slices(self, slices, device):
# Converts advanced indexing slices (of numpy.ndarray) to respective
# backend arrays.
if isinstance(slices, list):
return [self._convert_slices(a, device) for a in slices]
if isinstance(slices, tuple):
return tuple([self._convert_slices(a, device) for a in slices])
if isinstance(slices, numpy.ndarray):
return device.send(slices)
return slices
def forward(self, inputs, device):
x, = inputs
slices = self._convert_slices(self.slices, device)
y = functions.get_item(x, slices)
return y,
def forward_expected(self, inputs):
x, = inputs
y = x[self.slices]
return numpy.asarray(y),
@testing.parameterize(
{'slices': ([1, 0], [1, 1]), 'sliced_shape': (2, 2)},
{'slices': ([1, 0], slice(None), [[1, 1], [1, 1]]),
'sliced_shape': (2, 2, 3)},
{'slices': ([1, 0], [1, 1], [0, 0]), 'sliced_shape': (2,)},
{'slices': (slice(None), numpy.array([True, False, True])),
'sliced_shape': (4, 2, 2)},
)
class TestCupyIndicesGetItem(unittest.TestCase):
def setUp(self):
self.x_data = numpy.random.uniform(
-1, 1, (4, 3, 2)).astype(numpy.float32)
self.gy_data = numpy.random.uniform(
-1, 1, self.sliced_shape).astype(numpy.float32)
def check_forward(self, x_data):
slices = []
for i, s in enumerate(self.slices):
if isinstance(s, numpy.ndarray):
s = chainer.backends.cuda.cupy.array(s)
if isinstance(s, list):
s = chainer.backends.cuda.cupy.array(s, dtype=numpy.int32)
slices.append(s)
slices = tuple(slices)
x = chainer.Variable(x_data)
y = functions.get_item(x, slices)
self.assertEqual(y.data.dtype, numpy.float32)
numpy.testing.assert_equal(cuda.to_cpu(x_data)[self.slices],
cuda.to_cpu(y.data))
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x_data))
def check_backward(self, x_data, y_grad):
slices = []
for i, s in enumerate(self.slices):
if isinstance(s, numpy.ndarray):
s = chainer.backends.cuda.cupy.array(s)
if isinstance(s, list):
s = chainer.backends.cuda.cupy.array(s, dtype=numpy.int32)
slices.append(s)
slices = tuple(slices)
def f(x):
return functions.get_item(x, slices)
gradient_check.check_backward(
f, (x_data,), y_grad, dtype='d')
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x_data),
cuda.to_gpu(self.gy_data))
class TestInvalidGetItem(unittest.TestCase):
def setUp(self):
self.default_debug = chainer.is_debug()
chainer.set_debug(True)
self.x_data = numpy.random.uniform(-1, 1, (4, 3, 2))
def tearDown(self):
chainer.set_debug(self.default_debug)
def test_multiple_ellipsis(self):
with self.assertRaises(ValueError):
functions.get_item(self.x_data, (Ellipsis, Ellipsis))
testing.run_module(__name__, __file__)
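# --- Usage sketch (added for illustration; not part of the original tests) ---
# The API exercised above, in plain form: chainer.functions.get_item slices a
# Variable (basic or advanced indexing) and routes gradients only into the
# selected elements. Kept as comments so importing this test module stays
# side-effect free.
#
#   import numpy, chainer
#   import chainer.functions as F
#   x = chainer.Variable(numpy.arange(24, dtype=numpy.float32).reshape(4, 3, 2))
#   y = F.get_item(x, (slice(None), [0, 2]))   # same result as x[:, [0, 2]]
#   y.grad = numpy.ones_like(y.array)
#   y.backward()          # x.grad is 1 at the selected positions, 0 elsewhere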
|
z3/eq10.py | Wikunia/hakank | 279 | 11176004 | #!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Eq 10 problem in Z3
#
# Standard benchmark problem.
#
# This Z3 model was written by <NAME> (<EMAIL>)
# See also my Z3 page: http://hakank.org/z3/
#
from __future__ import print_function
from z3_utils_hakank import *
def main():
solver = Solver()
#
# data
#
n = 7
#
# variables
#
X = [makeIntVar(solver, "X(%i)" % i, 0, 10) for i in range(n)]
X1, X2, X3, X4, X5, X6, X7 = X
#
# constraints
#
solver.add(0 + 98527 * X1 + 34588 * X2 + 5872 * X3 + 59422 * X5 + 65159 * X7
== 1547604 + 30704 * X4 + 29649 * X6)
solver.add(
0 + 98957 * X2 + 83634 * X3 + 69966 * X4 + 62038 * X5 + 37164 * X6 + 85413 * X7 == 1823553 + 93989 *
X1)
solver.add(900032 + 10949 * X1 + 77761 * X2 + 67052 * X5
== 0 + 80197 * X3 + 61944 * X4 + 92964 * X6 + 44550 * X7)
solver.add(0 + 73947 * X1 + 84391 * X3 + 81310 * X5
== 1164380 + 96253 * X2 + 44247 * X4 + 70582 * X6 + 33054 * X7)
solver.add(0 + 13057 * X3 + 42253 * X4 + 77527 * X5 + 96552 * X7
== 1185471 + 60152 * X1 + 21103 * X2 + 97932 * X6)
solver.add(1394152 + 66920 * X1 + 55679 * X4 ==
0 + 64234 * X2 + 65337 * X3 + 45581 * X5 + 67707 * X6 + 98038 * X7)
solver.add(0 + 68550 * X1 + 27886 * X2 + 31716 * X3 + 73597 * X4 + 38835 * X7
== 279091 + 88963 * X5 + 76391 * X6)
solver.add(0 + 76132 * X2 + 71860 * X3 + 22770 * X4 + 68211 * X5 + 78587 * X6
== 480923 + 48224 * X1 + 82817 * X7)
solver.add(519878 + 94198 * X2 + 87234 * X3 + 37498 * X4
== 0 + 71583 * X1 + 25728 * X5 + 25495 * X6 + 70023 * X7)
solver.add(361921 + 78693 * X1 + 38592 * X5 + 38478 * X6
== 0 + 94129 * X2 + 43188 * X3 + 82528 * X4 + 69025 * X7)
num_solutions = 0
while solver.check() == sat:
num_solutions += 1
mod = solver.model()
print("X:", [mod.eval(X[i]) for i in range(n)])
print()
getDifferentSolution(solver,mod,X)
print()
print("num_solutions:", num_solutions)
if __name__ == "__main__":
main()
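# --- Illustration (added; not part of the original model) ---
# getDifferentSolution() from z3_utils_hakank is assumed to add a "blocking
# clause" so that the next solver.check() yields a different assignment.
# A plain-z3 sketch of that idea:
def block_current_solution(solver, model, variables):
    from z3 import Or
    # Require at least one variable to take a different value in the next model.
    solver.add(Or([v != model.eval(v) for v in variables]))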
|
pwnypack/asm.py | iksteen/dpf | 133 | 11176033 | <filename>pwnypack/asm.py
"""
This module contains functions to assemble and disassemble code for a given
target platform. By default the keystone engine assembler will be used if it
is available. If it's not available (or if the ``WANT_KEYSTONE`` environment
variable is set and it's not ``1``, ``YES`` or ``TRUE`` (case insensitive)),
pwnypack falls back to using the *nasm* assembler for nasm syntax on X86 or
*GNU as* for any other supported syntax / architecture. Disassembly is
performed by *ndisasm* on x86 for nasm syntax. *capstone* is used for any
other supported syntax / architecture.
Currently, the only supported architectures are
:attr:`~pwnypack.target.Target.Arch.x86` (both 32 and 64 bits variants) and
:attr:`~pwnypack.target.Target.Arch.arm` (both 32 and 64 bits variants).
"""
from __future__ import print_function
try:
import shutilwhich
except ImportError:
pass
import argparse
import os
import subprocess
import sys
from enum import IntEnum
import shutil
from pwnypack.elf import ELF
import pwnypack.target
import pwnypack.main
import pwnypack.codec
import tempfile
import six
try:
import capstone
HAVE_CAPSTONE = True
except ImportError:
HAVE_CAPSTONE = False
try:
import keystone
HAVE_KEYSTONE = True
except ImportError:
HAVE_KEYSTONE = False
WANT_KEYSTONE = os.environ.get('WANT_KEYSTONE', '1').upper() in ('1', 'YES', 'TRUE')
__all__ = [
'AsmSyntax',
'asm',
'disasm',
]
BINUTILS_SUFFIXES = [
'none-eabi-',
'unknown-linux-gnu-',
'linux-gnu-',
'linux-gnueabi-',
]
BINUTILS_PREFIXES = {}
def find_binutils_prefix(arch):
global BINUTILS_PREFIXES
prefix = BINUTILS_PREFIXES.get(arch)
if prefix is not None:
return prefix
for suffix in BINUTILS_SUFFIXES:
prefix = '%s-%s' % (arch, suffix)
if shutil.which('%sas' % prefix) and \
shutil.which('%sld' % prefix):
BINUTILS_PREFIXES[arch] = prefix
return prefix
else:
raise RuntimeError('Could not locate a suitable binutils for %s.' % arch)
class AsmSyntax(IntEnum):
"""
This enumeration is used to specify the assembler syntax.
"""
nasm = 0 #: Netwide assembler syntax
intel = 1 #: Intel assembler syntax
att = 2 #: AT&T assembler syntax
def asm(code, addr=0, syntax=None, target=None, gnu_binutils_prefix=None):
"""
Assemble statements into machine readable code.
Args:
code(str): The statements to assemble.
addr(int): The memory address where the code will run.
syntax(AsmSyntax): The input assembler syntax for x86. Defaults to
nasm, ignored on other platforms.
target(~pwnypack.target.Target): The target architecture. The
global target is used if this argument is ``None``.
gnu_binutils_prefix(str): When the syntax is AT&T, gnu binutils'
as and ld will be used. By default, it selects
``arm-*-as/ld`` for 32bit ARM targets,
``aarch64-*-as/ld`` for 64 bit ARM targets,
``i386-*-as/ld`` for 32bit X86 targets and
``amd64-*-as/ld`` for 64bit X86 targets (all for various flavors
            of ``*``). This option allows you to pick a different toolchain.
The prefix should always end with a '-' (or be empty).
Returns:
bytes: The assembled machine code.
Raises:
SyntaxError: If the assembler statements are invalid.
        NotImplementedError: If an unsupported target platform is specified.
Example:
>>> from pwny import *
>>> asm('''
... pop rdi
... ret
... ''', target=Target(arch=Target.Arch.x86, bits=64))
b'_\\xc3'
"""
if target is None:
target = pwnypack.target.target
if syntax is None and target.arch is pwnypack.target.Target.Arch.x86:
syntax = AsmSyntax.nasm
if HAVE_KEYSTONE and WANT_KEYSTONE:
ks_mode = 0
ks_syntax = None
if target.arch is pwnypack.target.Target.Arch.x86:
ks_arch = keystone.KS_ARCH_X86
if target.bits is pwnypack.target.Target.Bits.bits_32:
ks_mode |= keystone.KS_MODE_32
else:
ks_mode |= keystone.KS_MODE_64
if syntax is AsmSyntax.nasm:
ks_syntax = keystone.KS_OPT_SYNTAX_NASM
elif syntax is AsmSyntax.intel:
ks_syntax = keystone.KS_OPT_SYNTAX_INTEL
else:
ks_syntax = keystone.KS_OPT_SYNTAX_ATT
elif target.arch is pwnypack.target.Target.Arch.arm:
if target.bits is pwnypack.target.Target.Bits.bits_32:
ks_arch = keystone.KS_ARCH_ARM
if target.mode & pwnypack.target.Target.Mode.arm_thumb:
ks_mode |= keystone.KS_MODE_THUMB
else:
ks_mode |= keystone.KS_MODE_ARM
if target.mode & pwnypack.target.Target.Mode.arm_v8:
ks_mode |= keystone.KS_MODE_V8
if target.mode & pwnypack.target.Target.Mode.arm_m_class:
ks_mode |= keystone.KS_MODE_MICRO
if target.endian is pwnypack.target.Target.Endian.little:
ks_mode |= keystone.KS_MODE_LITTLE_ENDIAN
else:
ks_mode |= keystone.KS_MODE_BIG_ENDIAN
else:
ks_arch = keystone.KS_ARCH_ARM64
ks_mode |= keystone.KS_MODE_LITTLE_ENDIAN
else:
raise NotImplementedError('Unsupported syntax or target platform.')
ks = keystone.Ks(ks_arch, ks_mode)
if ks_syntax is not None:
ks.syntax = ks_syntax
try:
data, insn_count = ks.asm(code, addr)
except keystone.KsError as e:
import traceback
traceback.print_exc()
raise SyntaxError(e.message)
return b''.join(six.int2byte(b) for b in data)
if target.arch is pwnypack.target.Target.Arch.x86 and syntax is AsmSyntax.nasm:
with tempfile.NamedTemporaryFile() as tmp_asm:
tmp_asm.write(('bits %d\norg %d\n%s' % (target.bits.value, addr, code)).encode('utf-8'))
tmp_asm.flush()
tmp_bin_fd, tmp_bin_name = tempfile.mkstemp()
os.close(tmp_bin_fd)
try:
p = subprocess.Popen(
[
'nasm',
'-o', tmp_bin_name,
'-f', 'bin',
tmp_asm.name,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = p.communicate()
if p.returncode:
raise SyntaxError(stderr.decode('utf-8'))
tmp_bin = open(tmp_bin_name, 'rb')
result = tmp_bin.read()
tmp_bin.close()
return result
finally:
try:
os.unlink(tmp_bin_name)
except OSError:
pass
elif target.arch in (pwnypack.target.Target.Arch.x86, pwnypack.target.Target.Arch.arm):
preamble = ''
as_flags = []
ld_flags = []
if target.arch is pwnypack.target.Target.Arch.x86:
if target.bits == 32:
binutils_arch = 'i386'
else:
binutils_arch = 'amd64'
if syntax is AsmSyntax.intel:
preamble = '.intel_syntax noprefix\n'
ld_flags.extend(['--oformat', 'binary'])
else:
if target.bits == 32:
binutils_arch = 'arm'
if target.mode & pwnypack.target.Target.Mode.arm_v8:
as_flags.append('-march=armv8-a')
elif target.mode & pwnypack.target.Target.Mode.arm_m_class:
as_flags.append('-march=armv7m')
else:
binutils_arch = 'aarch64'
if target.endian is pwnypack.target.Target.Endian.little:
as_flags.append('-mlittle-endian')
ld_flags.append('-EL')
else:
as_flags.append('-mbig-endian')
ld_flags.append('-EB')
if target.mode & pwnypack.target.Target.Mode.arm_thumb:
as_flags.append('-mthumb')
if gnu_binutils_prefix is None:
gnu_binutils_prefix = find_binutils_prefix(binutils_arch)
tmp_out_fd, tmp_out_name = tempfile.mkstemp()
try:
os.close(tmp_out_fd)
p = subprocess.Popen(
[
'%sas' % gnu_binutils_prefix,
'-o', tmp_out_name
] + as_flags,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = p.communicate((preamble + code).encode('utf-8'))
if p.returncode:
raise SyntaxError(stderr.decode('utf-8'))
tmp_bin_fd, tmp_bin_name = tempfile.mkstemp()
try:
os.close(tmp_bin_fd)
p = subprocess.Popen(
[
'%sld' % gnu_binutils_prefix,
'-Ttext', str(addr),
] + ld_flags + [
'-o', tmp_bin_name,
tmp_out_name,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = p.communicate()
if p.returncode:
raise SyntaxError(stderr.decode('utf-8'))
if 'binary' in ld_flags:
tmp_bin = open(tmp_bin_name, 'rb')
result = tmp_bin.read()
tmp_bin.close()
return result
else:
tmp_bin = ELF(tmp_bin_name)
return tmp_bin.get_section_header('.text').content
finally:
try:
os.unlink(tmp_bin_name)
except OSError:
pass
finally:
try:
os.unlink(tmp_out_name)
except OSError:
pass # pragma: no cover
else:
raise NotImplementedError('Unsupported syntax or target platform.')
def prepare_capstone(syntax=AsmSyntax.att, target=None):
"""
Prepare a capstone disassembler instance for a given target and syntax.
Args:
syntax(AsmSyntax): The assembler syntax (Intel or AT&T).
target(~pwnypack.target.Target): The target to create a disassembler
instance for. The global target is used if this argument is
``None``.
Returns:
An instance of the capstone disassembler.
Raises:
NotImplementedError: If the specified target isn't supported.
"""
if not HAVE_CAPSTONE:
raise NotImplementedError('pwnypack requires capstone to disassemble to AT&T and Intel syntax')
if target is None:
target = pwnypack.target.target
if target.arch == pwnypack.target.Target.Arch.x86:
if target.bits is pwnypack.target.Target.Bits.bits_32:
md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)
else:
md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
elif target.arch == pwnypack.target.Target.Arch.arm:
mode = 0
if target.bits is pwnypack.target.Target.Bits.bits_32:
arch = capstone.CS_ARCH_ARM
            if target.mode & pwnypack.target.Target.Mode.arm_thumb:
mode = capstone.CS_MODE_THUMB
else:
mode = capstone.CS_MODE_ARM
            if target.mode & pwnypack.target.Target.Mode.arm_m_class:
mode |= capstone.CS_MODE_MCLASS
            if target.mode & pwnypack.target.Target.Mode.arm_v8:
mode |= capstone.CS_MODE_V8
else:
arch = capstone.CS_ARCH_ARM64
if target.endian is pwnypack.target.Target.Endian.little:
mode |= capstone.CS_MODE_LITTLE_ENDIAN
else:
mode |= capstone.CS_MODE_BIG_ENDIAN
md = capstone.Cs(arch, mode)
else:
        raise NotImplementedError('Only x86 and ARM are currently supported.')
md.skipdata = True
if syntax is AsmSyntax.att:
md.syntax = capstone.CS_OPT_SYNTAX_ATT
elif syntax is AsmSyntax.intel:
md.skipdata_setup(('db', None, None))
else:
raise NotImplementedError('capstone engine only implements AT&T and Intel syntax.')
return md
def disasm(code, addr=0, syntax=None, target=None):
"""
Disassemble machine readable code into human readable statements.
Args:
code(bytes): The machine code that is to be disassembled.
addr(int): The memory address of the code (used for relative
references).
syntax(AsmSyntax): The output assembler syntax. This defaults to
nasm on x86 architectures, AT&T on all other architectures.
target(~pwnypack.target.Target): The architecture for which the code
was written. The global target is used if this argument is
``None``.
Returns:
list of str: The disassembled machine code.
Raises:
        NotImplementedError: If an unsupported target platform is specified.
RuntimeError: If ndisasm encounters an error.
Example:
>>> from pwny import *
>>> disasm(b'_\\xc3', target=Target(arch=Target.Arch.x86, bits=64))
['pop rdi', 'ret']
"""
if target is None:
target = pwnypack.target.target
if syntax is None:
if target.arch is pwnypack.target.Target.Arch.x86:
syntax = AsmSyntax.nasm
else:
syntax = AsmSyntax.att
if syntax is AsmSyntax.nasm:
if target.arch is not pwnypack.target.Target.Arch.x86:
raise NotImplementedError('nasm only supports x86.')
p = subprocess.Popen(
[
'ndisasm',
'-b',
str(target.bits.value),
'-o',
str(addr),
'-',
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = p.communicate(code)
if p.returncode:
raise RuntimeError(stderr.decode('utf-8'))
return [
line.split(None, 2)[2]
for line in stdout.decode('utf-8').split('\n')
if line and not line.startswith(' ')
]
elif syntax in (AsmSyntax.intel, AsmSyntax.att):
md = prepare_capstone(syntax, target)
statements = []
total_size = 0
for (_, size, mnemonic, op_str) in md.disasm_lite(code, addr):
statements.append((mnemonic + ' ' + op_str).strip())
total_size += size
return statements
else:
raise NotImplementedError('Unsupported syntax for host platform.')
@pwnypack.main.register('asm')
def asm_app(parser, cmd, args): # pragma: no cover
"""
Assemble code from commandline or stdin.
Please not that all semi-colons are replaced with carriage returns
unless source is read from stdin.
"""
parser.add_argument('source', help='the code to assemble, read from stdin if omitted', nargs='?')
pwnypack.main.add_target_arguments(parser)
parser.add_argument(
'--syntax', '-s',
choices=AsmSyntax.__members__.keys(),
default=None,
)
parser.add_argument(
'--address', '-o',
type=lambda v: int(v, 0),
default=0,
help='the address where the code is expected to run',
)
args = parser.parse_args(args)
target = pwnypack.main.target_from_arguments(args)
if args.syntax is not None:
syntax = AsmSyntax.__members__[args.syntax]
else:
syntax = None
if args.source is None:
args.source = sys.stdin.read()
else:
args.source = args.source.replace(';', '\n')
return asm(
args.source,
syntax=syntax,
target=target,
addr=args.address,
)
@pwnypack.main.register('disasm')
def disasm_app(_parser, cmd, args): # pragma: no cover
"""
Disassemble code from commandline or stdin.
"""
parser = argparse.ArgumentParser(
prog=_parser.prog,
description=_parser.description,
)
parser.add_argument('code', help='the code to disassemble, read from stdin if omitted', nargs='?')
pwnypack.main.add_target_arguments(parser)
parser.add_argument(
'--syntax', '-s',
choices=AsmSyntax.__members__.keys(),
default=None,
)
parser.add_argument(
'--address', '-o',
type=lambda v: int(v, 0),
default=0,
help='the address of the disassembled code',
)
parser.add_argument(
'--format', '-f',
choices=['hex', 'bin'],
help='the input format (defaults to hex for commandline, bin for stdin)',
)
args = parser.parse_args(args)
target = pwnypack.main.target_from_arguments(args)
if args.syntax is not None:
syntax = AsmSyntax.__members__[args.syntax]
else:
syntax = None
if args.format is None:
if args.code is None:
args.format = 'bin'
else:
args.format = 'hex'
if args.format == 'hex':
code = pwnypack.codec.dehex(pwnypack.main.string_value_or_stdin(args.code))
else:
code = pwnypack.main.binary_value_or_stdin(args.code)
print('\n'.join(disasm(code, args.address, syntax=syntax, target=target)))
@pwnypack.main.register(name='symbol-disasm')
def disasm_symbol_app(_parser, _, args): # pragma: no cover
"""
Disassemble a symbol from an ELF file.
"""
parser = argparse.ArgumentParser(
prog=_parser.prog,
description=_parser.description,
)
parser.add_argument(
'--syntax', '-s',
choices=AsmSyntax.__members__.keys(),
default=None,
)
parser.add_argument('file', help='ELF file to extract a symbol from')
parser.add_argument('symbol', help='the symbol to disassemble')
args = parser.parse_args(args)
if args.syntax is not None:
syntax = AsmSyntax.__members__[args.syntax]
else:
syntax = None
elf = ELF(args.file)
symbol = elf.get_symbol(args.symbol)
print('\n'.join(disasm(symbol.content, symbol.value, syntax=syntax, target=elf)))
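# --- Usage sketch (added for illustration; not part of the original module) ---
# Round-tripping two x86-64 instructions through asm()/disasm(), mirroring the
# doctests above. This assumes keystone or nasm/ndisasm is installed, as
# described in the module docstring.
def _asm_disasm_roundtrip_example():
    tgt = pwnypack.target.Target(arch=pwnypack.target.Target.Arch.x86, bits=64)
    machine_code = asm('pop rdi\nret', target=tgt)  # -> b'_\xc3'
    return disasm(machine_code, target=tgt)         # -> ['pop rdi', 'ret']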
|
examples/ogb_eff/ogbn_proteins/model_rev.py | mufeili/deep_gcns_torch | 937 | 11176071 | import __init__
import torch
import torch.nn as nn
from gcn_lib.sparse.torch_nn import norm_layer
import torch.nn.functional as F
import logging
import eff_gcn_modules.rev.memgcn as memgcn
from eff_gcn_modules.rev.rev_layer import GENBlock
import copy
class RevGCN(torch.nn.Module):
def __init__(self, args):
super(RevGCN, self).__init__()
self.num_layers = args.num_layers
self.dropout = args.dropout
self.group = args.group
hidden_channels = args.hidden_channels
num_tasks = args.num_tasks
aggr = args.gcn_aggr
t = args.t
self.learn_t = args.learn_t
p = args.p
self.learn_p = args.learn_p
y = args.y
self.learn_y = args.learn_y
self.msg_norm = args.msg_norm
learn_msg_scale = args.learn_msg_scale
conv_encode_edge = args.conv_encode_edge
norm = args.norm
mlp_layers = args.mlp_layers
node_features_file_path = args.nf_path
self.use_one_hot_encoding = args.use_one_hot_encoding
self.gcns = torch.nn.ModuleList()
self.last_norm = norm_layer(norm, hidden_channels)
for layer in range(self.num_layers):
Fms = nn.ModuleList()
fm = GENBlock(hidden_channels//self.group, hidden_channels//self.group,
aggr=aggr,
t=t, learn_t=self.learn_t,
p=p, learn_p=self.learn_p,
y=y, learn_y=self.learn_y,
msg_norm=self.msg_norm,
learn_msg_scale=learn_msg_scale,
encode_edge=conv_encode_edge,
edge_feat_dim=hidden_channels,
norm=norm, mlp_layers=mlp_layers)
for i in range(self.group):
if i == 0:
Fms.append(fm)
else:
Fms.append(copy.deepcopy(fm))
invertible_module = memgcn.GroupAdditiveCoupling(Fms,
group=self.group)
gcn = memgcn.InvertibleModuleWrapper(fn=invertible_module,
keep_input=False)
self.gcns.append(gcn)
self.node_features = torch.load(node_features_file_path).to(args.device)
if self.use_one_hot_encoding:
self.node_one_hot_encoder = torch.nn.Linear(8, 8)
self.node_features_encoder = torch.nn.Linear(8 * 2, hidden_channels)
else:
self.node_features_encoder = torch.nn.Linear(8, hidden_channels)
self.edge_encoder = torch.nn.Linear(8, hidden_channels)
self.node_pred_linear = torch.nn.Linear(hidden_channels, num_tasks)
def forward(self, x, node_index, edge_index, edge_attr, epoch=-1):
node_features_1st = self.node_features[node_index]
if self.use_one_hot_encoding:
node_features_2nd = self.node_one_hot_encoder(x)
# concatenate
node_features = torch.cat((node_features_1st, node_features_2nd), dim=1)
else:
node_features = node_features_1st
h = self.node_features_encoder(node_features)
edge_emb = self.edge_encoder(edge_attr)
edge_emb = torch.cat([edge_emb]*self.group, dim=-1)
m = torch.zeros_like(h).bernoulli_(1 - self.dropout)
mask = m.requires_grad_(False) / (1 - self.dropout)
h = self.gcns[0](h, edge_index, mask, edge_emb)
for layer in range(1, self.num_layers):
h = self.gcns[layer](h, edge_index, mask, edge_emb)
h = F.relu(self.last_norm(h))
h = F.dropout(h, p=self.dropout, training=self.training)
return self.node_pred_linear(h)
def print_params(self, epoch=None, final=False):
if self.learn_t:
ts = []
for gcn in self.gcns:
ts.append(gcn.t.item())
if final:
print('Final t {}'.format(ts))
else:
logging.info('Epoch {}, t {}'.format(epoch, ts))
if self.learn_p:
ps = []
for gcn in self.gcns:
ps.append(gcn.p.item())
if final:
print('Final p {}'.format(ps))
else:
logging.info('Epoch {}, p {}'.format(epoch, ps))
if self.learn_y:
ys = []
for gcn in self.gcns:
ys.append(gcn.sigmoid_y.item())
if final:
print('Final sigmoid(y) {}'.format(ys))
else:
logging.info('Epoch {}, sigmoid(y) {}'.format(epoch, ys))
if self.msg_norm:
ss = []
for gcn in self.gcns:
ss.append(gcn.msg_norm.msg_scale.item())
if final:
print('Final s {}'.format(ss))
else:
logging.info('Epoch {}, s {}'.format(epoch, ss))
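# --- Usage sketch (added for illustration; not part of the original file) ---
# RevGCN reads its whole configuration from an `args` object. A hypothetical,
# minimal set of the attributes consumed in __init__/forward (values are
# examples only; `nf_path` must point to a saved node-feature tensor):
#
#   args = argparse.Namespace(
#       num_layers=7, dropout=0.1, group=2, hidden_channels=64, num_tasks=112,
#       gcn_aggr='softmax', t=1.0, learn_t=True, p=1.0, learn_p=False,
#       y=0.0, learn_y=False, msg_norm=False, learn_msg_scale=False,
#       conv_encode_edge=True, norm='layer', mlp_layers=2,
#       nf_path='node_features.pt', use_one_hot_encoding=True, device='cuda')
#   model = RevGCN(args).to(args.device)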
|
Example/get_time.py | yangswei/Encrypt-python-code-License-control | 102 | 11176075 | # coding:utf-8
###############################
# Example of Python code encryption and license control
# This is the script that requires license control
###############################
import socket, fcntl, datetime, os, struct
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex
import time
class Get_License(object):
def __init__(self):
super(Get_License, self).__init__()
        # Define the secret key information
self.seperateKey = "<KEY>
self.aesKey = "<KEY>"
self.aesIv = "abcdefg123456789"
self.aesMode = AES.MODE_CBC
def getHwAddr(self, ifname):
"""
        Get the host's physical (MAC) address
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ''.join(['%02x' % ord(char) for char in info[18:24]])
def decrypt(self, text):
"""
        Decrypt the host address from the .lic license file
"""
try:
cryptor = AES.new(self.aesKey, self.aesMode, self.aesIv)
plain_text = cryptor.decrypt(a2b_hex(text))
return plain_text.rstrip('\0')
except:
return ""
def getLicenseInfo(self, filePath = None):
if filePath == None:
filePath = "./license.lic"
if not os.path.isfile(filePath):
print("请将 license.lic 文件放在当前路径下")
os._exit(0)
return False, 'Invalid'
encryptText = ""
with open(filePath, "r") as licFile:
encryptText = licFile.read()
licFile.close()
try:
hostInfo = self.getHwAddr('eth0')
except IOError:
hostInfo = self.getHwAddr('eno1')
decryptText = self.decrypt(encryptText)
pos = decryptText.find(self.seperateKey)
if -1 == pos:
return False, "Invalid"
licHostInfo = self.decrypt(decryptText[0:pos])
licenseStr = decryptText[pos + len(self.seperateKey):]
if licHostInfo == hostInfo:
return True, licenseStr
else:
return False, 'Invalid'
# The following is one example of implementing license control, for reference only
License = Get_License()
condition, LicInfo = License.getLicenseInfo()
class Today():
def get_time(self):
if condition==True and LicInfo=='Valid':
print(datetime.datetime.now())
else:
            print('Not authorized!')
def say(self):
if condition==True and LicInfo=='Valid':
print('hello world!')
localtime = time.asctime( time.localtime(time.time()) )
print("The local time is now:", localtime)
else:
            print('Not authorized!')
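# --- Usage sketch (added for illustration; not part of the original example) ---
# With a license.lic generated for this machine's MAC address placed next to
# the script, the gated methods run normally; otherwise they print
# "Not authorized!".
if __name__ == '__main__':
    today = Today()
    today.get_time()
    today.say()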
|