max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
scripts/taiseilib/configure.py | kasymovga/taisei | 573 | 12620113 |
from . import common
import re
class ConfigureError(common.TaiseiError):
pass
class NameNotDefinedError(ConfigureError):
def __init__(self, type, name):
return super().__init__("{} '{}' is not defined".format(type, name))
class VariableNotDefinedError(NameNotDefinedError):
def __init__(self, name):
return super().__init__('Variable', name)
class MacroNotDefinedError(NameNotDefinedError):
def __init__(self, name):
return super().__init__('Macro', name)
def make_macros(args=common.default_args):
from . import version
vobj = version.get(args=args)
def macro_version(format):
return vobj.format(format)
def macro_file(path):
with open(path, 'r') as infile:
return infile.read()
return {
'VERSION': macro_version,
'FILE': macro_file,
}
def configure(source, variables, *, default=None, use_macros=True, prefix='${', suffix='}', args=common.default_args):
prefix = re.escape(prefix)
suffix = re.escape(suffix)
if use_macros:
pattern = re.compile(prefix + r'(\w+(?:\(.*?\))?)' + suffix, re.A)
macros = make_macros(args)
else:
pattern = re.compile(prefix + r'(\w+)' + suffix, re.A)
def sub(match):
var = match.group(1)
if var[-1] == ')':
macro_name, macro_arg = var.split('(', 1)
macro_arg = macro_arg[:-1]
try:
macro = macros[macro_name]
except KeyError:
raise MacroNotDefinedError(var)
val = macro(macro_arg)
else:
try:
val = variables[var]
except KeyError:
if default is not None:
val = default
else:
raise VariableNotDefinedError(var)
return str(val)
return pattern.sub(sub, source)
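# --- Editor's hedged usage sketch (not part of the original file) ---
# configure() fills ${NAME} from `variables` and, when use_macros is True,
# dispatches ${NAME(arg)} to the macros returned by make_macros().
# Minimal illustration with macros disabled (values are made up):
#   result = configure('version = ${VERSION}\n', {'VERSION': '1.4'}, use_macros=False)
#   assert result == 'version = 1.4\n'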
def configure_file(inpath, outpath, variables, **options):
with open(inpath, "r") as infile:
result = configure(infile.read(), variables, **options)
common.update_text_file(outpath, result)
def add_configure_args(parser):
def parse_definition(defstring):
return defstring.split('=', 1)
def parse_definition_from_file(defstring):
var, fpath = parse_definition(defstring)
with open(fpath, 'r') as infile:
return (var, infile.read())
parser.add_argument('--define', '-D',
type=parse_definition,
action='append',
metavar='VARIABLE=VALUE',
dest='variables',
default=[],
help='define a variable that may appear in template, can be used multiple times'
)
parser.add_argument('--define-from-file', '-F',
type=parse_definition_from_file,
action='append',
metavar='VARIABLE=FILE',
dest='variables',
default=[],
help='like --define, but the value is read from the specified file'
)
parser.add_argument('--prefix',
default='${',
help='prefix for substitution expressions, default: ${'
)
parser.add_argument('--suffix',
default='}',
help='suffix for substitution expressions, default: }'
)
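# Editor's hedged illustration (not part of the original file): how the two
# --define flavours above turn their arguments into (variable, value) pairs.
#   parse_definition('NAME=value')            # -> ['NAME', 'value']
#   parse_definition_from_file('TEXT=a.txt')  # -> ('TEXT', <contents of a.txt>)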
def main(args):
import argparse
parser = argparse.ArgumentParser(description='Generate a text file based on a template.', prog=args[0])
parser.add_argument('input', help='the template file')
parser.add_argument('output', help='the output file')
add_configure_args(parser)
common.add_common_args(parser, depfile=True)
args = parser.parse_args(args[1:])
args.variables = dict(args.variables)
configure_file(args.input, args.output, args.variables,
prefix=args.prefix,
suffix=args.suffix,
args=args
)
if args.depfile is not None:
common.write_depfile(args.depfile, args.output, [args.input, __file__])
|
utils/utils.py | LisaAnne/LocalizingMoments | 157 | 12620120 | <reponame>LisaAnne/LocalizingMoments<filename>utils/utils.py
import json
def read_json(json_file):
with open(json_file) as data_file:
data = json.load(data_file)
return data
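# Editor's hedged usage sketch (the file name is illustrative only):
#   data = read_json('annotations.json')
#   # `data` is whatever structure the JSON file holds (typically a dict or list).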
|
Huang2017AdaIN/model.py | czczup/URST | 119 | 12620125 | import torch.nn as nn
import torch
from function import coral
from function import adaptive_instance_normalization as adain
from function import calc_mean_std
import torch.nn.functional as F
decoder = nn.Sequential(
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 256, (3, 3)),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 128, (3, 3)),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 64, (3, 3)),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 3, (3, 3)),
)
vgg = nn.Sequential(
nn.Conv2d(3, 3, (1, 1)),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(3, 64, (3, 3)),
nn.ReLU(), # relu1-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, (3, 3)),
nn.ReLU(), # relu1-2
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 128, (3, 3)),
nn.ReLU(), # relu2-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, (3, 3)),
nn.ReLU(), # relu2-2
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 256, (3, 3)),
nn.ReLU(), # relu3-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-2
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-4
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 512, (3, 3)),
nn.ReLU(), # relu4-1, this is the last layer used
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu4-2
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu4-3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu4-4
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu5-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu5-2
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu5-3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU() # relu5-4
)
class Net(nn.Module):
def __init__(self, encoder, decoder):
super(Net, self).__init__()
enc_layers = list(encoder.children())
self.enc_1 = nn.Sequential(*enc_layers[:4]) # input -> relu1_1
self.enc_2 = nn.Sequential(*enc_layers[4:11]) # relu1_1 -> relu2_1
self.enc_3 = nn.Sequential(*enc_layers[11:18]) # relu2_1 -> relu3_1
self.enc_4 = nn.Sequential(*enc_layers[18:31]) # relu3_1 -> relu4_1
self.decoder = decoder
self.mse_loss = nn.MSELoss()
# fix the encoder
for name in ['enc_1', 'enc_2', 'enc_3', 'enc_4']:
for param in getattr(self, name).parameters():
param.requires_grad = False
# extract relu1_1, relu2_1, relu3_1, relu4_1 from input image
def encode_with_intermediate(self, input):
results = [input]
for i in range(4):
func = getattr(self, 'enc_{:d}'.format(i + 1))
results.append(func(results[-1]))
return results[1:]
# extract relu4_1 from input image
def encode(self, input):
for i in range(4):
input = getattr(self, 'enc_{:d}'.format(i + 1))(input)
return input
def calc_content_loss(self, input, target):
assert (input.size() == target.size())
assert (target.requires_grad is False)
return self.mse_loss(input, target)
def calc_style_loss(self, input, target):
assert (input.size() == target.size())
assert (target.requires_grad is False)
input_mean, input_std = calc_mean_std(input)
target_mean, target_std = calc_mean_std(target)
return self.mse_loss(input_mean, target_mean) + \
self.mse_loss(input_std, target_std)
def forward(self, content, style, alpha=1.0):
assert 0 <= alpha <= 1
with torch.no_grad():
style_feats = self.encode_with_intermediate(style)
content_feat = self.encode(content)
t = adain(content_feat, style_feats[-1])
t = alpha * t + (1 - alpha) * content_feat
g_t = self.decoder(t)
g_t_feats = self.encode_with_intermediate(g_t)
loss_c = self.calc_content_loss(g_t_feats[-1], t)
loss_s = self.calc_style_loss(g_t_feats[0], style_feats[0])
for i in range(1, 4):
loss_s += self.calc_style_loss(g_t_feats[i], style_feats[i])
return loss_c, loss_s
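def _adain_reference(content_feat, style_feat):
    # Editor's hedged sketch (not part of the original file): `adain` is imported
    # from function.py, which is not included in this listing. This minimal
    # re-implementation is consistent with how adain is used above and with
    # thumb_adaptive_instance_normalization in NetV2 below.
    size = content_feat.size()
    style_mean, style_std = calc_mean_std(style_feat)
    content_mean, content_std = calc_mean_std(content_feat)
    normalized = (content_feat - content_mean.expand(size)) / content_std.expand(size)
    return normalized * style_std.expand(size) + style_mean.expand(size)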
class NetV2(nn.Module):
def __init__(self, encoder, decoder):
super(NetV2, self).__init__()
enc_layers = list(encoder.children())
self.enc_1 = nn.Sequential(*enc_layers[:4]) # input -> relu1_1
self.enc_2 = nn.Sequential(*enc_layers[4:11]) # relu1_1 -> relu2_1
self.enc_3 = nn.Sequential(*enc_layers[11:18]) # relu2_1 -> relu3_1
self.enc_4 = nn.Sequential(*enc_layers[18:31]) # relu3_1 -> relu4_1
self.decoder = decoder
self.mse_loss = nn.MSELoss()
# fix the encoder
for name in ['enc_1', 'enc_2', 'enc_3', 'enc_4']:
for param in getattr(self, name).parameters():
param.requires_grad = False
# extract relu1_1, relu2_1, relu3_1, relu4_1 from input image
def encode_with_intermediate(self, input):
results = [input]
for i in range(4):
func = getattr(self, 'enc_{:d}'.format(i + 1))
results.append(func(results[-1]))
return results[1:]
# extract relu4_1 from input image
def encode(self, input):
for i in range(4):
input = getattr(self, 'enc_{:d}'.format(i + 1))(input)
return input
def calc_content_loss(self, input, target):
assert (target.requires_grad is False)
return self.mse_loss(input, target)
def calc_style_loss(self, input, target):
assert (target.requires_grad is False)
input_mean, input_std = calc_mean_std(input)
target_mean, target_std = calc_mean_std(target)
return self.mse_loss(input_mean, target_mean) + \
self.mse_loss(input_std, target_std)
def thumb_adaptive_instance_normalization(self, content_thumb_feat, content_patch_feat, style_thumb_feat):
size = content_thumb_feat.size()
style_mean, style_std = calc_mean_std(style_thumb_feat)
content_thumb_mean, content_thumb_std = calc_mean_std(content_thumb_feat)
content_thumb_feat = (content_thumb_feat - content_thumb_mean.expand(size)) / content_thumb_std.expand(size)
content_thumb_feat = content_thumb_feat * style_std.expand(size) + style_mean.expand(size)
content_patch_feat = (content_patch_feat - content_thumb_mean.expand(size)) / content_thumb_std.expand(size)
content_patch_feat = content_patch_feat * style_std.expand(size) + style_mean.expand(size)
return content_thumb_feat, content_patch_feat
def forward(self, content_patches, content_thumbs, style_thumbs, position, alpha=1.0):
assert 0 <= alpha <= 1
with torch.no_grad():
style_thumb_feats = self.encode_with_intermediate(style_thumbs)
content_thumb_feat = self.encode(content_thumbs)
content_patch_feat = self.encode(content_patches)
t_thumb, t_patch = self.thumb_adaptive_instance_normalization(content_thumb_feat,
content_patch_feat,
style_thumb_feats[-1])
t_thumb = alpha * t_thumb + (1 - alpha) * content_thumb_feat
t_patch = alpha * t_patch + (1 - alpha) * content_patch_feat
g_t_thumb = self.decoder(t_thumb)
g_t_patch = self.decoder(t_patch)
with torch.no_grad():
g_t_thumb_up = F.interpolate(g_t_thumb, scale_factor=2, mode='bilinear', align_corners=False)
g_t_thumb_crop = g_t_thumb_up[..., position[0]:position[1], position[2]:position[3]]
g_t_thumb_crop_feats = self.encode_with_intermediate(g_t_thumb_crop)
g_t_thumb_feats = self.encode_with_intermediate(g_t_thumb)
g_t_patch_feats = self.encode_with_intermediate(g_t_patch)
loss_c = self.calc_content_loss(g_t_thumb_feats[-1], t_thumb)
loss_s = self.calc_style_loss(g_t_thumb_feats[0], style_thumb_feats[0])
for i in range(1, 4):
loss_s += self.calc_style_loss(g_t_thumb_feats[i], style_thumb_feats[i])
loss_sp = self.calc_content_loss(g_t_patch_feats[-1], g_t_thumb_crop_feats[-1])
return loss_c, loss_s, loss_sp
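# Editor's hedged usage sketch (not part of the original file): a training script
# is expected to load pretrained VGG weights into `vgg` and wire things up roughly as
#   network = Net(vgg, decoder)
#   loss_c, loss_s = network(content_images, style_images, alpha=1.0)
# NetV2 additionally takes thumbnail tensors and a crop `position`, and returns a
# third, patch-consistency loss (loss_sp).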
|
tests/test_utilities.py | JNDanielson/mplstereonet | 120 | 12620132 | import pytest
import mplstereonet
import numpy as np
class TestParseStrikes:
def test_parse_strike(self):
data = [
[('N30E', '45NW'), (210, 45)],
[('210', '45'), (210, 45)],
[('E10N', '20NW'), (260, 20)],
[('350', '40W'), (170, 40)],
[('280', '30SW'), (100, 30)],
[('280', '30 SW'), (100, 30)],
]
for test, correct in data:
result = mplstereonet.parse_strike_dip(*test)
assert np.allclose(result, correct)
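# Editor's note (hedged): the expected strikes above follow the right-hand rule,
# e.g. ('N30E', '45NW') becomes (210, 45) because the strike is flipped 180
# degrees so that the dip direction lies 90 degrees clockwise of the strike.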
class TestParseQuadrant:
def test_parse_quadrant(self):
data = [('N30E', 30),
('E30N', 60),
('E30S', 120),
('S80E', 100),
('S10W', 190),
('W10S', 260),
('W30N', 300),
('N10E', 10),
('N10W', 350),
('N 10 W', 350),
]
for strike, azi in data:
assert azi == mplstereonet.parse_quadrant_measurement(strike)
def test_parse_quadrant_errors(self):
data = ['N10S', 'S80N', 'E10W', 'W30E']
for quad in data:
with pytest.raises(ValueError):
mplstereonet.parse_quadrant_measurement(quad)
class TestParseAzimuth:
def test_parse_azimuth(self):
data = [('N30E', 30),
('E30N', 60),
('E30S', 120),
('S80E', 100),
('S10W', 190),
('W10S', 260),
('W30N', 300),
('N10E', 10),
('N10W', 350),
('N 10 W', 350),
('310', 310),
(' 310 ', 310),
('32.5', 32.5),
]
for strike, azi in data:
assert azi == mplstereonet.parse_azimuth(strike)
def test_parse_azimuth_errors(self):
data = ['30NW', '30S', 'A40N', 'N10S', 'S80N', 'E10W', 'W30E']
for quad in data:
with pytest.raises(ValueError):
mplstereonet.parse_azimuth(quad)
class TestParseRakes:
def test_parse_rake(self):
data = [
[('N30E', '45NW', '10NE'), (210, 45, 170)],
[('N30E', '45NW', '10SW'), (210, 45, 10)],
[('210', '45', '10'), (210, 45, 10)],
[('210', '45', '-10'), (210, 45, 170)],
[('210', '45', '170'), (210, 45, 170)],
[('E10N', '20NW', '80E'), (260, 20, 100)],
[('E10N', '20NW', '100'), (260, 20, 100)],
[('E10N', '20NW', '80W'), (260, 20, 80)],
[('E10N', '20NW', '-80'), (260, 20, 100)],
[('350', '40W', '45N'), (170, 40, 135)],
[('350', '40W', '45S'), (170, 40, 45)],
[('280', '30SW', '30E'), (100, 30, 30)],
[('280', '30SW', '30W'), (100, 30, 150)],
]
for test, correct in data:
result = mplstereonet.parse_rake(*test)
assert np.allclose(result, correct)
class TestParsePlungeBearing:
def test_parse_pb(self):
data = [
[('10NE', 'N30E'), (10, 30)],
[('10SW', 'N30E'), (10, 210)],
[('10', '210'), (10, 210)],
[('-10', '210'), (10, 30)],
[('170', '210'), (10, 30)],
[('-170', '210'), (10, 210)],
]
for test, correct in data:
result = mplstereonet.parse_plunge_bearing(*test)
assert np.allclose(result, correct)
|
composer/core/state.py | mosaicml/composer | 945 | 12620136 | <reponame>mosaicml/composer<filename>composer/core/state.py
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""The state of the trainer."""
from __future__ import annotations
import collections.abc
import logging
import warnings
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, Optional, Sequence, Union, cast
import torch
import torch.nn.modules.utils
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Optimizer
from composer.core.precision import Precision
from composer.core.serializable import Serializable
from composer.core.time import Time, Timestamp, TimeUnit
from composer.utils import batch_get, batch_set, dist, ensure_tuple
if TYPE_CHECKING:
import deepspeed
import composer.core.types as types
from composer.core.algorithm import Algorithm
from composer.core.callback import Callback
from composer.core.evaluator import Evaluator
from composer.profiler import Profiler
__all__ = ["State"]
logger = logging.getLogger(__name__)
def _ensure_backwards_compatible_checkpointing(state_dict: Dict[str, Any]):
# v0.4.1 removed the leading underscores for the keys in the state_dict
# It also renamed _is_model_ddp_wrapped to is_model_ddp
state = {}
for k, v in state_dict.items():
if k == "_is_model_ddp_wrapped":
k = "is_model_ddp"
if k.startswith("_"):
k = k[1:]
state[k] = v
return state
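# Editor's hedged illustration (not part of the original file): given a pre-v0.4.1
# checkpoint fragment, the helper above rewrites the keys like so:
#   {"_is_model_ddp_wrapped": True, "_timestamp": ...} -> {"is_model_ddp": True, "timestamp": ...}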
_STATE_DICT_SERIALIZED_ATTRIBUTES = [
# List of attributes that are serialized with state_dict
# Only the attributes listed in state.serialized_attributes will actually be saved.
"model",
"optimizers",
"schedulers",
"algorithms",
"callbacks",
"scaler",
"timestamp",
]
class State(Serializable):
"""The state of the trainer.
Contains variables that the trainer tracks throughout the training loop. Note that all the necessary parts (i.e.,
:attr:`serialized_attributes`) of state are serialized when the trainer is checkpointed so that it can be used to
restore the trainer and continue training from a checkpoint. :mod:`~composer.algorithms` are able to modify an
instance of this class in-place.
.. note::
An instance of this class is automatically constructed by the :class:`~.Trainer` constructor. A user need
not instantiate this class.
Args:
model (torch.nn.Module): The model, typically as a subclass of :class:`~.ComposerModel`.
rank_zero_seed (int): The seed used on the rank zero process. It is assumed that each rank's seed is
``rank_zero_seed + dist.get_global_rank()``.
grad_accum (int, optional): The number of gradient accumulation steps to use. With this argument, micro batch
size for each device becomes ``microbatch_size = train_batch_size / (num_devices * grad_accum)``.
train_dataloader (types.DataLoader, optional): Dataloader used for training
evaluators (Evaluator | Sequence[Evaluator], optional): The :class:`.Evaluator` objects used for evaluation.
dataloader (types.DataLoader, optional): The active DataLoader.
dataloader_len (int | Time[int], optional): The number of batches per dataloader iteration (e.g. epoch).
The trainer will yield the first ``dataloader_len`` batches per iteration. If ``-1`` (the default),
the entire dataloader will be iterated over.
dataloader_label (str, optional): The name for the dataloader. Required if ``dataloader`` is specified.
(default: ``None``)
By convention, the training dataloader is called ``'train'``. The evaluator dataloader is called
``'eval'``, or when multiple evaluators are used, the name of the evaluator.
max_duration (str | Time, optional): The maximum duration to train for. (default: ``None``)
precision (str | Precision): The numerical precision to use for training. See :class:`~.Precision` for
the supported precisions.
optimizers (torch.optim.Optimizer | Sequence[torch.optim.Optimizer], optional): The optimizer being used to
train the model. Multiple optimizers are not currently supported.
schedulers (types.PyTorchScheduler | Sequence[types.PyTorchScheduler], optional):
The learning rate scheduler (can also be a list or tuple of schedulers).
scaler (torch.cuda.amp.GradScaler, optional): The gradient scaler in use for mixed precision training.
algorithms (Algorithm | Sequence[Algorithm], optional): The algorithms used for training.
callbacks (Callback | Sequence[Callback], optional): The callbacks used for training.
profiler (Optional[Profiler]): The Composer profiler.
Attributes:
batch (types.Batch): The batch. This will be the entire batch during the :attr:`.Event.AFTER_DATALOADER`, or a
microbatch between :attr:`.Event.BATCH_START` and :attr:`.Event.BATCH_END`.
current_metrics (Dict[str, Dict[str, Any]]): The current computed metrics, organized by dataloader label
and then by metric name. The train dataloader is labeled ``'train'``. If not using an :class:`.Evaluator`,
the eval dataloader is labeled ``'eval'``. Otherwise, the evaluator label is used.
For example:
>>> trainer = Trainer(
... ...,
... compute_training_metrics=True,
... train_dataloader=train_dataloader,
... eval_dataloader=eval_dataloader,
... )
>>> trainer.fit()
>>> trainer.state.current_metrics
{'train': {'Accuracy': tensor(...)}, 'eval': {'Accuracy': tensor(...)}}
Or, when using an :class:`.Evaluator`:
.. testsetup::
eval_1_dl = eval_dataloader
eval_2_dl = eval_dataloader
>>> from torchmetrics import Accuracy
>>> from composer.core import Evaluator
>>> trainer = Trainer(
... ...,
... compute_training_metrics=True,
... train_dataloader=train_dataloader,
... eval_dataloader=[
... Evaluator(label='eval1', dataloader=eval_1_dl, metrics=Accuracy()),
... Evaluator(label='eval2', dataloader=eval_2_dl, metrics=Accuracy()),
... ],
... )
>>> trainer.fit()
>>> trainer.state.current_metrics
{'train': {'Accuracy': tensor(...)}, 'eval1': {'Accuracy': tensor(...)}, 'eval2': {'Accuracy': tensor(...)}}
eval_timestamp (Timestamp): The timestamp for the current evaluation dataloader. This timestamp is reset
before the dataloader is evaluated. The :attr:`~Timestamp.epoch` attribute for this timestamp is always
``0``.
grad_accum (int): The number of gradient accumulation steps per batch.
loss (torch.Tensor | Sequence[torch.Tensor]): The most recently computed loss.
model (torch.nn.Module): The training model.
.. note::
When using DeepSpeed or multi-rank training, the model will be wrapped with
:class:`~deepspeed.DeepSpeedEngine` or :class:`~torch.nn.parallel.DistributedDataParallel`,
respectively.
outputs (torch.Tensor | Sequence[torch.Tensor]): The most recently computed output from the model's forward
pass.
predict_timestamp (Timestamp): The timestamp for the current prediction dataloader. This timestamp is reset
before the dataloader is used. The :attr:`~Timestamp.epoch` attribute for this timestamp is always
``0``.
profiler (Profiler): The profiler (if profiling is enabled), or ``None`` if not profiling.
rank_zero_seed (int): The seed of the rank zero process.
scaler (torch.cuda.amp.GradScaler): The gradient scaler if using mixed-precision training, or
``None`` if not using mixed-precision training.
serialized_attributes (List[str]): The names of the attributes that are serialized in a checkpoint.
By default, the following attributes are serialized:
+-----------------------+-------------------------------------------------------------+
| Attribute | Description |
+=======================+=============================================================+
| model | The model under training. |
+-----------------------+-------------------------------------------------------------+
| optimizers | The optimizers being used to train the model. |
+-----------------------+-------------------------------------------------------------+
| schedulers | The learning rate schedulers. |
+-----------------------+-------------------------------------------------------------+
| algorithms | The algorithms used for training. |
+-----------------------+-------------------------------------------------------------+
| callbacks | The callbacks used for training. |
+-----------------------+-------------------------------------------------------------+
| scaler | The gradient scaler in use for mixed precision training. |
+-----------------------+-------------------------------------------------------------+
| timestamp | The timestamp that tracks training loop progress. |
+-----------------------+-------------------------------------------------------------+
| rank_zero_seed | The seed of the rank zero process. |
+-----------------------+-------------------------------------------------------------+
| current_metrics | The current metrics. |
+-----------------------+-------------------------------------------------------------+
timestamp (Timestamp): The current training timestamp.
train_dataloader (Iterable): The training dataloader. (May be ``None`` if not training.)
"""
def __init__(
self,
# model
model: torch.nn.Module,
# determinism
rank_zero_seed: int,
# stopping conditions
max_duration: Optional[Union[str, Time[int]]] = None,
# data configurations
grad_accum: int = 1,
# dataloaders
train_dataloader: Optional[Iterable] = None,
evaluators: Optional[Union[Evaluator, Sequence[Evaluator]]] = None,
# these track the current 'active' dataloader
# depending on train, eval, or others
dataloader: Optional[Iterable] = None,
dataloader_label: Optional[str] = None,
dataloader_len: Union[int, Time[int]] = -1,
# precision
precision: Union[str, Precision] = Precision.FP32,
# optimizers
optimizers: Optional[Union[Optimizer, Sequence[Optimizer]]] = None,
# scaler
scaler: Optional[torch.cuda.amp.grad_scaler.GradScaler] = None,
# algorithms and callbacks
algorithms: Optional[Union[Algorithm, Sequence[Algorithm]]] = None,
callbacks: Optional[Union[Callback, Sequence[Callback]]] = None,
):
self.rank_zero_seed = rank_zero_seed
self.model = model
self.grad_accum = grad_accum
self._dataloader_len = None
self._dataloader = None
self._dataloader_label = None
self.set_dataloader(dataloader, dataloader_label, dataloader_len)
self._max_duration = None
self.max_duration = max_duration
self.train_dataloader = train_dataloader
self._evaluators = list(ensure_tuple(evaluators))
self.timestamp = Timestamp()
self.eval_timestamp = Timestamp()
self.predict_timestamp = Timestamp()
self._precision = Precision(precision)
if optimizers is None:
self._optimizers = []
else:
self._optimizers = list(ensure_tuple(optimizers))
self._schedulers = []
self.scaler = scaler
self._algorithms = list(ensure_tuple(algorithms))
self._callbacks = list(ensure_tuple(callbacks))
self.profiler: Optional[Profiler] = None
# Set defaults for transient variables (to make pyright happy)
self.batch: Any = None
self.loss: Union[torch.Tensor, Sequence[torch.Tensor]] = torch.Tensor()
self.outputs: Union[torch.Tensor, Sequence[torch.Tensor]] = torch.Tensor()
# These attributes will be serialized using .state_dict(), and loaded with .load_state_dict()
# All other attributes will not be serialized.
# For simplicity, omit the leading underscore for private attributes.
# For example, even though the optimizers are stored on the state
# as the "_optimizers" attribute, here we specify just "optimizers"
self.serialized_attributes = [
"model",
"optimizers",
"schedulers",
"algorithms",
"callbacks",
"scaler",
"timestamp",
"rank_zero_seed",
"current_metrics",
]
self.current_metrics: Dict[str, Dict[str, Any]] = {}
@property
def seed(self):
"""The seed for the current rank."""
return self.rank_zero_seed + dist.get_global_rank()
@property
def max_duration(self):
"""The maximum training duration."""
return self._max_duration
@max_duration.setter
def max_duration(self, max_duration: Optional[Union[str, Time[int]]]):
if max_duration is None:
self._max_duration = None
return
if isinstance(max_duration, str):
max_duration = cast(Time[int], Time.from_timestring(max_duration))
if max_duration.unit == TimeUnit.DURATION:
raise ValueError("TimeUnit.DURATION is not allowed as a unit for max_duration")
self._max_duration = max_duration
def get_elapsed_duration(self) -> Optional[Time[float]]:
"""Get the elapsed training duration.
Returns:
Optional[Time[float]]: The elapsed duration, in :attr:`TimeUnit.DURATION`.
``Time(0.0, TimeUnit.DURATION)`` represents the beginning of training and ``Time(1.0, TimeUnit.DURATION)``
represents a completed training process. Returns ``None`` if ``max_duration`` is None.
"""
if self.max_duration is None:
return None
return self.timestamp.get(self.max_duration.unit) / self.max_duration
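# Editor's hedged example (not part of the original file): with
# max_duration = Time.from_timestring("10ep") and the timestamp currently at
# epoch 5, get_elapsed_duration() returns Time(0.5, TimeUnit.DURATION).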
@property
def optimizers(self):
"""The optimizers."""
return self._optimizers
@optimizers.setter
def optimizers(self, optimizers: Union[Optimizer, Sequence[Optimizer]]):
self._optimizers[:] = ensure_tuple(optimizers)
@property
def schedulers(self):
"""The schedulers."""
return self._schedulers
@schedulers.setter
def schedulers(self, schedulers: Union[types.PyTorchScheduler, Sequence[types.PyTorchScheduler]]):
self._schedulers[:] = ensure_tuple(schedulers)
def batch_get_item(self, key: Union[str, int, Callable, Any]) -> Any:
"""Gets element from batch either specified by key or user-specified function.
See batch_get in `utils/batch_helpers.py` for examples.
Args:
key (str | int | Tuple[Callable, Callable] | Any, optional): A key to index into the batch or a
user-specified function to do the extracting. A pair of callables is also
supported for cases where a get and set function pair are both passed
(like in Algorithms). The getter is assumed to be the first of the pair.
Returns:
The part of the batch specified by the key. This could be any type
depending on what the batch is composed of.
"""
return batch_get(self.batch, key)
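# Editor's hedged illustration (not part of the original file): for a typical
# (inputs, targets) tuple batch,
#   state.batch_get_item(0)               # -> inputs
#   state.batch_get_item(lambda b: b[1])  # -> targets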
def batch_set_item(self, key: Union[str, int, Callable, Any], value: Any):
"""Sets the element specified by the key of the set_fn to the specified value.
This is not an in-place operation, as for tuple-typed batches, a new batch object
must be created to modify them.
See batch_set in `utils/batch_helpers.py` for examples.
Args:
key (str | int | Tuple[Callable, Callable] | Any, optional): A key to index into the batch or a user-specified
function to do the setting. A pair of callables is also supported for
cases where a get and set function pair are both passed (like in
Algorithms). The setter is assumed to be the second of the pair.
value (Any): The value that batch[key] or batch.key gets set to or that the
user-defined set function sets a part of the batch to.
Returns:
batch (Any): The updated batch with value set at key.
"""
self.batch = batch_set(self.batch, key=key, value=value)
@property
def callbacks(self):
"""The callbacks."""
return self._callbacks
@callbacks.setter
def callbacks(self, callbacks: Sequence[Callback]):
self._callbacks[:] = callbacks
@property
def algorithms(self):
"""The algorithms."""
return self._algorithms
@algorithms.setter
def algorithms(self, algorithms: Sequence[Algorithm]):
self._algorithms[:] = algorithms
@property
def evaluators(self):
"""The evaluators."""
return self._evaluators
@evaluators.setter
def evaluators(self, evaluators: Union[Evaluator, Sequence[Evaluator]]):
self._evaluators[:] = list(ensure_tuple(evaluators))
def state_dict(self) -> Dict[str, Any]:
state_dict = {}
for attribute_name in self.serialized_attributes:
attribute_value = getattr(self, attribute_name)
if attribute_name == "model":
# Save model directly instead of by class name, since model may be wrapped by DistributedDataParallel
# If it is DDP wrapped, do not save the `module.` prefix, as that is an implementation detail
model_state = attribute_value.state_dict()
if self.is_model_ddp:
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(model_state, "module.")
serialized_value = model_state
else:
if attribute_name in _STATE_DICT_SERIALIZED_ATTRIBUTES:
serialized_value = {
type(obj).__qualname__: obj.state_dict() for obj in ensure_tuple(attribute_value)
}
else:
serialized_value = attribute_value
state_dict[attribute_name] = serialized_value
return state_dict
def load_model_state(self, state_dict: Dict[str, Any], strict: bool):
"""Loads the model's state from a ``state_dict``.
Args:
state_dict (Dict[str, Any]): The state dict, generated from a previous call to :meth:`state_dict`.
strict (bool): Whether the keys (i.e., model parameter names) in the model state dict should
perfectly match the keys in the model instance.
"""
if state_dict.get("is_model_ddp", False) and not self.is_model_ddp:
# This check is for backwards compatibility, as pre-v0.6.0 checkpoints serialized the state
# with the `module.` prefix
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(state_dict['model'], "module.")
missing_keys, unexpected_keys = self.model.load_state_dict(state_dict['model'], strict=strict)
if len(missing_keys) > 0:
logger.warning(f"Found these missing keys in the checkpoint: {', '.join(missing_keys)}")
if len(unexpected_keys) > 0:
logger.warning(f"Found these unexpected keys in the checkpoint: {', '.join(unexpected_keys)}")
def load_state_dict(self, state: Dict[str, Any], strict: bool = False):
"""Loads the state.
Args:
state (Dict[str, Any]): object returned from call to :meth:`state_dict`.
strict (bool): whether the keys in the ``state["model"]`` should perfectly match the keys in the
``self.model``. Defaults to False.
"""
state = _ensure_backwards_compatible_checkpointing(state)
for attribute_name, serialized_value in state.items():
if attribute_name not in self.serialized_attributes:
# it's possible some attributes were removed
continue
if attribute_name == "model":
self.load_model_state(state, strict=strict)
continue
state_field_value = getattr(self, attribute_name)
if attribute_name in _STATE_DICT_SERIALIZED_ATTRIBUTES:
for target in ensure_tuple(state_field_value):
if type(target).__qualname__ not in serialized_value:
warnings.warn(
f"{type(target).__qualname__} is not in the state_dict. Its state will not be restored.",
category=UserWarning)
continue
source = serialized_value[type(target).__qualname__]
target.load_state_dict(source)
else:
# direct serialization
try:
setattr(self, attribute_name, serialized_value)
except AttributeError:
# ignore AttributeError for properties that have getters but not setters.
pass
@property
def dataloader(self):
"""The active dataloader."""
return self._dataloader
@property
def dataloader_label(self):
"""The dataloader label for the active dataloader.
By default, the training dataloader is called ``'train'``. The evaluator dataloader
is called ``'eval'``, or when multiple evaluators are used, the name of the evaluator.
However, the dataloader label can be explicitly specified in :meth:`.Trainer.fit`
and :meth:`.Trainer.eval`.
Returns:
Optional[str]: The dataloader label, or None if no dataloader is set.
"""
return self._dataloader_label
def set_dataloader(
self,
dataloader: Optional[Iterable] = None,
dataloader_label: Optional[str] = None,
dataloader_len: Union[int, Time[int]] = -1,
):
"""Update the active dataloader and dataloader label.
Args:
dataloader (Iterable, optional): The dataloader. Defaults to None.
dataloader_label (str, optional): The dataloader label. Must be ``None`` if and only if
``dataloader`` is None. Defaults to None.
dataloader_len (int | Time[int], optional): The number of batches per dataloader iteration (e.g. epoch), as used by the trainer.
Set to ``-1`` to iterate over the entire dataset. (Default: ``-1``.)
"""
if dataloader is None:
dataloader_label = None
else:
if dataloader_label is None:
raise ValueError("If the `dataloader` is specified, then `dataloader_label` must not be None.")
self._dataloader = dataloader
self._dataloader_label = dataloader_label
if dataloader is not None:
self.dataloader_len = dataloader_len # setting it to -1 will do a failsafe read of len(dataloader)
else:
self._dataloader_len = None
@property
def dataloader_len(self):
"""The number of batches per dataloader iteration (e.g. epoch), as used by the trainer.
.. note::
If not explicitly specified, this value is an approximation, as it depends on ``len(self.dataloader)``.
See the :doc:`PyTorch DataLoader Documentation <torch:data>` for more information.
Returns:
Optional[Time[int]]: The number of batches per dataloader iteration (e.g. epoch), or None if no dataloader
is defined or if the dataloader has an unknown length (e.g. streaming dataloaders).
"""
return self._dataloader_len
@dataloader_len.setter
def dataloader_len(self, num_batches: Union[int, Time[int]]):
if isinstance(num_batches, int):
num_batches = Time(num_batches, TimeUnit.BATCH)
if self._dataloader is None:
raise RuntimeError("`State.dataloader_len` cannot be set if the dataloader is not defined.")
try:
if isinstance(self._dataloader, collections.abc.Sized):
dataloader_len = len(self._dataloader)
else:
dataloader_len = None
except (TypeError, NotImplementedError):
dataloader_len = None
if dataloader_len is not None and num_batches >= 0 and int(num_batches) > dataloader_len:
warnings.warn((f"DataloaderNumBatchesWarning: The dataloader_len ({int(num_batches)}) "
f"is greater than the length (i.e. number of batches) of the dataloader, which is "
f"{dataloader_len}. State.dataloader_len is thus being set to {dataloader_len}."))
self._dataloader_len = Time(dataloader_len, TimeUnit.BATCH)
return
if num_batches < 0:
if dataloader_len is not None:
# len(dataloader) is an approximation -- see https://pytorch.org/docs/stable/data.html.
# However, in the worst case where additional last batches are dropped, this calculation should be
# an over-estimate, leading to the entire dataloader still being iterated over.
self._dataloader_len = Time(dataloader_len, TimeUnit.BATCH)
else:
# The dataloader length is unknown.
self._dataloader_len = None
return
self._dataloader_len = num_batches
@property
def precision(self):
"""The numerical precision to use for training.
See :class:`~.Precision` for the supported precisions.
"""
return self._precision
@precision.setter
def precision(self, precision: Union[str, Precision]):
self._precision = Precision(precision)
@property
def is_model_deepspeed(self) -> bool:
"""Whether :attr:`model` is an instance of a :class:`~deepspeed.DeepSpeedEngine`."""
try:
import deepspeed
except ImportError:
return False
else:
return isinstance(self.model, deepspeed.DeepSpeedEngine)
@property
def is_model_ddp(self):
"""Whether :attr:`model` is an instance of a :class:`.DistributedDataParallel`."""
return isinstance(self.model, DistributedDataParallel)
@property
def deepspeed_model(self) -> deepspeed.DeepSpeedEngine:
"""Cast :attr:`model` to :class:`~deepspeed.DeepSpeedEngine`."""
if self.is_model_deepspeed:
return cast("deepspeed.DeepSpeedEngine", self.model)
raise TypeError("state.model is not a DeepSpeed model")
|
unittest/ops/electric_potential_unittest.py | xiefei1026/DREAMPlace | 323 | 12620157 | ##
# @file electric_potential_unittest.py
# @author <NAME>
# @date Mar 2019
#
import time
import numpy as np
import unittest
import logging
import torch
from torch.autograd import Function, Variable
import os
import sys
import gzip
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from dreamplace.ops.dct import dct
from dreamplace.ops.dct import discrete_spectral_transform
from dreamplace.ops.electric_potential import electric_potential
sys.path.pop()
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import _pickle as pickle
import inspect
import pdb
from scipy import fftpack
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
class ElectricPotentialOpTest(unittest.TestCase):
def test_densityOverflowRandom(self):
dtype = np.float64
xx = np.array([
1000, 11148, 11148, 11148, 11148, 11148, 11124, 11148, 11148,
11137, 11126, 11148, 11130, 11148, 11148, 11148, 11148, 11148,
11148, 0, 11148, 11148, 11150, 11134, 11148, 11148, 11148, 10550,
11148, 11148, 11144, 11148, 11148, 11148, 11148, 11140, 11120,
11154, 11148, 11133, 11148, 11148, 11134, 11125, 11148, 11148,
11148, 11155, 11127, 11148, 11148, 11148, 11148, 11131, 11148,
11148, 11148, 11148, 11136, 11148, 11146, 11148, 11135, 11148,
11125, 11150, 11148, 11139, 11148, 11148, 11130, 11148, 11128,
11148, 11138, 11148, 11148, 11148, 11130, 11148, 11132, 11148,
11148, 11090
]).astype(dtype)
yy = np.array([
1000, 11178, 11178, 11190, 11400, 11178, 11172, 11178, 11178,
11418, 11418, 11178, 11418, 11178, 11178, 11178, 11178, 11178,
11178, 11414, 11178, 11178, 11172, 11418, 11406, 11184, 11178,
10398, 11178, 11178, 11172, 11178, 11178, 11178, 11178, 11418,
11418, 11172, 11178, 11418, 11178, 11178, 11172, 11418, 11178,
11178, 11178, 11418, 11418, 11178, 11178, 11178, 11178, 11418,
11178, 11178, 11394, 11178, 11418, 11178, 11418, 11178, 11418,
11178, 11418, 11418, 11178, 11172, 11178, 11178, 11418, 11178,
11418, 11178, 11418, 11412, 11178, 11178, 11172, 11178, 11418,
11178, 11178, 11414
]).astype(dtype)
node_size_x = np.array([
6, 3, 3, 3, 3, 3, 5, 3, 3, 1, 1, 3, 1, 3, 3, 3, 3, 3, 3, 16728, 3,
3, 5, 1, 3, 3, 3, 740, 3, 3, 5, 3, 3, 3, 3, 5, 5, 5, 3, 1, 3, 3, 5,
1, 3, 3, 3, 5, 1, 3, 3, 3, 3, 1, 3, 3, 3, 3, 5, 3, 5, 3, 1, 3, 5,
5, 3, 5, 3, 3, 5, 3, 1, 3, 1, 3, 3, 3, 5, 3, 1, 3, 3, 67
]).astype(dtype)
node_size_y = np.array([
6, 240, 240, 6, 6, 240, 6, 240, 240, 6, 6, 240, 6, 240, 240, 240,
240, 240, 240, 10, 240, 6, 6, 6, 6, 6, 240, 780, 240, 240, 6, 240,
240, 240, 240, 6, 6, 6, 240, 6, 240, 240, 6, 6, 240, 240, 240, 6,
6, 240, 240, 240, 240, 6, 240, 240, 6, 240, 6, 240, 6, 240, 6, 240,
6, 6, 240, 6, 240, 240, 6, 240, 6, 240, 6, 6, 240, 240, 6, 240, 6,
240, 240, 10
]).astype(dtype)
#xx = np.array([2.0]).astype(dtype)
#yy = np.array([1.5]).astype(dtype)
#node_size_x = np.array([1.0]).astype(dtype)
#node_size_y = np.array([1.0]).astype(dtype)
num_nodes = len(xx)
num_movable_nodes = 1
num_terminals = len(xx) - num_movable_nodes
scale_factor = 1.0
xl = 0.0
yl = 6.0
xh = 16728.0
yh = 11430.0
target_density = 0.7
num_bins_x = 1024
num_bins_y = 1024
bin_size_x = (xh - xl) / num_bins_x
bin_size_y = (yh - yl) / num_bins_y
"""
return bin xl
"""
def bin_xl(id_x):
return xl + id_x * bin_size_x
"""
return bin xh
"""
def bin_xh(id_x):
return min(bin_xl(id_x) + bin_size_x, xh)
"""
return bin yl
"""
def bin_yl(id_y):
return yl + id_y * bin_size_y
"""
return bin yh
"""
def bin_yh(id_y):
return min(bin_yl(id_y) + bin_size_y, yh)
bin_center_x = np.zeros(num_bins_x, dtype=dtype)
for id_x in range(num_bins_x):
bin_center_x[id_x] = (bin_xl(id_x) +
bin_xh(id_x)) / 2 * scale_factor
bin_center_y = np.zeros(num_bins_y, dtype=dtype)
for id_y in range(num_bins_y):
bin_center_y[id_y] = (bin_yl(id_y) +
bin_yh(id_y)) / 2 * scale_factor
print("target_area = ", target_density * bin_size_x * bin_size_y)
if dtype == np.float64:
dtype = torch.float64
elif dtype == np.float32:
dtype = torch.float32
movable_size_x = node_size_x[:num_movable_nodes]
_, sorted_node_map = torch.sort(
torch.tensor(movable_size_x, requires_grad=False, dtype=dtype))
sorted_node_map = sorted_node_map.to(torch.int32).contiguous()
# test cpu
custom = electric_potential.ElectricPotential(
torch.tensor(node_size_x, requires_grad=False, dtype=dtype),
torch.tensor(node_size_y, requires_grad=False, dtype=dtype),
torch.tensor(bin_center_x, requires_grad=False, dtype=dtype),
torch.tensor(bin_center_y, requires_grad=False, dtype=dtype),
target_density=torch.tensor(target_density,
requires_grad=False,
dtype=dtype),
xl=xl,
yl=yl,
xh=xh,
yh=yh,
bin_size_x=bin_size_x,
bin_size_y=bin_size_y,
num_movable_nodes=num_movable_nodes,
num_terminals=num_terminals,
num_filler_nodes=0,
padding=0,
sorted_node_map=sorted_node_map,
movable_macro_mask=None,
deterministic_flag=True)
pos = Variable(torch.from_numpy(np.concatenate([xx, yy])),
requires_grad=True)
result = custom.forward(pos)
print("custom_result = ", result)
print(result.type())
result.backward()
grad = pos.grad.clone()
print("custom_grad = ", grad)
# test cuda
if torch.cuda.device_count():
custom_cuda = electric_potential.ElectricPotential(
torch.tensor(node_size_x, requires_grad=False,
dtype=dtype).cuda(),
torch.tensor(node_size_y, requires_grad=False,
dtype=dtype).cuda(),
torch.tensor(bin_center_x, requires_grad=False,
dtype=dtype).cuda(),
torch.tensor(bin_center_y, requires_grad=False,
dtype=dtype).cuda(),
target_density=torch.tensor(target_density,
requires_grad=False,
dtype=dtype).cuda(),
xl=xl,
yl=yl,
xh=xh,
yh=yh,
bin_size_x=bin_size_x,
bin_size_y=bin_size_y,
num_movable_nodes=num_movable_nodes,
num_terminals=num_terminals,
num_filler_nodes=0,
padding=0,
sorted_node_map=sorted_node_map.cuda(),
movable_macro_mask=None,
deterministic_flag=False)
pos = Variable(torch.from_numpy(np.concatenate([xx, yy])).cuda(),
requires_grad=True)
#pos.grad.zero_()
result_cuda = custom_cuda.forward(pos)
print("custom_result_cuda = ", result_cuda.data.cpu())
print(result_cuda.type())
result_cuda.backward()
grad_cuda = pos.grad.clone()
print("custom_grad_cuda = ", grad_cuda.data.cpu())
np.testing.assert_allclose(result.detach().numpy(),
result_cuda.data.cpu().detach().numpy())
np.testing.assert_allclose(grad.detach().numpy(),
grad_cuda.data.cpu().detach().numpy())
def plot(plot_count, density_map, padding, name):
"""
density map contour and heat map
"""
density_map = density_map[padding:density_map.shape[0] - padding,
padding:density_map.shape[1] - padding]
print("max density = %g" % (np.amax(density_map)))
print("mean density = %g" % (np.mean(density_map)))
fig = plt.figure()
ax = fig.gca(projection='3d')
x = np.arange(density_map.shape[0])
y = np.arange(density_map.shape[1])
x, y = np.meshgrid(x, y)
# looks like x and y should be swapped
ax.plot_surface(y, x, density_map, alpha=0.8)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('density')
# plt.tight_layout()
plt.savefig(name + ".3d.png")
plt.close()
# plt.clf()
#fig, ax = plt.subplots()
# ax.pcolor(density_map)
# Loop over data dimensions and create text annotations.
# for i in range(density_map.shape[0]):
# for j in range(density_map.shape[1]):
# text = ax.text(j, i, density_map[i, j],
# ha="center", va="center", color="w")
# fig.tight_layout()
#plt.savefig(name+".2d.%d.png" % (plot_count))
# plt.close()
def eval_runtime(design):
# e.g., adaptec1_density.pklz
with gzip.open(design, "rb") as f:
node_size_x, node_size_y, bin_center_x, bin_center_y, target_density, xl, yl, xh, yh, bin_size_x, bin_size_y, num_movable_nodes, num_terminals, num_filler_nodes = pickle.load(
f)
dtype = torch.float64
num_threads = 10
torch.set_num_threads(num_threads)
print("num_threads = %d" % (torch.get_num_threads()))
movable_size_x = node_size_x[:num_movable_nodes]
_, sorted_node_map = torch.sort(
torch.tensor(movable_size_x, requires_grad=False, dtype=dtype).cuda())
sorted_node_map = sorted_node_map.to(torch.int32).contiguous()
pos_var = Variable(torch.empty(len(node_size_x) * 2,
dtype=dtype).uniform_(xl, xh),
requires_grad=True)
custom = electric_potential.ElectricPotential(
torch.tensor(node_size_x, requires_grad=False, dtype=dtype).cpu(),
torch.tensor(node_size_y, requires_grad=False, dtype=dtype).cpu(),
torch.tensor(bin_center_x, requires_grad=False, dtype=dtype).cpu(),
torch.tensor(bin_center_y, requires_grad=False, dtype=dtype).cpu(),
target_density=torch.tensor(target_density,
requires_grad=False,
dtype=dtype).cpu(),
xl=xl,
yl=yl,
xh=xh,
yh=yh,
bin_size_x=bin_size_x,
bin_size_y=bin_size_y,
num_movable_nodes=num_movable_nodes,
num_terminals=num_terminals,
num_filler_nodes=num_filler_nodes,
padding=0,
sorted_node_map=sorted_node_map.cpu())
custom_cuda = electric_potential.ElectricPotential(
torch.tensor(node_size_x, requires_grad=False, dtype=dtype).cuda(),
torch.tensor(node_size_y, requires_grad=False, dtype=dtype).cuda(),
torch.tensor(bin_center_x, requires_grad=False, dtype=dtype).cuda(),
torch.tensor(bin_center_y, requires_grad=False, dtype=dtype).cuda(),
target_density=torch.tensor(target_density,
requires_grad=False,
dtype=dtype).cuda(),
xl=xl,
yl=yl,
xh=xh,
yh=yh,
bin_size_x=bin_size_x,
bin_size_y=bin_size_y,
num_movable_nodes=num_movable_nodes,
num_terminals=num_terminals,
num_filler_nodes=num_filler_nodes,
padding=0,
sorted_node_map=sorted_node_map)
torch.cuda.synchronize()
iters = 100
tbackward = 0
tt = time.time()
for i in range(iters):
result = custom.forward(pos_var)
ttb = time.time()
result.backward()
tbackward += time.time() - ttb
torch.cuda.synchronize()
print("custom takes %.3f ms, backward %.3f ms" %
((time.time() - tt) / iters * 1000, (tbackward / iters * 1000)))
pos_var = pos_var.cuda()
tt = time.time()
for i in range(iters):
result = custom_cuda.forward(pos_var)
result.backward()
torch.cuda.synchronize()
print("custom_cuda takes %.3f ms" % ((time.time() - tt) / iters * 1000))
if __name__ == '__main__':
logging.root.name = 'DREAMPlace'
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)-7s] %(name)s - %(message)s',
stream=sys.stdout)
if len(sys.argv) < 2:
unittest.main()
else:
design = sys.argv[1]
eval_runtime(design)
|
run_all_tests.py | spchal/pwntools-write-ups | 456 | 12620215 | <filename>run_all_tests.py
#!/usr/bin/env python2
from pwn import *
for path, dirs, files in os.walk('.'):
if '.git' in path:
continue
if 'wargames' in path:
continue
if not dirs:
for f in files:
if f.startswith('harness'):
h = log.waitfor('Running harness for ' + path)
with context.local(log_level = 'WARNING'):
data = process("./" + f, cwd = path).recvall().strip()
if data == 'ok':
h.success()
else:
h.failure('Got output:\n' + data)
break
else:
log.warning(path + ' has no harness')
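# Editor's hedged example (not part of the original file): this runner accepts any
# executable named harness* that prints exactly "ok" on success, e.g.
#   #!/usr/bin/env python2
#   from pwn import *
#   # ... drive the challenge binary here ...
#   print('ok')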
|
third_party/WebKit/Source/build/scripts/make_element_lookup_trie.py | wenfeifei/miniblink49 | 5,964 | 12620217 | <reponame>wenfeifei/miniblink49
#!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from itertools import groupby, islice
import sys
import in_generator
import template_expander
PARAMETER_NAME = 'data'
def _trie(tags, index):
"""Make a trie from list of tags, starting at index.
Resulting trie is partly space-optimized (semi-radix tree): once have only
one string left, compact the entire branch to one leaf node.
However, does not compact branch nodes with a single child. (FIXME)
Returns:
(char, subtrie, tag, conditions): (char, trie, str, list)
code generation differs between branch nodes and leaf nodes,
hence need different data for each.
Arguments:
tags: sorted list
(sorted needed by groupby, list needed by len)
index: index at which to branch
(assumes prior to this index strings have a common prefix)
"""
def trie_node(char, subtags_iter):
# Pass in |char| so we can include in same tuple without unpacking
subtags = list(subtags_iter) # need list for len
if len(subtags) == 1: # terminal node, no subtrie
subtrie = None
tag = subtags[0]
conditions = _conditions(tag, index + 1)
else:
subtrie = _trie(subtags, index + 1)
tag = None
conditions = None
return char, subtrie, tag, conditions
# Group by char at index
def char_at_index(tag):
return tag[index].lower()
char_subtags = ((k, g) for k, g in groupby(tags, char_at_index))
# FIXME: if all subtags have a common prefix, merge with child
# and skip the switch in the generated code
return (trie_node(char, subtags) for char, subtags in char_subtags)
def _conditions(tag, index):
# boolean conditions to check suffix; corresponds to compacting branch
# with a single leaf
return ["%s[%d] == '%c'" % (PARAMETER_NAME, i, c.lower())
for i, c in islice(enumerate(tag), index, None)]
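# Editor's hedged illustration (not part of the original file): for the same-length
# tags ['base', 'body'], _trie(tags, 0) branches on 'b'; at index 1 each group has a
# single tag left, so the branch is compacted into a leaf whose conditions check the
# remaining suffix, e.g. for 'body':
#   ('o', None, 'body', ["data[2] == 'd'", "data[3] == 'y'"])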
class ElementLookupTrieWriter(in_generator.Writer):
# FIXME: Inherit all these from somewhere.
defaults = {
'JSInterfaceName': None,
'constructorNeedsCreatedByParser': None,
'constructorNeedsFormElement': None,
'interfaceName': None,
'noConstructor': None,
'runtimeEnabled': None,
}
default_parameters = {
'attrsNullNamespace': None,
'export': '',
'fallbackInterfaceName': '',
'fallbackJSInterfaceName': '',
'namespace': '',
'namespacePrefix': '',
'namespaceURI': '',
}
def __init__(self, in_file_paths):
super(ElementLookupTrieWriter, self).__init__(in_file_paths)
self._tags = [entry['name'] for entry in self.in_file.name_dictionaries]
self._namespace = self.in_file.parameters['namespace'].strip('"')
self._outputs = {
(self._namespace + 'ElementLookupTrie.h'): self.generate_header,
(self._namespace + 'ElementLookupTrie.cpp'): self.generate_implementation,
}
@template_expander.use_jinja('ElementLookupTrie.h.tmpl')
def generate_header(self):
return {
'namespace': self._namespace,
}
@template_expander.use_jinja('ElementLookupTrie.cpp.tmpl')
def generate_implementation(self):
# First sort, so groupby works
self._tags.sort(key=lambda tag: (len(tag), tag))
# Group tags by length
length_tags = ((k, g) for k, g in groupby(self._tags, len))
return {
'namespace': self._namespace,
'length_tries': ((length, _trie(tags, 0))
for length, tags in length_tags),
}
if __name__ == '__main__':
in_generator.Maker(ElementLookupTrieWriter).main(sys.argv)
|
datasets/load/old/load.1000_genomes_phase3_chrMT.GRCh37.py | 3vivekb/hail | 789 | 12620220 |
import hail as hl
ht_samples = hl.import_table('gs://hail-datasets/raw-data/1000_genomes/samples_1kg.tsv',
key='sample')
mt = hl.import_vcf('gs://hail-datasets/raw-data/1000_genomes/ALL.chrMT.phase3_callmom-v0_4.20130502.genotypes.vcf.bgz')
mt = mt.key_rows_by('locus')
mt = mt.distinct_by_row()
mt = mt.partition_rows_by(['locus'], 'locus', 'alleles')
mt_split = hl.split_multi(mt)
mt_split = mt_split.select_entries(GT=hl.downcode(mt_split.GT, mt_split.a_index))
mt_split = mt_split.annotate_rows(info=hl.struct(AC=mt_split.info.AC,
VT=(hl.case()
.when((mt_split.alleles[0].length() == 1) & (mt_split.alleles[1].length() == 1), 'SNP')
.when(mt_split.alleles[0].matches('<CN*>') | mt_split.alleles[1].matches('<CN*>'), 'SV')
.default('INDEL'))))
mt_split.describe()
mt_split = mt_split.drop('old_locus', 'old_alleles', 'a_index')
mt_split = mt_split.annotate_cols(sex=ht_samples[mt_split.s].gender,
super_population=ht_samples[mt_split.s].super_pop,
population=ht_samples[mt_split.s].pop)
mt_split = hl.sample_qc(mt_split)
mt_split = hl.variant_qc(mt_split)
mt_split = hl.vep(mt_split, 'gs://hail-common/vep/vep/vep85-gcloud.json')
mt_split.describe()
mt_split.write('gs://hail-datasets/hail-data/1000_genomes_phase3_chrMT.GRCh37.mt', overwrite=True)
|
yapftests/unwrapped_line_test.py | TinkerBoard2-Android/external-yapf | 13,769 | 12620227 | <reponame>TinkerBoard2-Android/external-yapf
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yapf.unwrapped_line."""
import textwrap
import unittest
from lib2to3 import pytree
from lib2to3.pgen2 import token
from yapf.yapflib import format_token
from yapf.yapflib import split_penalty
from yapf.yapflib import unwrapped_line
from yapftests import yapf_test_helper
class UnwrappedLineBasicTest(unittest.TestCase):
def testConstruction(self):
toks = _MakeFormatTokenList([(token.DOT, '.'), (token.VBAR, '|')])
uwl = unwrapped_line.UnwrappedLine(20, toks)
self.assertEqual(20, uwl.depth)
self.assertEqual(['DOT', 'VBAR'], [tok.name for tok in uwl.tokens])
def testFirstLast(self):
toks = _MakeFormatTokenList([(token.DOT, '.'), (token.LPAR, '('),
(token.VBAR, '|')])
uwl = unwrapped_line.UnwrappedLine(20, toks)
self.assertEqual(20, uwl.depth)
self.assertEqual('DOT', uwl.first.name)
self.assertEqual('VBAR', uwl.last.name)
def testAsCode(self):
toks = _MakeFormatTokenList([(token.DOT, '.'), (token.LPAR, '('),
(token.VBAR, '|')])
uwl = unwrapped_line.UnwrappedLine(2, toks)
self.assertEqual(' . ( |', uwl.AsCode())
def testAppendToken(self):
uwl = unwrapped_line.UnwrappedLine(0)
uwl.AppendToken(_MakeFormatTokenLeaf(token.LPAR, '('))
uwl.AppendToken(_MakeFormatTokenLeaf(token.RPAR, ')'))
self.assertEqual(['LPAR', 'RPAR'], [tok.name for tok in uwl.tokens])
def testAppendNode(self):
uwl = unwrapped_line.UnwrappedLine(0)
uwl.AppendNode(pytree.Leaf(token.LPAR, '('))
uwl.AppendNode(pytree.Leaf(token.RPAR, ')'))
self.assertEqual(['LPAR', 'RPAR'], [tok.name for tok in uwl.tokens])
class UnwrappedLineFormattingInformationTest(yapf_test_helper.YAPFTest):
def testFuncDef(self):
code = textwrap.dedent(r"""
def f(a, b):
pass
""")
uwlines = yapf_test_helper.ParseAndUnwrap(code)
f = uwlines[0].tokens[1]
self.assertFalse(f.can_break_before)
self.assertFalse(f.must_break_before)
self.assertEqual(f.split_penalty, split_penalty.UNBREAKABLE)
lparen = uwlines[0].tokens[2]
self.assertFalse(lparen.can_break_before)
self.assertFalse(lparen.must_break_before)
self.assertEqual(lparen.split_penalty, split_penalty.UNBREAKABLE)
def _MakeFormatTokenLeaf(token_type, token_value):
return format_token.FormatToken(pytree.Leaf(token_type, token_value))
def _MakeFormatTokenList(token_type_values):
return [
_MakeFormatTokenLeaf(token_type, token_value)
for token_type, token_value in token_type_values
]
if __name__ == '__main__':
unittest.main()
|
tensorflow/contrib/labeled_tensor/python/ops/nn.py | tianyapiaozi/tensorflow | 848 | 12620236 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Neural network ops for LabeledTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import nn as contrib_nn
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.ops import nn
relu = core.define_unary_op('relu', nn.relu)
relu6 = core.define_unary_op('relu6', nn.relu6)
crelu = core.define_unary_op('crelu', nn.crelu)
elu = core.define_unary_op('elu', nn.elu)
softplus = core.define_unary_op('softplus', nn.softplus)
l2_loss = core.define_unary_op('l2_loss', nn.l2_loss)
sigmoid_cross_entropy_with_logits = core.define_binary_op(
'sigmoid_cross_entropy_with_logits',
contrib_nn.deprecated_flipped_sigmoid_cross_entropy_with_logits)
softmax = core.define_unary_op('softmax', nn.softmax)
log_softmax = core.define_unary_op('log_softmax', nn.log_softmax)
softmax_cross_entropy_with_logits = core.define_binary_op(
'softmax_cross_entropy_with_logits',
contrib_nn.deprecated_flipped_softmax_cross_entropy_with_logits)
sparse_softmax_cross_entropy_with_logits = core.define_binary_op(
'sparse_softmax_cross_entropy_with_logits',
contrib_nn.deprecated_flipped_sparse_softmax_cross_entropy_with_logits)
|
PhysicsTools/PythonAnalysis/test/testNumba.py | ckamtsikis/cmssw | 852 | 12620239 | <filename>PhysicsTools/PythonAnalysis/test/testNumba.py
#!/usr/bin/env python3
"""
A moving average function using @guvectorize.
"""
from __future__ import print_function
# from examples in numba documentation
# http://numba.pydata.org/numba-doc/dev/user/examples.html (0.33.0)
from builtins import range
import numpy as np
from numba import guvectorize
@guvectorize(['void(float64[:], intp[:], float64[:])'], '(n),()->(n)')
def move_mean(a, window_arr, out):
window_width = window_arr[0]
asum = 0.0
count = 0
for i in range(window_width):
asum += a[i]
count += 1
out[i] = asum / count
for i in range(window_width, len(a)):
asum += a[i] - a[i - window_width]
out[i] = asum / count
arr = np.arange(20, dtype=np.float64).reshape(2, 10)
print(arr)
print(move_mean(arr, 3))
|
lfs/shipping/__init__.py | michael-hahn/django-lfs | 345 | 12620240 | default_app_config = 'lfs.shipping.apps.LfsShippingAppConfig'
|
pkg/virtualenv_utils.py | bruce30262/idapkg | 125 | 12620243 | <reponame>bruce30262/idapkg
import os
import runpy
import subprocess
import sys
import tempfile
from hashlib import sha256
from .logger import getLogger
from .process import Popen, system
# extracted from https://pypi.org/simple/virtualenv/
VIRTUALENV_URL = 'https://files.pythonhosted.org/packages/b3/3a' \
'/3690099fc8f5137a1d879448c49480590bf6f0529eba7b72e3a34ffd8a31/virtualenv-16.7.10-py2.py3-none-any.whl'
HASH = '105893c8dc66b7817691c7371439ec18e3b6c5e323a304b5ed96cdd2e75cc1ec'
log = getLogger(__name__)
def _locate_python_win():
return os.path.join(sys.exec_prefix, 'python.exe')
def _locate_python():
if sys.platform == 'win32':
executable = _locate_python_win()
elif sys.platform == 'darwin':
executable = sys.executable
elif sys.platform == 'linux':
# TODO: test linux version
log.info(
'Linux virtualenv support is not tested. If this prints "Done!", it\'s working!')
executable = sys.executable
else:
assert False, "this platform is not supported"
return executable
class FixInterpreter(object):
def __init__(self):
pass
def __enter__(self):
self._executable, sys.executable = sys.executable, _locate_python()
self._popen, subprocess.Popen = subprocess.Popen, Popen
self._system, os.system = os.system, system
def __exit__(self, type_, value, traceback):
sys.executable = self._executable
subprocess.Popen = self._popen
os.system = self._system
def _install_virtualenv(path):
from .downloader import download
log.info('Downloading virtualenv from %r ...', VIRTUALENV_URL)
data = download(VIRTUALENV_URL).read()
if sha256(data).hexdigest() != HASH:
raise RuntimeError('virtualenv hash does not match!')
with tempfile.NamedTemporaryFile('wb', suffix=".zip", delete=False) as zf:
zf.write(data)
zf.flush()
sys.path.insert(0, zf.name)
import virtualenv
with FixInterpreter():
log.info('Creating environment using virtualenv...')
virtualenv.create_environment(path, site_packages=True)
log.info('Done!')
def prepare_virtualenv(path, tried=False):
# Normalize path first
path = os.path.abspath(path)
try:
# 1. Run activator in virtualenv
activator_path = os.path.join(
path, 'Scripts' if sys.platform == 'win32' else 'bin', 'activate_this.py')
if not os.path.isfile(activator_path):
raise ImportError()
runpy.run_path(activator_path)
# 2. Check if pip is in the virtualenv
import pip
if not os.path.abspath(pip.__file__).startswith(path):
raise ImportError()
except ImportError:
if tried:
log.error("Failed installing virtualenv!")
return
log.info('pip is not found in the virtualenv.')
log.info('Will install virtualenv at %r...', path)
# Install and try again
_install_virtualenv(path)
prepare_virtualenv(path, True)
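def _demo_prepare_virtualenv():
    # Minimal usage sketch (never called by the package itself). The directory
    # below is an illustrative assumption; the first call downloads virtualenv,
    # creates the environment there and activates it, while later calls only
    # re-activate it and verify that pip is importable from inside it.
    demo_path = os.path.join(tempfile.gettempdir(), 'idapkg-demo-env')
    prepare_virtualenv(demo_path)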
|
SQTemplate/SQBuilder Sample/UIBuilder/builder.py | sundayios/coderZsq.project.ios-master | 264 | 12620317 | import os, sys, json
def cur_file_dir():
path = sys.path[0]
if os.path.isdir(path):
return path
elif os.path.isfile(path):
return os.path.dirname(path)
file_json = open(cur_file_dir() + "/config.json", "r")
data_json = json.loads(file_json.read())
bean = str(data_json["bean"])
unit = str(data_json["unit"])
isImport = str(data_json["isImport"])
submodel = data_json["submodel"]
subview = data_json["subview"]
tableViewCell = data_json["tableViewCell"]
collectionViewCell = data_json["collectionViewCell"]
viewProperty = ''
viewLazyLoad = ''
viewSetup = ''
viewLayout = ''
viewImport = ''
classImport = ''
for key in data_json["layout"]:
value = data_json["layout"][key]
viewProperty += "@property (nonatomic,strong) " + str(value) + " * " + str(key) + ";\n"
viewLazyLoad += "- (" + str(value) + " *)" + str(key) + " {\n\n if (!_" + str(key) + ") {\n _" + str(key) + " = [" + str(value) + " new];\n }\n return _" + str(key) + ";\n}\n\n"
viewSetup += "\n [self addSubview:self." + str(key) + "];"
viewLayout += " CGFloat " + str(key) + "X = <#length#>;\n CGFloat " + str(key) + "Y = <#length#>;\n CGFloat " + str(key) + "W = <#length#>;\n CGFloat " + str(key) + "H = <#length#>;\n _" + str(key) + ".frame = CGRectMake(" + str(key) + "X, " + str(key) + "Y, " + str(key) + "W, " + str(key) + "H);\n\n"
if isImport == 'Yes':
viewImport += '#import "' + str(value) + '.h"\n'
classImport += '@class ' + str(value) + ';\n'
model = ''
for key in data_json["model"]:
type = ''
modified = ''
value = data_json["model"][key]
if value == 'String':
type = 'NSString * '
modified = 'copy'
elif value == 'Int':
type = 'NSInteger '
modified = 'assign'
elif value == 'Float':
type = 'CGFloat '
modified = 'assign'
elif value == 'Bool':
type = 'BOOL '
modified = 'assign'
elif value == 'Array':
type = 'NSArray * '
modified = 'strong'
elif value == 'Dictionary':
type = 'NSDictionary * '
modified = 'strong'
else:
type = value
modified = 'strong'
model += '\n@property (nonatomic,' + modified + ') ' + type + str(key) + ';'
file_json.close()
def builder(path, file, isbean):
r = open(cur_file_dir() + '/UITemplate/' + path, 'r')
d = r.read()
r.close()
if isbean == 'Yes':
w = open(cur_file_dir() + '/' + bean + file, 'w')
else:
w = open(cur_file_dir() + '/' + unit + file, 'w')
w.write(d.replace("<#ViewProperty#>", viewProperty).replace("<#Unit#>", unit).replace("<#ViewLazyLoad#>", viewLazyLoad).replace("<#ViewSetup#>",viewSetup).replace("<#ViewLayout#>",viewLayout).replace("<#SubUnit#>", bean).replace("<#ViewImport#>", viewImport).replace("<#ModelInterface#>", model).replace("<#ClassImport#>", classImport))
w.close()
if subview == "Yes":
builder("SubviewTemplate.h", "View.h", "No")
builder("SubviewTemplate.m", "View.m", "No")
if tableViewCell == "Yes":
builder("TableViewCellTemplate.h", "Cell.h", "No")
builder("TableViewCellTemplate.m", "Cell.m", "No")
builder("CellAdapterTemplate.h", "CellAdapter.h", "No")
if submodel == 'Yes':
builder("SubmodelTemplate.h", "Model.h", "Yes")
builder("SubmodelTemplate.m", "Model.m", "Yes")
if collectionViewCell == 'Yes':
builder("CollectionViewTemplate.h", "View.h", "No")
builder("CollectionViewTemplate.m", "View.m", "No")
builder("CollectionViewCellTemplate.h", "Cell.h", "No")
builder("CollectionViewCellTemplate.m", "Cell.m", "No")
builder("CellAdapterTemplate.h", "CellAdapter.h", "No")
|
matrix-python-project/cover_generator/typesetting/model/three.py | hokaso/hocassian-media-matrix | 141 | 12620321 | <reponame>hokaso/hocassian-media-matrix
import sys, os, time, json, random
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from cover_generator.typesetting.more import More
from cover_generator.typesetting.mark import Mark
from cover_generator.typesetting.build import Build
from utils.snow_id import SnowId
sys.path.append(os.getcwd())
class Three(object):
def __init__(self, folder_key):
self.image_list = None
self.rank_model = None
self.tb = None
with open("cover_generator/typesetting/style.json", 'r') as f0:
style_config = json.load(f0)
self.model = style_config["three"]
self.func_map = {
1: self.horizontal_build,
2: self.vertical_build,
3: self.triple_vertical_build,
4: self.triple_horizontal_build
}
self._build = Build(folder_key, folder_key + "_temp")
def horizontal(self, image_list):
return More(image_list, self.model[0]["unit_detail"], "31").main()
def vertical(self, image_list):
return More(image_list, self.model[1]["unit_detail"], "32").main()
def triple_vertical(self, image_list):
return More(image_list, self.model[2]["unit_detail"], "33").main()
def triple_horizontal(self, image_list):
return More(image_list, self.model[3]["unit_detail"], "34").main()
def build(self, image_list, model):
self.tb = Image.open("cover_generator/background.jpg")
self.image_list = image_list
self.rank_model = model
self.func_map[int(model["model_id"][1])]()
def horizontal_build(self):
        # Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[0]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
        # Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[0]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
        # Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[0]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
        # Randomly shuffle the images that share the same width and height
pic_list = [pic_1, pic_2, pic_3]
random.shuffle(pic_list)
        # Save
self.tb.paste(pic_list[0], (0, 0))
self.tb.paste(pic_list[1], (0, 480))
self.tb.paste(pic_list[2], (0, 960))
self._build.save(self.tb)
def vertical_build(self):
        # Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[1]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
        # Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[1]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
        # Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[1]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
        # Randomly shuffle the images that share the same width and height
pic_list = [pic_1, pic_2, pic_3]
random.shuffle(pic_list)
        # Save
self.tb.paste(pic_list[0], (0, 0))
self.tb.paste(pic_list[1], (360, 0))
self.tb.paste(pic_list[2], (720, 0))
self._build.save(self.tb)
def triple_vertical_build(self):
        # Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[2]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
        # Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[2]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
        # Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[2]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
        # Randomly shuffle the images that share the same width and height
pic_list = [pic_1, pic_2]
random.shuffle(pic_list)
        # The layout also needs to be shuffled
kind = random.randint(0, 1)
        # Save
if kind == 0:
self.tb.paste(pic_list[0], (0, 0))
self.tb.paste(pic_list[1], (0, 720))
self.tb.paste(pic_3, (540, 0))
else:
self.tb.paste(pic_list[0], (540, 0))
self.tb.paste(pic_list[1], (540, 720))
self.tb.paste(pic_3, (0, 0))
self._build.save(self.tb)
def triple_horizontal_build(self):
        # Paste the first image
image = self.image_list[self.rank_model["model_match"][0][1]]
model = self.model[3]["unit_detail"][0]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_1 = self._build.build_up(image["filename"], rate, area)
        # Paste the second image
image = self.image_list[self.rank_model["model_match"][1][1]]
model = self.model[3]["unit_detail"][1]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_2 = self._build.build_up(image["filename"], rate, area)
        # Paste the third image
image = self.image_list[self.rank_model["model_match"][2][1]]
model = self.model[3]["unit_detail"][2]
rate, area = Mark(image["width"], image["height"], model["width"], model["height"]).crop()
pic_3 = self._build.build_up(image["filename"], rate, area)
        # Randomly shuffle the images that share the same width and height
pic_list = [pic_1, pic_2]
random.shuffle(pic_list)
        # The layout also needs to be shuffled
kind = random.randint(0, 1)
        # Save
if kind == 0:
self.tb.paste(pic_list[0], (0, 0))
self.tb.paste(pic_list[1], (540, 0))
self.tb.paste(pic_3, (0, 720))
else:
self.tb.paste(pic_list[0], (0, 720))
self.tb.paste(pic_list[1], (540, 720))
self.tb.paste(pic_3, (0, 0))
self._build.save(self.tb)
|
evaluate/__init__.py | xiabinfeng/reid_baseline_with_syncbn | 155 | 12620344 | <reponame>xiabinfeng/reid_baseline_with_syncbn<filename>evaluate/__init__.py
from .eval_reid import eval_func
from .re_ranking import re_ranking
import torch
def euclidean_dist(x, y):
"""
Args:
x: pytorch Variable, with shape [m, d]
y: pytorch Variable, with shape [n, d]
Returns:
dist: pytorch Variable, with shape [m, n]
"""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
dist.addmm_(1, -2, x, y.t())
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
return dist
def re_rank(q, g):
qq_dist = euclidean_dist(q, q).numpy()
gg_dist = euclidean_dist(g, g).numpy()
qg_dist = euclidean_dist(q, g).numpy()
distmat = re_ranking(qg_dist, qq_dist, gg_dist)
return distmat
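def _demo_distances():
    # Minimal usage sketch (never called by the package itself). The query and
    # gallery features are random placeholders with an assumed
    # [num_samples, feat_dim] layout; real features come from the trained model.
    qf = torch.randn(8, 256)
    gf = torch.randn(32, 256)
    print(euclidean_dist(qf, gf).shape)  # torch.Size([8, 32])
    print(re_rank(qf, gf).shape)         # expected (8, 32) after re-ranking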
|
examples/example_tf_data_validation/main.py | flylo/spotify-tensorflow | 123 | 12620360 | # -*- coding: utf-8 -*-
#
# Copyright 2017-2019 Spotify AB.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import tempfile
from os.path import join as pjoin
from examples.examples_utils import get_taxi_data_dir
from spotify_tensorflow.tfx.tfdv import TfDataValidator
if __name__ == "__main__":
taxi_data = get_taxi_data_dir()
tmp_dir = tempfile.mkdtemp()
schema = pjoin(taxi_data, "chicago_taxi_schema.pbtxt")
schema_snapshot_path = pjoin(taxi_data, "schema_snapshot.pb")
stats_file = pjoin(taxi_data, "stats.pb")
anomalies_path = pjoin(taxi_data, "anomalies.pb")
pipeline_args = [
"--temp_location=%s" % tmp_dir,
"--staging_location=%s" % tmp_dir,
"--runner=DirectRunner"
]
validator = TfDataValidator(schema, taxi_data)
validator.write_stats_and_schema(pipeline_args)
validator.validate_stats_against_schema()
os.remove(stats_file)
os.remove(schema_snapshot_path)
os.remove(anomalies_path)
|
ssd/layers/functions/__init__.py | maddie157/BiDet | 161 | 12620379 | <gh_stars>100-1000
from .detection import Detect, DetectPrior
from .prior_box import PriorBox
__all__ = ['Detect', 'DetectPrior', 'PriorBox']
|
torchlayers/_name.py | ghost2718/torchlayers | 573 | 12620383 | <reponame>ghost2718/torchlayers
_name = "torchlayers"
|
paper/rev2q3-count-highest-impact.py | zihhuafang/slivar_vep105 | 162 | 12620426 | from cyvcf2 import VCF
import collections
import sys
order = [line.strip() for line in open(sys.argv[1], 'rt') if line[0] != '#']
counts = collections.defaultdict(int)
for v in VCF(sys.argv[2]):
hi = v.INFO.get("highest_impact_order")
counts[order[hi]] += 1
for k, v in counts.items():
print(f"{k}\t{v}")
|
python/ql/src/Classes/ConflictingAttributesInBaseClasses.py | vadi2/codeql | 4,036 | 12620432 | <filename>python/ql/src/Classes/ConflictingAttributesInBaseClasses.py
class TCPServer(object):
def process_request(self, request, client_address):
self.do_work(request, client_address)
self.shutdown_request(request)
class ThreadingMixIn:
"""Mix-in class to handle each request in a new thread."""
def process_request(self, request, client_address):
"""Start a new thread to process the request."""
t = threading.Thread(target = self.do_work, args = (request, client_address))
t.daemon = self.daemon_threads
t.start()
class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
|
desktop/core/ext-py/docutils-0.14/test/test_transforms/test_messages.py | kokosing/hue | 5,079 | 12620452 | <filename>desktop/core/ext-py/docutils-0.14/test/test_transforms/test_messages.py
#! /usr/bin/env python
# $Id: test_messages.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: <NAME> <<EMAIL>>
# Copyright: This module has been placed in the public domain.
"""
Tests for docutils.transforms.universal.Messages.
"""
from __init__ import DocutilsTestSupport
from docutils.transforms.universal import Messages
from docutils.transforms.references import Substitutions
from docutils.parsers.rst import Parser
def suite():
parser = Parser()
s = DocutilsTestSupport.TransformTestSuite(parser)
s.generateTests(totest)
return s
totest = {}
totest['system_message_sections'] = ((Substitutions, Messages), [
["""\
This |unknown substitution| will generate a system message, thanks to
the ``Substitutions`` transform. The ``Messages`` transform will
generate a "System Messages" section.
(A second copy of the system message is tacked on to the end of the
document by the test framework.)
""",
"""\
<document source="test data">
<paragraph>
This \n\
<problematic ids="id2" refid="id1">
|unknown substitution|
will generate a system message, thanks to
the \n\
<literal>
Substitutions
transform. The \n\
<literal>
Messages
transform will
generate a "System Messages" section.
<paragraph>
(A second copy of the system message is tacked on to the end of the
document by the test framework.)
<section classes="system-messages">
<title>
Docutils System Messages
<system_message backrefs="id2" ids="id1" level="3" line="1" source="test data" type="ERROR">
<paragraph>
Undefined substitution referenced: "unknown substitution".
"""],
])
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
|
inquirer/__init__.py | SteinRobert/python-inquirer | 640 | 12620462 | <reponame>SteinRobert/python-inquirer<gh_stars>100-1000
from __future__ import print_function
try:
from .prompt import prompt
from .questions import (
Text,
Editor,
Password,
Confirm,
List,
Checkbox,
Path,
load_from_dict,
load_from_json,
load_from_list,
)
from .shortcuts import text, editor, password, confirm, list_input, checkbox
__all__ = [
"prompt",
"Text",
"Editor",
"Password",
"Confirm",
"List",
"Checkbox",
"Path",
"load_from_list",
"load_from_dict",
"load_from_json",
"text",
"editor",
"password",
"confirm",
"list_input",
"checkbox",
]
except ImportError as e:
print("An error was found, but returning just with the version: %s" % e)
|
mmtbx/refinement/real_space/adp.py | dperl-sol/cctbx_project | 155 | 12620471 | <gh_stars>100-1000
from __future__ import absolute_import, division, print_function
import mmtbx.refinement.real_space.utils
import mmtbx.refinement.utils
from scitbx.array_family import flex
from cctbx import adptbx
from libtbx import easy_mp
from mmtbx import bulk_solvent
from libtbx.test_utils import approx_equal
from six.moves import range
from cctbx import crystal
from mmtbx.refinement import adp_refinement
from cctbx import adp_restraints
from libtbx import group_args
from mmtbx.ncs import tncs
import boost_adaptbx.boost.python as bp
cctbx_maptbx_ext = bp.import_ext("cctbx_maptbx_ext")
def map_and_model_to_fmodel(map_data, xray_structure, atom_radius, d_min,
reset_adp=True):
box = mmtbx.utils.extract_box_around_model_and_map(
xray_structure = xray_structure,
map_data = map_data,
box_cushion = atom_radius)
box.apply_mask_inplace(atom_radius = atom_radius)
f_obs_complex = box.box_map_coefficients(d_min = d_min)
f_obs = abs(f_obs_complex)
if(flex.mean(f_obs.data())<1.e-6): return None
xrs = box.xray_structure_box.deep_copy_scatterers()
if(reset_adp):
vals_init = xrs.extract_u_iso_or_u_equiv()
xrs = xrs.set_b_iso(value=0)
assert approx_equal(flex.mean(xrs.extract_u_iso_or_u_equiv()),0.)
f_calc = f_obs.structure_factors_from_scatterers(
xray_structure = xrs).f_calc()
sc = flex.sum(abs(f_obs).data()*abs(f_calc).data())/ \
flex.sum(abs(f_calc).data()*abs(f_calc).data())
f_calc = f_calc.array(data = f_calc.data()*sc)
o = bulk_solvent.complex_f_kb_scaled(
f1 = f_obs_complex.data(),
f2 = f_calc.data(),
b_range = flex.double(range(5,505,5)),
ss = 1./flex.pow2(f_calc.d_spacings().data()) / 4.)
xrs = xrs.set_b_iso(value=o.b())
k_isotropic = flex.double(f_calc.data().size(), o.k())
if(o.k()<1.e-6):
k_isotropic = flex.double(f_calc.data().size(), 1)
xrs.set_u_iso(values = vals_init)
fmodel = mmtbx.f_model.manager(f_obs = f_obs, xray_structure = xrs)
if(reset_adp):
fmodel.update_core(k_isotropic = k_isotropic)
fmodel.update(target_name="ls_wunit_k1")
fmodel.update_all_scales(update_f_part1=False, apply_back_trace=True,
remove_outliers=False)
return fmodel
def get_plain_pair_sym_table(crystal_symmetry, sites_frac, plain_pairs_radius=5):
asu_mappings = crystal.symmetry.asu_mappings(crystal_symmetry,
buffer_thickness = plain_pairs_radius)
special_position_settings = crystal.special_position_settings(
crystal_symmetry = crystal_symmetry)
sites_cart = crystal_symmetry.unit_cell().orthogonalize(sites_frac)
site_symmetry_table = special_position_settings.site_symmetry_table(
sites_cart = sites_cart)
asu_mappings.process_sites_frac(
original_sites = sites_frac,
site_symmetry_table = site_symmetry_table)
pair_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
pair_asu_table.add_all_pairs(distance_cutoff = plain_pairs_radius)
return pair_asu_table.extract_pair_sym_table()
class tg(object):
def __init__(self, fmodel, x, restraints_weight):
self.restraints_weight = restraints_weight
self.fmodel = fmodel
self.plain_pair_sym_table = get_plain_pair_sym_table(
crystal_symmetry = self.fmodel.xray_structure.crystal_symmetry(),
sites_frac = self.fmodel.xray_structure.sites_frac())
self.adp_iso_params = \
adp_refinement.adp_restraints_master_params.extract().iso
self.fmodel.xray_structure.scatterers().flags_set_grads(state=False)
self.fmodel.xray_structure.scatterers().flags_set_grad_u_iso(
iselection = self.fmodel.xray_structure.all_selection().iselection())
# required fields
self.x = x
self.t = None
self.g = None
self.d = None
self.use_curvatures=False
#
self.weight = self._weight()
self.tgo = self._compute(x = self.x)
self.update_target_and_grads(x=x)
def _weight(self):
num = self._restraints().gradients.norm()
den = self._data().gradient_xray.norm()
if(den==0): return 1
return num/den
def _restraints(self):
return adp_restraints.energies_iso(
plain_pair_sym_table = self.plain_pair_sym_table,
xray_structure = self.fmodel.xray_structure,
parameters = self.adp_iso_params,
compute_gradients = True,
use_u_local_only = self.adp_iso_params.use_u_local_only,
use_hd = False)
def _data(self):
fmodels = mmtbx.fmodels(fmodel_xray = self.fmodel)
return fmodels.target_and_gradients(compute_gradients=True)
def _compute(self, x):
self.fmodel.xray_structure.set_b_iso(values = x)
self.fmodel.update_xray_structure(update_f_calc = True)
R = self._restraints()
D = self._data()
self.tgo = group_args(
target = D.target()*self.weight + R.target*self.restraints_weight,
gradient = D.gradient_xray*self.weight + R.gradients*self.restraints_weight)
return self.tgo
def update(self, x):
self.update_target_and_grads(x = x)
def update_target_and_grads(self, x):
self.x = x
self.tgo = self._compute(x=self.x)
self.t = self.tgo.target
self.g = self.tgo.gradient
def target(self): return self.t
def gradients(self): return self.g
def gradient(self): return self.gradients()
class ncs_aware_refinement(object):
def __init__(self, map_model_manager, d_min, atom_radius, nproc=1,
log = None, individual = True, restraints_weight = 1):
self.mmm = map_model_manager
self.nproc = nproc
self.d_min = d_min
self.atom_radius = atom_radius
self.log = log
self.individual = individual
self.restraints_weight = restraints_weight
#
ncs_groups = self.mmm.model().get_ncs_groups()
if(ncs_groups is None or len(ncs_groups)==0):
values = self.run_one()
self.mmm.model().set_b_iso(values = values)
else:
values = self.mmm.model().get_b_iso()
for i, g in enumerate(ncs_groups):
values_g = self.run_one(selection = g.master_iselection)
values = values.set_selected(g.master_iselection, values_g)
for j, c in enumerate(g.copies):
values = values.set_selected(c.iselection, values_g)
self.mmm.model().set_b_iso(values = values)
def run_one(self, selection=None):
model = self.mmm.model()
if(selection is not None): model = model.select(selection)
values = model.get_b_iso()
model.get_hierarchy().atoms().reset_i_seq()
if(self.nproc==1):
args = [model,]
return self.run_one_one(args = args)
else:
argss = []
selections = []
for sel in model.macromolecule_plus_hetatms_by_chain_selections():
model_i = model.select(sel)
if(model_i.size()==1):
chain_ids = " ".join([c.id for c in model_i.get_hierarchy().chains()])
print("Skip one atom model, chains: (%s)"%chain_ids, file=self.log)
continue
argss.append([model_i,])
selections.append(sel) # XXX CAN BE BIG
stdout_and_results = easy_mp.pool_map(
processes = self.nproc,
fixed_func = self.run_one_one,
args = argss,
func_wrapper = "buffer_stdout_stderr")
for i, result in enumerate(stdout_and_results):
values = values.set_selected(selections[i], result[1])
model.set_b_iso(values = values)
return values
def run_one_one(self, args):
model = args[0]
log = self.log
if(self.nproc>1): log = None
#
fmodel = map_and_model_to_fmodel(
map_data = self.mmm.map_data().deep_copy(),
xray_structure = model.get_xray_structure(),
atom_radius = self.atom_radius,
d_min = self.d_min)
if(fmodel is None):
return model.get_xray_structure().extract_u_iso_or_u_equiv()*adptbx.u_as_b(1.)
# selections for group ADP
ph_box = model.get_hierarchy()
ph_box.atoms().reset_i_seq()
group_adp_sel = []
for rg in ph_box.residue_groups():
group_adp_sel.append(rg.atoms().extract_i_seq())
#
b_isos = fmodel.xray_structure.extract_u_iso_or_u_equiv()*adptbx.u_as_b(1.)
if(flex.max(b_isos)<1.e-2):
b_isos = flex.random_double(model.size())*10
model.set_b_iso(values = b_isos)
fmodel.xray_structure.set_b_iso(values = b_isos)
fmodel.update_xray_structure(xray_structure = fmodel.xray_structure,
update_f_calc = True)
#
number_of_macro_cycles = 3
if(self.individual): number_of_macro_cycles = 1
group_b_manager = mmtbx.refinement.group.manager(
fmodel = fmodel,
selections = group_adp_sel,
convergence_test = False,
max_number_of_iterations = 50,
number_of_macro_cycles = number_of_macro_cycles,
run_finite_differences_test = False,
use_restraints = True,
refine_adp = True,
log = log)
fmodel.update_all_scales(update_f_part1=False, apply_back_trace=True,
remove_outliers=False)
b_isos = fmodel.xray_structure.extract_u_iso_or_u_equiv()*adptbx.u_as_b(1.)
model.set_b_iso(values = b_isos)
#
if(self.individual):
if(log is not None):
print("r_work (start): %6.4f rms_B_bonded: %4.2f"%(fmodel.r_work(),
model.rms_b_iso_or_b_equiv()), file=log)
rw = self.restraints_weight
flipped = False
for it in range(1,20):
x = fmodel.xray_structure.extract_u_iso_or_u_equiv()*adptbx.u_as_b(1.)
lower = flex.double(x.size(), 0)
upper = flex.double(x.size(), flex.max(x)*2)
calculator = tg(
fmodel = fmodel, x = x, restraints_weight = rw)
rw_prev = rw
b_isos_prev = b_isos
rms_b_prev = model.rms_b_iso_or_b_equiv()
m = tncs.minimizer(
potential = calculator,
use_bounds = 2,
lower_bound = lower,
upper_bound = upper,
initial_values = x).run()
b_isos = fmodel.xray_structure.extract_u_iso_or_u_equiv()*adptbx.u_as_b(1.)
model.set_b_iso(values = b_isos)
if(rms_b_prev is not None):
rms_b = model.rms_b_iso_or_b_equiv()
if(rms_b<5):
rw = rw/2
if(flipped):
b_isos = b_isos_prev
model.set_b_iso(values = b_isos)
break
else:
if(rms_b > rms_b_prev):
b_isos = b_isos_prev
model.set_b_iso(values = b_isos)
break
rw = rw*2
flipped = True
if(log is not None):
print("r_work: %6.4f rms_B_bonded: %4.2f restraints_weight: %6.4f"%(
fmodel.r_work(), rms_b, rw), file=log)
if(rms_b_prev is None): break
#
fmodel.xray_structure.set_b_iso(values = b_isos)
fmodel.update_xray_structure(xray_structure = fmodel.xray_structure,
update_f_calc = True)
if(log is not None):
print("r_work (final): %6.4f"%fmodel.r_work(), file=log)
#
return b_isos
|
kapitan/inputs/remove.py | laserb/kapitan | 1,413 | 12620496 | #!/usr/bin/env python3
# Copyright 2019 The Kapitan Authors
# SPDX-FileCopyrightText: 2020 The Kapitan Authors <<EMAIL>>
#
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import shutil
from distutils.dir_util import copy_tree
from kapitan.inputs.base import InputType
logger = logging.getLogger(__name__)
class Remove(InputType):
def __init__(self, compile_path, search_paths, ref_controller):
super().__init__("remove", compile_path, search_paths, ref_controller)
def compile_file(self, file_path, compile_path, ext_vars, **kwargs):
"""
        Remove the file or directory at file_path from the compiled output.
        file_path can be either a file or a directory.
"""
try:
logger.debug("Removing %s", file_path)
if os.path.isfile(file_path):
os.remove(file_path)
else:
shutil.rmtree(file_path)
except OSError as e:
logger.exception("Input dir not removed. Error: %s", e)
def default_output_type(self):
# no output_type options for remove
return None
|
Validation/EcalDigis/python/ecalUnsuppressedMixingModuleValidation_cff.py | ckamtsikis/cmssw | 852 | 12620508 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from Validation.EcalDigis.ecalMixingModuleValidation_cfi import *
ecalMixingModuleValidation.EBdigiCollection = 'simEcalUnsuppressedDigis'
ecalMixingModuleValidation.EEdigiCollection = 'simEcalUnsuppressedDigis'
ecalMixingModuleValidation.ESdigiCollection = 'simEcalUnsuppressedDigis'
|
phidl/__init__.py | giumc/phidl | 102 | 12620511 | <reponame>giumc/phidl<filename>phidl/__init__.py<gh_stars>100-1000
from phidl.device_layout import Device, Group, Path, CrossSection, Port, Layer, LayerSet
from phidl.device_layout import make_device
from phidl.quickplotter import quickplot, quickplot2, set_quickplot_options
from phidl.device_layout import __version__, reset
from phidl.geometry import device_lru_cache
|
demo-project/src/demo_project/pipelines/modelling/nodes.py | deepyaman/kedro-viz | 125 | 12620513 | import importlib
import logging
from typing import Any, Dict, Tuple
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
def split_data(data: pd.DataFrame, split_options: Dict) -> Tuple:
"""Splits data into features and targets training and test sets.
Args:
data: Data containing features and target.
        split_options: Split options (target, test_size, random_state) defined in parameters.yml.
Returns:
Split data.
"""
target_variable = split_options["target"]
independent_variables = [x for x in data.columns if x != target_variable]
test_size = split_options["test_size"]
random_state = split_options["random_state"]
logger = logging.getLogger(__name__)
logger.info(
f"Splitting data for the following independent variables "
f"{independent_variables} against the target of '{target_variable}' "
f"with a test sized of {test_size} and a random state of "
f"'{random_state}'"
)
X = data[independent_variables]
y = data[target_variable]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, random_state=random_state
)
return X_train, X_test, y_train, y_test
def train_model(
X_train: pd.DataFrame, y_train: pd.Series, model_options: Dict[str, Any]
) -> Tuple[BaseEstimator, Dict[str, Any]]:
"""Trains the linear regression model.
Args:
X_train: Training data of independent features.
        y_train: Training data for price.
        model_options: Model options (module, class, kwargs) defined in parameters.yml.
Returns:
Trained model.
"""
# Parse parameters
model_module = model_options.get("module")
model_type = model_options.get("class")
model_arguments = model_options.get("kwargs")
# Import and instantiate Sklearn regressor object
regressor_class = getattr(importlib.import_module(model_module), model_type)
regressor_instance = regressor_class(**model_arguments)
logger = logging.getLogger(__name__)
logger.info(f"Fitting model of type {type(regressor_instance)}")
# Fit model
regressor_instance.fit(X_train, y_train)
flat_model_params = {**{"model_type": model_type}, **model_arguments}
return regressor_instance, flat_model_params
def evaluate_model(
regressor: BaseEstimator,
X_test: pd.DataFrame,
y_test: pd.Series,
) -> Dict[str, float]:
"""Calculates and logs the coefficient of determination.
Args:
regressor: Trained model.
X_test: Testing data of independent features.
y_test: Testing data for price.
"""
y_pred = regressor.predict(X_test)
score = r2_score(y_test, y_pred)
logger = logging.getLogger(__name__)
logger.info(
f"Model has a coefficient R^2 of {score:.3f} on test data using a "
f"regressor of type '{type(regressor)}'"
)
return {"r2_score": score}
|
OSLSM/code/ss_settings.py | vamsirk/OneShotSemanticSegmentation | 115 | 12620518 | <reponame>vamsirk/OneShotSemanticSegmentation
import numpy as np
import os.path as osp
from util import Map
from db_path import *
# Classes in pascal dataset
PASCAL_CATS = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car' , 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person', 'potted plant', 'sheep', 'sofa',
'train', 'tv/monitor']
def get_cats(split, fold, num_folds=4):
'''
Returns a list of categories (for training/test) for a given fold number
Inputs:
split: specify train/val
fold : fold number, out of num_folds
        num_folds: number of folds to split the set of image classes into. In the BMVC paper, we use 4 folds
'''
num_cats = len(PASCAL_CATS)
assert(num_cats%num_folds==0)
val_size = int(num_cats/num_folds)
assert(fold<num_folds)
val_set = [ fold*val_size+v for v in range(val_size)]
train_set = [x for x in range(num_cats) if x not in val_set]
if split=='train':
return [PASCAL_CATS[x] for x in train_set]
else:
return [PASCAL_CATS[x] for x in val_set]
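# For example, with the default num_folds=4 (5 validation classes per fold):
#   get_cats('test', 0)  -> ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle']
#   get_cats('train', 0) -> the remaining 15 PASCAL categories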
########################### The default setting ##########################################
empty_profile = Map(
###############################################
# For transforming video stream, not used
video_base_trans=None,
video_frame_trans=None,
video_noise_trans =None,
###############################################
# For transforming input images, not used
image_base_trans=None,
image_frame_trans=None,
image_noise_trans=None,
###############################################
# Do not change this part
first_label_params=[('first_label', 1., 0.)],
second_label_params=[('second_label', 1., 0.)],
###############################################
k_shot=1,
first_shape=None,
second_shape=None,
shape_divisible=1,
output_type=None,
    read_mode=None, # Either "shuffle" (for training) or "deterministic" (for testing, random seed fixed)
bgr=True,
scale_256=True,
mean = (0.40787055, 0.45752459, 0.4810938),
first_label_scale= 1,
first_label_mean = 0,
batch_size = 1,
video_sets=[],
image_sets=[],
areaRng = [0 , np.inf],
default_pascal_cats = PASCAL_CATS,
default_coco_cats = None,
pascal_cats = PASCAL_CATS,
coco_cats = None,
coco_path = None,
pascal_path = PASCAL_PATH,
sbd_path = SBD_PATH,
worker_num = 1)
########################### Settings for reproducing experiments ###########################
#### fold 0 ####
# Setting for training (on **training images**)
fold0_train = Map(empty_profile,
output_type='image_pair',
read_mode='shuffle',
image_sets=['sbd_training', 'pascal_training'],
pascal_cats = get_cats('train',0),
first_shape=[224,224],
second_shape=None)
# Setting for testing on **test images** in unseen image classes (in total 5 classes), 5-shot
fold0_5shot_test = Map(empty_profile,
output_type='image_pair',
db_cycle = 1000,
read_mode='deterministic',
image_sets=['pascal_test'],
pascal_cats = get_cats('test',0),
first_shape=[224,224],
second_shape=[500,500],
k_shot=5)
## Setting for testing on **test images** in unseen image classes (in total 5 classes), 1-shot
fold0_1shot_test = Map(fold0_5shot_test, k_shot=1)
#### fold 1 ####
fold1_train = Map(fold0_train, pascal_cats=get_cats('train', 1))
fold1_5shot_test = Map(fold0_5shot_test, pascal_cats=get_cats('test', 1))
fold1_1shot_test = Map(fold1_5shot_test, k_shot=1)
#### fold 2 ####
fold2_train = Map(fold0_train, pascal_cats=get_cats('train', 2))
fold2_5shot_test = Map(fold0_5shot_test, pascal_cats=get_cats('test', 2))
fold2_1shot_test = Map(fold2_5shot_test, k_shot=1)
#### fold 3 ####
fold3_train = Map(fold0_train, pascal_cats=get_cats('train', 3))
fold3_5shot_test = Map(fold0_5shot_test, pascal_cats=get_cats('test', 3))
fold3_1shot_test = Map(fold3_5shot_test, k_shot=1)
|
scripts/backup/run_backup/backup_swift.py | chopeen/dataverse | 681 | 12620524 | import io
import re
import swiftclient
from config import (ConfigSectionMap)
def backup_file_swift (file_input, dataset_authority, dataset_identifier, storage_identifier):
auth_url = ConfigSectionMap("Backup")['swiftauthurl']
auth_version = ConfigSectionMap("Backup")['swiftauthversion']
user = ConfigSectionMap("Backup")['swiftuser']
tenant = ConfigSectionMap("Backup")['swifttenant']
key = ConfigSectionMap("Backup")['swiftkey']
conn = swiftclient.Connection(
authurl=auth_url,
user=user,
key=key,
tenant_name=tenant,
auth_version=auth_version
)
container_name = dataset_authority + ":" + dataset_identifier
    conn.put_container(container_name)
conn.put_object(container_name, storage_identifier, file_input)
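def example_backup_file_swift():
    # Minimal usage sketch (not called anywhere). The local file path and the
    # dataset/storage identifiers below are illustrative assumptions, and the
    # [Backup] section read by ConfigSectionMap must already hold valid swift
    # credentials for the call to succeed.
    with open("/tmp/example.dat", "rb") as file_input:
        backup_file_swift(file_input, "doi:10.5072", "FK2/EXAMPLE", "example.dat")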
|
aminator/plugins/blockdevice/null.py | vijay-khanna/Netflix-aminator | 721 | 12620527 | # -*- coding: utf-8 -*-
#
#
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator.plugins.blockdevice.null
==================================
null block device manager
"""
import logging
from aminator.plugins.blockdevice.base import BaseBlockDevicePlugin
__all__ = ('NullBlockDevicePlugin',)
log = logging.getLogger(__name__)
class NullBlockDevicePlugin(BaseBlockDevicePlugin):
_name = 'null'
def __enter__(self):
return '/dev/null'
def __exit__(self, typ, val, trc):
if typ:
log.debug('Exception encountered in Null block device plugin',
exc_info=(typ, val, trc))
return False
|
quarkc/test/ffi/expected/py/org_example_foo/org_example_foo_md/__init__.py | datawire/quark | 112 | 12620552 | <gh_stars>100-1000
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from builtins import str as unicode
from quark_runtime import *
_lazyImport.plug("org_example_foo_md.org_example_foo_Foo_test_Method")
import quark.reflect
class org_example_foo_Foo_test_Method(quark.reflect.Method):
def _init(self):
quark.reflect.Method._init(self)
def __init__(self):
super(org_example_foo_Foo_test_Method, self).__init__(u"quark.void", u"test", _List([]));
def invoke(self, object, args):
obj = _cast(object, lambda: org.example.foo.Foo);
(obj).test();
return None
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
class org_example_foo_Foo(quark.reflect.Class):
def _init(self):
quark.reflect.Class._init(self)
def __init__(self):
super(org_example_foo_Foo, self).__init__(u"org.example.foo.Foo");
(self).name = u"org.example.foo.Foo"
(self).parameters = _List([])
(self).fields = _List([])
(self).methods = _List([org_example_foo_Foo_test_Method()])
(self).parents = _List([u"quark.Object"])
def construct(self, args):
return org.example.foo.Foo()
def isAbstract(self):
return False
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
org_example_foo_Foo.singleton = org_example_foo_Foo()
class Root(_QObject):
def _init(self):
pass
def __init__(self): self._init()
def _getClass(self):
return _cast(None, lambda: unicode)
def _getField(self, name):
return None
def _setField(self, name, value):
pass
Root.org_example_foo_Foo_md = org_example_foo_Foo.singleton
def _lazy_import_org_example_foo():
import org.example.foo
globals().update(locals())
_lazyImport("import org.example.foo", _lazy_import_org_example_foo)
_lazyImport.pump("org_example_foo_md.org_example_foo_Foo_test_Method")
|
src/hdusd/ui/panels.py | VascoPi/BlenderUSDHydraAddon | 212 | 12620555 | #**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
import bpy
def get_panels():
# follow the Cycles model of excluding panels we don't want
exclude_panels = {
'DATA_PT_area',
'DATA_PT_context_light',
'DATA_PT_falloff_curve',
'DATA_PT_light',
'NODE_DATA_PT_light',
'DATA_PT_shadow',
'DATA_PT_spot',
'DATA_PT_sunsky',
'MATERIAL_PT_context_material',
'MATERIAL_PT_diffuse',
'MATERIAL_PT_flare',
'MATERIAL_PT_halo',
'MATERIAL_PT_mirror',
'MATERIAL_PT_options',
'MATERIAL_PT_pipeline',
'MATERIAL_PT_preview',
'MATERIAL_PT_shading',
'MATERIAL_PT_shadow',
'MATERIAL_PT_specular',
'MATERIAL_PT_sss',
'MATERIAL_PT_strand',
'MATERIAL_PT_transp',
'MATERIAL_PT_volume_density',
'MATERIAL_PT_volume_integration',
'MATERIAL_PT_volume_lighting',
'MATERIAL_PT_volume_options',
'MATERIAL_PT_volume_shading',
'MATERIAL_PT_volume_transp',
'RENDERLAYER_PT_layer_options',
'RENDERLAYER_PT_layer_passes',
'RENDERLAYER_PT_views',
'RENDER_PT_antialiasing',
'RENDER_PT_bake',
'RENDER_PT_motion_blur',
'RENDER_PT_performance',
'RENDER_PT_freestyle',
'RENDER_PT_post_processing',
'RENDER_PT_shading',
'RENDER_PT_simplify',
'RENDER_PT_stamp',
'SCENE_PT_simplify',
'SCENE_PT_audio',
'WORLD_PT_ambient_occlusion',
'WORLD_PT_environment_lighting',
'WORLD_PT_gather',
'WORLD_PT_indirect_lighting',
'WORLD_PT_mist',
'WORLD_PT_preview',
'WORLD_PT_world',
}
for panel in bpy.types.Panel.__subclasses__():
if hasattr(panel, 'COMPAT_ENGINES') and 'BLENDER_RENDER' in panel.COMPAT_ENGINES:
if panel.__name__ not in exclude_panels:
yield panel
def register():
# set HdUSD panels filter
for panel in get_panels():
panel.COMPAT_ENGINES.add('HdUSD')
def unregister():
# remove HdUSD panels filter
for panel in get_panels():
if 'HdUSD' in panel.COMPAT_ENGINES:
panel.COMPAT_ENGINES.remove('HdUSD')
|
tests/integration/test_system_ddl_worker_queue/test.py | chalice19/ClickHouse | 8,629 | 12620559 | import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
"node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True
)
node2 = cluster.add_instance(
"node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True
)
node3 = cluster.add_instance(
"node3", main_configs=["configs/remote_servers.xml"], with_zookeeper=True
)
node4 = cluster.add_instance(
"node4", main_configs=["configs/remote_servers.xml"], with_zookeeper=True
)
nodes = [node1, node2, node3, node4]
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
for i, node in enumerate([node1, node2]):
node.query("CREATE DATABASE testdb")
node.query(
"""CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table1', '{}') ORDER BY id;""".format(
i
)
)
for i, node in enumerate([node3, node4]):
node.query("CREATE DATABASE testdb")
node.query(
"""CREATE TABLE testdb.test_table(id UInt32, val String) ENGINE = ReplicatedMergeTree('/clickhouse/test/test_table2', '{}') ORDER BY id;""".format(
i
)
)
yield cluster
finally:
cluster.shutdown()
def test_distributed_ddl_queue(started_cluster):
node1.query(
"INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)"
)
node3.query(
"INSERT INTO testdb.test_table SELECT number, toString(number) FROM numbers(100)"
)
node2.query("SYSTEM SYNC REPLICA testdb.test_table")
node4.query("SYSTEM SYNC REPLICA testdb.test_table")
node1.query(
"ALTER TABLE testdb.test_table ON CLUSTER test_cluster ADD COLUMN somecolumn UInt8 AFTER val",
settings={"replication_alter_partitions_sync": "2"},
)
for node in nodes:
node.query("SYSTEM SYNC REPLICA testdb.test_table")
assert node.query("SELECT somecolumn FROM testdb.test_table LIMIT 1") == "0\n"
assert (
node.query(
"SELECT If((SELECT count(*) FROM system.distributed_ddl_queue WHERE cluster='test_cluster' AND entry='query-0000000000') > 0, 'ok', 'fail')"
)
== "ok\n"
)
|
src/schnetpack/md/parsers/__init__.py | giadefa/schnetpack | 450 | 12620588 | <gh_stars>100-1000
"""
This module contains several parsers. This includes utilities for reading and converting molecular
dynamics input files to instructions for the :obj:`schnetpack.md.simulator.Simulator`. In addition, there is
a full package for parsing ORCA output files.
"""
|
external/AR/ltr/models/AR_seg_mask/AR_seg_mask.py | tzhhhh123/Stark | 376 | 12620595 | <reponame>tzhhhh123/Stark
import torch.nn as nn
from ltr.models.neck import CorrNL
from ltr import model_constructor
import torch
import ltr.models.backbone.resnet_seg as resnet_seg
from ltr.models.head import seg_network
from easydict import EasyDict as edict
'''2020.4.14 replace mask head with frtm for higher-quality mask'''
'''2020.4.22 Only use the mask branch'''
class ARnet_seg_mask(nn.Module):
""" Scale Estimation network module with three branches: bbox, coner and mask. """
def __init__(self, feature_extractor, neck_module, head_module, used_layers,
extractor_grad=True,output_size=(256,256)):
"""
args:
feature_extractor - backbone feature extractor
bb_regressor - IoU prediction module
bb_regressor_layer - List containing the name of the layers from feature_extractor, which are input to
bb_regressor
extractor_grad - Bool indicating whether backbone feature extractor requires gradients
"""
super(ARnet_seg_mask, self).__init__()
self.feature_extractor = feature_extractor
self.neck = neck_module
self.refiner = head_module
self.used_layers = used_layers
self.output_size = output_size
if not extractor_grad:
for p in self.feature_extractor.parameters():
p.requires_grad_(False)
def forward(self, train_imgs, test_imgs, train_bb, mode='train'):
""" Forward pass
Note: If the training is done in sequence mode, that is, test_imgs.dim() == 5, then the batch dimension
corresponds to the first dimensions. test_imgs is thus of the form [sequence, batch, feature, row, col]
"""
self.forward_ref(train_imgs, train_bb)
pred_dict = self.forward_test(test_imgs, mode)
return pred_dict
def forward_ref(self, train_imgs, train_bb):
""" Forward pass of reference branch.
size of train_imgs is (1,batch,3,H,W), train_bb is (1,batch,4)"""
num_sequences = train_imgs.shape[-4] # batch
num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1 # 1
# Extract backbone features
'''train_feat OrderedDict, key:'layer4' '''
        train_feat_dict = self.extract_backbone_features(train_imgs.view(-1, *train_imgs.shape[-3:]))  # input size is (batch,3,256,256)
        train_feat_list = [feat for feat in train_feat_dict.values()]  # list where each element is the feature tensor output by one layer
# get reference feature
self.neck.get_ref_kernel(train_feat_list, train_bb.view(num_train_images, num_sequences, 4))
def forward_test(self, test_imgs, mode='train'):
""" Forward pass of test branch. size of test_imgs is (1,batch,3,256,256)"""
output = {}
# Extract backbone features
test_feat_dict = self.extract_backbone_features(test_imgs.view(-1, *test_imgs.shape[-3:]),
                                                        layers=['layer1','layer2','layer3','layer4','layer5'])  # input size is (batch,3,256,256)
'''list,tensor'''
# Save low-level feature list
# Lfeat_list = [feat for name, feat in test_feat_dict.items() if name != 'layer3']
# fuse feature from two branches
fusion_feat = self.neck.fuse_feat([test_feat_dict['layer4']])
# Obtain bbox prediction
if mode=='train':
output['mask'] = torch.sigmoid(self.refiner(fusion_feat, test_feat_dict, self.output_size))
elif mode == 'mask':
output = torch.sigmoid(self.refiner(fusion_feat, test_feat_dict, self.output_size))
else:
raise ValueError("mode should be train or test")
return output
def extract_backbone_features(self, im, layers=None):
if layers is None:
layers = self.used_layers
return self.feature_extractor(im, layers)
def extract_features(self, im, layers):
return self.feature_extractor(im, layers)
@model_constructor
def ARnet_seg_mask_resnet50(backbone_pretrained=True,used_layers=('layer4',),pool_size=None):
# backbone
backbone_net = resnet_seg.resnet50(pretrained=backbone_pretrained)
# neck
neck_net = CorrNL.CorrNL(pool_size=pool_size)
# multiple heads
'''create segnet'''
in_channels = 1024
# disc_params = edict(layer="layer4", in_channels=in_channels, c_channels=96, out_channels=64) # non-local feat (64 channels rather than 1)
'''2020.4.22 change "out_channels" to pool_size * pool_size'''
disc_params = edict(layer="layer4", in_channels=in_channels, c_channels=96, out_channels=pool_size*pool_size) # non-local feat (64 channels rather than 1)
refnet_params = edict(
layers=("layer5", "layer4", "layer3", "layer2"),
nchannels=64, use_batch_norm=True)
disc_params.in_channels = backbone_net.get_out_channels()[disc_params.layer]
p = refnet_params
refinement_layers_channels = {L: nch for L, nch in backbone_net.get_out_channels().items() if L in p.layers}
refiner = seg_network.SegNetwork(disc_params.out_channels, p.nchannels, refinement_layers_channels, p.use_batch_norm)
'''create Alpha-Refine'''
net = ARnet_seg_mask(feature_extractor=backbone_net, neck_module=neck_net,
head_module=refiner,
used_layers=used_layers, extractor_grad=True,
output_size=(int(pool_size*2*16),int(pool_size*2*16)))
return net
|
test/nn/conv/test_supergat_conv.py | JinheonBaek/pytorch_geometric | 12,651 | 12620596 | import pytest
import torch
from torch_geometric.nn import SuperGATConv
@pytest.mark.parametrize('att_type', ['MX', 'SD'])
def test_supergat_conv(att_type):
x = torch.randn(4, 8)
edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
conv = SuperGATConv(8, 32, heads=2, attention_type=att_type,
neg_sample_ratio=1.0, edge_sample_ratio=1.0)
assert conv.__repr__() == f'SuperGATConv(8, 32, heads=2, type={att_type})'
out = conv(x, edge_index)
assert out.size() == (4, 64)
# Negative samples are given.
neg_edge_index = conv.negative_sampling(edge_index, x.size(0))
assert conv(x, edge_index, neg_edge_index).tolist() == out.tolist()
att_loss = conv.get_attention_loss()
assert isinstance(att_loss, torch.Tensor) and att_loss > 0
# Batch of graphs.
x = torch.randn(8, 8)
edge_index = torch.tensor([[0, 1, 2, 3, 4, 5, 6, 7],
[0, 0, 1, 1, 4, 4, 5, 5]])
batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])
out = conv(x, edge_index, batch=batch)
assert out.size() == (8, 64)
# Batch of graphs and negative samples are given.
neg_edge_index = conv.negative_sampling(edge_index, x.size(0), batch)
assert conv(x, edge_index, neg_edge_index).tolist() == out.tolist()
att_loss = conv.get_attention_loss()
assert isinstance(att_loss, torch.Tensor) and att_loss > 0
|
src/sparsify/blueprints/system.py | dhuangnm/sparsify | 152 | 12620602 | <reponame>dhuangnm/sparsify
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Server routes related to the system
"""
import logging
from http import HTTPStatus
from flasgger import swag_from
from flask import Blueprint, jsonify
from sparsify.blueprints.utils import API_ROOT_PATH
from sparsify.schemas import ErrorSchema, ResponseSystemInfo, data_dump_and_validation
from sparsify.utils import get_ml_sys_info, ml_engines_errors
__all__ = ["SYSTEM_PATH", "system_blueprint"]
SYSTEM_PATH = "{}/system".format(API_ROOT_PATH)
_LOGGER = logging.getLogger(__name__)
system_blueprint = Blueprint(SYSTEM_PATH, __name__, url_prefix=SYSTEM_PATH)
@system_blueprint.route("/info")
@swag_from(
{
"tags": ["System"],
"summary": "Get system specs and other hardware info",
"produces": ["application/json"],
"parameters": [],
"responses": {
HTTPStatus.OK.value: {
"description": "The info for the current system the server is on",
"schema": ResponseSystemInfo,
},
HTTPStatus.BAD_REQUEST.value: {
"description": "Information for the error that occurred",
"schema": ErrorSchema,
},
HTTPStatus.INTERNAL_SERVER_ERROR.value: {
"description": "Information for the error that occurred",
"schema": ErrorSchema,
},
},
},
)
def info():
"""
Route for getting the info describing the current system the server is running on
:return: a tuple containing (json response, http status code)
"""
_LOGGER.info("getting system info")
sys_info = get_ml_sys_info()
resp_info = data_dump_and_validation(ResponseSystemInfo(), {"info": sys_info})
_LOGGER.info("retrieved system info {}".format(resp_info))
return jsonify(resp_info), HTTPStatus.OK.value
@system_blueprint.route("/validate", methods=["POST"])
@swag_from(
{
"tags": ["System"],
"summary": "Validate that the system is setup correctly to run. "
"For example, make sure deepsparse and sparseml are accessible",
"produces": ["application/json"],
"parameters": [],
"responses": {
HTTPStatus.OK.value: {"description": "System is setup correctly"},
HTTPStatus.BAD_REQUEST.value: {
"description": "Information for the error that occurred",
"schema": ErrorSchema,
},
HTTPStatus.INTERNAL_SERVER_ERROR.value: {
"description": "Information for the error that occurred",
"schema": ErrorSchema,
},
},
},
)
def validate():
"""
    Route for validating the current system the server is running on;
    deepsparse and onnxruntime must be installed to validate successfully
:return: a tuple containing (response, http status code)
"""
_LOGGER.info("validating system")
errors = ml_engines_errors()
for key, err in errors.items():
if err is not None:
raise Exception("error on import for {}: {}".format(key, err))
_LOGGER.info("validated system")
return "", HTTPStatus.OK.value
|
eggs/Flask_Admin-1.1.0-py2.7.egg/flask_admin/contrib/mongoengine/typefmt.py | salayhin/talkofacta | 334 | 12620691 | <gh_stars>100-1000
from jinja2 import Markup, escape
from mongoengine.base import BaseList
from mongoengine.fields import GridFSProxy, ImageGridFsProxy
from flask.ext.admin.model.typefmt import BASE_FORMATTERS, list_formatter
from . import helpers
def grid_formatter(view, value):
if not value.grid_id:
return ''
args = helpers.make_gridfs_args(value)
return Markup(
('<a href="%(url)s" target="_blank">' +
'<i class="icon-file"></i>%(name)s' +
'</a> %(size)dk (%(content_type)s)') %
{
'url': view.get_url('.api_file_view', **args),
'name': escape(value.name),
'size': value.length // 1024,
'content_type': escape(value.content_type)
})
def grid_image_formatter(view, value):
if not value.grid_id:
return ''
return Markup(
('<div class="image-thumbnail">' +
'<a href="%(url)s" target="_blank"><img src="%(thumb)s"/></a>' +
'</div>') %
{
'url': view.get_url('.api_file_view', **helpers.make_gridfs_args(value)),
'thumb': view.get_url('.api_file_view', **helpers.make_thumb_args(value)),
})
DEFAULT_FORMATTERS = BASE_FORMATTERS.copy()
DEFAULT_FORMATTERS.update({
BaseList: list_formatter,
GridFSProxy: grid_formatter,
ImageGridFsProxy: grid_image_formatter
})
|
zeus/api/schemas/fields/__init__.py | conrad-kronos/zeus | 221 | 12620699 | from .enum import * # NOQA
from .file import * # NOQA
from .permission import * # NOQA
from .result import * # NOQA
from .revision import * # NOQA
from .severity import * # NOQA
from .status import * # NOQA
|
examples/serial_server.py | timgates42/PyBBIO | 102 | 12620722 | <reponame>timgates42/PyBBIO
# serial_server.py - <NAME> - 4/15/12
#
# Creates a simple web interface to the Serial2 port.
#
# Serial2 TX = pin 21 on P9 header
# Serial2 RX = pin 22 on P9 header
#
# Run this program and navigate to http://your_beaglebone_ip:8000
# in your web browser.
#
# See BBIOServer tutorial:
# https://github.com/alexanderhiam/PyBBIO/wiki/BBIOServer
#
# This example is in the public domain
from bbio import *
from bbio.libraries.BBIOServer import *
# Create a server instance:
server = BBIOServer()
# A global buffer for received data:
data = ''
def serial_tx(string):
""" Sends given string to Serial2. """
Serial2.println(string)
def serial_rx():
""" Returns received data if any, otherwise current data buffer. """
global data
if (Serial2.available()):
# There's incoming data
    data = ''
while(Serial2.available()):
# If multiple characters are being sent we want to catch
# them all, so add received byte to our data string and
# delay a little to give the next byte time to arrive:
data += Serial2.read()
delay(5)
return data
def setup():
# Start the serial port at 9600 baud:
Serial2.begin(9600)
# Create the web page:
serial = Page("Serial")
serial.add_text("A simple interface to Serial2.")
serial.add_entry(lambda string: serial_tx(string), "Send", newline=True)
serial.add_monitor(lambda: serial_rx(), "Received:", newline=True)
# Start the server:
server.start(serial)
def loop():
# Server has stopped; exit happily:
stop()
run(setup, loop)
|
wagtail/admin/views/pages/utils.py | sonnybaker/wagtail | 8,851 | 12620730 | <filename>wagtail/admin/views/pages/utils.py
from django.utils.http import url_has_allowed_host_and_scheme
def get_valid_next_url_from_request(request):
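    """
    Return the 'next' URL supplied with the request if it is safe to redirect to.

    Django's ``url_has_allowed_host_and_scheme`` only accepts relative URLs or URLs
    pointing at the request's own host, so anything else yields an empty string,
    which prevents open redirects after page actions.
    """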
next_url = request.POST.get('next') or request.GET.get('next')
if not next_url or not url_has_allowed_host_and_scheme(url=next_url, allowed_hosts={request.get_host()}):
return ''
return next_url
|
scripts/check_utils/check_split_leak.py | gitter-badger/FlexNeuART | 101 | 12620752 | <filename>scripts/check_utils/check_split_leak.py
#!/usr/bin/env python
#
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import argparse
import numpy as np
from tqdm import tqdm
"""
This utility script checks for possible leakage across different data splits.
Importantly it works only for bitext. In the context of a community QA collection,
such bitext arises naturally. For regular document collections, a pseudo-bitext
needs to be created using scripts/giza/export_bitext_plain.sh:
importantly one needs to use the text_raw field (and text as an index field)
and use 0 for the "max query to doc word ratio" so documents are not split
into chunks.
Specifically, we search for very similar question-answer pairs, which might
be duplicates or near duplicates. Hence, we check the following:
1. Are there very similar questions?
2. For sufficiently similar questions, e.g., Jaccard >= 0.75, we check
all pairwise similarities among all relevant answers.
By default this method uses brute-force search with the Jaccard similarity.
The exhaustiveness of the search ensures we won't miss anything. However, for quicker-and-easier
checks, one can use HNSW with sufficiently high values of M (>= 30), efConstruction (>=200),
and efSearch (>=1000). These parameters might need to be bumped up for "harder" collections,
and brute-force search is certainly a safer option.
"""
from flexneuart.check_utils import get_token_ids, QUERY_BATCH_SIZE, jaccard, \
read_sample_queries, create_jaccard_index, str_to_nmslib_vect
from flexneuart.text_proc.parse import get_bert_tokenizer
from flexneuart.io import jsonl_gen
from flexneuart.data_convert import unique
from flexneuart.config import ANSWER_FILE_JSON, QREL_FILE, DOCID_FIELD, TEXT_RAW_FIELD_NAME
from flexneuart.eval import read_qrels_dict
PRINT_TOO_CLOSE_THRESHOLD=0.9 # We want to inspect answers that are too close
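# A hedged illustration of the check described in the docstring above (this script
# itself relies on flexneuart's jaccard() over sets of unique BERT token ids):
# Jaccard(A, B) = |A & B| / |A | B|, so for the token-id sets
#   a = {1, 2, 3, 4} and b = {3, 4, 5}
# the similarity is 2 / 5 = 0.4; relevant-answer pairs scoring at or above
# PRINT_TOO_CLOSE_THRESHOLD are printed below for manual inspection.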
np.random.seed(0)
tokenizer = get_bert_tokenizer()
parser = argparse.ArgumentParser(description='Checking for possible high overlaps among QA pairs.')
parser.add_argument('--data_dir',
metavar='data directory',
help='data directory',
type=str, required=True)
parser.add_argument('--input_subdir1',
metavar='1st input subdir',
help='1st input data subdirectory',
type=str, required=True)
parser.add_argument('--input_subdir2',
                    metavar='2nd input subdir',
                    help='2nd input data subdirectory',
type=str, required=True)
parser.add_argument('--sample_prob1',
metavar='1st subdir sample prob',
type=float, default=1.0)
parser.add_argument('--sample_prob2',
metavar='2d subdir sample prob',
type=float, default=1.0)
parser.add_argument('-k', metavar='k-NN k',
type=int, default=1)
parser.add_argument('--min_jacc', metavar='min jaccard to compare answers',
type=float, default=1.0)
parser.add_argument("--use_hnsw", action="store_true",
help="Use HNSW instead of brute-force for retrieval")
args = parser.parse_args()
print(args)
data_dir = args.data_dir
sample_query_list1, sample_query_list2 = read_sample_queries(data_dir,
args.input_subdir1, args.sample_prob1,
args.input_subdir2, args.sample_prob2)
apath1=os.path.join(data_dir, args.input_subdir1, ANSWER_FILE_JSON)
apath2=os.path.join(data_dir, args.input_subdir2, ANSWER_FILE_JSON)
rpath1 = os.path.join(data_dir, args.input_subdir1, QREL_FILE)
qrel_dict1 = read_qrels_dict(rpath1)
print('Read %d qrel sets from %s' % (len(qrel_dict1), rpath1))
rpath2 = os.path.join(data_dir, args.input_subdir2, QREL_FILE)
qrel_dict2 = read_qrels_dict(rpath2)
print('Read %d qrel sets from %s' % (len(qrel_dict2), rpath2))
answ_dict_text = {}
for fn in [apath1, apath2]:
qty = 0
for e in tqdm(jsonl_gen(fn), desc='loading answers'):
qty += 1
answ_id = e[DOCID_FIELD]
answ_text = e[TEXT_RAW_FIELD_NAME]
answ_dict_text[answ_id] = answ_text
print('Read %d answers from %s' % (qty, fn))
index = create_jaccard_index(args.use_hnsw, tokenizer, sample_query_list2)
K = args.k
print('K=', K)
nbr_quest_simils = []
nbr_answ_simils = []
for start in tqdm(range(0, len(sample_query_list1), QUERY_BATCH_SIZE), desc='query w/ 1st query set'):
qbatch = []
for e in sample_query_list1[start:start + QUERY_BATCH_SIZE]:
qbatch.append(str_to_nmslib_vect(tokenizer, e[TEXT_RAW_FIELD_NAME]))
if qbatch:
nbrs = index.knnQueryBatch(qbatch, k=K, num_threads=0)
assert(len(nbrs))
for i in range(len(qbatch)):
qnum1 = start + i
qid1 = sample_query_list1[qnum1][DOCID_FIELD]
index_queries, dists = nbrs[i]
for t in range(len(index_queries)):
# In the case of Jaccard, the similarity is one minus the distance
nqsimil = 1 - dists[t]
nbr_quest_simils.append(nqsimil)
# For close enough queries, compute all pairwise distances
# between the respective relevant answers
if nqsimil >= args.min_jacc:
qnum2 = index_queries[t]
qid2 = sample_query_list2[qnum2][DOCID_FIELD]
if qid1 in qrel_dict1 and qid2 in qrel_dict2:
for aid1, grade1 in qrel_dict1[qid1].items():
for aid2, grade2 in qrel_dict2[qid2].items():
if grade1 > 0 and grade2 > 0 and \
aid1 in answ_dict_text and aid2 in answ_dict_text:
toks1 = unique(get_token_ids(tokenizer, answ_dict_text[aid1]))
toks2 = unique(get_token_ids(tokenizer, answ_dict_text[aid2]))
answ_simil = jaccard(toks1, toks2)
nbr_answ_simils.append(answ_simil)
if answ_simil >= PRINT_TOO_CLOSE_THRESHOLD:
print(qid1, aid1, '<=>', answ_simil, '<=>', qid2, aid2)
print('---------------------')
print(answ_dict_text[aid1])
print(toks1)
print('---------------------')
print(answ_dict_text[aid2])
print(toks2)
print('=====================')
qbatch = []
# We are more interested in extremely high similarities, hence,
# we increase resolution in higher quantiles
q=list([0.2,0.3,0.4,0.5,0.6,0.7, 0.8, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 0.999, 0.999])
q.sort()
print('Maximum similarity among questions:', np.max(nbr_quest_simils))
print('Distribution of question-neighbor *SIMILARITIES* for k=%d' % K)
dst = np.quantile(nbr_quest_simils, q = q)
print(' quant| simil')
print('------+------')
for k in range(len(q)):
print('%5.03g' % q[k], ' | %.05g' % dst[k])
print('Distribution of relevant answer pairwise *SIMILARITIES* from neighbor questions with Jaccard >= %g' % args.min_jacc)
if nbr_answ_simils:
dst = np.quantile(nbr_answ_simils, q = q)
print(' quant| simil')
print('------+------')
for k in range(len(q)):
print('%5.03g' % q[k], ' | %.05g' % dst[k])
else:
print('No data collected, did you set the Jaccard threshold to a value < 1?')
print('Check is successful!')
|
states/bacula.py | beornf/salt-contrib | 111 | 12620764 | <reponame>beornf/salt-contrib
# -*- coding: utf-8 -*-
'''
Management of bacula File Daemon Configuration
==============================================
Configure Bacula file daemon to allow connections from a
particular Bacula director, set password credentials, as well as
the file daemon name and port that it runs on. Configure the
messages that get returned to the director.
.. code-block:: yaml
/etc/bacula/bacula-fd.conf:
bacula:
- fdconfig
- dirname: bacula-dir
- dirpasswd: <PASSWORD>
- fdname: bacula-fd
- fdport: 9102
- messages: bacula-dir = all, !skipped, !restored
'''
from __future__ import absolute_import
import re
import salt.utils
# Search Patterns
dirs = re.compile(r'Director {[^}]*}')
fd = re.compile(r'FileDaemon {[^}]*}')
msgs = re.compile(r'Messages {[^}]*}')
def _getConfig(pattern, config):
'''
Get Configuration block
'''
m = pattern.search(config)
if m:
return m.group()
return None
def _getParam(pname, config):
'''
Get Param from config
'''
if pname == 'Password':
search = '{0} = "(?P<{0}>.*)"'.format(pname)
else:
search = '{0} = (?P<{0}>.*)'.format(pname)
mp = re.search(search, config)
if mp:
return mp.group(pname)
return None
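# Hedged illustration (not part of the original state module): given the block
#   'FileDaemon {\n  Name = bacula-fd\n  FDport = 9102\n}'
# _getParam('Name', block) returns 'bacula-fd' and _getParam('FDport', block)
# returns '9102' -- always a string, so callers cast to int where needed.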
def _getConfigParams(config):
'''
Get configuration blocks for parameters
'''
cparams = {}
dconfig = _getConfig(dirs, config)
if not dconfig:
return None
cparams['dirname'] = _getParam('Name', dconfig)
cparams['dirpasswd'] = _getParam('Password', dconfig)
fdconfig = _getConfig(fd, config)
if not fdconfig:
return None
cparams['fdname'] = _getParam('Name', fdconfig)
cparams['fdport'] = _getParam('FDport', fdconfig)
mconfig = _getConfig(msgs, config)
if not mconfig:
return None
cparams['messages'] = _getParam('director', mconfig)
return cparams
def fdconfig(name,
dirname=None,
dirpasswd=None,
fdname=None,
fdport=None,
messages=None):
'''
Configure a bacula file daemon
dirname
The name of the director that is allowed to connect to the
file daemon.
dirpasswd
The password that the director must use to successfully
connect to the file daemon.
fdname
The name of the file daemon
fdport
The port that the file daemon should run on
messages
Define how and what messages to send to a director.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': '', }
config = ''
with salt.utils.fopen(name) as f:
config = f.read()
if not config:
        ret['comment'] = 'Could not find {0}\n'.format(name)
ret['result'] = False
return ret
cparams = _getConfigParams(config)
if not cparams:
ret['comment'] += 'Could not find configuration information.\n'
ret['result'] = False
return ret
changes = {}
if dirname and dirname != cparams['dirname']:
changes['dirname'] = dirname
if dirpasswd and dirpasswd != cparams['dirpasswd']:
changes['dirpasswd'] = dirpasswd
if fdname and fdname != cparams['fdname']:
changes['fdname'] = fdname
if fdport and fdport != int(cparams['fdport']):
changes['fdport'] = fdport
if messages and messages != cparams['messages']:
changes['messages'] = messages
if not changes:
ret['comment'] += 'Bacula file daemon configuration is up to date.\n'
ret['result'] = True
return ret
if __opts__['test']:
if 'dirname' in changes:
ret['comment'] += \
'Director Name set to be changed to {0}\n'.format(dirname)
if 'dirpasswd' in changes:
ret['comment'] += \
'Director Password set to be changed to {0}\n'.format(dirpasswd)
if 'fdname' in changes:
ret['comment'] += \
'File Daemon Name set to be changed to {0}\n'.format(fdname)
if 'fdport' in changes:
ret['comment'] += \
'File Daemon Port set to be changed to {0}\n'.format(fdport)
if 'messages' in changes:
ret['comment'] += \
'Messages Director set to be changed to {0}\n'.format(messages)
return ret
if 'dirname' in changes or 'dirpasswd' in changes:
dconfig = _getConfig(dirs, config)
if 'dirname' in changes:
dconfig = re.sub(r'Name = (.*)',
'Name = {0}'.format(dirname),
dconfig)
if 'dirpasswd' in changes:
dconfig = re.sub(r'Password = "(.*)"',
                             'Password = "{0}"'.format(dirpasswd),
dconfig)
config = dirs.sub(dconfig, config)
ret['changes']['Director'] = dconfig
if 'fdname' in changes or 'fdport' in changes:
fdconfig = _getConfig(fd, config)
if 'fdname' in changes:
fdconfig = re.sub(r'Name = (.*)',
'Name = {0}'.format(fdname),
fdconfig)
if 'fdport' in changes:
fdconfig = re.sub(r'FDport = (.*)',
'FDport = {0}'.format(fdport),
fdconfig)
config = fd.sub(fdconfig, config)
ret['changes']['FileDaemon'] = fdconfig
if 'messages' in changes:
mconfig = _getConfig(msgs, config)
mconfig = re.sub(r'director = (.*)',
                         'director = {0}'.format(messages),
mconfig)
ret['changes']['Messages'] = mconfig
config = msgs.sub(mconfig, config)
with salt.utils.fopen(name, 'w') as f:
f.write(config)
ret['comment'] += 'Updated bacula file daemon settings.\n'
ret['result'] = True
return ret
|
src/encoded/tests/test_audit_series.py | procha2/encoded | 102 | 12620772 | import pytest
def test_treatment_time_series_mixed_units(
testapp,
treatment_time_series,
experiment_chip_H3K4me3,
experiment_chip_H3K27me3,
replicate_1_chip,
replicate_2_chip,
library_1_chip,
library_2_chip,
biosample_human_1,
biosample_human_2,
treatment_5,
treatment_with_duration_amount_units
):
testapp.patch_json(treatment_time_series['@id'], {'related_datasets': [experiment_chip_H3K4me3['@id'], experiment_chip_H3K27me3['@id']]})
testapp.patch_json(treatment_5['@id'], {'duration': 9, 'duration_units': 'minute'})
testapp.patch_json(biosample_human_1['@id'], {'treatments': [treatment_5['@id']]})
testapp.patch_json(biosample_human_2['@id'], {'treatments': [treatment_with_duration_amount_units['@id']]})
testapp.patch_json(replicate_2_chip['@id'], {'experiment': experiment_chip_H3K4me3['@id']})
res = testapp.get(treatment_time_series['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent treatment units' for error in errors_list)
def test_treatment_concentration_series_mixed_units(
testapp,
treatment_concentration_series,
experiment_chip_H3K4me3,
experiment_chip_H3K27me3,
replicate_1_chip,
replicate_2_chip,
library_1_chip,
library_2_chip,
biosample_human_1,
biosample_human_2,
treatment_5,
treatment_with_duration_amount_units
):
testapp.patch_json(treatment_concentration_series['@id'], {'related_datasets': [experiment_chip_H3K4me3['@id'], experiment_chip_H3K27me3['@id']]})
testapp.patch_json(treatment_5['@id'], {'amount': 9, 'amount_units': 'nM'})
testapp.patch_json(biosample_human_1['@id'], {'treatments': [treatment_5['@id']]})
testapp.patch_json(biosample_human_2['@id'], {'treatments': [treatment_with_duration_amount_units['@id']]})
testapp.patch_json(replicate_2_chip['@id'], {'experiment': experiment_chip_H3K4me3['@id']})
res = testapp.get(treatment_concentration_series['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent treatment units' for error in errors_list)
def test_differentiation_time_series_mixed_units(
testapp,
base_differentiation_series,
experiment_chip_H3K4me3,
experiment_chip_H3K27me3,
replicate_1_chip,
replicate_2_chip,
library_1_chip,
library_2_chip,
biosample_human_1,
biosample_human_2
):
testapp.patch_json(base_differentiation_series['@id'], {'related_datasets': [experiment_chip_H3K4me3['@id'], experiment_chip_H3K27me3['@id']]})
testapp.patch_json(biosample_human_1['@id'], {'post_differentiation_time': 10, 'post_differentiation_time_units': 'hour'})
testapp.patch_json(biosample_human_2['@id'], {'post_differentiation_time': 10, 'post_differentiation_time_units': 'day'})
testapp.patch_json(replicate_2_chip['@id'], {'experiment': experiment_chip_H3K4me3['@id']})
res = testapp.get(base_differentiation_series['@id'] + '@@index-data')
errors = res.json['audit']
errors_list = []
for error_type in errors:
errors_list.extend(errors[error_type])
assert any(error['category'] == 'inconsistent differentation time units' for error in errors_list) |
atest/testdata/standard_libraries/builtin/UseBuiltIn.py | phil-davis/robotframework | 7,073 | 12620774 | from robot.libraries.BuiltIn import BuiltIn
def log_debug_message():
b = BuiltIn()
b.set_log_level('DEBUG')
b.log('Hello, debug world!', 'DEBUG')
def get_test_name():
return BuiltIn().get_variables()['${TEST NAME}']
def set_secret_variable():
BuiltIn().set_test_variable('${SECRET}', '*****')
def use_run_keyword_with_non_unicode_values():
BuiltIn().run_keyword('Log', 42)
BuiltIn().run_keyword('Log', b'\xff')
def user_keyword_via_run_keyword():
BuiltIn().run_keyword("UseBuiltInResource.Keyword", 'This is x', 911)
|
dsgn/utils/rotate_iou/utils.py | fangchengji/DSGN | 166 | 12620780 | <reponame>fangchengji/DSGN
import numpy as np
def boxes_center3d_to_corner3d_lidar(boxes_center):
# (N, 7) -> (N, 8, 3)
N = boxes_center.shape[0]
translation = boxes_center[:, :3]
size = boxes_center[:, 3:6]
rotation = boxes_center[:, 6]
h, w, l = boxes_center[:,3], boxes_center[:,4], boxes_center[:,5]
zeros = np.zeros((len(h)), dtype=np.float32)
# N,8
trackletBox_l = np.stack([-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2], axis=1)
trackletBox_w = np.stack([w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], axis=1)
trackletBox_h = np.stack([zeros, zeros, zeros, zeros, h, h, h, h], axis=1)
trackletBox = np.stack([trackletBox_l, trackletBox_w, trackletBox_h], axis=1)
rotMat = np.zeros((N, 3, 3), dtype=np.float32)
rotMat[:, 0, 0] = np.cos(rotation)
rotMat[:, 0, 1] = -np.sin(rotation)
rotMat[:, 1, 0] = np.sin(rotation)
rotMat[:, 1, 1] = np.cos(rotation)
rotMat[:, 2, 2] = 1.
# N, 3, 8
corner = np.matmul(rotMat, trackletBox) + translation[..., np.newaxis]
corner = np.transpose(corner, (0, 2, 1))
return corner
def boxes_center2d_to_corner2d_lidar(boxes_center):
N = boxes_center.shape[0]
boxes3d_center = np.zeros((N, 7))
boxes3d_center[:, [0, 1, 4, 5, 6]] = boxes_center
corner = boxes_center3d_to_corner3d_lidar(boxes3d_center)
return corner[:, :4, :2]
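# Hedged usage sketch (not part of the original module): one axis-aligned LiDAR
# box given as (x, y, z, h, w, l, ry) is expanded to its 8 corners, bottom face
# first, then the top face.
if __name__ == '__main__':
    demo_boxes = np.array([[0., 0., 0., 1.5, 2., 4., 0.]], dtype=np.float32)
    demo_corners = boxes_center3d_to_corner3d_lidar(demo_boxes)
    print(demo_corners.shape)  # (1, 8, 3)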
|
tests/test_case.py | ajhynes7/datatest | 277 | 12620782 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import inspect
import re
import textwrap
import warnings
from sys import version_info as _version_info
from unittest import TestCase as _TestCase  # Original TestCase, not
# compatibility layer.
# Import compatibility layers.
from . import _io as io
from . import _unittest as unittest
from .common import ignore_deprecations
# Import code to test.
from datatest.case import DataTestCase
from datatest.validation import validate
from datatest.validation import ValidationError
from datatest.differences import (
Missing,
Extra,
Invalid,
Deviation,
)
from datatest.acceptances import (
AcceptedDifferences,
AcceptedArgs,
AcceptedKeys,
AcceptedTolerance,
AcceptedFuzzy,
AcceptedPercent,
AcceptedCount,
)
try:
import squint
except ImportError:
squint = None
class TestHelperCase(unittest.TestCase):
"""Helper class for subsequent cases."""
def _run_one_test(self, case, method):
suite = unittest.TestSuite()
audit_case = case(method)
runner = unittest.TextTestRunner(stream=io.StringIO())
test_result = runner.run(audit_case)
        self.assertEqual(test_result.testsRun, 1, 'Should run one test.')
if test_result.errors:
return test_result.errors[0][1]
if test_result.failures:
return test_result.failures[0][1]
return None
class TestSubclass(TestHelperCase):
def test_subclass(self):
"""DataTestCase should be a subclass of unittest.TestCase."""
self.assertTrue(issubclass(DataTestCase, _TestCase))
@ignore_deprecations
class TestAssertValid(DataTestCase):
"""
+-------------------------------------------------------------+
| Object Comparisons and Returned *differences* Container |
+--------------+----------------------------------------------+
| | *requirement* type |
| *data* type +-------+---------+--------------+-------------+
| | set | mapping | sequence | other |
+==============+=======+=========+==============+=============+
| **set** | list | | | list |
+--------------+-------+---------+--------------+-------------+
| **mapping** | dict | dict | dict | dict |
+--------------+-------+---------+--------------+-------------+
| **sequence** | list | | assert error | list |
+--------------+-------+---------+--------------+-------------+
| **iterable** | list | | | list |
+--------------+-------+---------+--------------+-------------+
| **other** | list | | | diff object |
+--------------+-------+---------+--------------+-------------+
"""
def test_nonmapping(self):
with self.assertRaises(ValidationError) as cm:
data = set([1, 2, 3])
required = set([1, 2, 4])
self.assertValid(data, required)
differences = cm.exception.differences
self.assertEqual(differences, [Missing(4), Extra(3)])
def test_data_mapping(self):
with self.assertRaises(ValidationError) as cm:
data = {'a': set([1, 2]), 'b': set([1]), 'c': set([1, 2, 3])}
required = set([1, 2])
self.assertValid(data, required)
differences = cm.exception.differences
self.assertEqual(differences, {'b': [Missing(2)], 'c': [Extra(3)]})
def test_required_mapping(self):
with self.assertRaises(ValidationError) as cm:
data = {'AAA': 'a', 'BBB': 'x'}
required = {'AAA': 'a', 'BBB': 'b', 'CCC': 'c'}
self.assertValid(data, required)
differences = cm.exception.differences
self.assertEqual(differences, {'BBB': Invalid('x', 'b'), 'CCC': Missing('c')})
def test_required_sequence(self):
"""When *required* is a sequence, should compare predicates by
position.
"""
with self.assertRaises(ValidationError) as cm:
data = ['a', 2, 'x', 3]
required = ['a', 2, 'c', 4]
self.assertValid(data, required)
error = cm.exception
expected = [
Invalid('x', expected='c'),
Deviation(-1, 4),
]
self.assertEqual(error.differences, expected)
self.assertEqual(error.args[1], 'does not match required sequence')
def test_required_other(self):
"""When *required* is a string or other object, _compare_other()
should be called.
"""
with self.assertRaises(ValidationError) as cm:
required = lambda x: x.isupper()
data = ['AAA', 'BBB', 'ccc', 'DDD']
self.assertValid(data, required)
differences = cm.exception.differences
self.assertEqual(differences, [Invalid('ccc')])
def test_maxdiff_propagation(self):
self.maxDiff = 35 # <- Set custom maxDiff (as number of characters)!
with self.assertRaises(ValidationError) as cm:
self.assertValid(set([1, 2, 3, 4, 5, 6]), set([1, 2]))
expected = """
does not satisfy set membership (4 differences): [
Extra(3),
Extra(4),
...
Diff is too long. Set self.maxDiff to None to see it.
"""
expected = textwrap.dedent(expected).strip()
self.assertEqual(str(cm.exception), expected)
def test_maxdiff_none(self):
self.maxDiff = None
with self.assertRaises(ValidationError) as cm:
self.assertValid(set([1, 2, 3, 4, 5, 6]), set([1, 2]))
message = str(cm.exception)
self.assertTrue(message.endswith(']'), 'should show full diff when None')
@unittest.skipUnless(squint, 'requires squint')
def test_query_objects(self):
source = squint.Select([('A', 'B'), ('1', '2'), ('1', '2')])
query_obj1 = source(['B'])
query_obj2 = source(['B'])
self.assertValid(query_obj1, query_obj2)
@unittest.skipUnless(squint, 'requires squint')
def test_result_objects(self):
result_obj1 = squint.Result(['2', '2'], evaltype=list)
result_obj2 = squint.Result(['2', '2'], evaltype=list)
self.assertValid(result_obj1, result_obj2)
class TestAssertEqual(unittest.TestCase):
def test_for_unwrapped_behavior(self):
"""The datatest.DataTestCase class should NOT wrap the
assertEqual() method of its superclass. In version 0.7.0,
datatest DID wrap this method--this test should remain part
of the suite to prevent regression.
"""
if _version_info >= (3, 1):
self.assertIs(DataTestCase.assertEqual, unittest.TestCase.assertEqual)
else:
with self.assertRaises(Exception) as cm:
first = set([1,2,3,4,5,6,7])
second = set([1,2,3,4,5,6])
self.assertEqual(first, second)
self.assertIs(type(cm.exception), AssertionError)
class TestValidationWrappers(unittest.TestCase):
def setUp(self):
class DummyCase(DataTestCase):
def runTest(self_):
pass
def _apply_validation(self_, function, *args, **kwds):
"""Knocks-out existing method to log applied function."""
self_._applied_function = function
self.case = DummyCase()
def test_methods_names(self):
"""For each validate() method, DataTestCase should have a
matching unittest-style method.
========== ===================
validate() DataTestCase
========== ===================
approx() assertValidApprox()
subset() assertValidSubset()
... ...
========== ===================
"""
methods = [x for x in dir(validate) if not x.startswith('_')]
missing_methods = []
for method in methods:
name = 'assertValid{0}'.format(method.title())
msg = 'DataTestCase does not have method named {0!r}'.format(name)
if not hasattr(self.case, name):
foo = ' validate.{0}() <-> DataTestCase.{1}()'.format(method, name)
missing_methods.append(foo)
msg = ('validate and DataTestCase should have matching '
'validation methods:\n\n{0}').format('\n'.join(missing_methods))
self.assertTrue(len(missing_methods) == 0, msg=msg)
def test_methods_wrappers(self):
"""DataTestCase method wrappers should call appropriate
validate methods.
"""
method_calls = [
('predicate', ('aaa', 'aaa'), {}),
('regex', (['a', 'b'], '[ab]'), {}),
('approx', ([1.5, 1.5], 1.5), {}),
('fuzzy', ('aaa', 'aaa'), {}),
('interval', ([1, 2, 3], 1, 3), {}),
('set', ([1, 1, 2, 2], set([1, 2])), {}),
('subset', ([1, 2, 3], set([1, 2])), {}),
('superset', ([1, 2], set([1, 2, 3])), {}),
('unique', ([1, 2, 3],), {}),
('order', (['x', 'y'], ['x', 'y']), {}),
]
method_names = set(x[0] for x in method_calls)
all_names = set(x for x in dir(validate) if not x.startswith('_'))
self.assertSetEqual(method_names, all_names)
for orig_name, args, kwds in method_calls:
case_name = 'assertValid{0}'.format(orig_name.title())
case_method = getattr(self.case, case_name)
case_method(*args, **kwds)
orig_method = getattr(validate, orig_name)
applied_name = self.case._applied_function.__name__
msg = (
'\n\n '
'DataTestCase.{0}() should map to validate.{1}() '
'but instead maps to validate.{2}()'
).format(case_name, orig_name, applied_name)
self.assertEqual(self.case._applied_function, orig_method, msg=msg)
class TestAcceptanceWrappers(unittest.TestCase):
"""Test method wrappers for acceptance context managers."""
def setUp(self):
class DummyCase(DataTestCase):
def runTest(self):
pass
self.case = DummyCase()
def test_accepted(self):
cm = self.case.accepted([Missing('foo')])
self.assertTrue(isinstance(cm, AcceptedDifferences))
def test_acceptedArgs(self):
cm = self.case.acceptedArgs('foo')
self.assertTrue(isinstance(cm, AcceptedArgs))
def test_acceptedKeys(self):
cm = self.case.acceptedKeys('foo')
self.assertTrue(isinstance(cm, AcceptedKeys))
def test_acceptedTolerance(self):
cm = self.case.acceptedTolerance(5)
self.assertTrue(isinstance(cm, AcceptedTolerance))
def test_acceptedPercent(self):
result = self.case.acceptedPercent(5)
self.assertTrue(isinstance(result, AcceptedPercent))
def test_acceptedFuzzy(self):
cm = self.case.acceptedFuzzy()
self.assertTrue(isinstance(cm, AcceptedFuzzy))
def test_acceptedCount(self):
cm = self.case.acceptedCount(10)
self.assertTrue(isinstance(cm, AcceptedCount))
|
paas-ce/paas/esb/esb/channel/confapis.py | renmcc/bk-PaaS | 767 | 12620783 | <filename>paas-ce/paas/esb/esb/channel/confapis.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from esb.utils.confapis import get_confapis_manager
from .base import ChannelManager, get_channel_manager
class ConfapisChannelManager(ChannelManager):
"""
    Manager for Channels; queries the confapis config to find the matching channel.
"""
def __init__(self, *args, **kwargs):
"""
:preset_channels example:
{
"GET": {
"/cc/add_plat_id/": {
"raw_path": "/cc/add_plat_id/",
"re_path": re_obj,
"channel": esb_channel_obj,
"classes": {"api": None},
"comp_conf": {},
"channel_conf": {},
}
}
}
"""
super(ConfapisChannelManager, self).__init__(*args, **kwargs)
self.changed = False
self.channel_manager = get_channel_manager()
self.confapis_manager = get_confapis_manager()
self.set_default_channel_classes(self.channel_manager.get_default_channel_classes())
def __str__(self):
        return '<ConfapisChannelManager>'
def get_channel_by_path(self, path, method):
"""
        Get the channel configuration that matches the given path.
        :param str path: the path to look up
        :param str method: the HTTP request method
        :returns dict: a dict containing the matched channel and its channel_classes
"""
if not path.startswith('/'):
path = '/%s' % path
channel = None
        # try the path both with and without a trailing slash
path_another = path.rstrip('/') if path.endswith('/') else '%s/' % path
for _path in (path, path_another):
channel = self.preset_channels.get(method, {}).get(_path)
if channel:
return channel
return channel
def refresh_channel_groups(self):
self.register_channel_groups(
self.default_channel_classes,
self.confapis_manager.get_apis_conf(),
{},
)
_confapis_channel_manager = None
def get_confapis_channel_manager():
global _confapis_channel_manager
if _confapis_channel_manager is None:
manager = ConfapisChannelManager()
manager.refresh_channel_groups()
_confapis_channel_manager = manager
return _confapis_channel_manager
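# Hedged usage sketch (the component path below is illustrative only):
#
#   manager = get_confapis_channel_manager()
#   channel = manager.get_channel_by_path('/cc/add_plat_id', 'GET')
#
# get_channel_by_path() prepends a missing leading slash and tries the path both
# with and without a trailing slash, so '/cc/add_plat_id' and '/cc/add_plat_id/'
# resolve to the same preset channel (or None if nothing matches).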
|
st2api/tests/unit/controllers/v1/test_policies.py | momokuri-3/st2 | 4,920 | 12620786 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import six
from six.moves import http_client
from st2common.models.api.policy import PolicyTypeAPI, PolicyAPI
from st2common.persistence.policy import PolicyType, Policy
from st2common.transport.publishers import PoolPublisher
from st2api.controllers.v1.policies import PolicyTypeController
from st2api.controllers.v1.policies import PolicyController
from st2tests.fixturesloader import FixturesLoader
from st2tests.api import FunctionalTest
from st2tests.api import APIControllerWithIncludeAndExcludeFilterTestCase
__all__ = ["PolicyTypeControllerTestCase", "PolicyControllerTestCase"]
TEST_FIXTURES = {
"policytypes": ["fake_policy_type_1.yaml", "fake_policy_type_2.yaml"],
"policies": ["policy_1.yaml", "policy_2.yaml"],
}
PACK = "generic"
LOADER = FixturesLoader()
FIXTURES = LOADER.load_fixtures(fixtures_pack=PACK, fixtures_dict=TEST_FIXTURES)
class PolicyTypeControllerTestCase(
FunctionalTest, APIControllerWithIncludeAndExcludeFilterTestCase
):
get_all_path = "/v1/policytypes"
controller_cls = PolicyTypeController
include_attribute_field_name = "module"
exclude_attribute_field_name = "parameters"
base_url = "/v1/policytypes"
@classmethod
def setUpClass(cls):
super(PolicyTypeControllerTestCase, cls).setUpClass()
cls.policy_type_dbs = []
for _, fixture in six.iteritems(FIXTURES["policytypes"]):
instance = PolicyTypeAPI(**fixture)
policy_type_db = PolicyType.add_or_update(PolicyTypeAPI.to_model(instance))
cls.policy_type_dbs.append(policy_type_db)
def test_policy_type_get_all(self):
resp = self.__do_get_all()
self.assertEqual(resp.status_int, 200)
self.assertGreater(len(resp.json), 0)
def test_policy_type_filter(self):
resp = self.__do_get_all()
self.assertEqual(resp.status_int, 200)
self.assertGreater(len(resp.json), 0)
selected = resp.json[0]
resp = self.__do_get_all(
filter="resource_type=%s&name=%s"
% (selected["resource_type"], selected["name"])
)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(resp.json), 1)
self.assertEqual(self.__get_obj_id(resp, idx=0), selected["id"])
resp = self.__do_get_all(filter="name=%s" % selected["name"])
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(resp.json), 1)
self.assertEqual(self.__get_obj_id(resp, idx=0), selected["id"])
resp = self.__do_get_all(filter="resource_type=%s" % selected["resource_type"])
self.assertEqual(resp.status_int, 200)
self.assertGreater(len(resp.json), 1)
def test_policy_type_filter_empty(self):
resp = self.__do_get_all(filter="resource_type=yo&name=whatever")
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(resp.json), 0)
def test_policy_type_get_one(self):
resp = self.__do_get_all()
self.assertEqual(resp.status_int, 200)
self.assertGreater(len(resp.json), 0)
selected = resp.json[0]
resp = self.__do_get_one(selected["id"])
self.assertEqual(resp.status_int, 200)
self.assertEqual(self.__get_obj_id(resp), selected["id"])
resp = self.__do_get_one(selected["ref"])
self.assertEqual(resp.status_int, 200)
self.assertEqual(self.__get_obj_id(resp), selected["id"])
def test_policy_type_get_one_fail(self):
resp = self.__do_get_one("1")
self.assertEqual(resp.status_int, 404)
def _insert_mock_models(self):
result = []
for policy_type_db in self.policy_type_dbs:
result.append(policy_type_db.id)
return result
def _delete_mock_models(self, object_ids):
pass
@staticmethod
def __get_obj_id(resp, idx=-1):
return resp.json["id"] if idx < 0 else resp.json[idx]["id"]
def __do_get_all(self, filter=None):
url = "%s?%s" % (self.base_url, filter) if filter else self.base_url
return self.app.get(url, expect_errors=True)
def __do_get_one(self, id):
return self.app.get("%s/%s" % (self.base_url, id), expect_errors=True)
class PolicyControllerTestCase(
FunctionalTest, APIControllerWithIncludeAndExcludeFilterTestCase
):
get_all_path = "/v1/policies"
controller_cls = PolicyController
include_attribute_field_name = "policy_type"
exclude_attribute_field_name = "parameters"
base_url = "/v1/policies"
@classmethod
def setUpClass(cls):
super(PolicyControllerTestCase, cls).setUpClass()
for _, fixture in six.iteritems(FIXTURES["policytypes"]):
instance = PolicyTypeAPI(**fixture)
PolicyType.add_or_update(PolicyTypeAPI.to_model(instance))
cls.policy_dbs = []
for _, fixture in six.iteritems(FIXTURES["policies"]):
instance = PolicyAPI(**fixture)
policy_db = Policy.add_or_update(PolicyAPI.to_model(instance))
cls.policy_dbs.append(policy_db)
def test_get_all(self):
resp = self.__do_get_all()
self.assertEqual(resp.status_int, 200)
self.assertGreater(len(resp.json), 0)
def test_filter(self):
resp = self.__do_get_all()
self.assertEqual(resp.status_int, 200)
self.assertGreater(len(resp.json), 0)
selected = resp.json[0]
resp = self.__do_get_all(
filter="pack=%s&name=%s" % (selected["pack"], selected["name"])
)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(resp.json), 1)
self.assertEqual(self.__get_obj_id(resp, idx=0), selected["id"])
resp = self.__do_get_all(filter="name=%s" % selected["name"])
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(resp.json), 1)
self.assertEqual(self.__get_obj_id(resp, idx=0), selected["id"])
resp = self.__do_get_all(filter="pack=%s" % selected["pack"])
self.assertEqual(resp.status_int, 200)
self.assertGreater(len(resp.json), 1)
def test_filter_empty(self):
resp = self.__do_get_all(filter="pack=yo&name=whatever")
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(resp.json), 0)
def test_get_one(self):
resp = self.__do_get_all()
self.assertEqual(resp.status_int, 200)
self.assertGreater(len(resp.json), 0)
selected = resp.json[0]
resp = self.__do_get_one(selected["id"])
self.assertEqual(resp.status_int, 200)
self.assertEqual(self.__get_obj_id(resp), selected["id"])
resp = self.__do_get_one(selected["ref"])
self.assertEqual(resp.status_int, 200)
self.assertEqual(self.__get_obj_id(resp), selected["id"])
def test_get_one_fail(self):
resp = self.__do_get_one("1")
self.assertEqual(resp.status_int, 404)
def test_crud(self):
instance = self.__create_instance()
post_resp = self.__do_post(instance)
self.assertEqual(post_resp.status_int, http_client.CREATED)
get_resp = self.__do_get_one(self.__get_obj_id(post_resp))
self.assertEqual(get_resp.status_int, http_client.OK)
updated_input = get_resp.json
updated_input["enabled"] = not updated_input["enabled"]
put_resp = self.__do_put(self.__get_obj_id(post_resp), updated_input)
self.assertEqual(put_resp.status_int, http_client.OK)
self.assertEqual(put_resp.json["enabled"], updated_input["enabled"])
del_resp = self.__do_delete(self.__get_obj_id(post_resp))
self.assertEqual(del_resp.status_int, http_client.NO_CONTENT)
def test_post_duplicate(self):
instance = self.__create_instance()
post_resp = self.__do_post(instance)
self.assertEqual(post_resp.status_int, http_client.CREATED)
post_dup_resp = self.__do_post(instance)
self.assertEqual(post_dup_resp.status_int, http_client.CONFLICT)
del_resp = self.__do_delete(self.__get_obj_id(post_resp))
self.assertEqual(del_resp.status_int, http_client.NO_CONTENT)
def test_put_not_found(self):
updated_input = self.__create_instance()
put_resp = self.__do_put("12345", updated_input)
self.assertEqual(put_resp.status_int, http_client.NOT_FOUND)
def test_put_sys_pack(self):
instance = self.__create_instance()
instance["pack"] = "core"
post_resp = self.__do_post(instance)
self.assertEqual(post_resp.status_int, http_client.CREATED)
updated_input = post_resp.json
updated_input["enabled"] = not updated_input["enabled"]
put_resp = self.__do_put(self.__get_obj_id(post_resp), updated_input)
self.assertEqual(put_resp.status_int, http_client.BAD_REQUEST)
self.assertEqual(
put_resp.json["faultstring"],
"Resources belonging to system level packs can't be manipulated",
)
# Clean up manually since API won't delete object in sys pack.
Policy.delete(Policy.get_by_id(self.__get_obj_id(post_resp)))
def test_delete_not_found(self):
del_resp = self.__do_delete("12345")
self.assertEqual(del_resp.status_int, http_client.NOT_FOUND)
def test_delete_sys_pack(self):
instance = self.__create_instance()
instance["pack"] = "core"
post_resp = self.__do_post(instance)
self.assertEqual(post_resp.status_int, http_client.CREATED)
del_resp = self.__do_delete(self.__get_obj_id(post_resp))
self.assertEqual(del_resp.status_int, http_client.BAD_REQUEST)
self.assertEqual(
del_resp.json["faultstring"],
"Resources belonging to system level packs can't be manipulated",
)
# Clean up manually since API won't delete object in sys pack.
Policy.delete(Policy.get_by_id(self.__get_obj_id(post_resp)))
def _insert_mock_models(self):
result = []
for policy_db in self.policy_dbs:
result.append(policy_db.id)
return result
def _delete_mock_models(self, object_ids):
pass
@staticmethod
def __create_instance():
return {
"name": "myaction.mypolicy",
"pack": "mypack",
"resource_ref": "mypack.myaction",
"policy_type": "action.mock_policy_error",
"parameters": {"k1": "v1"},
}
@staticmethod
def __get_obj_id(resp, idx=-1):
return resp.json["id"] if idx < 0 else resp.json[idx]["id"]
def __do_get_all(self, filter=None):
url = "%s?%s" % (self.base_url, filter) if filter else self.base_url
return self.app.get(url, expect_errors=True)
def __do_get_one(self, id):
return self.app.get("%s/%s" % (self.base_url, id), expect_errors=True)
@mock.patch.object(PoolPublisher, "publish", mock.MagicMock())
def __do_post(self, instance):
return self.app.post_json(self.base_url, instance, expect_errors=True)
@mock.patch.object(PoolPublisher, "publish", mock.MagicMock())
def __do_put(self, id, instance):
return self.app.put_json(
"%s/%s" % (self.base_url, id), instance, expect_errors=True
)
@mock.patch.object(PoolPublisher, "publish", mock.MagicMock())
def __do_delete(self, id):
return self.app.delete("%s/%s" % (self.base_url, id), expect_errors=True)
|
Python/PlusOneTest.py | TonnyL/Windary | 205 | 12620791 | <reponame>TonnyL/Windary
from unittest import TestCase
from PlusOne import PlusOne
class TestPlusOne(TestCase):
def test_plusOne(self):
po = PlusOne()
self.assertEqual(po.plusOne([1]), [2])
self.assertEqual(po.plusOne([9]), [1, 0])
self.assertEqual(po.plusOne([9, 9]), [1, 0, 0])
self.assertEqual(po.plusOne([2, 8, 9, 9, 9]), [2, 9, 0, 0, 0])
self.assertEqual(po.plusOne([2, 8, 8, 9]), [2, 8, 9, 0])
|
src/encoded/upgrade/analysis_step.py | procha2/encoded | 102 | 12620804 | from snovault import upgrade_step
from .upgrade_data.analysis_step_5_to_6 import (
label_mapping,
status_mapping,
title_mapping,
major_version_mapping,
aliases_mapping
)
@upgrade_step('analysis_step', '1', '2')
def analysis_step_1_2(value, system):
# http://redmine.encodedcc.org/issues/2770
input_mapping = {
'align-star-pe-v-1-0-2': ['reads'],
'align-star-pe-v-2-0-0': ['reads'],
'align-star-se-v-1-0-2': ['reads'],
'align-star-se-v-2-0-0': ['reads'],
'index-star-v-1-0-1': ['genome reference', 'spike-in sequence', 'reference genes'],
'index-star-v-2-0-0': ['genome reference', 'spike-in sequence', 'reference genes'],
'index-rsem-v-1-0-1': ['genome reference', 'spike-in sequence', 'reference genes'],
'index-tophat-v-1-0-0': ['genome reference', 'spike-in sequence', 'reference genes'],
'quant-rsem-v-1-0-2': ['transcriptome alignments'],
'stranded-signal-star-v-1-0-1': ['alignments'],
'stranded-signal-star-v-2-0-0': ['alignments'],
'unstranded-signal-star-v-1-0-1': ['alignments'],
'unstranded-signal-star-v-2-0-0': ['alignments'],
'align-tophat-pe-v-1-0-1': ['reads'],
'align-tophat-se-v-1-0-1': ['reads']
}
output_mapping = {
'align-star-pe-v-1-0-2': ['alignments'],
'align-star-pe-v-2-0-0': ['alignments'],
'align-star-se-v-1-0-2': ['alignments'],
'align-star-se-v-2-0-0': ['alignments'],
'index-star-v-1-0-1': ['genome index'],
'index-star-v-2-0-0': ['genome index'],
'index-rsem-v-1-0-1': ['genome index'],
'index-tophat-v-1-0-0': ['genome index'],
'quant-rsem-v-1-0-2': ['gene quantifications'],
'stranded-signal-star-v-1-0-1': [
'minus strand signal of multi-mapped reads',
'plus strand signal of multi-mapped reads',
'minus strand signal of unique reads',
'plus strand signal of unique reads'
],
'stranded-signal-star-v-2-0-0': [
'minus strand signal of multi-mapped reads',
'plus strand signal of multi-mapped reads',
'minus strand signal of unique reads',
'plus strand signal of unique reads'
],
'unstranded-signal-star-v-1-0-1': [
'signal of multi-mapped reads',
'signal of unique reads'
],
'unstranded-signal-star-v-2-0-0': [
'signal of multi-mapped reads',
'signal of unique reads'
],
'align-tophat-pe-v-1-0-1': ['alignments'],
'align-tophat-se-v-1-0-1': ['alignments']
}
value['input_file_types'] = input_mapping[value['name']]
value['output_file_types'] = output_mapping[value['name']]
@upgrade_step('analysis_step', '2', '3')
def analysis_step_2_3(value, system):
# http://redmine.encodedcc.org/issues/3019
import re
if 'output_file_types' in value:
for i in range(0, len(value['output_file_types'])):
string = value['output_file_types'][i]
value['output_file_types'][i] = re.sub('multi-mapped', 'all', string)
if 'input_file_types' in value:
for i in range(0, len(value['input_file_types'])):
string = value['input_file_types'][i]
value['input_file_types'][i] = re.sub('multi-mapped', 'all', string)
# http://redmine.encodedcc.org/issues/3074
del value['software_versions']
# http://redmine.encodedcc.org/issues/3074 note 16 and 3073
if value.get('name') in ['lrna-se-star-alignment-step-v-2-0',
'lrna-pe-star-alignment-step-v-2-0',
'lrna-pe-star-stranded-signal-step-v-2-0',
'lrna-pe-star-stranded-signals-for-tophat-step-v-2-0',
'lrna-se-star-unstranded-signal-step-v-2-0',
'lrna-se-star-unstranded-signals-for-tophat-step-v-2-0',
'index-star-v-2-0',
'rampage-grit-peak-calling-step-v-1-1'
]:
value['status'] = 'deleted'
if value.get('name') == 'lrna-pe-rsem-quantification-v-1':
value['parents'] = ['ace7163c-563a-43d6-a86f-686405af167d', #/analysis-steps/lrna-pe-star-alignment-step-v-1/'
'9ca04da2-5ef7-4ba1-b78c-41dfc4be0c11' #/analysis-steps/index-rsem-v-1-0/'
]
elif value.get('name') == 'lrna-se-rsem-quantification-step-v-1':
value['parents'] = ['3cad3827-7f21-4f70-9cbc-e718b5529775', #/analysis-steps/lrna-se-star-alignment-step-v-1/',
'9ca04da2-5ef7-4ba1-b78c-41dfc4be0c11' #/analysis-steps/index-rsem-v-1-0/'
]
@upgrade_step('analysis_step', '3', '4')
def analysis_step_3_4(value, system):
# http://redmine.encodedcc.org/issues/3063
if 'analysis_step_types' in value:
value['analysis_step_types'] = list(set(value['analysis_step_types']))
if 'input_file_types' in value:
value['input_file_types'] = list(set(value['input_file_types']))
if 'output_file_types' in value:
value['output_file_types'] = list(set(value['output_file_types']))
if 'qa_stats_generated' in value:
value['qa_stats_generated'] = list(set(value['qa_stats_generated']))
if 'parents' in value:
value['parents'] = list(set(value['parents']))
if 'aliases' in value:
value['aliases'] = list(set(value['aliases']))
if 'documents' in value:
value['documents'] = list(set(value['documents']))
@upgrade_step('analysis_step', '5', '6')
def analysis_step_5_6(value, system):
# http://redmine.encodedcc.org/issues/4987
obj_aliases = value.get('aliases', None)
if obj_aliases:
if obj_aliases[0] in label_mapping:
value['step_label'] = label_mapping[obj_aliases[0]]
else:
value['step_label'] = value['name']
value.pop('name', None)
if obj_aliases[0] in major_version_mapping:
value['major_version'] = major_version_mapping[obj_aliases[0]]
if obj_aliases[0] in title_mapping:
value['title'] = title_mapping[obj_aliases[0]]
if obj_aliases[0] in status_mapping:
value['status'] = status_mapping[obj_aliases[0]]
if obj_aliases[0] in aliases_mapping:
value['aliases'].append(aliases_mapping[obj_aliases[0]])
# http://redmine.encodedcc.org/issues/5050
if value.get('status') == 'replaced':
value['status'] = 'deleted'
@upgrade_step('analysis_step', '6', '7')
def analysis_step_6_7(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-4613
input_file = value.get('input_file_types', None)
output_file = value.get('output_file_types', None)
if input_file and 'candidate regulatory elements' in input_file:
input_file.remove('candidate regulatory elements')
input_file.append('candidate Cis-Regulatory Elements')
value['input_file_types'] = input_file
if output_file and 'candidate regulatory elements' in output_file:
output_file.remove('candidate regulatory elements')
output_file.append('candidate Cis-Regulatory Elements')
value['output_file_types'] = output_file
@upgrade_step('analysis_step', '7', '8')
def analysis_step_7_8(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-4641
input_file_types = value.get('input_file_types', None)
output_file_types = value.get('output_file_types', None)
if input_file_types and 'optimal idr thresholded peaks' in input_file_types:
input_file_types.remove('optimal idr thresholded peaks')
input_file_types.append('optimal IDR thresholded peaks')
value['input_file_types'] = input_file_types
if input_file_types and 'conservative idr thresholded peaks' in input_file_types:
input_file_types.remove('conservative idr thresholded peaks')
input_file_types.append('conservative IDR thresholded peaks')
value['input_file_types'] = input_file_types
if input_file_types and 'pseudoreplicated idr thresholded peaks' in input_file_types:
input_file_types.remove('pseudoreplicated idr thresholded peaks')
input_file_types.append('pseudoreplicated IDR thresholded peaks')
value['input_file_types'] = input_file_types
if output_file_types and 'optimal idr thresholded peaks' in output_file_types:
output_file_types.remove('optimal idr thresholded peaks')
output_file_types.append('optimal IDR thresholded peaks')
value['output_file_types'] = output_file_types
if output_file_types and 'conservative idr thresholded peaks' in output_file_types:
output_file_types.remove('conservative idr thresholded peaks')
output_file_types.append('conservative IDR thresholded peaks')
value['output_file_types'] = output_file_types
if output_file_types and 'pseudoreplicated idr thresholded peaks' in output_file_types:
output_file_types.remove('pseudoreplicated idr thresholded peaks')
output_file_types.append('pseudoreplicated IDR thresholded peaks')
value['output_file_types'] = output_file_types
@upgrade_step('analysis_step', '8', '9')
def analysis_step_8_9(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-5232
output_file_types = value.get('output_file_types', None)
input_file_types = value.get('input_file_types', None)
if output_file_types and 'representative dnase hypersensitivity sites' in output_file_types:
output_file_types.remove('representative dnase hypersensitivity sites')
output_file_types.append('representative DNase hypersensitivity sites (rDHSs)')
if input_file_types and 'representative dnase hypersensitivity sites' in input_file_types:
input_file_types.remove('representative dnase hypersensitivity sites')
input_file_types.append('representative DNase hypersensitivity sites (rDHSs)')
return
@upgrade_step('analysis_step', '9', '10')
def analysis_step_9_10(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-5424
output_file_types = value.get('output_file_types', None)
input_file_types = value.get('input_file_types', None)
if output_file_types and 'spike-in sequence' in output_file_types:
output_file_types.remove('spike-in sequence')
output_file_types.append('spike-ins')
if input_file_types and 'spike-in sequence' in input_file_types:
input_file_types.remove('spike-in sequence')
input_file_types.append('spike-ins')
return
@upgrade_step('analysis_step', '10', '11')
def analysis_step_10_11(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-5480
if 'stable peaks' in value.get('input_file_types', []):
value['input_file_types'].remove('stable peaks')
value['input_file_types'].append('pseudo-replicated peaks')
if 'stable peaks' in value.get('output_file_types', []):
value['output_file_types'].remove('stable peaks')
value['output_file_types'].append('pseudo-replicated peaks')
@upgrade_step('analysis_step', '11', '12')
def analysis_step_11_12(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-5551
if 'smoothed methylation stage at CpG' in value.get('input_file_types', []):
value['input_file_types'].remove('smoothed methylation stage at CpG')
value['input_file_types'].append('smoothed methylation state at CpG')
if 'smoothed methylation stage at CpG' in value.get('output_file_types', []):
value['output_file_types'].remove('smoothed methylation stage at CpG')
value['output_file_types'].append('smoothed methylation state at CpG')
@upgrade_step('analysis_step', '12', '13')
def analysis_step_12_13(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-5573
output_file_types = value.get('output_file_types', None)
input_file_types = value.get('input_file_types', None)
if output_file_types and 'consensus DNase hypersensitivity sites (cDHSs)' in output_file_types:
output_file_types.remove('consensus DNase hypersensitivity sites (cDHSs)')
output_file_types.append('consensus DNase hypersensitivity sites')
if output_file_types and 'representative DNase hypersensitivity sites (rDHSs)' in output_file_types:
output_file_types.remove('representative DNase hypersensitivity sites (rDHSs)')
output_file_types.append('representative DNase hypersensitivity sites')
if input_file_types and 'consensus DNase hypersensitivity sites (cDHSs)' in input_file_types:
input_file_types.remove('consensus DNase hypersensitivity sites (cDHSs)')
input_file_types.append('consensus DNase hypersensitivity sites')
if input_file_types and 'representative DNase hypersensitivity sites (rDHSs)' in input_file_types:
input_file_types.remove('representative DNase hypersensitivity sites (rDHSs)')
input_file_types.append('representative DNase hypersensitivity sites')
return
@upgrade_step('analysis_step', '13', '14')
def analysis_step_13_14(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-5662
if 'pseudo-replicated peaks' in value.get('input_file_types', []):
value['input_file_types'].remove('pseudo-replicated peaks')
value['input_file_types'].append('pseudoreplicated peaks')
if 'pseudo-replicated peaks' in value.get('output_file_types', []):
value['output_file_types'].remove('pseudo-replicated peaks')
value['output_file_types'].append('pseudoreplicated peaks')
@upgrade_step('analysis_step', '14', '15')
def analysis_step_14_15(value, system):
# https://encodedcc.atlassian.net/browse/ENCD-5657
output_file_types = value.get('output_file_types', None)
input_file_types = value.get('input_file_types', None)
term_pairs = [
('blacklisted regions', 'exclusion list regions'),
('mitochondria blacklisted regions', 'mitochondrial exclusion list regions'),
]
for old_term, new_term in term_pairs:
if output_file_types and old_term in output_file_types:
output_file_types.remove(old_term)
output_file_types.append(new_term)
if input_file_types and old_term in input_file_types:
input_file_types.remove(old_term)
input_file_types.append(new_term)
|
wandb/vendor/prompt_toolkit/contrib/regular_languages/__init__.py | dreamflasher/client | 6,989 | 12620831 | <reponame>dreamflasher/client
r"""
Tool for expressing the grammar of an input as a regular language.
==================================================================
The grammar for the input of many simple command line interfaces can be
expressed by a regular language. Examples are PDB (the Python debugger); a
simple (bash-like) shell with "pwd", "cd", "cat" and "ls" commands; arguments
that you can pass to an executable; etc. It is possible to use regular
expressions for validation and parsing of such a grammar. (More about regular
languages: http://en.wikipedia.org/wiki/Regular_language)
Example
-------
Let's take the pwd/cd/cat/ls example. We want to have a shell that accepts
these four commands. "cd" is followed by a quoted directory name and "cat" is
followed by a quoted file name. (We allow quotes inside the filename when
they're escaped with a backslash.) We could define the grammar using the
following regular expression::
grammar = \s* (
pwd |
ls |
(cd \s+ " ([^"]|\.)+ ") |
(cat \s+ " ([^"]|\.)+ ")
) \s*
What can we do with this grammar?
---------------------------------
- Syntax highlighting: We could use this for instance to give file names
different colour.
- Parse the result: .. We can extract the file names and commands by using a
regular expression with named groups.
- Input validation: .. Don't accept anything that does not match this grammar.
When combined with a parser, we can also recursively do
filename validation (and accept only existing files.)
- Autocompletion: .... Each part of the grammar can have its own autocompleter.
"cat" has to be completed using file names, while "cd"
has to be completed using directory names.
How does it work?
-----------------
As a user of this library, you have to define the grammar of the input as a
regular expression. The parts of this grammar where autocompletion, validation
or any other processing is required need to be marked using a regex named
group. Like ``(?P<varname>...)`` for instance.
When the input is processed for validation (for instance), the regex will
execute, the named group is captured, and the validator associated with this
named group will test the captured string.
There is one tricky bit:
Often we operate on incomplete input (this is by definition the case for
autocompletion) and we have to decide, for the given cursor position, in which
possible state the grammar could be and in which way variables could be
matched up to that point.
To solve this problem, the compiler takes the original regular expression and
translates it into a set of other regular expressions which each match prefixes
of strings that would match the first expression. (We translate it into
multiple expressions, because we want to have each possible state the regex
could be in -- in case there are several or-clauses, each with different
completers.)
TODO: some examples of:
- How to create a highlighter from this grammar.
- How to create a validator from this grammar.
- How to create an autocompleter from this grammar.
- How to create a parser from this grammar.
"""
from .compiler import compile
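# Illustrative sketch (not part of the original module): it assumes the
# ``compile`` function imported above returns a compiled grammar exposing
# ``match()``, and that the resulting match exposes ``variables()`` -- see the
# ``compiler`` module for the authoritative API. The grammar is a reduced
# version of the cd/pwd example from the docstring.
if __name__ == '__main__':
    g = compile(r"""
        \s* (
            pwd |
            (cd  \s+  " (?P<directory>[^"]*) ")
        ) \s*
    """)
    m = g.match('cd "/tmp"')
    if m is not None:
        print(m.variables().get('directory'))   # expected: /tmp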
|
parsecom.py | hanleybrand/deserts | 493 | 12620841 |
import re
class ParseFailure(Exception):
pass
class Parser(object):
def __add__(self, other):
return ThenWS(self, other)
def __and__(self, other):
return Then(self, other)
def __or__(self, other):
return Or(self, other)
def __rshift__(self, other):
return Apply(other, self)
def __gt__(self, other):
return Apply(lambda *x: other, self)
def __invert__(self):
return Null(self)
def __call__(self, string):
matches, rest = self.parse(string)
assert not rest, rest
assert len(matches) == 1
return matches[0]
class Or(Parser):
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
def parse(self, string):
try:
return self.p1.parse(string)
except ParseFailure:
return self.p2.parse(string)
class Then(Parser):
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
def parse(self, string):
first, rest = self.p1.parse(string)
second, rest = self.p2.parse(rest)
return first + second, rest
class ThenWS(Then):
def parse(self, string):
first, rest = self.p1.parse(string)
_, rest = whitespace.parse(rest)
second, rest = self.p2.parse(rest)
return first + second, rest
def __repr__(self):
return "%r + %r" % (self.p1, self.p2)
class Many(Parser):
def __init__(self, base, allow_none=True):
self.base = base
self.allow_none = allow_none
def parse(self, string):
matches = []
while True:
try:
match, string = self.base.parse(string)
#_, string = whitespace.parse(string)
matches.extend(match)
except ParseFailure:
if matches or self.allow_none:
break
else:
raise
return matches, string
class Maybe(Parser):
def __init__(self, parser):
self.parser = parser
def parse(self, string):
try:
return self.parser.parse(string)
except ParseFailure:
return [], string
class Apply(Parser):
def __init__(self, func, parser):
self.func = func
self.parser = parser
def parse(self, string):
matches, rest = self.parser.parse(string)
return [self.func(*matches)], rest
def __repr__(self):
return "%r >> %r" % (self.parser, self.func)
class Regex(Parser):
def __init__(self, regex):
self.regex = re.compile(regex)
def parse(self, string):
match = self.regex.match(string)
if match is None:
raise ParseFailure
return [match.group()], string[match.end():]
def __repr__(self):
return "Regex(%s)" % self.regex.pattern
class Null(Parser):
def __init__(self, parser):
self.parser = parser
def parse(self, string):
match, rest = self.parser.parse(string)
return [], rest
def __repr__(self):
return "~%r" % self.parser
class End(Parser):
def parse(self, string):
if string:
raise ParseFailure
return [], ''
def __repr__(self):
return "eof"
class Forward(Parser):
def parse(self, string):
return self.p.parse(string)
def __repr__(self):
return "Forward()"
class Literal(Parser):
def __init__(self, text):
self.text = text
def parse(self, string):
n = len(self.text)
if string[:n] == self.text:
return [self.text], string[n:]
else:
raise ParseFailure
lit = lambda x: ~Literal(x)
sep = lambda x, s: x + Many(~s + x)
eof = End()
numeric = Regex('\d+')
whitespace = Regex('\s*')
integer = Regex('-?\d+') >> int
word = Regex('\w+')
comment = lambda char: lit(char) + ~Regex(r"[^\n]*\n")
doublequoted = Regex(r'"([^"\\]|\\.)*"')
singlequoted = Regex(r"'([^'\\]|\\.)*'")
quoted = doublequoted | singlequoted
coords = (integer + lit(',') + integer) >> (lambda x, y: (x, y))
def tuplize(*args): return tuple(args)
def listize(*args): return list(args)
def dictize(*args): return dict(args)
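# Small self-test (not in the original file); it exercises only the combinators
# defined above, so nothing here is assumed beyond this module.
if __name__ == '__main__':
    # __call__ requires exactly one parsed value and no leftover input
    assert coords("12, 34") == (12, 34)
    # .parse() returns (matches, unparsed_rest)
    numbers = sep(integer, Literal(','))
    assert numbers.parse("1, 2, 3") == ([1, 2, 3], '')
    # alternatives (|) and constant application (>)
    greeting = (Literal('hi') | Literal('hello')) > 'greeting'
    assert greeting('hello') == 'greeting'
    print("parsecom self-test passed")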
|
fhir/resources/DSTU2/address.py | cstoltze/fhir.resources | 144 | 12620845 | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Address
Release: DSTU2
Version: 1.0.2
Revision: 7202
"""
from typing import List as ListType
from pydantic import Field
from . import fhirtypes
from .element import Element
class Address(Element):
"""A postal address.
There is a variety of postal address formats defined around the world. This
format defines a superset that is the basis for all addresses around the
world.
"""
resource_type = Field("Address", const=True)
city: fhirtypes.String = Field(
None,
alias="city",
title="Type `String` (represented as `dict` in JSON)",
description="Name of city, town etc.",
)
country: fhirtypes.String = Field(
None,
alias="country",
title="Type `String` (represented as `dict` in JSON)",
description="Country (e.g. can be ISO 3166 2 or 3 letter code)",
)
district: fhirtypes.String = Field(
None,
alias="district",
title="Type `String` (represented as `dict` in JSON)",
description="District name (aka county)",
)
line: ListType[fhirtypes.String] = Field(
None,
alias="line",
title="List of `String` items (represented as `dict` in JSON)",
description="Street name, number, direction \u0026 P.O. Box etc.",
)
period: fhirtypes.PeriodType = Field(
None,
alias="period",
title="Type `Period` (represented as `dict` in JSON)",
description="Time period when address was/is in use",
)
postalCode: fhirtypes.String = Field(
None,
alias="postalCode",
title="Type `String` (represented as `dict` in JSON)",
description="Postal code for area",
)
state: fhirtypes.String = Field(
None,
alias="state",
title="Type `String` (represented as `dict` in JSON)",
description="Sub-unit of country (abbreviations ok)",
)
text: fhirtypes.String = Field(
None,
alias="text",
title="Type `String` (represented as `dict` in JSON)",
description="Text representation of the address",
)
type: fhirtypes.Code = Field(
None,
alias="type",
title="Type `Code` (represented as `dict` in JSON)",
description="postal | physical | both",
)
use: fhirtypes.Code = Field(
None,
alias="use",
title="Type `Code` (represented as `dict` in JSON)",
description="home | work | temp | old - purpose of this address",
)
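# Illustrative construction (not part of the original file). The field names
# come from the model above; the address values themselves are made up, and
# ``.json()`` relies on the pydantic base model used by this package.
if __name__ == "__main__":
    example = Address(
        use="home",
        type="postal",
        line=["123 Example Street"],
        city="Springfield",
        postalCode="00000",
        country="US",
    )
    print(example.json())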
|
scons/site_tools/openocd_remote.py | roboterclubaachen/xpcc | 161 | 12620846 | #!/usr/bin/env python3
#
# Copyright (c) 2014, Roboterclub Aachen e.V.
# All Rights Reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
import platform
from SCons.Script import *
# -----------------------------------------------------------------------------
# Copy the hex file to the remote target as /tmp/openocd.hex
# Then use the telnet interface of openocd to remotely control openocd and
# flash the hex file to the connected target.
# openocd must be running on the target at port 4444.
def openocd_remote_run(env, source, alias='openocd_remote_run'):
if platform.system() == "Windows":
def fail(target, source, env):
raise Exception("Not supported under windows")
action = fail
return env.AlwaysBuild(env.Alias(alias, source, action))
else:
commands = ["init", "reset halt", "flash write_image erase /tmp/openocd.hex", "reset halt", "mww 0xE000EDF0 0xA05F0000"]
action = Action("scp $SOURCE $OPENOCD_REMOTE_USER@$OPENOCD_REMOTE_HOST:/tmp/openocd.hex; echo %s | nc $OPENOCD_REMOTE_HOST 4444" % ' '.join(['"%s;"' % c for c in commands]),
cmdstr="$OPENOCD_COMSTR")
return env.AlwaysBuild(env.Alias(alias, source, action))
# -----------------------------------------------------------------------------
# Program elf file via a remote gdb session
def gdb_remote_program(env, source, alias='gdb_remote_program'):
gdb = "arm-none-eabi-gdb"
cmd = [gdb, '-q',
'-ex "target remote $OPENOCD_REMOTE_HOST:3333"',
'-ex "monitor reset halt"',
'-ex "load"',
'-ex "monitor reset "',
'-ex "disconnect"',
'-ex "quit"',
'$SOURCE']
action = Action(' '.join(cmd))
return env.AlwaysBuild(env.Alias(alias, source, action))
# -----------------------------------------------------------------------------
# Interactively debug via a remote gdb session
def gdb_remote_debug(env, source, alias='gdb_remote_debug'):
gdb = "arm-none-eabi-gdb"
cmd = [gdb, '-q',
'--tui',
'-ex "target remote $OPENOCD_REMOTE_HOST:3333"',
'-ex "monitor halt"',
'$SOURCE']
action = Action(' '.join(cmd))
return env.AlwaysBuild(env.Alias(alias, source, action))
# -----------------------------------------------------------------------------
# Reset processor via remote gdb session
def gdb_remote_reset(env, alias='gdb_remote_reset'):
if platform.system() == "Windows":
def fail(target, source, env):
raise Exception("Not supported under windows")
action = fail
return env.AlwaysBuild(env.Alias(alias, '', action))
else:
gdb = "arm-none-eabi-gdb"
cmd = [gdb, '-q',
'-ex "target remote $OPENOCD_REMOTE_HOST:3333"',
'-ex "monitor reset"',
'-ex "disconnect"',
'-ex "quit"']
action = Action(' '.join(cmd))
return env.AlwaysBuild(env.Alias(alias, '', action))
# -----------------------------------------------------------------------------
def generate(env, **kw):
# build messages
if not ARGUMENTS.get('verbose'):
env['OPENOCD_COMSTR'] = "OpenOCD remote: program $SOURCE"
env['OPENOCD'] = 'openocd'
env.AddMethod(openocd_remote_run, 'OpenOcdRemote')
env.AddMethod(gdb_remote_program, 'GdbRemoteProgram')
env.AddMethod(gdb_remote_reset, 'GdbRemoteReset')
env.AddMethod(gdb_remote_debug, 'GdbRemoteDebug')
def exists(env):
return env.Detect('openocd_remote')
|
Chapter 7/tf2_cbow_model.py | shantam21/Deep-Learning-with-TensorFlow-2-and-Keras | 267 | 12620856 | import tensorflow as tf
class CBOWModel(tf.keras.Model):
def __init__(self, vocab_sz, emb_sz, window_sz, **kwargs):
super(CBOWModel, self).__init__(**kwargs)
self.embedding = tf.keras.layers.Embedding(
input_dim=vocab_sz,
output_dim=emb_sz,
embeddings_initializer="glorot_uniform",
input_length=window_sz*2
)
self.dense = tf.keras.layers.Dense(
vocab_sz,
kernel_initializer="glorot_uniform",
activation="softmax"
)
def call(self, x):
x = self.embedding(x)
x = tf.reduce_mean(x, axis=1)
x = self.dense(x)
return x
VOCAB_SIZE = 5000
EMBED_SIZE = 300
WINDOW_SIZE = 1 # 3 word window, 1 on left, 1 on right
model = CBOWModel(VOCAB_SIZE, EMBED_SIZE, WINDOW_SIZE)
model.build(input_shape=(None, VOCAB_SIZE))
model.compile(optimizer=tf.optimizers.Adam(),
loss="categorical_crossentropy",
metrics=["accuracy"])
model.summary()
# train the model here
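# Illustrative training sketch (not in the original script): the data below is
# random and only demonstrates the expected input/output shapes -- real
# training would use (context window, center word) pairs from a corpus.
import numpy as np
num_samples = 1024
contexts = np.random.randint(0, VOCAB_SIZE, size=(num_samples, WINDOW_SIZE * 2))
targets = tf.keras.utils.to_categorical(
    np.random.randint(0, VOCAB_SIZE, size=(num_samples,)), num_classes=VOCAB_SIZE)
model.fit(contexts, targets, batch_size=32, epochs=1)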
# retrieve embeddings from trained model
emb_layer = [layer for layer in model.layers
if layer.name.startswith("embedding")][0]
emb_weight = [weight.numpy() for weight in emb_layer.weights
if weight.name.endswith("/embeddings:0")][0]
print(emb_weight, emb_weight.shape)
|
pythran/tests/rosetta/greatest_common_divisor.py | davidbrochart/pythran | 1,647 | 12620887 |
#from http://rosettacode.org/wiki/Greatest_common_divisor#Python
#pythran export gcd_iter(int, int)
#pythran export gcd(int, int)
#pythran export gcd_bin(int, int)
#runas gcd_iter(40902, 24140)
#runas gcd(40902, 24140)
#runas gcd_bin(40902, 24140)
def gcd_iter(u, v):
while v:
u, v = v, u % v
return abs(u)
def gcd(u, v):
return gcd(v, u % v) if v else abs(u)
def gcd_bin(u, v):
u, v = abs(u), abs(v) # u >= 0, v >= 0
if u < v:
u, v = v, u # u >= v >= 0
if v == 0:
return u
# u >= v > 0
k = 1
while u & 1 == 0 and v & 1 == 0: # u, v - even
u >>= 1; v >>= 1
k <<= 1
t = -v if u & 1 else u
while t:
while t & 1 == 0:
t >>= 1
if t > 0:
u = t
else:
v = -t
t = u - v
return u * k
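# Quick self-check mirroring the #runas lines above (not part of the original
# pythran test file): all three implementations must agree.
if __name__ == '__main__':
    assert gcd_iter(40902, 24140) == gcd(40902, 24140) == gcd_bin(40902, 24140) == 34
    print("gcd(40902, 24140) =", gcd_iter(40902, 24140))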
|
tests/qos/test_buffer_traditional.py | lolyu/sonic-mgmt | 132 | 12620906 | import logging
import os
import pytest
from tests.common.utilities import wait_until
from tests.common.helpers.assertions import pytest_assert
pytestmark = [
pytest.mark.topology('any')
]
DEFAULT_LOSSLESS_PROFILES = None
RECLAIM_BUFFER_ON_ADMIN_DOWN = None
@pytest.fixture(scope="module", autouse=True)
def setup_module(duthosts, rand_one_dut_hostname):
"""Setup module. Called only once when the module is initialized
Args:
duthosts: The duthosts object
rand_one_dut_hostname:
"""
global RECLAIM_BUFFER_ON_ADMIN_DOWN
duthost = duthosts[rand_one_dut_hostname]
if duthost.facts["asic_type"] in ["mellanox"]:
RECLAIM_BUFFER_ON_ADMIN_DOWN = True
else:
RECLAIM_BUFFER_ON_ADMIN_DOWN = False
if "201911" not in duthost.os_version:
pytest.skip("Buffer test runs on 201911 branch only, skip")
load_lossless_info_from_pg_profile_lookup(duthost)
def load_lossless_info_from_pg_profile_lookup(duthost):
"""Load pg_profile_lookup.ini to a dictionary. Called only once when the module is initialized
Args:
duthost: the DUT host object
Return:
The dictionary containing the information in pg_profile_lookup.ini
"""
global DEFAULT_LOSSLESS_PROFILES
# Check the threshold mode
threshold_mode = duthost.shell('redis-cli -n 4 hget "BUFFER_POOL|ingress_lossless_pool" mode')['stdout']
threshold_field_name = 'dynamic_th' if threshold_mode == 'dynamic' else 'static_th'
dut_hwsku = duthost.facts["hwsku"]
dut_platform = duthost.facts["platform"]
skudir = "/usr/share/sonic/device/{}/{}/".format(dut_platform, dut_hwsku)
pg_profile_lookup_file = os.path.join(skudir, 'pg_profile_lookup.ini')
duthost.file(path=pg_profile_lookup_file, state="file")
lines = duthost.shell('cat {}'.format(pg_profile_lookup_file))["stdout_lines"]
DEFAULT_LOSSLESS_PROFILES = {}
for line in lines:
if line[0] == '#':
continue
tokens = line.split()
speed = tokens[0]
cable_length = tokens[1]
size = tokens[2]
xon = tokens[3]
xoff = tokens[4]
threshold = tokens[5]
profile_info = {
'pool': '[BUFFER_POOL|ingress_lossless_pool]',
'size': size,
'xon': xon,
'xoff': xoff,
threshold_field_name: threshold}
if len(tokens) > 6:
profile_info['xon_offset'] = tokens[6]
DEFAULT_LOSSLESS_PROFILES[(speed, cable_length)] = profile_info
def make_dict_from_output_lines(lines):
if lines:
return dict(zip(lines[::2], lines[1::2]))
return None
def test_buffer_pg(duthosts, rand_one_dut_hostname, conn_graph_facts):
"""The testcase for (traditional) buffer manager
1. For all ports in the config_db,
- Check whether there is no lossless buffer PG configured on an admin-down port
         - on all platforms, there is no lossless PG configured on inactive ports which are admin-down
which is guaranteed by buffer template
- Check whether the lossless PG aligns with the port's speed and cable length
- If name to oid maps exist for port and PG, check whether the information in ASIC_DB aligns with that in CONFIG_DB
- If a lossless profile hasn't been checked, check whether lossless profile in CONFIG_DB aligns with
- pg_profile_lookup.ini according to speed and cable length
- information in ASIC_DB
2. Shutdown a port and check whether the lossless buffer PGs
- has been removed on Mellanox platforms
- will not be changed on other platforms
3. Startup the port and check whether the lossless PG has been readded.
"""
def _check_condition(condition, message, use_assert):
"""Check whether the condition is satisfied
Args:
condition: The condition to check
message: The message to log or in pytest_assert
use_assert: Whether to use assert or not. If this is called from wait_until(), it should be False.
Return:
The condition
"""
if use_assert:
pytest_assert(condition, message)
elif not condition:
logging.info("Port buffer check: {}".format(message))
return False
return True
def _check_port_buffer_info_and_get_profile_oid(duthost, port, expected_profile, use_assert=True):
"""Check port's buffer information against CONFIG_DB and ASIC_DB
Args:
duthost: The duthost object
port: The port to test in string
expected_profile: The expected profile in string
use_assert: Whether or not to use pytest_assert in case any conditional check isn't satisfied
Return:
A tuple consisting of the OID of buffer profile and whether there is any check failed
"""
profile_in_pg = duthost.shell('redis-cli -n 4 hget "BUFFER_PG|{}|3-4" profile'.format(port))['stdout']
buffer_profile_oid = None
default_lossless_pgs = ['3', '4']
if expected_profile:
if not _check_condition(profile_in_pg == expected_profile, "Buffer profile of lossless PG of port {} isn't the expected ({})".format(port, expected_profile), use_assert):
return None, False
if pg_name_map:
for pg in default_lossless_pgs:
buffer_pg_asic_oid = pg_name_map['{}:{}'.format(port, pg)]
buffer_pg_asic_key = duthost.shell('redis-cli -n 1 keys *{}*'.format(buffer_pg_asic_oid))['stdout']
buffer_profile_oid_in_pg = duthost.shell('redis-cli -n 1 hget {} SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE'.format(buffer_pg_asic_key))['stdout']
logging.info("Checking admin-up port {} lossless PG {} in ASIC_DB ({})".format(port, pg, buffer_profile_oid_in_pg))
if buffer_profile_oid:
if not _check_condition(buffer_profile_oid == buffer_profile_oid_in_pg,
"Different OIDs in PG 3 ({}) and 4 ({}) in port {}".format(buffer_profile_oid, buffer_profile_oid_in_pg, port),
use_assert):
return None, False
else:
buffer_profile_oid = buffer_profile_oid_in_pg
else:
if not _check_condition(not profile_in_pg, "Buffer PG configured on admin down port {}".format(port), use_assert):
return None, False
if pg_name_map:
for pg in default_lossless_pgs:
buffer_pg_asic_oid = pg_name_map['{}:{}'.format(port, pg)]
buffer_pg_asic_key = duthost.shell('redis-cli -n 1 keys *{}*'.format(buffer_pg_asic_oid))['stdout']
buffer_profile_oid_in_pg = duthost.shell('redis-cli -n 1 hget {} SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE'.format(buffer_pg_asic_key))['stdout']
logging.info("Checking admin-down port {} lossless PG {}".format(port, pg))
if not _check_condition(not buffer_profile_oid_in_pg or buffer_profile_oid_in_pg == 'oid:0x0',
"Buffer PG configured on admin down port in ASIC_DB {}".format(port),
use_assert):
return None, False
return buffer_profile_oid, True
def _check_port_buffer_info_and_return(duthost, port, expected_profile):
"""Check port's buffer information against CONFIG_DB and ASIC_DB and return the result
This is called from wait_until
Args:
duthost: The duthost object
port: The port to test in string
expected_profile: The expected profile in string
Return:
Whether all the checks passed
"""
_, result = _check_port_buffer_info_and_get_profile_oid(duthost, port, expected_profile, False)
return result
global DEFAULT_LOSSLESS_PROFILES
duthost = duthosts[rand_one_dut_hostname]
# Check whether the COUNTERS_PG_NAME_MAP exists. Skip ASIC_DB checking if it isn't
pg_name_map = make_dict_from_output_lines(duthost.shell('redis-cli -n 2 hgetall COUNTERS_PG_NAME_MAP')['stdout'].split())
cable_length_map = make_dict_from_output_lines(duthost.shell('redis-cli -n 4 hgetall "CABLE_LENGTH|AZURE"')['stdout'].split())
configdb_ports = [x.split('|')[1] for x in duthost.shell('redis-cli -n 4 keys "PORT|*"')['stdout'].split()]
profiles_checked = {}
lossless_pool_oid = None
buffer_profile_asic_info = None
admin_up_ports = set()
for port in configdb_ports:
port_config = make_dict_from_output_lines(duthost.shell('redis-cli -n 4 hgetall "PORT|{}"'.format(port))['stdout'].split())
is_port_up = port_config.get('admin_status') == 'up'
if is_port_up or not RECLAIM_BUFFER_ON_ADMIN_DOWN:
if is_port_up:
admin_up_ports.add(port)
cable_length = cable_length_map[port]
speed = port_config['speed']
expected_profile = '[BUFFER_PROFILE|pg_lossless_{}_{}_profile]'.format(speed, cable_length)
logging.info("Checking admin-{} port {} buffer information: profile {}".format('up' if is_port_up else 'down', port, expected_profile))
buffer_profile_oid, _ = _check_port_buffer_info_and_get_profile_oid(duthost, port, expected_profile)
if expected_profile not in profiles_checked:
profile_info = make_dict_from_output_lines(duthost.shell('redis-cli -n 4 hgetall "{}"'.format(expected_profile[1:-1]))['stdout'].split())
pytest_assert(profile_info == DEFAULT_LOSSLESS_PROFILES[(speed, cable_length)], "Buffer profile {} {} doesn't match default {}".format(expected_profile, profile_info, DEFAULT_LOSSLESS_PROFILES[(speed, cable_length)]))
logging.info("Checking buffer profile {}: OID: {}".format(expected_profile, buffer_profile_oid))
if buffer_profile_oid:
# Further check the buffer profile in ASIC_DB
buffer_profile_key = duthost.shell('redis-cli -n 1 keys *{}*'.format(buffer_profile_oid))['stdout']
buffer_profile_asic_info = make_dict_from_output_lines(duthost.shell('redis-cli -n 1 hgetall {}'.format(buffer_profile_key))['stdout'].split())
pytest_assert(buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_XON_TH'] == profile_info['xon'] and
buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_XOFF_TH'] == profile_info['xoff'] and
buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_RESERVED_BUFFER_SIZE'] == profile_info['size'] and
(buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE'] == 'SAI_BUFFER_PROFILE_THRESHOLD_MODE_DYNAMIC' and
buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH'] == profile_info['dynamic_th'] or
buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE'] == 'SAI_BUFFER_PROFILE_THRESHOLD_MODE_STATIC' and
buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_SHARED_STATIC_TH'] == profile_info['static_th']),
"Buffer profile {} {} doesn't align with ASIC_TABLE {}".format(expected_profile, profile_info, buffer_profile_asic_info))
profiles_checked[expected_profile] = buffer_profile_oid
if not lossless_pool_oid:
if buffer_profile_asic_info:
lossless_pool_oid = buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_POOL_ID']
else:
pytest_assert(lossless_pool_oid == buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_POOL_ID'],
"Buffer profile {} has different buffer pool id {} from others {}".format(expected_profile, buffer_profile_asic_info['SAI_BUFFER_PROFILE_ATTR_POOL_ID'], lossless_pool_oid))
else:
pytest_assert(profiles_checked[expected_profile] == buffer_profile_oid,
"PG {}|3-4 has different OID of profile from other PGs sharing the same profile {}".format(port, expected_profile))
else:
# Port admin down. Make sure no lossless PG configured.
# After deployment, there should not be lossless PG configured on any platforms
# This is guaranteed by buffers_config.j2: no lossless PG will be configured on inactive ports
logging.info("Checking admin-down port buffer information: {}".format(port))
_, _ = _check_port_buffer_info_and_get_profile_oid(duthost, port, None)
port_to_shutdown = admin_up_ports.pop()
expected_profile = duthost.shell('redis-cli -n 4 hget "BUFFER_PG|{}|3-4" profile'.format(port_to_shutdown))['stdout']
try:
# Shutdown the port and check whether the lossless PGs
# - have been removed on Mellanox platforms
# - will not be affected on other platforms
logging.info("Shut down an admin-up port {} and check its buffer information".format(port_to_shutdown))
duthost.shell('config interface shutdown {}'.format(port_to_shutdown))
if RECLAIM_BUFFER_ON_ADMIN_DOWN:
expected_profile_admin_down = None
else:
expected_profile_admin_down = expected_profile
wait_until(60, 5, _check_port_buffer_info_and_return, duthost, port_to_shutdown, expected_profile_admin_down)
# Startup the port and check whether the lossless PG has been reconfigured
logging.info("Re-startup the port {} and check its buffer information".format(port_to_shutdown))
duthost.shell('config interface startup {}'.format(port_to_shutdown))
wait_until(60, 5, _check_port_buffer_info_and_return, duthost, port_to_shutdown, expected_profile)
finally:
duthost.shell('config interface startup {}'.format(port_to_shutdown), module_ignore_errors=True)
|
tests/openwisp2/sample_x509/migrations/0002_common_name_max_length.py | MiHiR151203/django-x509 | 410 | 12620921 | # Generated by Django 3.1.1 on 2020-09-09 08:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sample_x509', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='ca',
name='common_name',
field=models.CharField(
blank=True, max_length=64, verbose_name='common name'
),
),
migrations.AlterField(
model_name='cert',
name='common_name',
field=models.CharField(
blank=True, max_length=64, verbose_name='common name'
),
),
migrations.AlterField(
model_name='customcert',
name='common_name',
field=models.CharField(
blank=True, max_length=64, verbose_name='common name'
),
),
]
|
cv-Tkinter-GUI/kivy-GUI/kivy_cv1.py | shliang0603/OpenCV-Python-Tutorial | 2,875 | 12620939 | # -*- coding: utf-8 -*-
# @Time : 2018/2/8 16:30
# @Author : play4fun
# @File : kivy_cv1.py
# @Software: PyCharm
"""
Reference: https://github.com/kivy/kivy/blob/master/kivy/core/camera/camera_opencv.py
kivy_cv1.py:
https://gist.github.com/ExpandOcean/de261e66949009f44ad2
pip install kivy
Known issue: nothing is displayed
"""
# coding:utf-8
from kivy.app import App
from kivy.uix.image import Image
from kivy.clock import Clock
from kivy.graphics.texture import Texture
import cv2
class KivyCamera(Image):
def __init__(self, capture, fps, **kwargs):
super(KivyCamera, self).__init__(**kwargs)
self.capture = capture
Clock.schedule_interval(self.update, 1.0 / fps)
def update(self, dt):
ret, frame = self.capture.read()
if ret:
# convert it to texture
buf1 = cv2.flip(frame, 0)
buf = buf1.tostring()
image_texture = Texture.create(
size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
image_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
# display image from the texture
self.texture = image_texture
class CamApp(App):
def build(self):
self.capture = cv2.VideoCapture(1)
self.my_camera = KivyCamera(capture=self.capture, fps=30)
return self.my_camera
def on_stop(self):
# without this, app will not exit even if the window is closed
self.capture.release()
if __name__ == '__main__':
CamApp().run()
|
Python/tigre/algorithms/statistical_algorithms.py | gfardell/TIGRE | 326 | 12620952 |
from __future__ import division
import time
import numpy as np
from tigre.algorithms.iterative_recon_alg import IterativeReconAlg
from tigre.algorithms.iterative_recon_alg import decorator
from tigre.utilities.Atb import Atb
from tigre.utilities.Ax import Ax
class MLEM(IterativeReconAlg): # noqa: D101
__doc__ = (
" MLEM_CBCT solves the CBCT problem using the maximum likelihood expectation maximization\n"
" algorithm\n"
" \n"
" MLEM_CBCT(PROJ,GEO,ANGLES,NITER) solves the reconstruction problem\n"
" using the projection data PROJ taken over ALPHA angles, corresponding\n"
" to the geometry descrived in GEO, using NITER iterations."
) + IterativeReconAlg.__doc__
def __init__(self, proj, geo, angles, niter, **kwargs):
# Don't precompute V and W.
kwargs.update(dict(W=None, V=None))
kwargs.update(dict(blocksize=angles.shape[0]))
IterativeReconAlg.__init__(self, proj, geo, angles, niter, **kwargs)
if self.init is None:
self.res += 1.0
self.W = Atb(np.ones(proj.shape, dtype=np.float32), geo, angles, gpuids=self.gpuids)
self.W[self.W <= 0.0] = np.inf
# Overide
def run_main_iter(self):
self.res[self.res < 0.0] = 0.0
for i in range(self.niter):
self._estimate_time_until_completion(i)
den = Ax(self.res, self.geo, self.angles, "interpolated", gpuids=self.gpuids)
# toc = time.process_time()
# print('Ax time: {}'.format(toc-tic))
den[den == 0.0] = np.inf
auxmlem = self.proj / den
# auxmlem[auxmlem == np.nan] = 0.
# auxmlem[auxmlem == np.inf] = 0.
# update
# tic = time.process_time()
img = Atb(auxmlem, self.geo, self.angles, backprojection_type="matched", gpuids=self.gpuids) / self.W
# toc = time.process_time()
# print('Atb time: {}'.format(toc-tic))
# img[img == np.nan] = 0.
# img[img == np.inf] = 0.
self.res = self.res * img
self.res[self.res < 0.0] = 0.0
mlem = decorator(MLEM, name="mlem")
|
notification/notifier/slack.py | gueux/openduty | 595 | 12620970 | from slacker import Slacker
class SlackNotifier:
def __init__(self, config):
self.__config = config
def notify(self, notification):
slack = Slacker(self.__config['apikey'])
response = slack.chat.post_message(notification.user_to_notify.profile.slack_room_name, notification.message,
username="Openduty", icon_url="https://slack.global.ssl.fastly.net/1937/img/services/pagerduty_48.png")
if not response.error:
print "Slack message sent"
else:
print "Failed to send Slack message"
|
examples/entities/mtext_editor.py | Gmadges/ezdxf | 515 | 12620973 |
# Copyright (c) 2021 <NAME>
# License: MIT License
from pathlib import Path
import ezdxf
from ezdxf.tools.text import (
MTextEditor,
ParagraphProperties,
MTextParagraphAlignment,
)
from ezdxf.tools.text_layout import lorem_ipsum
OUTBOX = Path("~/Desktop/Outbox").expanduser()
ATTRIBS = {
"char_height": 0.7,
"style": "OpenSans",
}
# use constants defined in MTextEditor:
NP = MTextEditor.NEW_PARAGRAPH
def recreate_mtext_py_example(msp, location):
# replicate example "mtext.py":
attribs = dict(ATTRIBS)
attribs["width"] = 15.0
editor = (
MTextEditor(f"recreate mtext.py result:{NP}normal ")
.overline("over line")
.append(" normal" + NP + "normal ")
.strike_through("strike through")
.append(" normal" + NP)
.underline("under line")
.append(" normal")
)
msp.add_mtext(str(editor), attribs).set_location(insert=location)
def using_colors(msp, location):
attribs = dict(ATTRIBS)
attribs["width"] = 10.0
editor = MTextEditor("using colors:" + NP)
# Change colors by name: red, green, blue, yellow, cyan, magenta, white
editor.color("red").append("RED" + NP)
# The color stays the same until changed
editor.append("also RED" + NP)
# Change color by ACI (AutoCAD Color Index)
editor.aci(3).append("GREEN" + NP)
# Change color by RGB tuples
editor.rgb((0, 0, 255)).append("BLUE" + NP)
msp.add_mtext(str(editor), attribs).set_location(insert=location)
def changing_text_height_absolute(msp, location):
attribs = dict(ATTRIBS)
attribs["width"] = 40.0 # need mor space to avoid text wrapping
editor = MTextEditor(
"changing text height absolute: default height is 0.7" + NP
)
# doubling the default height = 1.4
editor.height(1.4)
editor.append("text height: 1.4" + NP)
editor.height(3.5).append("text height: 3.5" + NP)
editor.height(0.7).append("back to default height: 0.7" + NP)
msp.add_mtext(str(editor), attribs).set_location(insert=location)
def changing_text_height_relative(msp, location):
attribs = dict(ATTRIBS)
attribs["width"] = 40.0 # need mor space to avoid text wrapping
editor = MTextEditor(
"changing text height relative: default height is 0.7" + NP
)
# this is the default text height in the beginning:
current_height = attribs["char_height"]
# The text height can only be changed by a factor:
editor.scale_height(2) # scale by 2 = 1.4
# keep track of the actual height:
current_height *= 2
editor.append("text height: 1.4" + NP)
# to set an absolute height, calculate the required factor:
desired_height = 3.5
factor = desired_height / current_height
editor.scale_height(factor).append("text height: 3.5" + NP)
current_height = desired_height
# and back to 0.7
editor.scale_height(0.7 / current_height).append(
"back to default height: 0.7" + NP
)
msp.add_mtext(str(editor), attribs).set_location(insert=location)
def changing_fonts(msp, location):
attribs = dict(ATTRIBS)
attribs["width"] = 15.0
editor = MTextEditor("changing fonts:" + NP)
editor.append("Default: Hello World!" + NP)
editor.append("SimSun: ")
# The font name for changing MTEXT fonts inline is the font family name!
# The font family name is the name shown in font selection widgets in
# desktop applications: "Arial", "Times New Roman", "Comic Sans MS"
#
# change font in a group to revert back to the default font at the end:
simsun_editor = MTextEditor().font("SimSun").append("你好,世界" + NP)
# reverts the font back at the end of the group:
editor.group(str(simsun_editor))
# back to default font OpenSans:
editor.append("Times New Roman: ")
# change font outside of a group until next font change:
editor.font("Times New Roman").append("Привет мир!" + NP)
# If the font does not exist, a replacement font will be used:
editor.font("Does not exist").append("This is the replacement font!")
msp.add_mtext(str(editor), attribs).set_location(insert=location)
def indent_first_line(msp, location):
# Indentation is a multiple of the default text height (MTEXT char_height)
attribs = dict(ATTRIBS)
attribs["char_height"] = 0.25
attribs["width"] = 7.5
editor = MTextEditor("Indent the first line:" + NP)
props = ParagraphProperties(
indent=1, # indent first line = 1x0.25 drawing units
align=MTextParagraphAlignment.JUSTIFIED,
)
editor.paragraph(props)
editor.append(" ".join(lorem_ipsum(100)))
msp.add_mtext(str(editor), attribs).set_location(insert=location)
def indent_except_fist_line(msp, location):
# Indentation is a multiple of the default text height (MTEXT char_height)
attribs = dict(ATTRIBS)
attribs["char_height"] = 0.25
attribs["width"] = 7.5
editor = MTextEditor("Indent left paragraph side:" + NP)
indent = 0.7 # 0.7 * 0.25 = 0.175 drawing units
props = ParagraphProperties(
# first line indentation is relative to "left", this reverses the
# left indentation:
indent=-indent, # first line
# indent left paragraph side:
left=indent,
align=MTextParagraphAlignment.JUSTIFIED,
)
editor.paragraph(props)
editor.append(" ".join(lorem_ipsum(100)))
msp.add_mtext(str(editor), attribs).set_location(insert=location)
def bullet_list(msp, location):
attribs = dict(ATTRIBS)
attribs["char_height"] = 0.25
attribs["width"] = 7.5
    # There are no special commands to build a bullet list; the list is built of
    # indentation and a tabulator stop. Each list item needs a marker as an
# arbitrary string.
bullet = "•" # alt + numpad 7
editor = MTextEditor("Bullet List:" + NP)
editor.bullet_list(
indent=1,
bullets=[bullet] * 3, # each list item needs a marker
content=[
"First item",
"Second item",
" ".join(lorem_ipsum(30)),
],
)
msp.add_mtext(str(editor), attribs).set_location(insert=location)
def numbered_list(msp, location):
attribs = dict(ATTRIBS)
attribs["char_height"] = 0.25
attribs["width"] = 7.5
    # There are no special commands to build a numbered list; the list is built of
    # indentation and a tabulator stop. There is no automatic numbering,
    # which in turn gives you the freedom to use any string as the list marker:
editor = MTextEditor("Numbered List:" + NP)
editor.bullet_list(
indent=1,
bullets=["1.", "2.", "3."],
content=[
"First item",
"Second item",
" ".join(lorem_ipsum(30)),
],
)
# Indentation and tab stops are multiples of the default text height (MTEXT
# char_height)!
msp.add_mtext(str(editor), attribs).set_location(insert=location)
def stacking(msp, location):
attribs = dict(ATTRIBS)
attribs["char_height"] = 0.25
attribs["width"] = 4
editor = MTextEditor("Stacked text:" + NP)
# place fraction with down scaled text height in a group:
stack = MTextEditor().scale_height(0.6).stack("1", "2", "^")
editor.append("over: ").group(str(stack)).append(NP)
stack = MTextEditor().scale_height(0.6).stack("1", "2", "/")
editor.append("fraction: ").group(str(stack)).append(NP)
stack = MTextEditor().scale_height(0.6).stack("1", "2", "#")
editor.append("slanted: ").group(str(stack)).append(NP)
# additional formatting in numerator and denominator is not supported
# by AutoCAD or BricsCAD.
# switching colors inside the fraction to red does not work:
numerator = MTextEditor().color("red").append("1")
stack = MTextEditor().scale_height(0.6).stack(str(numerator), "2", "#")
editor.append("color red: ").group(str(stack)).append(NP)
msp.add_mtext(str(editor), attribs).set_location(insert=location)
def create(dxfversion):
"""
Important:
MTEXT FORMATTING IS NOT PORTABLE ACROSS CAD APPLICATIONS!
Inline MTEXT codes are not supported by every CAD application and even
if inline codes are supported the final rendering may vary.
Inline codes are very well supported by AutoCAD (of course!) and BricsCAD,
but don't expect the same rendering in other CAD applications.
The drawing add-on of ezdxf may support some features in the future,
but very likely with a different rendering result than AutoCAD/BricsCAD.
"""
doc = ezdxf.new(dxfversion, setup=True)
msp = doc.modelspace()
recreate_mtext_py_example(msp, location=(0, 0))
using_colors(msp, location=(0, 10))
changing_text_height_absolute(msp, location=(0, 25))
changing_text_height_relative(msp, location=(0, 40))
changing_fonts(msp, location=(15, 14))
indent_first_line(msp, location=(15, 6))
indent_except_fist_line(msp, location=(24, 6))
bullet_list(msp, location=(33, 6))
numbered_list(msp, location=(33, 2))
stacking(msp, location=(33, 14))
doc.set_modelspace_vport(height=60, center=(15, 15))
return doc
for dxfversion in ["R2000", "R2004", "R2007", "R2010", "R2013", "R2018"]:
doc = create(dxfversion)
filename = f"mtext_editor_{dxfversion}.dxf"
doc.saveas(OUTBOX / filename)
print(f"saved {filename}")
|
sleepypuppy/admin/assessment/views.py | soffensive/sleepy-puppy | 952 | 12620979 |
# Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask.ext.admin.contrib.sqla import ModelView
from models import Assessment
from sleepypuppy.admin.payload.models import Payload
from sleepypuppy.admin.capture.models import Capture
from sleepypuppy.admin.collector.models import GenericCollector
from sleepypuppy.admin.access_log.models import AccessLog
from flask.ext import login
from flask_wtf import Form
from sleepypuppy import app, db
import collections
import os
#
# Utility processors for working with the Assessment view.
#
@app.context_processor
def utility_processor():
def get_payloads():
the_payloads = Payload.query.all()
results = collections.OrderedDict()
for i in the_payloads:
results[i] = i.payload
return results
return dict(get_payloads=get_payloads)
@app.context_processor
def utility_processor2():
def get_hostname():
return app.config['HOSTNAME']
return dict(get_hostname=get_hostname)
@app.context_processor
def utility_processor3():
def get_captures(data_type):
magic_string = ""
magic_string += "{"
the_assessments = Assessment.query.all()
the_payloads = Payload.query.all()
for the_assessment in the_assessments:
magic_string += "\'" + str(the_assessment.id) + "': {"
for the_payload in the_payloads:
if data_type == "capture":
cap_count = Capture.query.filter_by(
assessment=the_assessment.name, payload=the_payload.id).count()
if data_type == "collector":
cap_count = GenericCollector.query.filter_by(
assessment=the_assessment.name, payload=the_payload.id).count()
if data_type == "access_log":
cap_count = AccessLog.query.filter_by(
assessment=the_assessment.name, payload=the_payload.id).count()
magic_string += str(the_payload.id) + \
":" + str(cap_count) + ","
magic_string += "},"
magic_string += "}"
return magic_string
return dict(get_captures=get_captures)
class AssessmentView(ModelView):
"""
ModelView override of Flask Admin for Assessments.
"""
# CSRF Protection
form_base_class = Form
# Check if user is authenticated
def is_accessible(self):
return login.current_user.is_authenticated()
list_template = 'assessment_list.html'
# Only display form columns listed below
form_columns = ['name', 'access_log_enabled', 'snooze', 'run_once']
column_list = ['name', 'snooze', 'run_once', 'access_log_enabled']
def delete_captures(self, assessment):
"""
Remove captures and local captures upon assessment deletion
"""
cascaded_captures = Capture.query.filter_by(
assessment=assessment.name).all()
for capture in cascaded_captures:
try:
os.remove("uploads/{}.png".format(capture.screenshot))
os.remove(
"uploads/small_{}.png".format(capture.screenshot))
except:
pass
try:
# Cascade delete for Assessment
Capture.query.filter_by(assessment=assessment.name).delete()
AccessLog.query.filter_by(assessment=assessment.name).delete()
GenericCollector.query.filter_by(assessment=assessment.name).delete()
except Exception as err:
app.logger.warn(err)
try:
db.session.commit()
except Exception as err:
app.logger.warn(err)
on_model_delete = delete_captures
form_args = dict(
access_log_enabled=dict(
description="Record requests to payloads regardless if \
they executed to the 'Access Log' \
table for any payload associated with this assessment. \
Recommended if you think you may hit namespace\
conflicts or issues running JS payloads in victim's browser"
),
snooze=dict(
description="Stop captures for this payload"
),
run_once=dict(
description="Only run capture once for this payload"
)
)
column_formatters = dict(
payloads=lambda v, c, m, p: [Payload.query.all()])
def __init__(self, session, **kwargs):
super(AssessmentView, self).__init__(Assessment, session, **kwargs)
|
qa/rpc-tests/spv.py | coindroid42/chain2 | 515 | 12620989 | #!/usr/bin/env python3
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import binascii
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from numpy.ma.testutils import assert_equal
'''
spv.py
Act as an SPV client to test SPV server functionality
This is not yet a complete suite. It was added to test inclusion of ancestors
in connection filters.
'''
class BaseNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.sleep_time = 0.1
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Syncing helpers
def sync(self, test_function, timeout=30):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")
def sync_with_ping(self):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function)
self.ping_counter += 1
return
class TestNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
self.txids = []
self.txidstream_isopen = False
self.txidstream_pos = 0
def on_inv(self, conn, message):
if self.txidstream_isopen:
for i in message.inv:
if i.type == 1:
self.txids.append(('%x' % i.hash).zfill(64))
def open_txidstream(self):
self.txidstream_isopen = True
def read_txidstream(self):
if not self.txidstream_isopen:
raise AssertionError("TXID stream not opened for reading")
self.sync(lambda: len(self.txids) >= self.txidstream_pos + 1)
self.txidstream_pos += 1
return self.txids[self.txidstream_pos - 1]
class SPVTest(BitcoinTestFramework):
def __init__(self):
self.num_nodes = 2
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-allowfreetx=0 -debug=mempool"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-allowfreetx=0 -debug=mempool"]))
connect_nodes(self.nodes[0], 1)
def run_test(self):
# Setup the p2p connections
spv_node = TestNode()
connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], spv_node, services=0)
spv_node.add_connection(connection)
# Add a bunch of extra connections to our nodes[0] peer, so spv_node is
# unlikely to get inv's due to trickling rather than filter matches
other_nodes = []
for i in range(0,25):
other_nodes.append(BaseNode())
other_connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], other_nodes[i])
other_nodes[i].add_connection(other_connection)
NetworkThread().start() # Start up network handling in another thread
spv_node.wait_for_verack()
# Generate some coins
self.nodes[1].generate(110)
sync_blocks(self.nodes[0:2])
# Start collecting txid inv's
spv_node.open_txidstream()
# Generate an address and extract pubkeyhash
address0 = self.nodes[0].getnewaddress()
dummyoutputs = {}
dummyoutputs[address0] = 1
dummytxhex = self.nodes[0].createrawtransaction([], dummyoutputs)
dummytx = self.nodes[0].decoderawtransaction(dummytxhex)
dummyasm = dummytx["vout"][0]["scriptPubKey"]["asm"]
pkhstart = dummyasm.index("OP_HASH160") + len("OP_HASH160") + 1
pkhhex = dummyasm[pkhstart:pkhstart+20*2]
pubkeyhash0 = bytearray.fromhex(pkhhex)
# Load bloom filter to peer
spvFilter = CBloomFilter(nFlags=CBloomFilter.ANCESTOR_UPDATE_BIT)
spvFilter.insert(pubkeyhash0)
spv_node.send_message(msg_filterload(spvFilter))
# Test 1. Bloom filter positive match
tx1_id = self.nodes[1].sendtoaddress(address0, 1)
got_txid = spv_node.read_txidstream()
assert_equal(got_txid, tx1_id) #tx1 pays us
# Test 2. Ancestor relay and mempool response
# Send a control tx that neither pays us, nor is an ancestor of a tx that pays us
txextra_id = self.nodes[1].sendtoaddress(self.nodes[1].getnewaddress(), 15)
# Build an ancestor chain where the grandchild pays us
tx2grandparent_id = self.nodes[1].sendtoaddress(self.nodes[1].getnewaddress(), 25)
tx2parent_input = {}
tx2parent_input["txid"] = tx2grandparent_id
tx2parent_input["vout"] = 0
tx2parent_output = {}
tx2parent_output[self.nodes[1].getnewaddress()] = 12.5
tx2parent_output[self.nodes[1].getnewaddress()] = 12.48
tx2parent = self.nodes[1].createrawtransaction([tx2parent_input], tx2parent_output)
tx2parentsignresult = self.nodes[1].signrawtransaction(tx2parent)
assert_equal(tx2parentsignresult["complete"], True)
tx2parent_id = self.nodes[1].sendrawtransaction(tx2parentsignresult["hex"])
# Match tx2 by its consumption of a specific UTXO
spvFilter.insert(COutPoint(int(tx2parent_id,16),0))
spv_node.send_message(msg_filterload(spvFilter))
tx2_input = {}
tx2_input["txid"] = tx2parent_id
tx2_input["vout"] = 0
tx2_output = {}
tx2_output[self.nodes[0].getnewaddress()] = 2
tx2_output[self.nodes[1].getnewaddress()] = 10.48
tx2 = self.nodes[1].createrawtransaction([tx2_input], tx2_output)
tx2signresult = self.nodes[1].signrawtransaction(tx2)
assert_equal(tx2signresult["complete"], True)
tx2_id = self.nodes[1].sendrawtransaction(tx2signresult["hex"])
# Check that tx2 as well as all its ancestors are pushed to our SPV node
relayed = [spv_node.read_txidstream(), spv_node.read_txidstream(), spv_node.read_txidstream()]
expectedRelay = [tx2grandparent_id, tx2parent_id, tx2_id]
assert_equal(sorted(relayed), sorted(expectedRelay))
sync_mempools(self.nodes[0:2])
spv_node.send_message(msg_mempool())
# Check the complete filtered mempool returned by our peer
pool = [spv_node.read_txidstream(), spv_node.read_txidstream(), spv_node.read_txidstream(), spv_node.read_txidstream()]
expectedPool = [tx1_id, tx2grandparent_id, tx2parent_id, tx2_id]
assert_equal(sorted(pool), sorted(expectedPool))
if __name__ == '__main__':
SPVTest().main()
|
3rdParty/V8/v7.9.317/tools/dump-cpp.py | rajeev02101987/arangodb | 12,278 | 12621007 |
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script executes dumpcpp.js, collects all dumped C++ symbols,
# and merges them back into v8 log.
# for py2/py3 compatibility
from __future__ import print_function
import os
import platform
import re
import subprocess
import sys
def is_file_executable(fPath):
return os.path.isfile(fPath) and os.access(fPath, os.X_OK)
if __name__ == '__main__':
JS_FILES = ['splaytree.js', 'codemap.js', 'csvparser.js', 'consarray.js',
'profile.js', 'logreader.js', 'arguments.js', 'tickprocessor.js',
'SourceMap.js', 'dumpcpp.js', 'dumpcpp-driver.js']
tools_path = os.path.dirname(os.path.realpath(__file__))
on_windows = platform.system() == 'Windows'
JS_FILES = [os.path.join(tools_path, f) for f in JS_FILES]
args = []
log_file = 'v8.log'
debug = False
for arg in sys.argv[1:]:
if arg == '--debug':
debug = True
continue
args.append(arg)
if not arg.startswith('-'):
log_file = arg
if on_windows:
args.append('--windows')
with open(log_file, 'r') as f:
lines = f.readlines()
d8_line = re.search(',\"(.*d8)', ''.join(lines))
if d8_line:
d8_exec = d8_line.group(1)
if not is_file_executable(d8_exec):
print('d8 binary path found in {} is not executable.'.format(log_file))
sys.exit(-1)
else:
print('No d8 binary path found in {}.'.format(log_file))
sys.exit(-1)
args = [d8_exec] + JS_FILES + ['--'] + args
with open(log_file) as f:
sp = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=f)
out, err = sp.communicate()
if debug:
print(err)
if sp.returncode != 0:
print(out)
exit(-1)
if on_windows and out:
out = re.sub('\r+\n', '\n', out)
is_written = not bool(out)
with open(log_file, 'w') as f:
for line in lines:
if not is_written and line.startswith('tick'):
f.write(out)
is_written = True
f.write(line)
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/ATI/pn_triangles.py | ShujaKhalid/deep-rl | 210 | 12621012 | '''OpenGL extension ATI.pn_triangles
This module customises the behaviour of the
OpenGL.raw.GL.ATI.pn_triangles to provide a more
Python-friendly API
Overview (from the spec)
ATI_pn_triangles provides a path for enabling the GL to internally
tessellate input geometry into curved patches. The extension allows the
user to tune the amount of tessellation to be performed on each triangle as
a global state value. The intent of PN Triangle tessellation is
typically to produce geometry with a smoother silhouette and more organic
shape.
The tessellated patch will replace the triangles input into the GL.
The GL will generate new vertices in object-space, prior to geometry
transformation. Only the vertices and normals are required to produce
proper results, and the rest of the information per vertex is interpolated
linearly across the patch.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ATI/pn_triangles.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ATI.pn_triangles import *
from OpenGL.raw.GL.ATI.pn_triangles import _EXTENSION_NAME
def glInitPnTrianglesATI():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
KG/CoKE/bin/evaluation.py | pkulzb/Research | 1,319 | 12621014 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" evaluation scripts for KBC and pathQuery tasks """
import json
import logging
import collections
import numpy as np
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def kbc_batch_evaluation(eval_i, all_examples, batch_results, tt):
r_hts_idx = collections.defaultdict(list)
scores_head = collections.defaultdict(list)
scores_tail = collections.defaultdict(list)
batch_r_hts_cnt = 0
b_size = len(batch_results)
for j in range(b_size):
result = batch_results[j]
i = eval_i + j
example = all_examples[i]
assert len(example.token_ids
) == 3, "For kbc task each example consists of 3 tokens"
h, r, t = example.token_ids
_mask_type = example.mask_type
if i % 2 == 0:
r_hts_idx[r].append((h, t))
batch_r_hts_cnt += 1
if _mask_type == "MASK_HEAD":
scores_head[(r, t)] = result
elif _mask_type == "MASK_TAIL":
scores_tail[(r, h)] = result
else:
raise ValueError("Unknown mask type in prediction example:%d" % i)
rank = {}
f_rank = {}
for r, hts in r_hts_idx.items():
r_rank = {'head': [], 'tail': []}
r_f_rank = {'head': [], 'tail': []}
for h, t in hts:
scores_t = scores_tail[(r, h)][:]
sortidx_t = np.argsort(scores_t)[::-1]
r_rank['tail'].append(np.where(sortidx_t == t)[0][0] + 1)
rm_idx = tt[r]['ts'][h]
rm_idx = [i for i in rm_idx if i != t]
for i in rm_idx:
scores_t[i] = -np.Inf
sortidx_t = np.argsort(scores_t)[::-1]
r_f_rank['tail'].append(np.where(sortidx_t == t)[0][0] + 1)
scores_h = scores_head[(r, t)][:]
sortidx_h = np.argsort(scores_h)[::-1]
r_rank['head'].append(np.where(sortidx_h == h)[0][0] + 1)
rm_idx = tt[r]['hs'][t]
rm_idx = [i for i in rm_idx if i != h]
for i in rm_idx:
scores_h[i] = -np.Inf
sortidx_h = np.argsort(scores_h)[::-1]
r_f_rank['head'].append(np.where(sortidx_h == h)[0][0] + 1)
rank[r] = r_rank
f_rank[r] = r_f_rank
h_pos = [p for k in rank.keys() for p in rank[k]['head']]
t_pos = [p for k in rank.keys() for p in rank[k]['tail']]
f_h_pos = [p for k in f_rank.keys() for p in f_rank[k]['head']]
f_t_pos = [p for k in f_rank.keys() for p in f_rank[k]['tail']]
ranks = np.asarray(h_pos + t_pos)
f_ranks = np.asarray(f_h_pos + f_t_pos)
return ranks, f_ranks
def pathquery_batch_evaluation(eval_i, all_examples, batch_results,
sen_negli_dict, trivial_sen_set):
""" evaluate the metrics for batch datas for pathquery datasets """
mqs = []
ranks = []
for j, result in enumerate(batch_results):
i = eval_i + j
example = all_examples[i]
token_ids, mask_type = example
assert mask_type in ["MASK_TAIL", "MASK_HEAD"
], " Unknown mask type in pathquery evaluation"
label = token_ids[-1] if mask_type == "MASK_TAIL" else token_ids[0]
sen = " ".join([str(x) for x in token_ids])
if sen in trivial_sen_set:
mq = rank = -1
else:
# candidate vocab set
cand_set = sen_negli_dict[sen]
assert label in set(
cand_set), "predict label must be in the candidate set"
cand_idx = np.sort(np.array(cand_set))
cand_ret = result[
cand_idx] #logits for candidate words(neg + gold words)
cand_ranks = np.argsort(cand_ret)[::-1]
pred_y = cand_idx[cand_ranks]
rank = (np.argwhere(pred_y == label).ravel().tolist())[0] + 1
mq = (len(cand_set) - rank) / (len(cand_set) - 1.0)
mqs.append(mq)
ranks.append(rank)
return mqs, ranks
def compute_kbc_metrics(rank_li, frank_li, output_evaluation_result_file):
""" combine the kbc rank results from batches into the final metrics """
rank_rets = np.array(rank_li).ravel()
frank_rets = np.array(frank_li).ravel()
mrr = np.mean(1.0 / rank_rets)
fmrr = np.mean(1.0 / frank_rets)
hits1 = np.mean(rank_rets <= 1.0)
hits3 = np.mean(rank_rets <= 3.0)
hits10 = np.mean(rank_rets <= 10.0)
# filtered metrics
fhits1 = np.mean(frank_rets <= 1.0)
fhits3 = np.mean(frank_rets <= 3.0)
fhits10 = np.mean(frank_rets <= 10.0)
eval_result = {
'mrr': mrr,
'hits1': hits1,
'hits3': hits3,
'hits10': hits10,
'fmrr': fmrr,
'fhits1': fhits1,
'fhits3': fhits3,
'fhits10': fhits10
}
with open(output_evaluation_result_file, "w") as fw:
fw.write(json.dumps(eval_result, indent=4) + "\n")
return eval_result
def compute_pathquery_metrics(mq_li, rank_li, output_evaluation_result_file):
""" combine the pathquery mq, rank results from batches into the final metrics """
rank_rets = np.array(rank_li).ravel()
_idx = np.where(rank_rets != -1)
non_trivial_eval_rets = rank_rets[_idx]
non_trivial_mq = np.array(mq_li).ravel()[_idx]
non_trivial_cnt = non_trivial_eval_rets.size
mq = np.mean(non_trivial_mq)
mr = np.mean(non_trivial_eval_rets)
mrr = np.mean(1.0 / non_trivial_eval_rets)
fhits10 = np.mean(non_trivial_eval_rets <= 10.0)
eval_result = {
'fcnt': non_trivial_cnt,
'mq': mq,
'mr': mr,
'fhits10': fhits10
}
with open(output_evaluation_result_file, "w") as fw:
fw.write(json.dumps(eval_result, indent=4) + "\n")
return eval_result
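# --- Usage sketch (added for illustration; not part of the original file) ---
# The per-batch helpers above feed the metric aggregators, roughly as follows
# (the batch iterator below is hypothetical):
#   rank_li, frank_li = [], []
#   for eval_i, batch_results in batches:
#       ranks, f_ranks = kbc_batch_evaluation(eval_i, all_examples, batch_results, tt)
#       rank_li.append(ranks)
#       frank_li.append(f_ranks)
#   compute_kbc_metrics(rank_li, frank_li, "kbc_eval_result.json")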
|
Chapter07/generators_iteration_2.py | TranQuangDuc/Clean-Code-in-Python | 402 | 12621018 | """Clean Code in Python - Chapter 7: Using Generators
> The Interface for Iteration: sequences
"""
import logging
logger = logging.getLogger(__name__)
class SequenceWrapper:
def __init__(self, original_sequence):
self.seq = original_sequence
def __getitem__(self, item):
value = self.seq[item]
logger.debug("%s getting %s", self.__class__.__name__, item)
return value
def __len__(self):
return len(self.seq)
class MappedRange:
"""Apply a transformation to a range of numbers."""
def __init__(self, transformation, start, end):
self._transformation = transformation
self._wrapped = range(start, end)
def __getitem__(self, index):
value = self._wrapped.__getitem__(index)
result = self._transformation(value)
logger.debug("Index %d: %s", index, result)
return result
def __len__(self):
return len(self._wrapped)
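# --- Usage sketch (illustrative addition, not part of the book's listing) ---
# Neither class defines __iter__: iteration works through the legacy sequence
# protocol, i.e. repeated __getitem__ calls until IndexError is raised.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    squares = MappedRange(lambda x: x ** 2, 0, 5)
    print(list(squares))               # [0, 1, 4, 9, 16]
    wrapped = SequenceWrapper("abc")
    print([char for char in wrapped])  # ['a', 'b', 'c']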
|
dml/tool/__init__.py | Edelweiss35/deep-machine-learning | 708 | 12621031 |
"""
"""
import numpy as np
import scipy as sp
from .sigmoid import sigmoid
from .normalize import normalize,disnormalize,normalize_by_extant,featurenormal
from .sign import sign
from .pca import pca,projectData,recoverData
from .displayData import displayData,showimage
from .heap import Heap
from .expand import expand
__all__ = ['sigmoid',
'normalize',
'disnormalize',
'normalize_by_extant',
'sign',
'pca',
'projectData',
'recoverData',
'displayData',
'Heap',
'expand',
'showimage'
]
|
leetcode/527.word-abbreviation.py | geemaple/algorithm | 177 | 12621056 | class Solution(object):
def wordsAbbreviation(self, dict):
"""
:type dict: List[str]
:rtype: List[str]
"""
res = []
countMap = {}
prefix = [1] * len(dict)
for word in dict:
abbr = self.abbreviateWord(word, 1)
res.append(abbr)
countMap[abbr] = countMap.get(abbr, 0) + 1
while(True):
unique = True
for i in range(len(res)):
if countMap.get(res[i], 0) > 1:
unique = False
prefix[i] += 1
abbr = self.abbreviateWord(dict[i], prefix[i])
res[i] = abbr
countMap[abbr] = countMap.get(abbr, 0) + 1
if unique:
break
return res
def abbreviateWord(self, word, pos):
if pos + 2 >= len(word):
return word
return word[:pos] + str(len(word) - pos - 1) + word[-1] |
homeassistant/components/zwave/workaround.py | tbarbette/core | 22,481 | 12621098 | """Z-Wave workarounds."""
from . import const
# Manufacturers
FIBARO = 0x010F
GE = 0x0063
PHILIO = 0x013C
SOMFY = 0x0047
WENZHOU = 0x0118
LEVITON = 0x001D
# Product IDs
GE_FAN_CONTROLLER_12730 = 0x3034
GE_FAN_CONTROLLER_14287 = 0x3131
JASCO_FAN_CONTROLLER_14314 = 0x3138
PHILIO_SLIM_SENSOR = 0x0002
PHILIO_3_IN_1_SENSOR_GEN_4 = 0x000D
PHILIO_PAN07 = 0x0005
VIZIA_FAN_CONTROLLER_VRF01 = 0x0334
LEVITON_DECORA_FAN_CONTROLLER_ZW4SF = 0x0002
# Product Types
FGFS101_FLOOD_SENSOR_TYPE = 0x0B00
FGRM222_SHUTTER2 = 0x0301
FGR222_SHUTTER2 = 0x0302
GE_DIMMER = 0x4944
PHILIO_SWITCH = 0x0001
PHILIO_SENSOR = 0x0002
SOMFY_ZRTSI = 0x5A52
VIZIA_DIMMER = 0x1001
LEVITON_DECORA_FAN_CONTROLLER = 0x0038
# Mapping devices
PHILIO_SLIM_SENSOR_MOTION_MTII = (PHILIO, PHILIO_SENSOR, PHILIO_SLIM_SENSOR, 0)
PHILIO_3_IN_1_SENSOR_GEN_4_MOTION_MTII = (
PHILIO,
PHILIO_SENSOR,
PHILIO_3_IN_1_SENSOR_GEN_4,
0,
)
PHILIO_PAN07_MTI_INSTANCE = (PHILIO, PHILIO_SWITCH, PHILIO_PAN07, 1)
WENZHOU_SLIM_SENSOR_MOTION_MTII = (WENZHOU, PHILIO_SENSOR, PHILIO_SLIM_SENSOR, 0)
# Workarounds
WORKAROUND_NO_OFF_EVENT = "trigger_no_off_event"
WORKAROUND_NO_POSITION = "workaround_no_position"
WORKAROUND_REFRESH_NODE_ON_UPDATE = "refresh_node_on_update"
WORKAROUND_IGNORE = "workaround_ignore"
# List of workarounds by (manufacturer_id, product_type, product_id, index)
DEVICE_MAPPINGS_MTII = {
PHILIO_SLIM_SENSOR_MOTION_MTII: WORKAROUND_NO_OFF_EVENT,
PHILIO_3_IN_1_SENSOR_GEN_4_MOTION_MTII: WORKAROUND_NO_OFF_EVENT,
WENZHOU_SLIM_SENSOR_MOTION_MTII: WORKAROUND_NO_OFF_EVENT,
}
# List of workarounds by (manufacturer_id, product_type, product_id, instance)
DEVICE_MAPPINGS_MTI_INSTANCE = {
PHILIO_PAN07_MTI_INSTANCE: WORKAROUND_REFRESH_NODE_ON_UPDATE
}
SOMFY_ZRTSI_CONTROLLER_MT = (SOMFY, SOMFY_ZRTSI)
# List of workarounds by (manufacturer_id, product_type)
DEVICE_MAPPINGS_MT = {SOMFY_ZRTSI_CONTROLLER_MT: WORKAROUND_NO_POSITION}
# Component mapping devices
FIBARO_FGFS101_SENSOR_ALARM = (
FIBARO,
FGFS101_FLOOD_SENSOR_TYPE,
const.COMMAND_CLASS_SENSOR_ALARM,
)
FIBARO_FGRM222_BINARY = (FIBARO, FGRM222_SHUTTER2, const.COMMAND_CLASS_SWITCH_BINARY)
FIBARO_FGR222_BINARY = (FIBARO, FGR222_SHUTTER2, const.COMMAND_CLASS_SWITCH_BINARY)
GE_FAN_CONTROLLER_12730_MULTILEVEL = (
GE,
GE_DIMMER,
GE_FAN_CONTROLLER_12730,
const.COMMAND_CLASS_SWITCH_MULTILEVEL,
)
GE_FAN_CONTROLLER_14287_MULTILEVEL = (
GE,
GE_DIMMER,
GE_FAN_CONTROLLER_14287,
const.COMMAND_CLASS_SWITCH_MULTILEVEL,
)
JASCO_FAN_CONTROLLER_14314_MULTILEVEL = (
GE,
GE_DIMMER,
JASCO_FAN_CONTROLLER_14314,
const.COMMAND_CLASS_SWITCH_MULTILEVEL,
)
VIZIA_FAN_CONTROLLER_VRF01_MULTILEVEL = (
LEVITON,
VIZIA_DIMMER,
VIZIA_FAN_CONTROLLER_VRF01,
const.COMMAND_CLASS_SWITCH_MULTILEVEL,
)
LEVITON_FAN_CONTROLLER_ZW4SF_MULTILEVEL = (
LEVITON,
LEVITON_DECORA_FAN_CONTROLLER,
LEVITON_DECORA_FAN_CONTROLLER_ZW4SF,
const.COMMAND_CLASS_SWITCH_MULTILEVEL,
)
# List of component workarounds by
# (manufacturer_id, product_type, command_class)
DEVICE_COMPONENT_MAPPING = {
FIBARO_FGFS101_SENSOR_ALARM: "binary_sensor",
FIBARO_FGRM222_BINARY: WORKAROUND_IGNORE,
FIBARO_FGR222_BINARY: WORKAROUND_IGNORE,
}
# List of component workarounds by
# (manufacturer_id, product_type, product_id, command_class)
DEVICE_COMPONENT_MAPPING_MTI = {
GE_FAN_CONTROLLER_12730_MULTILEVEL: "fan",
GE_FAN_CONTROLLER_14287_MULTILEVEL: "fan",
JASCO_FAN_CONTROLLER_14314_MULTILEVEL: "fan",
VIZIA_FAN_CONTROLLER_VRF01_MULTILEVEL: "fan",
LEVITON_FAN_CONTROLLER_ZW4SF_MULTILEVEL: "fan",
}
def get_device_component_mapping(value):
"""Get mapping of value to another component."""
if value.node.manufacturer_id.strip() and value.node.product_type.strip():
manufacturer_id = int(value.node.manufacturer_id, 16)
product_type = int(value.node.product_type, 16)
product_id = int(value.node.product_id, 16)
result = DEVICE_COMPONENT_MAPPING.get(
(manufacturer_id, product_type, value.command_class)
)
if result:
return result
result = DEVICE_COMPONENT_MAPPING_MTI.get(
(manufacturer_id, product_type, product_id, value.command_class)
)
if result:
return result
return None
def get_device_mapping(value):
"""Get mapping of value to a workaround."""
if (
value.node.manufacturer_id.strip()
and value.node.product_id.strip()
and value.node.product_type.strip()
):
manufacturer_id = int(value.node.manufacturer_id, 16)
product_type = int(value.node.product_type, 16)
product_id = int(value.node.product_id, 16)
result = DEVICE_MAPPINGS_MTII.get(
(manufacturer_id, product_type, product_id, value.index)
)
if result:
return result
result = DEVICE_MAPPINGS_MTI_INSTANCE.get(
(manufacturer_id, product_type, product_id, value.instance)
)
if result:
return result
return DEVICE_MAPPINGS_MT.get((manufacturer_id, product_type))
return None
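# --- Illustrative sketch (not part of Home Assistant): the lookups above only
# need an object exposing the node ids plus command_class/index/instance, so a
# minimal stand-in is enough to exercise them. The ids below mimic a Fibaro
# FGFS101 flood sensor.
class _ExampleNode:
    manufacturer_id = "010f"
    product_type = "0b00"
    product_id = "1001"
class _ExampleValue:
    node = _ExampleNode()
    command_class = const.COMMAND_CLASS_SENSOR_ALARM
    index = 0
    instance = 1
# get_device_component_mapping(_ExampleValue()) would return "binary_sensor".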
|
venv/Lib/site-packages/two1/sell/composer.py | RafaelHMachado/Cioffis_Automation | 415 | 12621100 | # standard python imports
import re
import os
import time
from collections import OrderedDict
from collections import namedtuple
import json
import shutil
import subprocess
from enum import Enum
from abc import ABCMeta
from abc import abstractmethod
import tarfile
import yaml
# 3rd party imports
import requests
from docker import Client
from docker.utils import kwargs_from_env as docker_env
# two1 imports
from io import BytesIO
from two1.wallet import Two1Wallet
from two1.blockchain import TwentyOneProvider
from two1.sell.exceptions import exceptions_composer as exceptions
from two1.sell.util.context import YamlDataContext
from two1.commands.util.exceptions import ServerRequestError
class ComposerState(Enum):
""" Composer state label.
"""
CONNECTED = 1,
DISCONNECTED = 2
class Two1Composer(metaclass=ABCMeta):
""" Abstract base composer layer.
"""
DOCKERHUB_API_URL = "https://registry.hub.docker.com/v2/repositories"
DOCKERHUB_REPO = "21dotco/two1"
PRIMARY_ACCOUNT_DIR = os.path.expanduser("~/.two1")
PRIMARY_ACCOUNT_FILE = os.path.join(PRIMARY_ACCOUNT_DIR, "two1.json")
BASE_DIR = os.path.join(PRIMARY_ACCOUNT_DIR, "services")
DB_DIR = os.path.join(BASE_DIR, "db_dir")
os.makedirs(DB_DIR, exist_ok=True)
SITES_ENABLED_PATH = os.path.join(BASE_DIR, "config", "sites-enabled")
SITES_AVAILABLE_PATH = os.path.join(BASE_DIR, "config", "sites-available")
COMPOSE_FILE = os.path.join(BASE_DIR, "21-compose.yaml")
BASE_SERVICES = {"router", "payments", "base"}
SERVICE_START_TIMEOUT = 10
SERVICE_PUBLISH_TIMEOUT = 15
@property
def wallet_file(self):
""" Get the default wallet path.
"""
try:
with open(Two1Composer.PRIMARY_ACCOUNT_FILE, 'r') as f:
account_info = json.load(f)
except Exception:
raise
return account_info.get("wallet_path")
@abstractmethod
def start_services(self, *args):
""" Start router, payments server, and machine-payable services.
"""
@abstractmethod
def stop_services(self, *args):
""" Stop router, payments server, and machine-payable services.
"""
@abstractmethod
def status_services(self, *args):
""" Get the status of services.
"""
@abstractmethod
def status_router(self, *args):
""" Get the status of the router.
"""
@abstractmethod
def status_payments_server(self, *args):
""" Get the status of the payments server.
"""
@abstractmethod
def connect(self, *args, **kwargs):
""" Connect to the docker client
"""
@abstractmethod
def read_server_config(self):
"""Read configuration of server.
"""
class Two1ComposerNative(Two1Composer):
""" Manage machine-payable microservices natively.
"""
def __init__(self):
self._connected = ComposerState.DISCONNECTED
self.provider = TwentyOneProvider()
self.default_wallet = Two1Wallet(self.wallet_file,
self.provider)
def connect(self, *args, **kwargs):
""" Create docker client.
"""
self.docker_client = Client()
self._connected = ComposerState.DISCONNECTED
class Two1ComposerContainers(Two1Composer):
""" Manage machine-payable microservices in containers.
"""
def __init__(self):
self._connected = ComposerState.DISCONNECTED
self.provider = TwentyOneProvider()
self.default_wallet = Two1Wallet(self.wallet_file, self.provider)
class ServiceManager:
""" Query and modify user services persisting at cls.USER_SERVICES_FILE.
"""
USER_SERVICES_FILE = os.path.join(Two1Composer.BASE_DIR, "user-services.json")
class Image(namedtuple('Image', 'docker_hub_account repository tag')):
def _asdict(self):
# Fixes a bug for Python 3.4 users
# https://bugs.python.org/issue24931
'A new OrderedDict mapping field names to their values'
return OrderedDict(zip(self._fields, self))
@property
def is_dockerhub_image(self):
""" Returns: True iff Image instance has all fields.
"""
return self.docker_hub_account and self.repository and self.tag
@property
def is_local_image(self):
""" Returns: True iff Image instance doesn't have docker_hub_account but has all other fields.
"""
return not self.docker_hub_account and self.repository and self.tag
def __str__(self):
""" Returns: Docker image name constructed from Image instance fields.
"""
if self.is_dockerhub_image:
return '%s/%s:%s' % (self.docker_hub_account, self.repository, self.tag)
elif self.is_local_image:
return '%s:%s' % (self.repository, self.tag)
else:
raise ValueError()
@classmethod
def from_string(cls, image_name):
""" Constructs an Image instance from a docker image name.
Args:
image_name (str): A docker image name.
Returns:
Image: An Image instance.
"""
slashes = re.findall('/', image_name)
colons = re.findall(':', image_name)
if len(slashes) == 1:
if len(colons) == 1 and image_name.find('/') < image_name.find(':'):
docker_hub_account, rest = image_name.split('/')
repository, tag = rest.split(':')
return cls(docker_hub_account=docker_hub_account, repository=repository, tag=tag)
elif len(colons) == 0:
docker_hub_account, repository = image_name.split('/')
return cls(docker_hub_account=docker_hub_account, repository=repository, tag='latest')
elif len(slashes) == 0:
if len(colons) == 1:
repository, tag = image_name.split(':')
return cls(docker_hub_account=None, repository=repository, tag=tag)
elif len(colons) == 0:
return cls(docker_hub_account=None, repository=image_name, tag='latest')
raise ValueError()
@classmethod
def get_image(cls, service_name):
""" Constructs an Image instance for a service.
Args:
service_name (str): The name of either a 21 service in the 21dotco/two1 repository or a user service
added to ServiceManager.USER_SERVICES_FILE by ServiceManager.add_service.
Returns:
Image: An Image instance corresponding to the given service.
"""
if service_name in cls.available_21_services():
return cls.Image(
                    docker_hub_account='21dotco',  # account from Two1Composer.DOCKERHUB_REPO ("21dotco/two1")
repository='two1',
tag=service_name if service_name in Two1Composer.BASE_SERVICES else 'service-%s' % service_name
)
elif service_name in cls.available_user_services():
return cls.Image(**cls._get_user_service_dict()[service_name])
else:
raise ValueError()
@classmethod
def available_services(cls):
""" Returns: All available service names.
"""
return cls.available_21_services() | cls.available_user_services()
@classmethod
def available_21_services(cls):
""" Returns: All available 21 services by querying Docker Hub.
"""
service_image_data = requests.get(os.path.join(
Two1Composer.DOCKERHUB_API_URL, Two1Composer.DOCKERHUB_REPO, 'tags')).json().get('results')
return set([image_data['name'].split('service-')[1] for image_data in
service_image_data if re.match(r'^service-', image_data['name'])])
@classmethod
def available_user_services(cls):
""" Returns: All available user services.
"""
return set(cls._get_user_service_dict().keys())
@classmethod
def add_service(cls, service_name, image_name_string,
service_successfully_added_hook, service_already_exists_hook,
service_failed_to_add_hook):
""" Adds a new service definition to ServiceManager.USER_SERVICES_FILE.
Args:
service_name (str): Name of the service definition to add.
image_name_string (str): Docker image name for the service definition.
"""
service_dict = cls._get_user_service_dict()
if service_name in service_dict:
service_already_exists_hook(service_name)
else:
service_dict[service_name] = cls.Image.from_string(image_name_string)._asdict()
if cls._commit_user_service_dict(service_dict):
service_successfully_added_hook(service_name)
else:
service_failed_to_add_hook(service_name)
@classmethod
def remove_service(cls, service_name,
service_successfully_removed_hook,
service_does_not_exists_hook,
service_failed_to_remove_hook):
""" Removes a service definition from ServiceManager.USER_SERVICES_FILE.
Args:
service_name (str): Name of the service definition to remove.
"""
service_dict = cls._get_user_service_dict()
if service_name in service_dict:
del service_dict[service_name]
if cls._commit_user_service_dict(service_dict):
service_successfully_removed_hook(service_name)
else:
service_failed_to_remove_hook(service_name)
else:
service_does_not_exists_hook(service_name)
@classmethod
def _get_user_service_dict(cls):
""" Returns: ServiceManager.USER_SERVICES_FILE as a dict.
"""
try:
with open(cls.USER_SERVICES_FILE, 'r') as data_file:
service_dict = json.load(data_file)
except:
return {}
else:
return service_dict
@classmethod
def _commit_user_service_dict(cls, service_dict):
""" Writes a dict of user services to ServiceManager.USER_SERVICES_FILE in json format.
Args:
service_dict (dict): A dictionary of user services of the form
{service_name : _as_dict representation of corresponding Image instance..}.
Returns:
bool: True iff no exceptions were raised when writing service_dict to ServiceManager.USER_SERVICES_FILE
as json.
"""
try:
with open(cls.USER_SERVICES_FILE, 'w') as outfile:
json.dump(service_dict, outfile)
except:
return False
else:
return True
class ComposerYAMLContext(YamlDataContext):
""" Context manager for composer YAML service file.
"""
def __init__(self, username=None, password=None, server_port=None, mnemonic=None):
self.username = username
self.password = password
self.server_port = server_port
self.mnemonic = mnemonic
super().__init__(Two1Composer.COMPOSE_FILE)
def __enter__(self):
sup = super().__enter__()
for service in self.data['services']:
service_definition = self.data['services'][service]
if 'environment' in service_definition:
if 'TWO1_USERNAME' in service_definition['environment'] and self.username is not None:
service_definition['environment']['TWO1_USERNAME'] = self.username
if 'TWO1_PASSWORD' in service_definition['environment'] and self.password is not None:
                        service_definition['environment']['TWO1_PASSWORD'] = self.password
if 'TWO1_WALLET_MNEMONIC' in service_definition['environment'] and self.mnemonic is not None:
service_definition['environment']['TWO1_WALLET_MNEMONIC'] = self.mnemonic
if 'PAYMENT_SERVER_IP' in service_definition['environment'] and self.server_port is not None:
                        host_part = service_definition['environment']['PAYMENT_SERVER_IP'].rsplit(':', maxsplit=1)[0]
                        service_definition['environment']['PAYMENT_SERVER_IP'] = '%s:%s' % (host_part, self.server_port)
return sup
def _filler(self):
""" Create the base service description file.
"""
return {
'version': '2',
'services': {
'base': {
'image': '%s:base' % Two1Composer.DOCKERHUB_REPO,
},
'router': {
'image': '%s:router' % Two1Composer.DOCKERHUB_REPO,
'container_name': 'sell_router',
'restart': 'always',
'volumes': [
Two1Composer.SITES_ENABLED_PATH + ":/etc/nginx/sites-enabled",
Two1Composer.SITES_AVAILABLE_PATH + ":/etc/nginx/sites-available",
],
'ports': ['%s:%s' % (self.server_port, self.server_port)],
'links': [
'payments:payments',
],
},
'payments': {
'image': '%s:payments' % Two1Composer.DOCKERHUB_REPO,
'depends_on': ['base'],
'container_name': 'sell_payments',
'restart': 'always',
'environment': {
"TWO1_USERNAME": str(self.username),
"TWO1_PASSWORD": str(self.password),
"TWO1_WALLET_MNEMONIC": str(self.mnemonic)
},
'volumes': [
Two1Composer.DB_DIR + ":/usr/src/db/"
],
'logging': {
'driver': 'json-file'
},
'cap_drop': [
'ALL'
],
'cap_add': [
'DAC_OVERRIDE',
'NET_RAW',
],
}
}
}
# public api
def connect(self, machine_env, host, machine_config_file):
""" Connect service composer to machine layer.
Args:
machine_env (dict): Environment dictionary for the docker client of the machine layer.
host (str): Hostname of the machine layer docker daemon.
machine_config_file (str): Path to the config file for the machine layer.
"""
self.machine_env = machine_env
self.machine_host = host
with open(machine_config_file, 'r') as f:
self.machine_config = json.load(f)
self.docker_client = Client(**docker_env(assert_hostname=False,
environment=self.machine_env))
self._connected = ComposerState.CONNECTED
def initialize_server(self, username, password, server_port, wallet=None):
""" Initialize micropayments server.
Define boilerplate services, networks, and volumes composer file
and nginx server config.
Generates a wallet mnemonic if non-existent.
Args:
username (str): Username to log in with.
password (str): Password to log in with.
server_port (int): The server port that the router is running on.
wallet: The wallet to use for the payments server and subsequent services.
"""
self._create_base_server(server_port) # create base router server config
self._create_payments_route() # create route to payments server
new_wallet = None # rv[1], not None if mnemonic is replaced in this function
# generate service description (yaml)
with self.ComposerYAMLContext(username, password, server_port) as composer_yaml:
try:
mnemonic = composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
if not mnemonic or mnemonic == str(None): # if mnemonic is Falsy or uninitialized
raise ValueError()
except (KeyError, ValueError): # catches if mnemonic is Falsy or doesn't exist in dict tree
new_machine_wallet = self.default_wallet.create(self.provider)[1]
composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC'] = new_machine_wallet
new_wallet = new_machine_wallet
return 0, new_wallet
def pull_image(self, image,
image_sucessfully_pulled_hook, image_failed_to_pull_hook, image_is_local_hook,
image_is_malformed_hook):
""" Pulls an Image instance iff it is a Docker Hub image.
Args:
image (Image): An Image instance.
"""
if image.is_dockerhub_image:
try:
self.docker_client.pull('%s/%s' % (image.docker_hub_account, image.repository),
tag=image.tag, stream=False)
except:
image_failed_to_pull_hook(image)
else:
image_sucessfully_pulled_hook(image)
elif image.is_local_image:
image_is_local_hook(image)
else:
image_is_malformed_hook(image)
def start_services(self, service_names,
failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook, failed_to_up_hook,
up_hook):
""" Start selected services.
Args:
service_names (list): List of service names to start.
failed_to_start_hook (Callable): A callable hook that takes in a service name and is run when said service
fails to start.
started_hook (Callable): A callable hook that takes in a service name and is run when said service starts.
failed_to_restart_hook (Callable): A callable hook that takes in a service name and is run when said service
fails to restart.
restarted_hook (Callable): A callable hook that takes in a service name and is run when said service
restarts.
failed_to_up_hook (Callable): A callable hook that takes in a service name and is run when said service
fails to go up.
up_hook (Callable): A callable hook that takes in a service name and is run when said service goes up.
"""
self._start_sell_service('base', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
self._start_sell_service('router', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
self._start_sell_service('payments', failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
self._restart_sell_service('router', failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
failed_to_up_hook, up_hook)
# Attempt to start all market services
for service_name in service_names:
try:
image = self.ServiceManager.get_image(service_name)
container_name = self.service_name_2_container_name(service_name)
# create nginx routes for service_name
self._create_service_route(service_name)
# add service_name to docker compose file
with self.ComposerYAMLContext() as docker_compose_yaml:
username = docker_compose_yaml['services']['payments']['environment']['TWO1_USERNAME']
password = docker_compose_yaml['services']['payments']['environment']['TWO1_PASSWORD']
mnemonic = docker_compose_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
docker_compose_yaml['services'][service_name] = {
'image': str(image),
'container_name': container_name,
'depends_on': ['base'],
'restart': 'always',
'environment': {
"TWO1_USERNAME": str(username),
"TWO1_PASSWORD": str(password),
"TWO1_WALLET_MNEMONIC": str(mnemonic),
"SERVICE": str(service_name),
"PAYMENT_SERVER_IP":
"http://%s:%s" % (self.machine_host, self.machine_config["server_port"])
},
'volumes': [
Two1Composer.DB_DIR + ":/usr/src/db/"
],
'logging': {
'driver': 'json-file'
},
'cap_drop': [
'ALL'
],
'cap_add': [
'DAC_OVERRIDE',
'NET_RAW',
],
}
link_str = '%s:%s' % (service_name, service_name)
if link_str not in docker_compose_yaml['services']['router']['links']:
docker_compose_yaml['services']['router']['links'].append(link_str)
except:
# something went wrong while configuring service_name
failed_to_start_hook(service_name)
else:
# attempt to build service_name
self._start_sell_service(service_name, failed_to_start_hook, started_hook, failed_to_up_hook, up_hook)
self._restart_sell_service('router', failed_to_start_hook, started_hook, failed_to_restart_hook, restarted_hook,
failed_to_up_hook, up_hook)
def _start_sell_service(self, service_name, failed_to_start_hook, started_hook, failed_to_up_hook, up_hook,
timeout=Two1Composer.SERVICE_START_TIMEOUT):
try:
subprocess.check_output(["docker-compose", "-f", Two1Composer.COMPOSE_FILE, "up", "-d", service_name],
stderr=subprocess.DEVNULL, env=self.machine_env)
except subprocess.CalledProcessError:
failed_to_start_hook(service_name)
else:
started_hook(service_name)
if service_name == 'router':
time.sleep(5)
elif service_name != 'router' and service_name != 'base':
start = time.clock()
exec_id = self.docker_client.exec_create('sell_router', "curl %s:5000" % service_name)['Id']
self.docker_client.exec_start(exec_id)
running = True
while time.clock() - start < timeout and running is True:
running = self.docker_client.exec_inspect(exec_id)['Running']
if running is True:
failed_to_up_hook(service_name)
else:
up_hook(service_name)
def _restart_sell_service(self, service_name, failed_to_start_hook, started_hook, failed_to_restart_hook,
restarted_hook, failed_to_up_hook, up_hook):
try:
self.docker_client.stop("sell_%s" % service_name)
except:
is_restart = False
else:
is_restart = True
self._start_sell_service(service_name, failed_to_restart_hook if is_restart else failed_to_start_hook,
restarted_hook if is_restart else started_hook, failed_to_up_hook, up_hook)
def stop_services(self, service_names,
service_found_stopped_and_removed_hook,
service_failed_to_stop_hook,
service_failed_to_be_removed_hook,
service_not_found_hook):
""" Stop selected services and remove containers.
Args:
service_names (set): Set of services to stop.
service_found_stopped_and_removed_hook (Callable): A callable hook that takes in a service name and is run
when said service is found, stopped, and removed.
service_failed_to_stop_hook (Callable): A callable hook that takes in a service name and is run when said
service fails to be stopped.
service_failed_to_be_removed_hook (Callable): A callable hook that takes in a service name and is run when
said service fails to be removed.
service_not_found_hook (Callable): A callable hook that takes in a service name and is run when said service
isn't found.
"""
for service_name in service_names:
if service_name in self.get_running_services():
container_name = self.service_name_2_container_name(service_name)
try:
self.docker_client.stop(container_name)
except:
service_failed_to_stop_hook(service_name)
else:
try:
self.docker_client.remove_container(container_name)
except:
service_failed_to_be_removed_hook(service_name)
else:
service_found_stopped_and_removed_hook(service_name)
else:
service_not_found_hook(service_name)
def silently_force_stop_all_services(self):
running_container_names = self.docker_client.containers(filters={"status": "running"})
for container_name in running_container_names:
self.docker_client.remove_container(container_name, force=True)
@staticmethod
def container_names_2_service_names(container_definitions):
""" Return service names from container definitions.
See service_name_2_container_name for the inverse operation but on one service name.
Args:
container_definitions (list): List of container descriptions as returned by self.docker_client.containers.
Returns:
set: Set of service names generated by removing the 'sell_' prefix from the containers' names.
"""
return set([container_definition['Names'][0][6:] for container_definition in container_definitions])
@staticmethod
def service_name_2_container_name(service_name):
""" Generates a container name from a service name by prepending 'sell_'
"""
return 'sell_%s' % service_name
def status_services(self, services):
""" Gets running status of specified services.
Args:
services (list): List of services to get status for.
"""
existent_services = self.get_services(all=True)
running_services = self.get_services(filters={"status": "running"})
exited_services = self.get_services(filters={"status": "exited"})
return {
"running": running_services & services,
"exited": exited_services & services,
"nonexistent": services - existent_services
}
def get_services(self, *args, **kwargs):
""" Call docker_client.containers | convert resulting container names to service names | remove base services
"""
return self.container_names_2_service_names(
self.docker_client.containers(*args, **kwargs)
) - Two1Composer.BASE_SERVICES
def get_running_services(self):
""" Get list of currently running services that aren't 21 base services.
Returns:
set: Set of currently running services.
"""
return self.get_services(filters={"status": "running"})
def status_router(self, service_running_hook, service_unknown_state_hook):
""" Get status of Nginx router container.
Args:
service_running_hook (Callable): A callable hook that takes in a service name and is run when said service
is running.
service_unknown_state_hook (Callable): A callable hook that takes in a service name and is run when said
service is in an unknown state.
"""
if len(self.docker_client.containers(all=True, filters={"name": "sell_router", "status": "running"})) == 1:
service_running_hook("router")
else:
service_unknown_state_hook("router")
def status_payments_server(self, service_running_hook, service_unknown_state_hook):
""" Get status of payment channels server.
Args:
service_running_hook (Callable): A callable hook that takes in a service name and is run when said service
is running.
service_unknown_state_hook (Callable): A callable hook that takes in a service name and is run when said
service is in an unknown state.
"""
if len(self.docker_client.containers(all=True, filters={"name": "sell_payments", "status": "running"})) == 1:
service_running_hook("payments")
else:
service_unknown_state_hook("payments")
@staticmethod
def _create_base_server(server_port):
""" Create nginx base server config.
Args:
server_port (int): port for 21 sell server.
"""
try:
# create nginx router dirs
shutil.rmtree(Two1Composer.SITES_ENABLED_PATH, ignore_errors=True)
shutil.rmtree(Two1Composer.SITES_AVAILABLE_PATH, ignore_errors=True)
os.makedirs(Two1Composer.SITES_ENABLED_PATH, exist_ok=True)
os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
# create base nginx server
with open(os.path.join(Two1Composer.SITES_ENABLED_PATH,
"two1baseserver"), 'w') as f:
f.write("server {\n"
" listen " + str(server_port) + ";\n"
" include /etc/nginx/sites-available/*;\n"
"}\n"
)
except Exception:
raise exceptions.Two1ComposerServiceDefinitionException()
@staticmethod
def _create_service_route(service):
""" Create route for container service.
"""
os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
try:
with open(os.path.join(Two1Composer.SITES_AVAILABLE_PATH, service), 'w') as f:
f.write("location /" + service + " {\n"
" rewrite ^/" + service + "/?(.*) /$1 break;\n"
" proxy_pass http://" + service + ":" + str(5000) + ";\n"
" proxy_set_header Host $host;\n"
" proxy_set_header X-Real-IP $remote_addr;\n"
" proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
"}\n")
except Exception:
raise exceptions.Two1ComposerRouteException()
@staticmethod
def _create_payments_route():
""" Add route to payments server.
"""
os.makedirs(Two1Composer.SITES_AVAILABLE_PATH, exist_ok=True)
try:
# write nginx route for payments server
with open(os.path.join(Two1Composer.SITES_AVAILABLE_PATH, "payments"), 'w') as f:
f.write("location /payment {\n"
" proxy_pass http://payments:" + str(5000) + ";\n"
" proxy_set_header Host $host;\n"
" proxy_set_header X-Real-IP $remote_addr;\n"
" proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n"
"}\n")
except Exception:
raise exceptions.Two1ComposerRouteException()
def publish_service(self, service_name, host_override, rest_client, published_hook,
already_published_hook, failed_to_publish_hook,
unknown_publish_error_hook):
strm, stat = self.docker_client.get_archive('sell_%s' % service_name,
'/usr/src/app/manifest.yaml')
with tarfile.open(fileobj=BytesIO(strm.read()), mode='r') as tf:
manifest = yaml.load(tf.extractfile(stat[u'name']).read().decode())
manifest['host'] = host_override
try:
resp = rest_client.publish({"manifest": manifest,
"marketplace": "21mkt"})
except ServerRequestError as e:
if e.status_code == 403 and e.data.get("error") == "TO600":
already_published_hook(service_name)
else:
failed_to_publish_hook(service_name)
except:
unknown_publish_error_hook(service_name)
else:
if resp.status_code == 201:
published_hook(service_name)
else:
failed_to_publish_hook(service_name)
def read_server_config(self):
try:
with open(Two1Composer.COMPOSE_FILE) as f:
return yaml.load(f)
except FileNotFoundError:
return {}
def get_services_mnemonic(self):
if os.path.isfile(Two1Composer.COMPOSE_FILE):
with self.ComposerYAMLContext() as composer_yaml:
try:
maybe_mnemonic = composer_yaml['services']['payments']['environment']['TWO1_WALLET_MNEMONIC']
except KeyError:
rv = None
else:
rv = maybe_mnemonic
else:
rv = None
return rv
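# --- Usage sketch (illustrative addition, not from the original package) ---
# Image.from_string() is pure string parsing, so it can be exercised without a
# Docker daemon; the image names below are made up.
if __name__ == "__main__":
    Image = Two1ComposerContainers.ServiceManager.Image
    img = Image.from_string("21dotco/two1:service-ping21")
    print(img.docker_hub_account, img.repository, img.tag)  # 21dotco two1 service-ping21
    print(str(Image.from_string("myimage")))                # myimage:latest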
|
cinder/tests/unit/policies/test_attachments.py | arunvinodqmco/cinder | 571 | 12621120 | # Copyright 2021 Red Hat, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from cinder.api import microversions as mv
from cinder.api.v3 import attachments
from cinder import exception
from cinder.policies import attachments as attachments_policies
from cinder.tests.unit.api import fakes as fake_api
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.policies import base
from cinder.tests.unit import utils as test_utils
from cinder.volume import manager as volume_manager
@ddt.ddt
class AttachmentsPolicyTest(base.BasePolicyTest):
authorized_users = [
'legacy_admin',
'legacy_owner',
'system_admin',
'project_admin',
'project_member',
'project_reader',
'project_foo',
]
unauthorized_users = [
'system_member',
'system_reader',
'system_foo',
'other_project_member',
'other_project_reader',
]
# Basic policy test is without enforcing scope (which cinder doesn't
# yet support) and deprecated rules enabled.
def setUp(self, enforce_scope=False, enforce_new_defaults=False,
*args, **kwargs):
super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs)
self.controller = attachments.AttachmentsController()
self.manager = volume_manager.VolumeManager()
self.manager.driver = mock.MagicMock()
self.manager.driver.initialize_connection = mock.MagicMock()
self.manager.driver.initialize_connection.side_effect = (
self._initialize_connection)
self.api_path = '/v3/%s/attachments' % (self.project_id)
self.api_version = mv.NEW_ATTACH
def _initialize_connection(self, volume, connector):
return {'data': connector}
def _create_attachment(self):
vol_type = test_utils.create_volume_type(self.project_admin_context,
name='fake_vol_type',
testcase_instance=self)
volume = test_utils.create_volume(self.project_member_context,
volume_type_id=vol_type.id,
admin_metadata={
'attached_mode': 'ro'
},
testcase_instance=self)
volume = test_utils.attach_volume(self.project_member_context,
volume.id,
fake.INSTANCE_ID,
'fake_host',
'fake_mountpoint')
return volume.volume_attachment[0].id
@ddt.data(*base.all_users)
def test_create_attachment_policy(self, user_id):
volume = test_utils.create_volume(self.project_member_context,
testcase_instance=self)
rule_name = attachments_policies.CREATE_POLICY
url = self.api_path
req = fake_api.HTTPRequest.blank(url, version=self.api_version)
req.method = 'POST'
body = {
"attachment": {
"instance_uuid": fake.INSTANCE_ID,
"volume_uuid": volume.id,
}
}
        # Some contexts return HTTP 404 (rather than 403).
unauthorized_exceptions = [
exception.VolumeNotFound,
]
self.common_policy_check(user_id, self.authorized_users,
self.unauthorized_users,
unauthorized_exceptions,
rule_name, self.controller.create, req,
body=body)
@ddt.data(*base.all_users)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
def test_update_attachment_policy(self, user_id, mock_attachment_update):
# Redirect the RPC call directly to the volume manager.
def attachment_update(*args):
return self.manager.attachment_update(*args)
mock_attachment_update.side_effect = attachment_update
rule_name = attachments_policies.UPDATE_POLICY
attachment_id = self._create_attachment()
url = '%s/%s' % (self.api_path, attachment_id)
req = fake_api.HTTPRequest.blank(url, version=self.api_version)
req.method = 'PUT'
body = {
"attachment": {
"connector": {
"initiator": "iqn.1993-08.org.debian: 01: cad181614cec",
"ip": "192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": False,
"mountpoint": "/dev/vdb",
"mode": "ro"
}
}
}
unauthorized_exceptions = []
self.common_policy_check(user_id, self.authorized_users,
self.unauthorized_users,
unauthorized_exceptions,
rule_name, self.controller.update, req,
id=attachment_id, body=body)
@ddt.data(*base.all_users)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete')
def test_delete_attachment_policy(self, user_id, mock_attachment_delete):
# Redirect the RPC call directly to the volume manager.
def attachment_delete(*args):
return self.manager.attachment_delete(*args)
mock_attachment_delete.side_effect = attachment_delete
rule_name = attachments_policies.DELETE_POLICY
attachment_id = self._create_attachment()
url = '%s/%s' % (self.api_path, attachment_id)
req = fake_api.HTTPRequest.blank(url, version=self.api_version)
req.method = 'DELETE'
unauthorized_exceptions = []
self.common_policy_check(user_id, self.authorized_users,
self.unauthorized_users,
unauthorized_exceptions,
rule_name, self.controller.delete, req,
id=attachment_id)
@ddt.data(*base.all_users)
def test_complete_attachment_policy(self, user_id):
rule_name = attachments_policies.COMPLETE_POLICY
attachment_id = self._create_attachment()
url = '%s/%s/action' % (self.api_path, attachment_id)
req = fake_api.HTTPRequest.blank(url, version=mv.NEW_ATTACH_COMPLETION)
req.method = 'POST'
body = {
"os-complete": {}
}
unauthorized_exceptions = [
exception.VolumeNotFound,
]
self.common_policy_check(user_id, self.authorized_users,
self.unauthorized_users,
unauthorized_exceptions,
rule_name, self.controller.complete, req,
id=attachment_id, body=body)
@ddt.data(*base.all_users)
def test_multiattach_bootable_volume_policy(self, user_id):
volume = test_utils.create_volume(self.project_member_context,
multiattach=True,
status='in-use',
bootable=True,
testcase_instance=self)
rule_name = attachments_policies.MULTIATTACH_BOOTABLE_VOLUME_POLICY
url = self.api_path
req = fake_api.HTTPRequest.blank(url, version=self.api_version)
req.method = 'POST'
body = {
"attachment": {
"instance_uuid": fake.INSTANCE_ID,
"volume_uuid": volume.id,
}
}
# Relax the CREATE_POLICY in order to get past that check, which takes
# place prior to checking the MULTIATTACH_BOOTABLE_VOLUME_POLICY.
self.policy.set_rules({attachments_policies.CREATE_POLICY: ""},
overwrite=False)
unauthorized_exceptions = [
exception.VolumeNotFound,
]
self.common_policy_check(user_id, self.authorized_users,
self.unauthorized_users,
unauthorized_exceptions,
rule_name, self.controller.create, req,
body=body)
class AttachmentsPolicySecureRbacTest(AttachmentsPolicyTest):
authorized_users = [
'legacy_admin',
'system_admin',
'project_admin',
'project_member',
]
unauthorized_users = [
'legacy_owner',
'system_member',
'system_reader',
'system_foo',
'project_reader',
'project_foo',
'other_project_member',
'other_project_reader',
]
def setUp(self, *args, **kwargs):
# Test secure RBAC by disabling deprecated policy rules (scope
# is still not enabled).
super().setUp(enforce_scope=False, enforce_new_defaults=True,
*args, **kwargs)
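# Usage note (illustrative, not from the cinder tree): these policy tests are
# normally selected through cinder's standard test runner, for example
#   stestr run cinder.tests.unit.policies.test_attachments
# (shown only as an example invocation).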
|
files/commands/code.py | rockonedege/ezored | 129 | 12621146 |
"""Code manager tool"""
import os
import subprocess
from files.core import const
from files.core import file
from files.core import log
from files.core import runner
from files.core import target
# -----------------------------------------------------------------------------
def run(params):
args = params["args"]
if len(args) > 0:
action = args[0]
if action:
if action == "format":
code_format(params)
else:
show_help(params)
else:
show_help(params)
else:
show_help(params)
# -----------------------------------------------------------------------------
def code_format(params):
proj_path = params["proj_path"]
targets = target.get_all_targets(proj_path)
# format c++ files
has_tool = check_cpp_formatter()
if has_tool:
path_list = [
{
"path": os.path.join(
proj_path, const.DIR_NAME_FILES, const.DIR_NAME_FILES_MODULES
),
"patterns": ["*.cpp", "*.hpp", "*.c", "*.h", "*.m", "*.mm"],
},
{
"path": os.path.join(proj_path, const.DIR_NAME_PROJECTS, "others"),
"patterns": ["*.cpp", "*.hpp", "*.c", "*.h", "*.m", "*.mm"],
},
{
"path": os.path.join(proj_path, const.DIR_NAME_PROJECTS, "android"),
"patterns": ["*.cpp", "*.hpp", "*.c", "*.h", "*.m", "*.mm"],
},
{
"path": os.path.join(
proj_path, const.DIR_NAME_PROJECTS, "ios", "Sample", "Sample"
),
"patterns": ["*.cpp", "*.hpp", "*.c", "*.h", "*.m", "*.mm"],
},
]
if path_list:
log.info("Formating C++ files...")
for path_list_item in path_list:
patterns = path_list_item["patterns"]
for pattern_item in patterns:
files = file.find_files(path_list_item["path"], pattern_item)
for file_item in files:
log.info(
"Formatting file: {0}...".format(os.path.relpath(file_item))
)
run_args = ["clang-format", "-style", "file", "-i", file_item]
runner.run(run_args, proj_path)
log.ok()
else:
log.error("No C++ files found to format")
# format python files
has_tool = check_python_formatter()
if has_tool:
path_list = [
{
"path": os.path.join(proj_path, "make.py"),
},
{
"path": os.path.join(proj_path, const.DIR_NAME_FILES),
"patterns": ["*.py"],
},
]
if path_list:
log.info("Formating Python files...")
for path_list_item in path_list:
patterns = (
path_list_item["patterns"] if "patterns" in path_list_item else None
)
if patterns:
for pattern_item in patterns:
files = file.find_files(path_list_item["path"], pattern_item)
for file_item in files:
log.info(
"Formatting file: {0}...".format(
os.path.relpath(file_item)
)
)
run_args = ["black", "-q", file_item]
runner.run(run_args, proj_path)
else:
file_item = (
path_list_item["path"] if "path" in path_list_item else None
)
if file_item:
log.info(
"Formatting file: {0}...".format(os.path.relpath(file_item))
)
run_args = ["black", "-q", file_item]
runner.run(run_args, proj_path)
log.ok()
else:
log.error("No Python files found to format")
# format cmake files
has_tool = check_cmake_formatter()
if has_tool:
path_list = [
{
"path": os.path.join(
proj_path, const.DIR_NAME_FILES, const.DIR_NAME_FILES_MODULES
),
"patterns": ["*.cmake"],
},
{
"path": os.path.join(
proj_path, const.DIR_NAME_FILES, const.DIR_NAME_FILES_MODULES
),
"patterns": ["CMakeLists.txt"],
},
{
"path": os.path.join(
proj_path, const.DIR_NAME_FILES, const.DIR_NAME_FILES_COMMON
),
"patterns": ["*.cmake"],
},
{
"path": os.path.join(
proj_path, const.DIR_NAME_FILES, const.DIR_NAME_FILES_COMMON
),
"patterns": ["CMakeLists.txt"],
},
]
for target_name in targets:
path_list.extend(
[
{
"path": os.path.join(
proj_path,
const.DIR_NAME_FILES,
const.DIR_NAME_FILES_TARGETS,
target_name,
const.DIR_NAME_FILES_TARGET_CMAKE,
),
"patterns": ["*.cmake"],
},
{
"path": os.path.join(
proj_path,
const.DIR_NAME_FILES,
const.DIR_NAME_FILES_TARGETS,
target_name,
const.DIR_NAME_FILES_TARGET_CMAKE,
),
"patterns": ["CMakeLists.txt"],
},
{
"path": os.path.join(
proj_path,
const.DIR_NAME_FILES,
const.DIR_NAME_FILES_TARGETS,
target_name,
const.DIR_NAME_FILES_TARGET_CONAN,
),
"patterns": ["*.cmake"],
},
{
"path": os.path.join(
proj_path,
const.DIR_NAME_FILES,
const.DIR_NAME_FILES_TARGETS,
target_name,
const.DIR_NAME_FILES_TARGET_CONAN,
),
"patterns": ["CMakeLists.txt"],
},
{
"path": os.path.join(
proj_path,
const.DIR_NAME_FILES,
const.DIR_NAME_FILES_TARGETS,
target_name,
const.DIR_NAME_FILES_TARGET_SUPPORT,
),
"patterns": ["*.cmake"],
},
{
"path": os.path.join(
proj_path,
const.DIR_NAME_FILES,
const.DIR_NAME_FILES_TARGETS,
target_name,
const.DIR_NAME_FILES_TARGET_SUPPORT,
),
"patterns": ["CMakeLists.txt"],
},
{
"path": os.path.join(
proj_path,
const.DIR_NAME_FILES,
const.DIR_NAME_FILES_TARGETS,
target_name,
const.DIR_NAME_FILES_TARGET_VERBS,
),
"patterns": ["*.cmake"],
},
{
"path": os.path.join(
proj_path,
const.DIR_NAME_FILES,
const.DIR_NAME_FILES_TARGETS,
target_name,
const.DIR_NAME_FILES_TARGET_VERBS,
),
"patterns": ["CMakeLists.txt"],
},
]
)
if path_list:
log.info("Formating CMake files...")
for path_list_item in path_list:
patterns = path_list_item["patterns"]
for pattern_item in patterns:
files = file.find_files(path_list_item["path"], pattern_item)
for file_item in files:
log.info(
"Formatting file: {0}...".format(os.path.relpath(file_item))
)
run_args = [
"cmake-format",
"-c",
".cmake-format",
"-i",
file_item,
]
runner.run(run_args, proj_path)
log.ok()
else:
log.error("No CMake files found to format")
# -----------------------------------------------------------------------------
def check_cpp_formatter():
"""Checks if invoking supplied clang-format binary works."""
try:
subprocess.check_output(["clang-format", "--version"])
return True
except OSError:
log.info(
"Clang-format is not installed, check: https://clang.llvm.org/docs/ClangFormat.html"
)
return False
# -----------------------------------------------------------------------------
def check_python_formatter():
"""Checks if invoking supplied black binary works."""
try:
subprocess.check_output(["black", "--version"])
return True
except OSError:
log.info("Black is not installed, check: https://github.com/psf/black")
return False
# -----------------------------------------------------------------------------
def check_cmake_formatter():
"""Checks if invoking supplied cmake-format binary works."""
try:
subprocess.check_output(["cmake-format", "--version"])
return True
except OSError:
log.info(
"Cmake-format is not installed, check: https://github.com/cheshirekow/cmake_format"
)
return False
# -----------------------------------------------------------------------------
def show_help(params):
log.colored("Available actions:\n", log.PURPLE)
log.normal(" - format")
# -----------------------------------------------------------------------------
def get_description(params):
return "Code manager tool"
|
gradient_free_optimizers/optimizers/sequence_model/ensemble_optimizer.py | gtr8/Gradient-Free-Optimizers | 860 | 12621154 |
# Author: <NAME>
# Email: <EMAIL>
# License: MIT License
from .exp_imp_based_opt import ExpectedImprovementBasedOptimization
from .surrogate_models import EnsembleRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.neural_network import MLPRegressor
class EnsembleOptimizer(ExpectedImprovementBasedOptimization):
def __init__(
self,
search_space,
initialize={"grid": 4, "random": 2, "vertices": 4},
estimators=[
GradientBoostingRegressor(n_estimators=5),
# DecisionTreeRegressor(),
# MLPRegressor(),
GaussianProcessRegressor(),
],
xi=0.01,
warm_start_smbo=None,
max_sample_size=10000000,
sampling={"random": 1000000},
warnings=100000000,
rand_rest_p=0.03,
):
super().__init__(search_space, initialize)
self.estimators = estimators
self.regr = EnsembleRegressor(estimators)
self.xi = xi
self.warm_start_smbo = warm_start_smbo
self.max_sample_size = max_sample_size
self.sampling = sampling
self.warnings = warnings
self.rand_rest_p = rand_rest_p
self.init_position_combinations()
self.init_warm_start_smbo()
|
run/sipdump2john.py | zaza568/yo | 2,109 | 12621188 |
#!/usr/bin/env python
"""sipdump2john.py processes sipdump output files (dump files)
into a format suitable for use with JtR."""
import sys
def process_file(filename):
with open(filename, "r") as f:
for line in f.readlines():
line = line.rstrip().replace('"', '*').replace(':', '*')
data = line.split('*')
# Handle the case when the port number is not explicit
# in the uri field, in that case, adds an empty field
if len(data) == 13:
data.insert(7, '')
sys.stderr.write("%s-%s:$sip$*%s\n" % (data[0], data[1], '*'.join(data)))
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.stderr.write("Usage: %s <sipdump dump files>\n" % sys.argv[0])
sys.exit(-1)
for i in range(1, len(sys.argv)):
process_file(sys.argv[i])
|
tests/r/test_consump.py | hajime9652/observations | 199 | 12621226 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.consump import consump
def test_consump():
"""Test module consump.py by downloading
consump.csv and testing shape of
extracted data has 37 rows and 24 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = consump(test_path)
try:
assert x_train.shape == (37, 24)
except:
shutil.rmtree(test_path)
    raise
|
examples/emr_pyspark_crag/repo.py | dbatten5/dagster | 4,606 | 12621232 | # start-snippet
from pathlib import Path
from dagster import graph, make_python_type_usable_as_dagster_type, op, repository
from dagster.core.definitions.no_step_launcher import no_step_launcher
from dagster_aws.emr import emr_pyspark_step_launcher
from dagster_aws.s3 import s3_pickle_io_manager, s3_resource
from dagster_pyspark import DataFrame as DagsterPySparkDataFrame
from dagster_pyspark import pyspark_resource
from pyspark.sql import DataFrame, Row
from pyspark.sql.types import IntegerType, StringType, StructField, StructType
# Make pyspark.sql.DataFrame map to dagster_pyspark.DataFrame
make_python_type_usable_as_dagster_type(python_type=DataFrame, dagster_type=DagsterPySparkDataFrame)
@op(required_resource_keys={"pyspark", "pyspark_step_launcher"})
def make_people(context) -> DataFrame:
schema = StructType([StructField("name", StringType()), StructField("age", IntegerType())])
rows = [Row(name="Thom", age=51), Row(name="Jonny", age=48), Row(name="Nigel", age=49)]
return context.resources.pyspark.spark_session.createDataFrame(rows, schema)
@op(required_resource_keys={"pyspark_step_launcher"})
def filter_over_50(people: DataFrame) -> DataFrame:
return people.filter(people["age"] > 50)
@op(required_resource_keys={"pyspark_step_launcher"})
def count_people(people: DataFrame) -> int:
return people.count()
emr_resource_defs = {
"pyspark_step_launcher": emr_pyspark_step_launcher.configured(
{
"cluster_id": {"env": "EMR_CLUSTER_ID"},
"local_pipeline_package_path": str(Path(__file__).parent),
"deploy_local_pipeline_package": True,
"region_name": "us-west-1",
"staging_bucket": "my_staging_bucket",
"wait_for_logs": True,
}
),
"pyspark": pyspark_resource.configured({"spark_conf": {"spark.executor.memory": "2g"}}),
"s3": s3_resource,
"io_manager": s3_pickle_io_manager.configured(
{"s3_bucket": "my_staging_bucket", "s3_prefix": "simple-pyspark"}
),
}
local_resource_defs = {
"pyspark_step_launcher": no_step_launcher,
"pyspark": pyspark_resource.configured({"spark_conf": {"spark.default.parallelism": 1}}),
}
@graph
def count_people_over_50():
count_people(filter_over_50(make_people()))
count_people_over_50_local = count_people_over_50.to_job(
name="local", resource_defs=local_resource_defs
)
count_people_over_50_emr = count_people_over_50.to_job(name="prod", resource_defs=emr_resource_defs)
# end-snippet
@repository
def emr_pyspark_example():
return [count_people_over_50_emr, count_people_over_50_local]
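# Usage note (illustrative): the local variant can be run in-process with
# `count_people_over_50_local.execute_in_process()`, while the EMR job expects
# EMR_CLUSTER_ID to be set and the staging bucket to exist.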
|
components/iscesys/DataRetriever/__init__.py | vincentschut/isce2 | 1,133 | 12621236 | #!/usr/bin/env python3
def createDataRetriever(name=''):
from .DataRetriever import DataRetriever
return DataRetriever(name=name)
def getFactoriesInfo():
"""
Returns a dictionary with information on how to create an object Sensor from its factory
"""
return {'DataRetriever':
{
'factory':'createDataRetriever'
}
}
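# Usage sketch (illustrative): callers typically resolve the factory name from
# the registry above, e.g.
#   info = getFactoriesInfo()["DataRetriever"]
#   retriever = globals()[info["factory"]]()  # calls createDataRetriever()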
|
oidc_example/op2/client_mgr.py | kschu91/pyoidc | 373 | 12621239 |
#!/usr/bin/env python
import json
from oic.utils.client_management import CDB
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-l', dest='list', action='store_true')
parser.add_argument('-a', dest='add')
parser.add_argument('-d', dest='delete')
parser.add_argument(dest="config")
args = parser.parse_args()
# Client data base
cdb = CDB(args.config)
if args.list:
for key, val in cdb.items():
print('{}:{}'.format(key, val['redirect_uris']))
if args.add:
fp = open(args.add)
spec = json.load(fp)
cli_info = cdb.create(**spec)
print(cli_info)
if args.delete:
del cdb[args.delete]
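# Usage sketch (illustrative command lines; file names are placeholders):
#   python client_mgr.py -l cdb_config                     # list client_id:redirect_uris
#   python client_mgr.py -a client_spec.json cdb_config    # register a client from a JSON spec
#   python client_mgr.py -d <client_id> cdb_config         # remove a client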
|
test/win/gyptest-quoting-commands.py | chlorm-forks/gyp | 2,151 | 12621255 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure batch files run as actions. Regression test for previously missing
trailing quote on command line. cmd typically will implicitly insert a missing
quote, but if the command ends in a quote, it will not insert another, so the
command can sometimes become unterminated.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'batch-file-action'
test.run_gyp('batch-file-action.gyp', chdir=CHDIR)
test.build('batch-file-action.gyp', test.ALL, chdir=CHDIR)
test.pass_test()
|
Day_1_Scientific_Python/snippets/02-pandas_introduction65.py | Morisset/python-workshop | 183 | 12621269 |
df.loc[df['Sex'] == 'male', 'Age'].mean() |
migrations/versions/ce624ab2c458_create_tables.py | martyole/TensorHive | 129 | 12621274 |
"""create_tables
Revision ID: ce624ab2c458
Revises:
Create Date: 2018-10-19 23:09:12.879429
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ce624ab2c458'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('revoked_tokens',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('jti', sa.String(length=120), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('jti')
)
op.create_table('users',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('username', sa.String(length=40), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('_hashed_password', sa.String(length=120), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table('reservations',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=60), nullable=False),
sa.Column('description', sa.String(length=200), nullable=True),
sa.Column('protected_resource_id', sa.String(length=60), nullable=False),
sa.Column('_starts_at', sa.DateTime(), nullable=False),
sa.Column('_ends_at', sa.DateTime(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('name', sa.String(length=40), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('roles')
op.drop_table('reservations')
op.drop_table('users')
op.drop_table('revoked_tokens')
# ### end Alembic commands ###
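# Usage note (illustrative): apply or roll back this revision with Alembic, e.g.
#   alembic upgrade ce624ab2c458
#   alembic downgrade -1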
|
Section 8 - Twitter Sentiment Analysis/TSA Part 5 - Preprocessing tweets.py | kungfumas/bahasa-alami | 169 | 12621281 |
# Twitter Sentiment Analysis using NLP
# Install tweepy - pip install tweepy
# Importing the libraries
import tweepy
import re
import pickle
from tweepy import OAuthHandler
# Please replace these with your own consumer key, consumer secret, access token and access secret
# Initializing the keys
consumer_key = 'yoIwFkjZGYDa49aO16XqSNqcN'
consumer_secret = '<KEY>'
access_token = '<KEY>'
access_secret ='<KEY>'
# Initializing the tokens
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
args = ['trump'];
api = tweepy.API(auth,timeout=10)
# Fetching the tweets
list_tweets = []
query = args[0]
if len(args) == 1:
for status in tweepy.Cursor(api.search,q=query+" -filter:retweets",lang='en',result_type='recent',geocode="22.1568,89.4332,500km").items(100):
list_tweets.append(status.text)
# Loading the vectorizer and classfier
with open('classifier.pickle','rb') as f:
classifier = pickle.load(f)
with open('tfidfmodel.pickle','rb') as f:
tfidf = pickle.load(f)
# Preprocessing the tweets
for tweet in list_tweets:
tweet = re.sub(r"^https://t.co/[a-zA-Z0-9]*\s", " ", tweet)
tweet = re.sub(r"\s+https://t.co/[a-zA-Z0-9]*\s", " ", tweet)
tweet = re.sub(r"\s+https://t.co/[a-zA-Z0-9]*$", " ", tweet)
tweet = tweet.lower()
tweet = re.sub(r"that's","that is",tweet)
tweet = re.sub(r"there's","there is",tweet)
tweet = re.sub(r"what's","what is",tweet)
tweet = re.sub(r"where's","where is",tweet)
tweet = re.sub(r"it's","it is",tweet)
tweet = re.sub(r"who's","who is",tweet)
tweet = re.sub(r"i'm","i am",tweet)
tweet = re.sub(r"she's","she is",tweet)
tweet = re.sub(r"he's","he is",tweet)
tweet = re.sub(r"they're","they are",tweet)
tweet = re.sub(r"who're","who are",tweet)
tweet = re.sub(r"ain't","am not",tweet)
tweet = re.sub(r"wouldn't","would not",tweet)
tweet = re.sub(r"shouldn't","should not",tweet)
tweet = re.sub(r"can't","can not",tweet)
tweet = re.sub(r"couldn't","could not",tweet)
tweet = re.sub(r"won't","will not",tweet)
tweet = re.sub(r"\W"," ",tweet)
tweet = re.sub(r"\d"," ",tweet)
tweet = re.sub(r"\s+[a-z]\s+"," ",tweet)
tweet = re.sub(r"\s+[a-z]$"," ",tweet)
tweet = re.sub(r"^[a-z]\s+"," ",tweet)
tweet = re.sub(r"\s+"," ",tweet)
print(tweet)
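# --- Illustrative continuation (not part of the original Part 5 script) ---
# The loop above only prints each cleaned tweet; the vectorizer and classifier
# loaded earlier are never applied. A minimal sketch of the prediction step,
# assuming `tfidf` is a fitted TfidfVectorizer and `classifier` follows the
# scikit-learn predict() API (both inferred from the pickles loaded above):
def predict_sentiments(tweets, vectorizer, model):
    # Vectorize already-preprocessed tweets and return the predicted labels.
    features = vectorizer.transform(tweets).toarray()
    return model.predict(features)

# Hypothetical usage, once the cleaned tweets are collected into a list:
# labels = predict_sentiments(cleaned_tweets, tfidf, classifier)
# print('positive:', int(sum(labels)), 'negative:', len(labels) - int(sum(labels)))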
|
tests/SphereVoxelization_fft.py | SyedZiaul/freud | 172 | 12621285 | <filename>tests/SphereVoxelization_fft.py
import numpy as np
def compute_3d(box_size, width, points, r_max, periodic=True):
"""
Does voxelization by doing an aperiodic fft of the sphere over the
points on the grid in 3 dimensions
Args:
box_size (float):
            Length of the (assumed cubic) box for the calculation.
width (int):
Number of grid spaces in each direction of the box
points (:np.ndarray: (N, 3)):
Points within the box to compute the voxelization of
r_max (float):
Radius of the spheres centered at each point
periodic (bool):
True if the box should be considered periodic
"""
eff_rad = r_max / box_size * width
# enlarge the box for the fft by adding more segments of the same length
# we will cut the extra off later so the fft will be aperiodic.
buf_size = 0 if periodic else int(round(eff_rad + 1))
new_width = 2 * buf_size + width
# make the grid with the points on it
arr = _put_points_on_grid(points, new_width, box_size, width, buf_size, ndim=3)
# make the sphere
sphere = _make_sphere_3d(new_width, eff_rad)
# do the ffts
fft_arr = np.fft.fftn(arr) * np.fft.fftn(sphere)
image = np.rint(np.real(np.fft.ifftn(fft_arr))).astype(np.uint32)
# get rid of the buffer
if not periodic:
image = image[buf_size:-buf_size, buf_size:-buf_size, buf_size:-buf_size]
# set the overlaps to 1, instead of larger integers
np.clip(image, 0, 1, out=image)
return image
def compute_2d(box_size, width, points, r_max, periodic=True):
"""
Does voxelization by doing an aperiodic fft of the sphere over the
    points on the grid in 2 dimensions
Args:
box_size (float):
            Length of the (assumed cubic) box for the calculation.
width (int):
Number of grid spaces in each direction of the box
points (:np.ndarray: (N, 3)):
Points within the box to compute the voxelization of
r_max (float):
Radius of the spheres centered at each point
periodic (bool):
True if the box should be considered periodic
"""
eff_rad = r_max / box_size * width
# enlarge the box for the fft by adding more segments of the same length
# we will cut the extra off later so the fft will be aperiodic.
buf_size = 0 if periodic else int(round(eff_rad + 1))
new_width = 2 * buf_size + width
# make the grid with the points on it
arr = _put_points_on_grid(points, new_width, box_size, width, buf_size, ndim=2)
# make the sphere
sphere = _make_sphere_2d(new_width, eff_rad)
# do the ffts
fft_arr = np.fft.fft2(arr) * np.fft.fft2(sphere)
image = np.rint(np.real(np.fft.ifft2(fft_arr))).astype(np.uint32)
# get rid of the buffer
if not periodic:
image = image[buf_size:-buf_size, buf_size:-buf_size]
# set the overlaps to 1, instead of larger integers
np.clip(image, 0, 1, out=image)
return image
def _put_points_on_grid(points, new_width, box_size, width, buf_size, ndim):
"""
Creates a grid where the voxels are 1 if there is a point there and 0 if
not.
"""
d = (new_width,) * ndim
arr = np.zeros(d)
img_points = points / (box_size / width) # points in units of grid spacing
for pt in img_points:
shifted_pt = tuple(int(round(pt[i])) for i in range(ndim))
arr[shifted_pt] = 1
return arr
def _make_sphere_3d(new_width, eff_rad):
"""Makes a grid in 3D with voxels that are within ``eff_rad`` of the
center having value 1 and other voxels having value 0."""
r_rad = int(round(eff_rad))
ctr = new_width // 2
arr = np.zeros((new_width, new_width, new_width))
for i in range(-r_rad, r_rad):
for j in range(-r_rad, r_rad):
for k in range(-r_rad, r_rad):
if np.linalg.norm([i, j, k]) < eff_rad:
arr[ctr + i, ctr + j, ctr + k] = 1
return arr
def _make_sphere_2d(new_width, eff_rad):
"""makes a grid in 2D with voxels that are within eff_rad of the center
having value 1 (else 0)"""
r_rad = round(eff_rad)
ctr = new_width // 2
arr = np.zeros((new_width, new_width))
for i in range(-r_rad, r_rad):
for j in range(-r_rad, r_rad):
if np.linalg.norm([i, j]) <= eff_rad:
arr[ctr + i, ctr + j] = 1
return arr
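# --- Usage sketch (illustrative; not part of the original freud test helper) ---
# Voxelize a few interior points of a cubic box; the box length, grid width and
# sphere radius below are arbitrary example values, not values used by the tests.
if __name__ == "__main__":
    box_size = 10.0
    width = 32
    r_max = 1.0
    points = np.array([[2.0, 3.0, 4.0], [5.0, 5.0, 5.0], [7.5, 2.5, 6.0]])
    image = compute_3d(box_size, width, points, r_max, periodic=True)
    # image is a (width, width, width) array of 0/1 voxels covering the spheres.
    print(image.shape, int(image.sum()))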
|
dist/ba_data/python/bastd/game/thelaststand.py | Bartixxx32/Bombsquad-Ballistica-Modded-Server | 317 | 12621286 | <filename>dist/ba_data/python/bastd/game/thelaststand.py<gh_stars>100-1000
# Released under the MIT License. See LICENSE for details.
#
"""Defines the last stand minigame."""
from __future__ import annotations
import random
from dataclasses import dataclass
from typing import TYPE_CHECKING
import ba
from bastd.actor.playerspaz import PlayerSpaz
from bastd.actor.bomb import TNTSpawner
from bastd.actor.scoreboard import Scoreboard
from bastd.actor.powerupbox import PowerupBoxFactory, PowerupBox
from bastd.actor.spazbot import (SpazBotSet, SpazBotDiedMessage, BomberBot,
BomberBotPro, BomberBotProShielded,
BrawlerBot, BrawlerBotPro,
BrawlerBotProShielded, TriggerBot,
TriggerBotPro, TriggerBotProShielded,
ChargerBot, StickyBot, ExplodeyBot)
if TYPE_CHECKING:
from typing import Any, Optional, Sequence
from bastd.actor.spazbot import SpazBot
@dataclass
class SpawnInfo:
"""Spawning info for a particular bot type."""
spawnrate: float
increase: float
dincrease: float
class Player(ba.Player['Team']):
"""Our player type for this game."""
class Team(ba.Team[Player]):
"""Our team type for this game."""
class TheLastStandGame(ba.CoopGameActivity[Player, Team]):
"""Slow motion how-long-can-you-last game."""
name = '<NAME>'
description = 'Final glorious epic slow motion battle to the death.'
tips = [
'This level never ends, but a high score here\n'
'will earn you eternal respect throughout the world.'
]
# Show messages when players die since it matters here.
announce_player_deaths = True
# And of course the most important part.
slow_motion = True
default_music = ba.MusicType.EPIC
def __init__(self, settings: dict):
settings['map'] = 'Rampage'
super().__init__(settings)
self._new_wave_sound = ba.getsound('scoreHit01')
self._winsound = ba.getsound('score')
self._cashregistersound = ba.getsound('cashRegister')
self._spawn_center = (0, 5.5, -4.14)
self._tntspawnpos = (0, 5.5, -6)
self._powerup_center = (0, 7, -4.14)
self._powerup_spread = (7, 2)
self._preset = str(settings.get('preset', 'default'))
self._excludepowerups: list[str] = []
self._scoreboard: Optional[Scoreboard] = None
self._score = 0
self._bots = SpazBotSet()
self._dingsound = ba.getsound('dingSmall')
self._dingsoundhigh = ba.getsound('dingSmallHigh')
self._tntspawner: Optional[TNTSpawner] = None
self._bot_update_interval: Optional[float] = None
self._bot_update_timer: Optional[ba.Timer] = None
self._powerup_drop_timer = None
# For each bot type: [spawnrate, increase, d_increase]
self._bot_spawn_types = {
BomberBot: SpawnInfo(1.00, 0.00, 0.000),
BomberBotPro: SpawnInfo(0.00, 0.05, 0.001),
BomberBotProShielded: SpawnInfo(0.00, 0.02, 0.002),
BrawlerBot: SpawnInfo(1.00, 0.00, 0.000),
BrawlerBotPro: SpawnInfo(0.00, 0.05, 0.001),
BrawlerBotProShielded: SpawnInfo(0.00, 0.02, 0.002),
TriggerBot: SpawnInfo(0.30, 0.00, 0.000),
TriggerBotPro: SpawnInfo(0.00, 0.05, 0.001),
TriggerBotProShielded: SpawnInfo(0.00, 0.02, 0.002),
ChargerBot: SpawnInfo(0.30, 0.05, 0.000),
StickyBot: SpawnInfo(0.10, 0.03, 0.001),
ExplodeyBot: SpawnInfo(0.05, 0.02, 0.002)
} # yapf: disable
def on_transition_in(self) -> None:
super().on_transition_in()
ba.timer(1.3, ba.Call(ba.playsound, self._new_wave_sound))
self._scoreboard = Scoreboard(label=ba.Lstr(resource='scoreText'),
score_split=0.5)
def on_begin(self) -> None:
super().on_begin()
# Spit out a few powerups and start dropping more shortly.
self._drop_powerups(standard_points=True)
ba.timer(2.0, ba.WeakCall(self._start_powerup_drops))
ba.timer(0.001, ba.WeakCall(self._start_bot_updates))
self.setup_low_life_warning_sound()
self._update_scores()
self._tntspawner = TNTSpawner(position=self._tntspawnpos,
respawn_time=10.0)
def spawn_player(self, player: Player) -> ba.Actor:
pos = (self._spawn_center[0] + random.uniform(-1.5, 1.5),
self._spawn_center[1],
self._spawn_center[2] + random.uniform(-1.5, 1.5))
return self.spawn_player_spaz(player, position=pos)
def _start_bot_updates(self) -> None:
self._bot_update_interval = 3.3 - 0.3 * (len(self.players))
self._update_bots()
self._update_bots()
if len(self.players) > 2:
self._update_bots()
if len(self.players) > 3:
self._update_bots()
self._bot_update_timer = ba.Timer(self._bot_update_interval,
ba.WeakCall(self._update_bots))
    def _drop_powerup(self, index: int, poweruptype: Optional[str] = None) -> None:
if poweruptype is None:
poweruptype = (PowerupBoxFactory.get().get_random_powerup_type(
excludetypes=self._excludepowerups))
PowerupBox(position=self.map.powerup_spawn_points[index],
poweruptype=poweruptype).autoretain()
def _start_powerup_drops(self) -> None:
self._powerup_drop_timer = ba.Timer(3.0,
ba.WeakCall(self._drop_powerups),
repeat=True)
def _drop_powerups(self,
standard_points: bool = False,
                       force_first: Optional[str] = None) -> None:
"""Generic powerup drop."""
from bastd.actor import powerupbox
if standard_points:
pts = self.map.powerup_spawn_points
for i in range(len(pts)):
ba.timer(
1.0 + i * 0.5,
ba.WeakCall(self._drop_powerup, i,
force_first if i == 0 else None))
else:
drop_pt = (self._powerup_center[0] + random.uniform(
-1.0 * self._powerup_spread[0], 1.0 * self._powerup_spread[0]),
self._powerup_center[1],
self._powerup_center[2] + random.uniform(
-self._powerup_spread[1], self._powerup_spread[1]))
# Drop one random one somewhere.
powerupbox.PowerupBox(
position=drop_pt,
poweruptype=PowerupBoxFactory.get().get_random_powerup_type(
excludetypes=self._excludepowerups)).autoretain()
def do_end(self, outcome: str) -> None:
"""End the game."""
if outcome == 'defeat':
self.fade_to_red()
self.end(delay=2.0,
results={
'outcome': outcome,
'score': self._score,
'playerinfos': self.initialplayerinfos
})
def _update_bots(self) -> None:
assert self._bot_update_interval is not None
self._bot_update_interval = max(0.5, self._bot_update_interval * 0.98)
self._bot_update_timer = ba.Timer(self._bot_update_interval,
ba.WeakCall(self._update_bots))
botspawnpts: list[Sequence[float]] = [[-5.0, 5.5, -4.14],
[0.0, 5.5, -4.14],
[5.0, 5.5, -4.14]]
dists = [0.0, 0.0, 0.0]
playerpts: list[Sequence[float]] = []
for player in self.players:
try:
if player.is_alive():
assert isinstance(player.actor, PlayerSpaz)
assert player.actor.node
playerpts.append(player.actor.node.position)
except Exception:
ba.print_exception('Error updating bots.')
for i in range(3):
for playerpt in playerpts:
dists[i] += abs(playerpt[0] - botspawnpts[i][0])
dists[i] += random.random() * 5.0 # Minor random variation.
if dists[0] > dists[1] and dists[0] > dists[2]:
spawnpt = botspawnpts[0]
elif dists[1] > dists[2]:
spawnpt = botspawnpts[1]
else:
spawnpt = botspawnpts[2]
spawnpt = (spawnpt[0] + 3.0 * (random.random() - 0.5), spawnpt[1],
2.0 * (random.random() - 0.5) + spawnpt[2])
# Normalize our bot type total and find a random number within that.
total = 0.0
for spawninfo in self._bot_spawn_types.values():
total += spawninfo.spawnrate
randval = random.random() * total
# Now go back through and see where this value falls.
total = 0
bottype: Optional[type[SpazBot]] = None
for spawntype, spawninfo in self._bot_spawn_types.items():
total += spawninfo.spawnrate
if randval <= total:
bottype = spawntype
break
spawn_time = 1.0
assert bottype is not None
self._bots.spawn_bot(bottype, pos=spawnpt, spawn_time=spawn_time)
# After every spawn we adjust our ratios slightly to get more
# difficult.
for spawninfo in self._bot_spawn_types.values():
spawninfo.spawnrate += spawninfo.increase
spawninfo.increase += spawninfo.dincrease
def _update_scores(self) -> None:
score = self._score
# Achievements apply to the default preset only.
if self._preset == 'default':
if score >= 250:
self._award_achievement('Last Stand Master')
if score >= 500:
self._award_achievement('Last Stand Wizard')
if score >= 1000:
self._award_achievement('Last Stand God')
assert self._scoreboard is not None
self._scoreboard.set_team_value(self.teams[0], score, max_score=None)
def handlemessage(self, msg: Any) -> Any:
if isinstance(msg, ba.PlayerDiedMessage):
player = msg.getplayer(Player)
self.stats.player_was_killed(player)
ba.timer(0.1, self._checkroundover)
elif isinstance(msg, ba.PlayerScoredMessage):
self._score += msg.score
self._update_scores()
elif isinstance(msg, SpazBotDiedMessage):
pts, importance = msg.spazbot.get_death_points(msg.how)
target: Optional[Sequence[float]]
if msg.killerplayer:
assert msg.spazbot.node
target = msg.spazbot.node.position
self.stats.player_scored(msg.killerplayer,
pts,
target=target,
kill=True,
screenmessage=False,
importance=importance)
ba.playsound(self._dingsound
if importance == 1 else self._dingsoundhigh,
volume=0.6)
# Normally we pull scores from the score-set, but if there's no
            # player let's be explicit.
else:
self._score += pts
self._update_scores()
else:
super().handlemessage(msg)
def _on_got_scores_to_beat(self, scores: list[dict[str, Any]]) -> None:
self._show_standard_scores_to_beat_ui(scores)
def end_game(self) -> None:
# Tell our bots to celebrate just to rub it in.
self._bots.final_celebrate()
ba.setmusic(None)
ba.pushcall(ba.WeakCall(self.do_end, 'defeat'))
def _checkroundover(self) -> None:
"""End the round if conditions are met."""
if not any(player.is_alive() for player in self.teams[0].players):
self.end_game()
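# --- Illustrative sketch (not part of the original BombSquad module) ---
# The weighted pick inside _update_bots() normalizes the spawnrate values and
# walks a running total until a uniform draw falls inside a bucket. The same
# pattern on plain data (the keys below are placeholders, not game types):
#
# def weighted_pick(spawn_rates: dict) -> str:
#     total = sum(spawn_rates.values())
#     randval = random.random() * total
#     running = 0.0
#     for name, rate in spawn_rates.items():
#         running += rate
#         if randval <= running:
#             return name
#     return name  # floating-point edge case: fall back to the last key
#
# weighted_pick({'common': 1.0, 'rare': 0.3, 'epic': 0.05})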
|
lldb/packages/Python/lldbsuite/test/lang/objc/foundation/TestFoundationDisassembly.py | dan-zheng/llvm-project | 456 | 12621293 | """
Test the lldb disassemble command on foundation framework.
"""
import unittest2
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
@skipUnlessDarwin
class FoundationDisassembleTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def test_foundation_disasm(self):
"""Do 'disassemble -n func' on each and every 'Code' symbol entry from the Foundation.framework."""
self.build()
# Enable synchronous mode
self.dbg.SetAsync(False)
# Create a target by the debugger.
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
foundation_framework = None
for module in target.modules:
if module.file.basename == "Foundation":
foundation_framework = module.file.fullpath
break
self.assertTrue(
foundation_framework is not None,
"Foundation.framework path located")
self.runCmd("image dump symtab '%s'" % foundation_framework)
raw_output = self.res.GetOutput()
# Now, grab every 'Code' symbol and feed it into the command:
# 'disassemble -n func'.
#
# The symbol name is on the last column and trails the flag column which
# looks like '0xhhhhhhhh', i.e., 8 hexadecimal digits.
codeRE = re.compile(r"""
\ Code\ {9} # ' Code' followed by 9 SPCs,
.* # the wildcard chars,
0x[0-9a-f]{8} # the flag column, and
\ (.+)$ # finally the function symbol.
""", re.VERBOSE)
for line in raw_output.split(os.linesep):
match = codeRE.search(line)
if match:
func = match.group(1)
self.runCmd('image lookup -s "%s"' % func)
self.runCmd('disassemble -n "%s"' % func)
def test_simple_disasm(self):
"""Test the lldb 'disassemble' command"""
self.build()
# Create a target by the debugger.
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
# Stop at +[NSString stringWithFormat:].
symbol_name = "+[NSString stringWithFormat:]"
break_results = lldbutil.run_break_set_command(
self, "_regexp-break %s" % (symbol_name))
lldbutil.check_breakpoint_result(
self,
break_results,
symbol_name=symbol_name,
num_locations=1)
# Stop at -[MyString initWithNSString:].
lldbutil.run_break_set_by_symbol(
self,
'-[MyString initWithNSString:]',
num_expected_locations=1,
sym_exact=True)
# Stop at the "description" selector.
lldbutil.run_break_set_by_selector(
self,
'description',
num_expected_locations=1,
module_name='a.out')
# Stop at -[NSAutoreleasePool release].
break_results = lldbutil.run_break_set_command(
self, "_regexp-break -[NSAutoreleasePool release]")
lldbutil.check_breakpoint_result(
self,
break_results,
symbol_name='-[NSAutoreleasePool release]',
num_locations=1)
self.runCmd("run", RUN_SUCCEEDED)
# First stop is +[NSString stringWithFormat:].
self.expect(
"thread backtrace",
"Stop at +[NSString stringWithFormat:]",
substrs=["Foundation`+[NSString stringWithFormat:]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
self.runCmd("process continue")
# Skip another breakpoint for +[NSString stringWithFormat:].
self.runCmd("process continue")
# Followed by a.out`-[MyString initWithNSString:].
self.expect(
"thread backtrace",
"Stop at a.out`-[MyString initWithNSString:]",
substrs=["a.out`-[MyString initWithNSString:]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
self.runCmd("process continue")
# Followed by -[MyString description].
self.expect("thread backtrace", "Stop at -[MyString description]",
substrs=["a.out`-[MyString description]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
self.runCmd("process continue")
# Skip another breakpoint for -[MyString description].
self.runCmd("process continue")
# Followed by -[NSAutoreleasePool release].
self.expect("thread backtrace", "Stop at -[NSAutoreleasePool release]",
substrs=["Foundation`-[NSAutoreleasePool release]"])
# Do the disassemble for the currently stopped function.
self.runCmd("disassemble -f")
|
tests/func/conftest.py | ng-pe/ldap2pg | 151 | 12621311 | import logging
import os
import sys
from functools import partial
import pytest
import sh
class PSQL(object):
# A helper object to do SQL queries with real psql.
def __init__(self):
from sh import psql
self.psql = psql
def __call__(self, *a, **kw):
return self.psql(*a, **kw)
def scalar(self, select, *a, **kw):
return next(iter(self.select1(select, *a, **kw)))
def select1(self, select, *a, **kw):
# Execute a SELECT and yield each line as a single value.
return filter(None, (
line.strip()
for line in self('-tc', select, *a, _iter=True, **kw)
))
def members(self, role):
# List members of role
return self.select1(
# Good old SQL injection. Who cares?
"SELECT m.rolname FROM pg_roles AS m "
"JOIN pg_auth_members a ON a.member = m.oid "
"JOIN pg_roles AS r ON r.oid = a.roleid "
" WHERE r.rolname = '%s' "
"ORDER BY 1;" % (role,)
)
def roles(self):
# List **all** roles
return self.select1("SELECT rolname FROM pg_roles;")
def superusers(self):
# List superusers
return self.select1(
"SELECT rolname FROM pg_roles WHERE rolsuper IS TRUE;"
)
def tables(self, *a, **kw):
# List tables
return self.select1(
"SELECT relname "
"FROM pg_catalog.pg_class c "
"JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace "
"WHERE "
" c.relkind = 'r' "
" AND n.nspname !~ '^pg_' "
" AND n.nspname <> 'information_schema' "
"ORDER BY 1;",
*a, **kw
)
@pytest.fixture(scope='session')
def psql():
# Supply the PSQL helper as a pytest fixture.
return PSQL()
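# --- Illustrative usage (not part of the original ldap2pg test suite) ---
# A functional test can combine the PSQL helper with plain asserts, e.g.:
#
# def test_superusers_exist(psql):
#     # 'postgres' is an assumed default superuser name for the test cluster.
#     assert 'postgres' in list(psql.superusers())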
class LDAP(object):
# Helper to query LDAP with creds from envvars.
def __init__(self):
self.common_args = (
'-xv',
'-w', os.environ['LDAPPASSWORD'],
)
self.search = sh.ldapsearch.bake(*self.common_args)
def search_sub_dn(self, base):
# Iter dn under base entry, excluded.
for line in self.search('-b', base, 'dn', _iter=True):
if not line.startswith('dn: '):
continue
if line.startswith('dn: ' + base):
continue
yield line.strip()[len('dn: '):]
@pytest.fixture(scope='session')
def ldap():
# Supply LDAP helper as a pytest fixture
#
# def test_rockon(ldap):
# entries = ldap.search(...)
return LDAP()
@pytest.fixture(scope='module', autouse=True)
def resetpostgres():
from sh import Command
Command('fixtures/postgres.sh')()
def lazy_write(attr, data):
# Lazy access sys.{stderr,stdout} to mix with capsys.
getattr(sys, attr).write(data)
return False # should_quit
@pytest.fixture(scope='session', autouse=True)
def sh_errout():
logging.getLogger('sh').setLevel(logging.ERROR)
# Duplicate tested command stdio to pytest capsys.
sh._SelfWrapper__self_module.Command._call_args.update(dict(
err=partial(lazy_write, 'stderr'),
out=partial(lazy_write, 'stdout'),
tee=True,
))
|
neutron/agent/linux/ip_conntrack.py | congnt95/neutron | 1,080 | 12621320 | <filename>neutron/agent/linux/ip_conntrack.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import eventlet
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions
from oslo_concurrency import lockutils
from oslo_log import log as logging
from neutron.agent.linux import utils as linux_utils
LOG = logging.getLogger(__name__)
CONTRACK_MGRS = {}
MAX_CONNTRACK_ZONES = 65535
ZONE_START = 4097
WORKERS = 8
class IpConntrackUpdate(object):
"""Encapsulates a conntrack update
An instance of this object carries the information necessary to
process a request to update the conntrack table.
"""
def __init__(self, device_info_list, rule, remote_ips):
self.device_info_list = device_info_list
self.rule = rule
self.remote_ips = remote_ips
def __repr__(self):
return ('<IpConntrackUpdate(device_info_list=%s, rule=%s, '
'remote_ips=%s>' % (self.device_info_list, self.rule,
self.remote_ips))
@lockutils.synchronized('conntrack')
def get_conntrack(get_rules_for_table_func, filtered_ports, unfiltered_ports,
execute=None, namespace=None, zone_per_port=False):
try:
return CONTRACK_MGRS[namespace]
except KeyError:
ipconntrack = IpConntrackManager(get_rules_for_table_func,
filtered_ports, unfiltered_ports,
execute, namespace, zone_per_port)
CONTRACK_MGRS[namespace] = ipconntrack
return CONTRACK_MGRS[namespace]
class IpConntrackManager(object):
"""Smart wrapper for ip conntrack."""
def __init__(self, get_rules_for_table_func, filtered_ports,
unfiltered_ports, execute=None, namespace=None,
zone_per_port=False):
self.get_rules_for_table_func = get_rules_for_table_func
self.execute = execute or linux_utils.execute
self.namespace = namespace
self.filtered_ports = filtered_ports
self.unfiltered_ports = unfiltered_ports
self.zone_per_port = zone_per_port # zone per port vs per network
self._populate_initial_zone_map()
self._queue = eventlet.queue.LightQueue()
self._start_process_queue()
def _start_process_queue(self):
LOG.debug("Starting ip_conntrack _process_queue_worker() threads")
pool = eventlet.GreenPool(size=WORKERS)
for i in range(WORKERS):
pool.spawn_n(self._process_queue_worker)
def _process_queue_worker(self):
        # While it's technically not necessary to have this method (the
        # 'while True' could just be in _process_queue()), the tests have
        # to be able to drain the queue without blocking, so _process_queue()
        # is made standalone.
while True:
self._process_queue()
def _process_queue(self):
update = None
try:
# this will block until an entry gets added to the queue
update = self._queue.get()
if update.remote_ips:
for remote_ip in update.remote_ips:
self._delete_conntrack_state(
update.device_info_list, update.rule, remote_ip)
else:
self._delete_conntrack_state(
update.device_info_list, update.rule)
except Exception:
LOG.exception("Failed to process ip_conntrack queue entry: %s",
update)
def _process(self, device_info_list, rule, remote_ips=None):
# queue the update to allow the caller to resume its work
update = IpConntrackUpdate(device_info_list, rule, remote_ips)
self._queue.put(update)
@staticmethod
def _generate_conntrack_cmd_by_rule(rule, namespace):
ethertype = rule.get('ethertype')
protocol = rule.get('protocol')
direction = rule.get('direction')
mark = rule.get('mark')
cmd = ['conntrack', '-D']
if protocol is not None:
# 0 is IP in /etc/protocols, but conntrack will throw an error
if str(protocol) == '0':
protocol = 'ip'
cmd.extend(['-p', str(protocol)])
cmd.extend(['-f', str(ethertype).lower()])
if mark is not None:
cmd.extend(['-m', str(mark)])
cmd.append('-d' if direction == 'ingress' else '-s')
cmd_ns = []
if namespace:
cmd_ns.extend(['ip', 'netns', 'exec', namespace])
cmd_ns.extend(cmd)
return cmd_ns
def _get_conntrack_cmds(self, device_info_list, rule, remote_ip=None):
conntrack_cmds = set()
cmd = self._generate_conntrack_cmd_by_rule(rule, self.namespace)
ethertype = rule.get('ethertype')
for device_info in device_info_list:
zone_id = self.get_device_zone(device_info, create=False)
if not zone_id:
LOG.debug("No zone for device %(dev)s. Will not try to "
"clear conntrack state. Zone map: %(zm)s",
{'dev': device_info['device'],
'zm': self._device_zone_map})
continue
ips = device_info.get('fixed_ips', [])
for ip in ips:
net = netaddr.IPNetwork(ip)
if str(net.version) not in ethertype:
continue
ip_cmd = [str(net.ip), '-w', zone_id]
if remote_ip and str(
netaddr.IPNetwork(remote_ip).version) in ethertype:
if rule.get('direction') == 'ingress':
direction = '-s'
else:
direction = '-d'
ip_cmd.extend([direction, str(remote_ip)])
conntrack_cmds.add(tuple(cmd + ip_cmd))
return conntrack_cmds
def _delete_conntrack_state(self, device_info_list, rule, remote_ip=None):
conntrack_cmds = self._get_conntrack_cmds(device_info_list,
rule, remote_ip)
for cmd in conntrack_cmds:
try:
self.execute(list(cmd), run_as_root=True, privsep_exec=True,
check_exit_code=True,
extra_ok_codes=[1])
except RuntimeError:
LOG.exception("Failed execute conntrack command %s", cmd)
def delete_conntrack_state_by_rule(self, device_info_list, rule):
self._process(device_info_list, rule)
def delete_conntrack_state_by_remote_ips(self, device_info_list,
ethertype, remote_ips, mark=None):
for direction in ['ingress', 'egress']:
rule = {'ethertype': str(ethertype).lower(),
'direction': direction}
if mark:
rule['mark'] = mark
self._process(device_info_list, rule, remote_ips)
def _populate_initial_zone_map(self):
"""Setup the map between devices and zones based on current rules."""
self._device_zone_map = {}
rules = self.get_rules_for_table_func('raw')
for rule in rules:
match = re.match(r'.* --physdev-in (?P<dev>[a-zA-Z0-9\-]+)'
r'.* -j CT --zone (?P<zone>\d+).*', rule)
if match:
# strip off any prefix that the interface is using
short_port_id = (
match.group('dev')[constants.LINUX_DEV_PREFIX_LEN:])
self._device_zone_map[short_port_id] = int(match.group('zone'))
LOG.debug("Populated conntrack zone map: %s", self._device_zone_map)
def _device_key(self, port):
# we have to key the device_zone_map based on the fragment of the
# UUID that shows up in the interface name. This is because the initial
# map is populated strictly based on interface names that we don't know
# the full UUID of.
if self.zone_per_port:
identifier = port['device'][constants.LINUX_DEV_PREFIX_LEN:]
else:
identifier = port['network_id']
return identifier[:(constants.LINUX_DEV_LEN -
constants.LINUX_DEV_PREFIX_LEN)]
def get_device_zone(self, port, create=True):
device_key = self._device_key(port)
try:
return self._device_zone_map[device_key]
except KeyError:
if create:
return self._generate_device_zone(device_key)
def _free_zones_from_removed_ports(self):
"""Clears any entries from the zone map of removed ports."""
existing_ports = [
self._device_key(port)
for port in (list(self.filtered_ports.values()) +
list(self.unfiltered_ports.values()))
]
removed = set(self._device_zone_map) - set(existing_ports)
for dev in removed:
self._device_zone_map.pop(dev, None)
def _generate_device_zone(self, short_device_id):
"""Generates a unique conntrack zone for the passed in ID."""
try:
zone = self._find_open_zone()
except exceptions.CTZoneExhaustedError:
# Free some zones and try again, repeat failure will not be caught
self._free_zones_from_removed_ports()
zone = self._find_open_zone()
self._device_zone_map[short_device_id] = zone
LOG.debug("Assigned CT zone %(z)s to device %(dev)s.",
{'z': zone, 'dev': short_device_id})
return self._device_zone_map[short_device_id]
def _find_open_zone(self):
# call set to dedup because old ports may be mapped to the same zone.
zones_in_use = sorted(set(self._device_zone_map.values()))
if not zones_in_use:
return ZONE_START
# attempt to increment onto the highest used zone first. if we hit the
# end, go back and look for any gaps left by removed devices.
last = zones_in_use[-1]
if last < MAX_CONNTRACK_ZONES:
return max(last + 1, ZONE_START)
for index, used in enumerate(zones_in_use):
if used - index != ZONE_START:
# gap found, let's use it!
return index + ZONE_START
# conntrack zones exhausted :( :(
raise exceptions.CTZoneExhaustedError()
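# Worked example (illustrative, not part of the original module): with
# ZONE_START = 4097 and zones in use [4097, 4098, 4100], _find_open_zone()
# returns 4101 (highest + 1) because the ceiling has not been hit. If 65535
# were also in use, the gap scan would run instead and return 4099, the first
# index where used - index != ZONE_START (index 2 + 4097).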
class OvsIpConntrackManager(IpConntrackManager):
def __init__(self, execute=None):
super(OvsIpConntrackManager, self).__init__(
get_rules_for_table_func=None,
filtered_ports={}, unfiltered_ports={},
execute=execute, namespace=None, zone_per_port=False)
def _populate_initial_zone_map(self):
self._device_zone_map = {}
def get_device_zone(self, port, create=False):
of_port = port.get('of_port')
if of_port is None:
return
return of_port.vlan_tag
|
climlab/model/column.py | nfeldl/climlab | 160 | 12621326 | <filename>climlab/model/column.py
"""Object-oriented code for radiative-convective models with grey-gas radiation.
Code developed by <NAME>, University at Albany
<EMAIL>
Note that the column models by default represent global, time averages.
Thus the insolation is a prescribed constant.
Here is an example to implement seasonal insolation at 45 degrees North
:Example:
.. code-block:: python
import climlab
# create the column model object
col = climlab.GreyRadiationModel()
# create a new latitude axis with a single point
lat = climlab.domain.Axis(axis_type='lat', points=45.)
# add this new axis to the surface domain
col.Ts.domain.axes['lat'] = lat
# create a new insolation process using this domain
Q = climlab.radiation.insolation.DailyInsolation(domains=col.Ts.domain, **col.param)
# replace the fixed insolation subprocess in the column model
col.add_subprocess('insolation', Q)
This model is now a single column with seasonally varying insolation
calculated for 45N.
"""
from __future__ import division
import numpy as np
from climlab import constants as const
from climlab.process.time_dependent_process import TimeDependentProcess
from climlab.domain.initial import column_state
from climlab.domain.field import Field
from climlab.radiation.insolation import FixedInsolation
from climlab.radiation.greygas import GreyGas, GreyGasSW
from climlab.convection.convadj import ConvectiveAdjustment
from climlab.radiation.nband import ThreeBandSW, FourBandLW, FourBandSW
from climlab.radiation.water_vapor import ManabeWaterVapor
class GreyRadiationModel(TimeDependentProcess):
def __init__(self,
num_lev=30,
num_lat=1,
lev=None,
lat=None,
water_depth=1.0,
albedo_sfc=0.299,
timestep=1. * const.seconds_per_day,
Q=341.3,
# absorption coefficient in m**2 / kg
abs_coeff=1.229E-4,
**kwargs):
# Check to see if an initial state is already provided
# If not, make one
if 'state' not in kwargs:
state = column_state(num_lev, num_lat, lev, lat, water_depth)
kwargs.update({'state': state})
super(GreyRadiationModel, self).__init__(timestep=timestep, **kwargs)
self.param['water_depth'] = water_depth
self.param['albedo_sfc'] = albedo_sfc
self.param['Q'] = Q
self.param['abs_coeff'] = abs_coeff
sfc = self.Ts.domain
atm = self.Tatm.domain
# create sub-models for longwave and shortwave radiation
dp = self.Tatm.domain.lev.delta
absorbLW = compute_layer_absorptivity(self.param['abs_coeff'], dp)
absorbLW = Field(np.tile(absorbLW, sfc.shape), domain=atm)
absorbSW = np.zeros_like(absorbLW)
longwave = GreyGas(state=self.state, absorptivity=absorbLW,
albedo_sfc=0)
shortwave = GreyGasSW(state=self.state, absorptivity=absorbSW,
albedo_sfc=self.param['albedo_sfc'])
# sub-model for insolation ... here we just set constant Q
thisQ = self.param['Q']*np.ones_like(self.Ts)
Q = FixedInsolation(S0=thisQ, domains=sfc, **self.param)
self.add_subprocess('LW', longwave)
self.add_subprocess('SW', shortwave)
self.add_subprocess('insolation', Q)
newdiags = ['OLR',
'LW_down_sfc',
'LW_up_sfc',
'LW_absorbed_sfc',
'LW_absorbed_atm',
'LW_emission',
'ASR',
'SW_absorbed_sfc',
'SW_absorbed_atm',
'SW_up_sfc',
'SW_up_TOA',
'SW_down_TOA',
'planetary_albedo']
for name in newdiags:
self.add_diagnostic(name)
# This process has to handle the coupling between
# insolation and column radiation
self.subprocess['SW'].flux_from_space = \
self.subprocess['insolation'].diagnostics['insolation']
def _compute(self):
# set diagnostics
self.do_diagnostics()
# no tendencies for the parent process
tendencies = {}
for name, var in self.state.items():
tendencies[name] = var * 0.
return tendencies
def do_diagnostics(self):
'''Set all the diagnostics from long and shortwave radiation.'''
self.OLR = self.subprocess['LW'].flux_to_space
self.LW_down_sfc = self.subprocess['LW'].flux_to_sfc
self.LW_up_sfc = self.subprocess['LW'].flux_from_sfc
self.LW_absorbed_sfc = self.LW_down_sfc - self.LW_up_sfc
self.LW_absorbed_atm = self.subprocess['LW'].absorbed
self.LW_emission = self.subprocess['LW'].emission
# contributions to OLR from surface and atm. levels
#self.diagnostics['OLR_sfc'] = self.flux['sfc2space']
#self.diagnostics['OLR_atm'] = self.flux['atm2space']
self.ASR = (self.subprocess['SW'].flux_from_space -
self.subprocess['SW'].flux_to_space)
#self.SW_absorbed_sfc = (self.subprocess['surface'].SW_from_atm -
# self.subprocess['surface'].SW_to_atm)
self.SW_absorbed_atm = self.subprocess['SW'].absorbed
self.SW_down_sfc = self.subprocess['SW'].flux_to_sfc
self.SW_up_sfc = self.subprocess['SW'].flux_from_sfc
self.SW_absorbed_sfc = self.SW_down_sfc - self.SW_up_sfc
self.SW_up_TOA = self.subprocess['SW'].flux_to_space
self.SW_down_TOA = self.subprocess['SW'].flux_from_space
self.planetary_albedo = (self.subprocess['SW'].flux_to_space /
self.subprocess['SW'].flux_from_space)
class RadiativeConvectiveModel(GreyRadiationModel):
def __init__(self,
# lapse rate for convective adjustment, in K / km
adj_lapse_rate=6.5,
**kwargs):
super(RadiativeConvectiveModel, self).__init__(**kwargs)
self.param['adj_lapse_rate'] = adj_lapse_rate
self.add_subprocess('convective adjustment', \
ConvectiveAdjustment(state=self.state, **self.param))
class BandRCModel(RadiativeConvectiveModel):
def __init__(self, **kwargs):
super(BandRCModel, self).__init__(**kwargs)
# Initialize specific humidity
h2o = ManabeWaterVapor(state=self.state, **self.param)
self.add_subprocess('H2O', h2o)
# q is an input field for this process, which is set by subproc
# (though in this sense it is actually diagnostic...)
newinput = ['q']
self.add_input('q')
self.q = self.subprocess['H2O'].q
# initialize radiatively active gas inventories
self.absorber_vmr = {}
self.absorber_vmr['CO2'] = 380.E-6 * np.ones_like(self.Tatm)
self.absorber_vmr['O3'] = np.zeros_like(self.Tatm)
# water vapor is actually specific humidity, not VMR.
self.absorber_vmr['H2O'] = self.q
longwave = FourBandLW(state=self.state,
absorber_vmr=self.absorber_vmr,
albedo_sfc=0.)
shortwave = ThreeBandSW(state=self.state,
absorber_vmr=self.absorber_vmr,
emissivity_sfc=0.,
albedo_sfc=self.param['albedo_sfc'])
self.add_subprocess('LW', longwave)
self.add_subprocess('SW', shortwave)
# This process has to handle the coupling between
# insolation and column radiation
self.subprocess['SW'].flux_from_space = \
self.subprocess['insolation'].diagnostics['insolation']
def compute_layer_absorptivity(abs_coeff, dp):
'''Compute layer absorptivity from a constant absorption coefficient.'''
return (2. / (1 + 2. * const.g / abs_coeff /
(dp * const.mb_to_Pa)))
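# --- Usage sketch (illustrative; values below are arbitrary example choices) ---
# A single-column radiative-convective run with this module could look like:
#
# rcm = RadiativeConvectiveModel(num_lev=30, adj_lapse_rate=6.5)
# rcm.integrate_years(2.)   # step the coupled processes toward equilibrium
# print(float(rcm.Ts), float(rcm.OLR))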
|
nni/algorithms/nas/pytorch/cream/trainer.py | dutxubo/nni | 9,680 | 12621330 | <reponame>dutxubo/nni
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from copy import deepcopy
import torch
from nni.nas.pytorch.trainer import Trainer
from nni.nas.pytorch.utils import AverageMeterGroup
from .utils import accuracy, reduce_metrics
logger = logging.getLogger(__name__)
class CreamSupernetTrainer(Trainer):
"""
This trainer trains a supernet and output prioritized architectures that can be used for other tasks.
Parameters
----------
model : nn.Module
Model with mutables.
loss : callable
Called with logits and targets. Returns a loss tensor.
val_loss : callable
Called with logits and targets for validation only. Returns a loss tensor.
optimizer : Optimizer
Optimizer that optimizes the model.
num_epochs : int
Number of epochs of training.
    train_loader : iterable
Data loader of training. Raise ``StopIteration`` when one epoch is exhausted.
    valid_loader : iterable
Data loader of validation. Raise ``StopIteration`` when one epoch is exhausted.
mutator : Mutator
A mutator object that has been initialized with the model.
batch_size : int
Batch size.
log_frequency : int
Number of mini-batches to log metrics.
meta_sta_epoch : int
start epoch of using meta matching network to pick teacher architecture
update_iter : int
interval of updating meta matching networks
slices : int
batch size of mini training data in the process of training meta matching network
pool_size : int
board size
    pick_method : str
how to pick teacher network
choice_num : int
number of operations in supernet
    sta_num : tuple
        layer number of each stage in supernet (5 stages in supernet)
acc_gap : int
maximum accuracy improvement to omit the limitation of flops
flops_dict : Dict
dictionary of each layer's operations in supernet
flops_fixed : int
flops of fixed part in supernet
local_rank : int
index of current rank
callbacks : list of Callback
Callbacks to plug into the trainer. See Callbacks.
"""
def __init__(self, model, loss, val_loss,
optimizer, num_epochs, train_loader, valid_loader,
mutator=None, batch_size=64, log_frequency=None,
meta_sta_epoch=20, update_iter=200, slices=2,
pool_size=10, pick_method='meta', choice_num=6,
sta_num=(4, 4, 4, 4, 4), acc_gap=5,
flops_dict=None, flops_fixed=0, local_rank=0, callbacks=None):
assert torch.cuda.is_available()
super(CreamSupernetTrainer, self).__init__(model, mutator, loss, None,
optimizer, num_epochs, None, None,
batch_size, None, None, log_frequency, callbacks)
self.model = model
self.loss = loss
self.val_loss = val_loss
self.train_loader = train_loader
self.valid_loader = valid_loader
self.log_frequency = log_frequency
self.batch_size = batch_size
self.optimizer = optimizer
self.model = model
self.loss = loss
self.num_epochs = num_epochs
self.meta_sta_epoch = meta_sta_epoch
self.update_iter = update_iter
self.slices = slices
self.pick_method = pick_method
self.pool_size = pool_size
self.local_rank = local_rank
self.choice_num = choice_num
self.sta_num = sta_num
self.acc_gap = acc_gap
self.flops_dict = flops_dict
self.flops_fixed = flops_fixed
self.current_student_arch = None
self.current_teacher_arch = None
self.main_proc = (local_rank == 0)
self.current_epoch = 0
self.prioritized_board = []
# size of prioritized board
def _board_size(self):
return len(self.prioritized_board)
# select teacher architecture according to the logit difference
def _select_teacher(self):
self._replace_mutator_cand(self.current_student_arch)
if self.pick_method == 'top1':
meta_value, teacher_cand = 0.5, sorted(
self.prioritized_board, reverse=True)[0][3]
elif self.pick_method == 'meta':
meta_value, cand_idx, teacher_cand = -1000000000, -1, None
for now_idx, item in enumerate(self.prioritized_board):
inputx = item[4]
output = torch.nn.functional.softmax(self.model(inputx), dim=1)
weight = self.model.module.forward_meta(output - item[5])
if weight > meta_value:
meta_value = weight
cand_idx = now_idx
teacher_cand = self.prioritized_board[cand_idx][3]
assert teacher_cand is not None
            meta_value = torch.sigmoid(-weight)
else:
raise ValueError('Method Not supported')
return meta_value, teacher_cand
# check whether to update prioritized board
def _isUpdateBoard(self, prec1, flops):
if self.current_epoch <= self.meta_sta_epoch:
return False
if len(self.prioritized_board) < self.pool_size:
return True
if prec1 > self.prioritized_board[-1][1] + self.acc_gap:
return True
if prec1 > self.prioritized_board[-1][1] and flops < self.prioritized_board[-1][2]:
return True
return False
# update prioritized board
def _update_prioritized_board(self, inputs, teacher_output, outputs, prec1, flops):
if self._isUpdateBoard(prec1, flops):
val_prec1 = prec1
training_data = deepcopy(inputs[:self.slices].detach())
if len(self.prioritized_board) == 0:
features = deepcopy(outputs[:self.slices].detach())
else:
features = deepcopy(
teacher_output[:self.slices].detach())
self.prioritized_board.append(
(val_prec1,
prec1,
flops,
self.current_student_arch,
training_data,
torch.nn.functional.softmax(
features,
dim=1)))
self.prioritized_board = sorted(
self.prioritized_board, reverse=True)
if len(self.prioritized_board) > self.pool_size:
del self.prioritized_board[-1]
# only update student network weights
def _update_student_weights_only(self, grad_1):
for weight, grad_item in zip(
self.model.module.rand_parameters(self.current_student_arch), grad_1):
weight.grad = grad_item
torch.nn.utils.clip_grad_norm_(
self.model.module.rand_parameters(self.current_student_arch), 1)
self.optimizer.step()
for weight, grad_item in zip(
self.model.module.rand_parameters(self.current_student_arch), grad_1):
del weight.grad
# only update meta networks weights
def _update_meta_weights_only(self, teacher_cand, grad_teacher):
for weight, grad_item in zip(self.model.module.rand_parameters(
teacher_cand, self.pick_method == 'meta'), grad_teacher):
weight.grad = grad_item
# clip gradients
torch.nn.utils.clip_grad_norm_(
self.model.module.rand_parameters(
self.current_student_arch, self.pick_method == 'meta'), 1)
self.optimizer.step()
for weight, grad_item in zip(self.model.module.rand_parameters(
teacher_cand, self.pick_method == 'meta'), grad_teacher):
del weight.grad
# simulate sgd updating
def _simulate_sgd_update(self, w, g, optimizer):
return g * optimizer.param_groups[-1]['lr'] + w
# split training images into several slices
def _get_minibatch_input(self, input): # pylint: disable=redefined-builtin
slice = self.slices # pylint: disable=redefined-builtin
x = deepcopy(input[:slice].clone().detach())
return x
# calculate 1st gradient of student architectures
def _calculate_1st_gradient(self, kd_loss):
self.optimizer.zero_grad()
grad = torch.autograd.grad(
kd_loss,
self.model.module.rand_parameters(self.current_student_arch),
create_graph=True)
return grad
# calculate 2nd gradient of meta networks
def _calculate_2nd_gradient(self, validation_loss, teacher_cand, students_weight):
self.optimizer.zero_grad()
grad_student_val = torch.autograd.grad(
validation_loss,
self.model.module.rand_parameters(self.current_student_arch),
retain_graph=True)
grad_teacher = torch.autograd.grad(
students_weight[0],
self.model.module.rand_parameters(
teacher_cand,
self.pick_method == 'meta'),
grad_outputs=grad_student_val)
return grad_teacher
# forward training data
def _forward_training(self, x, meta_value):
self._replace_mutator_cand(self.current_student_arch)
output = self.model(x)
with torch.no_grad():
self._replace_mutator_cand(self.current_teacher_arch)
teacher_output = self.model(x)
soft_label = torch.nn.functional.softmax(teacher_output, dim=1)
kd_loss = meta_value * \
self._cross_entropy_loss_with_soft_target(output, soft_label)
return kd_loss
# calculate soft target loss
def _cross_entropy_loss_with_soft_target(self, pred, soft_target):
        logsoftmax = torch.nn.LogSoftmax(dim=1)
return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))
# forward validation data
def _forward_validation(self, input, target): # pylint: disable=redefined-builtin
slice = self.slices # pylint: disable=redefined-builtin
x = input[slice:slice * 2].clone()
self._replace_mutator_cand(self.current_student_arch)
output_2 = self.model(x)
validation_loss = self.loss(output_2, target[slice:slice * 2])
return validation_loss
def _isUpdateMeta(self, batch_idx):
isUpdate = True
isUpdate &= (self.current_epoch > self.meta_sta_epoch)
isUpdate &= (batch_idx > 0)
isUpdate &= (batch_idx % self.update_iter == 0)
isUpdate &= (self._board_size() > 0)
return isUpdate
def _replace_mutator_cand(self, cand):
self.mutator._cache = cand
# update meta matching networks
def _run_update(self, input, target, batch_idx): # pylint: disable=redefined-builtin
if self._isUpdateMeta(batch_idx):
x = self._get_minibatch_input(input)
meta_value, teacher_cand = self._select_teacher()
kd_loss = self._forward_training(x, meta_value)
# calculate 1st gradient
grad_1st = self._calculate_1st_gradient(kd_loss)
# simulate updated student weights
students_weight = [
self._simulate_sgd_update(
p, grad_item, self.optimizer) for p, grad_item in zip(
self.model.module.rand_parameters(self.current_student_arch), grad_1st)]
# update student weights
self._update_student_weights_only(grad_1st)
validation_loss = self._forward_validation(input, target)
# calculate 2nd gradient
grad_teacher = self._calculate_2nd_gradient(validation_loss, teacher_cand, students_weight)
# update meta matching networks
self._update_meta_weights_only(teacher_cand, grad_teacher)
# delete internal variants
del grad_teacher, grad_1st, x, validation_loss, kd_loss, students_weight
def _get_cand_flops(self, cand):
flops = 0
for block_id, block in enumerate(cand):
            if block == 'LayerChoice1' or block == 'LayerChoice23':
continue
for idx, choice in enumerate(cand[block]):
flops += self.flops_dict[block_id][idx] * (1 if choice else 0)
return flops + self.flops_fixed
def train_one_epoch(self, epoch):
self.current_epoch = epoch
meters = AverageMeterGroup()
self.steps_per_epoch = len(self.train_loader)
for step, (input_data, target) in enumerate(self.train_loader):
self.mutator.reset()
self.current_student_arch = self.mutator._cache
input_data, target = input_data.cuda(), target.cuda()
# calculate flops of current architecture
cand_flops = self._get_cand_flops(self.mutator._cache)
# update meta matching network
self._run_update(input_data, target, step)
if self._board_size() > 0:
# select teacher architecture
meta_value, teacher_cand = self._select_teacher()
self.current_teacher_arch = teacher_cand
# forward supernet
if self._board_size() == 0 or epoch <= self.meta_sta_epoch:
self._replace_mutator_cand(self.current_student_arch)
output = self.model(input_data)
loss = self.loss(output, target)
kd_loss, teacher_output, teacher_cand = None, None, None
else:
self._replace_mutator_cand(self.current_student_arch)
output = self.model(input_data)
gt_loss = self.loss(output, target)
with torch.no_grad():
self._replace_mutator_cand(self.current_teacher_arch)
teacher_output = self.model(input_data).detach()
soft_label = torch.nn.functional.softmax(teacher_output, dim=1)
kd_loss = self._cross_entropy_loss_with_soft_target(output, soft_label)
loss = (meta_value * kd_loss + (2 - meta_value) * gt_loss) / 2
# update network
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# update metrics
prec1, prec5 = accuracy(output, target, topk=(1, 5))
metrics = {"prec1": prec1, "prec5": prec5, "loss": loss}
metrics = reduce_metrics(metrics)
meters.update(metrics)
# update prioritized board
self._update_prioritized_board(input_data, teacher_output, output, metrics['prec1'], cand_flops)
if self.main_proc and (step % self.log_frequency == 0 or step + 1 == self.steps_per_epoch):
logger.info("Epoch [%d/%d] Step [%d/%d] %s", epoch + 1, self.num_epochs,
step + 1, len(self.train_loader), meters)
if self.main_proc and self.num_epochs == epoch + 1:
for idx, i in enumerate(self.prioritized_board):
logger.info("No.%s %s", idx, i[:4])
def validate_one_epoch(self, epoch):
self.model.eval()
meters = AverageMeterGroup()
with torch.no_grad():
for step, (x, y) in enumerate(self.valid_loader):
self.mutator.reset()
logits = self.model(x)
loss = self.val_loss(logits, y)
prec1, prec5 = accuracy(logits, y, topk=(1, 5))
metrics = {"prec1": prec1, "prec5": prec5, "loss": loss}
metrics = reduce_metrics(metrics)
meters.update(metrics)
if self.log_frequency is not None and step % self.log_frequency == 0:
logger.info("Epoch [%s/%s] Validation Step [%s/%s] %s", epoch + 1,
self.num_epochs, step + 1, len(self.valid_loader), meters)
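# --- Illustrative sketch (not part of the original NNI module) ---
# The distillation loss above averages -sum(soft_target * log_softmax(pred))
# over the batch. The same pattern on dummy tensors, assuming only that torch
# is available (shapes are arbitrary example values):
#
# pred = torch.randn(4, 10)                                  # student logits
# soft = torch.nn.functional.softmax(torch.randn(4, 10), 1)  # teacher probabilities
# loss = torch.mean(torch.sum(-soft * torch.nn.functional.log_softmax(pred, dim=1), 1))
# print(float(loss))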
|
ServidorPython/python32_web/Lib/site-packages/sklearn/feature_extraction/__init__.py | mak213k/Servidor_automatizado_python | 6,989 | 12621339 | """
The :mod:`sklearn.feature_extraction` module deals with feature extraction
from raw data. It currently includes methods to extract features from text and
images.
"""
from .dict_vectorizer import DictVectorizer
from .hashing import FeatureHasher
from .image import img_to_graph, grid_to_graph
from . import text
__all__ = ['DictVectorizer', 'image', 'img_to_graph', 'grid_to_graph', 'text',
'FeatureHasher']
|
src/test-apps/happy/test-templates/WeaveKeyExport.py | robszewczyk/openweave-core | 249 | 12621373 | <gh_stars>100-1000
#!/usr/bin/env python3
#
# Copyright (c) 2017 <NAME>, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Implements WeaveKeyExport class that tests Weave Key Export protocol among Weave nodes.
#
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import time
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.utils.IP import IP
from happy.HappyNode import HappyNode
from happy.HappyNetwork import HappyNetwork
from WeaveTest import WeaveTest
import WeaveUtilities
import plugins.plaid.Plaid as Plaid
options = {}
options["client"] = None
options["server"] = None
options["quiet"] = False
options["plaid"] = False
options["use_persistent_storage"] = True
def option():
return options.copy()
class WeaveKeyExport(HappyNode, HappyNetwork, WeaveTest):
"""
weave-key-export [-h --help] [-q --quiet] [-o --origin <NAME>] [-s --server <NAME>]
[-c --count <NUMBER>] [-u --udp] [-t --tcp] [-k --key-id <key-id>] [-d --dont-sign-msgs]
[-p --tap <TAP_INTERFACE>] [--client_faults <fault-injection configuration>]
[--server_faults <fault-injection configuration>]
commands:
$ weave-key-export -o node01 -s node02 -u
weave key export test between node01 and node02 via UDP with requested default
Client Root Key (key-id = 0x00010400)
$ weave-key-export -o node01 -s node02 -t
weave key export test between node01 and node02 via TCP with requested default
Client Root Key (key-id = 0x00010400)
$ weave-key-export -o node01 -s node02 -u --wrmp
weave key export test between node01 and node02 via WRMP over UDP with requested
default Client Root Key (key-id = 0x00010400)
$ weave-key-export -o node01 -s node02 -u --wrmp --key-id 0x00005536
weave key export test between node01 and node02 via WRMP over UDP with requested
application key (key-id = 0x00005536)
return:
True or False for test
"""
def __init__(self, opts = options):
HappyNode.__init__(self)
HappyNetwork.__init__(self)
WeaveTest.__init__(self)
default_values = {
"count": None,
'udp': True,
'wrmp': False,
'tcp': False,
"sign_msgs": True,
"key_id": "0x00010400",
'tap': None,
'client_faults': None,
'server_faults': None,
'iterations': None,
'test_tag': ""
}
default_values.update(opts)
self.__dict__.update(default_values)
self.no_service = False
self.server_process_tag = "WEAVE_KEY_EXPORT_SERVER" + opts["test_tag"]
self.client_process_tag = "WEAVE_KEY_EXPORT_CLIENT" + opts["test_tag"]
self.plaid_server_process_tag = "PLAID_SERVER" + opts["test_tag"]
self.client_node_id = None
self.server_node_id = None
plaid_opts = Plaid.default_options()
plaid_opts['quiet'] = self.quiet
self.plaid_server_node_id = 'node03'
plaid_opts['server_node_id'] = self.plaid_server_node_id
plaid_opts['num_clients'] = 2
plaid_opts['server_ip_address'] = self.getNodeWeaveIPAddress(self.plaid_server_node_id)
plaid_opts['interface'] = 'wlan0'
self.plaid = Plaid.Plaid(plaid_opts)
self.use_plaid = opts["plaid"]
if opts["plaid"] == "auto":
if self.server == "service":
# can't use plaid when talking to an external service
self.use_plaid = False
else:
self.use_plaid = self.plaid.isPlaidConfigured()
def __pre_check(self):
# Check if Weave Key Export client node is given.
        if self.client is None:
emsg = "Missing name or address of the Weave Key Export client node."
self.logger.error("[localhost] WeaveKeyExport: %s" % (emsg))
sys.exit(1)
# Check if Weave Key Export server node is given.
        if self.server is None:
emsg = "Missing name or address of the Weave Key Export server node."
self.logger.error("[localhost] WeaveKeyExport: %s" % (emsg))
sys.exit(1)
# Make sure that fabric was created
        if self.getFabricId() is None:
emsg = "Weave Fabric has not been created yet."
self.logger.error("[localhost] WeaveKeyExport: %s" % (emsg))
sys.exit(1)
        if self.count is not None and self.count.isdigit():
self.count = int(float(self.count))
else:
self.count = 1
# Check if Weave Key Export client node exists.
if self._nodeExists(self.client):
self.client_node_id = self.client
# Check if Weave Key Export server node exists.
if self._nodeExists(self.server):
self.server_node_id = self.server
# Check if client is provided in a form of IP address
if IP.isIpAddress(self.client):
self.client_node_id = self.getNodeIdFromAddress(self.client)
# Check if server is provided in a form of IP address
if IP.isIpAddress(self.server):
self.no_service = True
self.server_ip = self.server
self.server_weave_id = self.IPv6toWeaveId(self.server)
else:
# Check if server is a true cloud service instance
if self.getNodeType(self.server) == self.node_type_service:
self.no_service = True
        if self.client_node_id is None:
emsg = "Unknown identity of the client node."
self.logger.error("[localhost] WeaveKeyExport: %s" % (emsg))
sys.exit(1)
        if not self.no_service and self.server_node_id is None:
emsg = "Unknown identity of the server node."
self.logger.error("[localhost] WeaveKeyExport: %s" % (emsg))
sys.exit(1)
if self.getNodeType(self.client_node_id) == "service":
self.client_ip = self.getServiceWeaveIPAddress("KeyExport", self.client_node_id)
self.client_weave_id = self.getServiceWeaveID("KeyExport", self.client_node_id)
else:
self.client_ip = self.getNodeWeaveIPAddress(self.client_node_id)
self.client_weave_id = self.getWeaveNodeID(self.client_node_id)
if self.getNodeType(self.server_node_id) == "service":
self.server_ip = self.getServiceWeaveIPAddress("KeyExport", self.server_node_id)
self.server_weave_id = self.getServiceWeaveID("KeyExport", self.server_node_id)
else:
if not self.no_service:
self.server_ip = self.getNodeWeaveIPAddress(self.server_node_id)
self.server_weave_id = self.getWeaveNodeID(self.server_node_id)
# Check if all unknowns were found
if self.client_ip == None:
emsg = "Could not find IP address of the client node."
self.logger.error("[localhost] WeaveKeyExport: %s" % (emsg))
sys.exit(1)
if self.server_ip == None:
emsg = "Could not find IP address of the server node."
self.logger.error("[localhost] WeaveKeyExport: %s" % (emsg))
sys.exit(1)
if self.client_weave_id == None:
emsg = "Could not find Weave node ID of the client node."
self.logger.error("[localhost] WeaveKeyExport: %s" % (emsg))
sys.exit(1)
if not self.no_service and self.server_weave_id == None:
emsg = "Could not find Weave node ID of the server node."
self.logger.error("[localhost] WeaveKeyExport: %s" % (emsg))
sys.exit(1)
def __process_results(self, client_output):
# search for "Received Key Export Response" phrase
fail_test = True
for line in client_output.split("\n"):
if "Received Key Export Response" in line:
fail_test = False
break
if self.quiet == False:
print("weave-key-export requested by node %s (%s) from node %s (%s) : " % \
(self.client_node_id, self.client_ip,
self.server_node_id, self.server_ip), end=' ')
if fail_test:
print(hred("FAILED"))
else:
print(hgreen("PASSED"))
return (fail_test, client_output)
def __start_plaid_server(self):
self.plaid.startPlaidServerProcess()
emsg = "plaid-server should be running."
self.logger.debug("[%s] WeaveKeyExport: %s" % (self.plaid_server_node_id, emsg))
def __start_server_side(self):
if self.no_service:
return
cmd = self.getWeaveMockDevicePath()
if not cmd:
return
cmd += " --debug-resource-usage --print-fault-counters"
if self.tap:
cmd += " --tap-device " + self.tap
if self.server_faults:
cmd += " --faults " + self.server_faults
custom_env = {}
if self.use_plaid:
custom_env = self.plaid.getPlaidClientLibEnv(self.server_node_id)
self.start_simple_weave_server(cmd, self.server_ip,
self.server_node_id, self.server_process_tag, listen = False, env=custom_env, use_persistent_storage=self.use_persistent_storage)
def __start_client_side(self, pase_fail = False):
cmd = self.getWeaveKeyExportPath()
if not cmd:
return
cmd += " --debug-resource-usage --print-fault-counters"
if self.tcp:
cmd += " --tcp"
else:
# default is UDP
cmd += " --udp"
if self.wrmp:
cmd += " --wrmp"
cmd += " --key-id " + str(self.key_id)
cmd += " --count " + str(self.count)
if not self.sign_msgs:
cmd += " --dont-sign-msgs "
if self.tap:
cmd += " --tap-device " + self.tap
if self.client_faults:
cmd += " --faults " + self.client_faults
if self.iterations:
cmd += " --iterations " + str(self.iterations)
custom_env = {}
if self.use_plaid:
custom_env = self.plaid.getPlaidClientLibEnv(self.client_node_id)
self.start_simple_weave_client(cmd, self.client_ip,
self.server_ip, self.server_weave_id,
self.client_node_id, self.client_process_tag, env=custom_env, use_persistent_storage=self.use_persistent_storage)
def __wait_for_client(self):
self.wait_for_test_to_end(self.client_node_id, self.client_process_tag)
def __stop_plaid_server(self):
self.plaid.stopPlaidServerProcess()
def __stop_server_side(self):
if self.no_service:
return
self.stop_weave_process(self.server_node_id, self.server_process_tag)
def run(self):
self.logger.debug("[localhost] WeaveKeyExport: Run.")
self.__pre_check()
if self.use_plaid:
self.__start_plaid_server()
self.__start_server_side()
emsg = "WeaveKeyExport %s should be running." % (self.server_process_tag)
self.logger.debug("[%s] WeaveKeyExport: %s" % (self.server_node_id, emsg))
self.__start_client_side(False)
self.__wait_for_client()
client_output_value, client_output_data = \
self.get_test_output(self.client_node_id, self.client_process_tag, True)
client_strace_value, client_strace_data = \
self.get_test_strace(self.client_node_id, self.client_process_tag, True)
if self.no_service:
server_output_data = ""
server_strace_data = ""
else:
self.__stop_server_side()
if self.use_plaid:
self.__stop_plaid_server()
server_output_value, server_output_data = \
self.get_test_output(self.server_node_id, self.server_process_tag, True)
server_strace_value, server_strace_data = \
self.get_test_strace(self.server_node_id, self.server_process_tag, True)
avg, results = self.__process_results(client_output_data)
client_parser_error, client_leak_detected = WeaveUtilities.scan_for_leaks_and_parser_errors(client_output_data)
server_parser_error, server_leak_detected = WeaveUtilities.scan_for_leaks_and_parser_errors(server_output_data)
data = {}
data["client_output"] = client_output_data
data["client_strace"] = client_strace_data
data["server_output"] = server_output_data
data["server_strace"] = server_strace_data
data["other_failure"] = client_parser_error or client_leak_detected or server_parser_error or server_leak_detected
self.logger.debug("[localhost] WeaveKeyExport: Done.")
return ReturnMsg(avg, data)
|
WorldModels/env.py | zacwellmer/WorldModels | 240 | 12621387 | <gh_stars>100-1000
import numpy as np
import gym
import json
import os
import tensorflow as tf
import gc
from PIL import Image
from gym.spaces.box import Box
from gym.envs.box2d.car_racing import CarRacing
class CarRacingWrapper(CarRacing):
def __init__(self, full_episode=False):
super(CarRacingWrapper, self).__init__()
self.full_episode = full_episode
self.observation_space = Box(low=0, high=255, shape=(64, 64, 3)) # , dtype=np.uint8
def _process_frame(self, frame):
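        # Crop off the bottom of the frame (the indicator bar in CarRacing) and
        # downsample the remainder to the 64x64 RGB observation.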
obs = frame[0:84, :, :]
obs = Image.fromarray(obs, mode='RGB').resize((64, 64))
obs = np.array(obs)
return obs
def _step(self, action):
obs, reward, done, _ = super(CarRacingWrapper, self)._step(action)
if self.full_episode:
return self._process_frame(obs), reward, False, {}
return self._process_frame(obs), reward, done, {}
from vae.vae import CVAE
from rnn.rnn import MDNRNN, rnn_next_state, rnn_init_state
class CarRacingMDNRNN(CarRacingWrapper):
def __init__(self, args, load_model=True, full_episode=False, with_obs=False):
super(CarRacingMDNRNN, self).__init__(full_episode=full_episode)
self.with_obs = with_obs # whether or not to return the frame with the encodings
self.vae = CVAE(args)
self.rnn = MDNRNN(args)
if load_model:
self.vae.set_weights([param_i.numpy() for param_i in tf.saved_model.load('results/{}/{}/tf_vae'.format(args.exp_name, args.env_name)).variables])
self.rnn.set_weights([param_i.numpy() for param_i in tf.saved_model.load('results/{}/{}/tf_rnn'.format(args.exp_name, args.env_name)).variables])
self.rnn_states = rnn_init_state(self.rnn)
self.full_episode = False
self.observation_space = Box(low=np.NINF, high=np.Inf, shape=(args.z_size+args.rnn_size*args.state_space))
def encode_obs(self, obs):
# convert raw obs to z, mu, logvar
        result = np.copy(obs).astype(float) / 255.0  # plain float: the np.float alias was removed from recent NumPy releases
result = result.reshape(1, 64, 64, 3)
z = self.vae.encode(result)[0]
return z
def reset(self):
self.rnn_states = rnn_init_state(self.rnn)
if self.with_obs:
[z_state, obs] = super(CarRacingMDNRNN, self).reset() # calls step
self.N_tiles = len(self.track)
return [z_state, obs]
else:
z_state = super(CarRacingMDNRNN, self).reset() # calls step
self.N_tiles = len(self.track)
return z_state
def _step(self, action):
obs, reward, done, _ = super(CarRacingMDNRNN, self)._step(action)
z = tf.squeeze(self.encode_obs(obs))
h = tf.squeeze(self.rnn_states[0])
c = tf.squeeze(self.rnn_states[1])
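        # The controller observation is the VAE latent z concatenated with the RNN
        # hidden state h (plus the cell state c when state_space == 2).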
if self.rnn.args.state_space == 2:
z_state = tf.concat([z, c, h], axis=-1)
else:
z_state = tf.concat([z, h], axis=-1)
if action is not None: # don't compute state on reset
self.rnn_states = rnn_next_state(self.rnn, z, action, self.rnn_states)
if self.with_obs:
return [z_state, obs], reward, done, {}
else:
return z_state, reward, done, {}
def close(self):
super(CarRacingMDNRNN, self).close()
tf.keras.backend.clear_session()
gc.collect()
from ppaquette_gym_doom.doom_take_cover import DoomTakeCoverEnv
from gym.utils import seeding
class DoomTakeCoverMDNRNN(DoomTakeCoverEnv):
def __init__(self, args, render_mode=False, load_model=True, with_obs=False):
super(DoomTakeCoverMDNRNN, self).__init__()
self.with_obs = with_obs
self.no_render = True
if render_mode:
self.no_render = False
self.current_obs = None
self.vae = CVAE(args)
self.rnn = MDNRNN(args)
if load_model:
self.vae.set_weights([param_i.numpy() for param_i in tf.saved_model.load('results/{}/{}/tf_vae'.format(args.exp_name, args.env_name)).variables])
self.rnn.set_weights([param_i.numpy() for param_i in tf.saved_model.load('results/{}/{}/tf_rnn'.format(args.exp_name, args.env_name)).variables])
self.action_space = Box(low=-1.0, high=1.0, shape=())
self.obs_size = self.rnn.args.z_size + self.rnn.args.rnn_size * self.rnn.args.state_space
self.observation_space = Box(low=0, high=255, shape=(64, 64, 3))
self.actual_observation_space = Box(low=-50., high=50., shape=(self.obs_size))
self._seed()
self.rnn_states = None
self.z = None
self.restart = None
self.frame_count = None
self.viewer = None
self._reset()
def close(self):
super(DoomTakeCoverMDNRNN, self).close()
tf.keras.backend.clear_session()
gc.collect()
def _step(self, action):
# update states of rnn
self.frame_count += 1
self.rnn_states = rnn_next_state(self.rnn, self.z, action, self.rnn_states)
# actual action in wrapped env:
threshold = 0.3333
full_action = [0] * 43
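        # Map the scalar action onto the wrapped Doom env's 43-element button vector;
        # indices 10 and 11 presumably select the two movement directions used by take_cover.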
if action < -threshold:
            full_action[11] = 1
if action > threshold:
full_action[10] = 1
obs, reward, done, _ = super(DoomTakeCoverMDNRNN, self)._step(full_action)
small_obs = self._process_frame(obs)
self.current_obs = small_obs
self.z = self._encode(small_obs)
if done:
self.restart = 1
else:
self.restart = 0
if self.with_obs:
return [self._current_state(), self.current_obs], reward, done, {}
else:
return self._current_state(), reward, done, {}
def _encode(self, img):
        simple_obs = np.copy(img).astype(float) / 255.0  # plain float instead of the removed np.float alias
simple_obs = simple_obs.reshape(1, 64, 64, 3)
z = self.vae.encode(simple_obs)[0]
return z
def _reset(self):
obs = super(DoomTakeCoverMDNRNN, self)._reset()
small_obs = self._process_frame(obs)
self.current_obs = small_obs
self.rnn_states = rnn_init_state(self.rnn)
self.z = self._encode(small_obs)
self.restart = 1
self.frame_count = 0
if self.with_obs:
return [self._current_state(), self.current_obs]
else:
return self._current_state()
def _process_frame(self, frame):
obs = frame[0:400, :, :]
obs = Image.fromarray(obs, mode='RGB').resize((64, 64))
obs = np.array(obs)
return obs
def _current_state(self):
if self.rnn.args.state_space == 2:
            return np.concatenate([self.z, tf.keras.backend.flatten(self.rnn_states[1]), tf.keras.backend.flatten(self.rnn_states[0])], axis=0) # cell then hidden for some reason
return np.concatenate([self.z, tf.keras.backend.flatten(self.rnn_states[0])], axis=0) # only the hidden state
def _seed(self, seed=None):
if seed:
tf.random.set_seed(seed)
self.np_random, seed = seeding.np_random(seed)
return [seed]
from rnn.rnn import rnn_sim
class DreamDoomTakeCoverMDNRNN:
def __init__(self, args, render_mode=False, load_model=True):
self.render_mode = render_mode
model_path_name = 'results/{}/{}'.format(args.exp_name, args.env_name)
with open(os.path.join(model_path_name, 'tf_initial_z/initial_z.json'), 'r') as f:
[initial_mu, initial_logvar] = json.load(f)
self.initial_mu_logvar = np.array([list(elem) for elem in zip(initial_mu, initial_logvar)])
self.vae = CVAE(args)
self.rnn = MDNRNN(args)
if load_model:
self.vae.set_weights([param_i.numpy() for param_i in tf.saved_model.load('results/{}/{}/tf_vae'.format(args.exp_name, args.env_name)).variables])
self.rnn.set_weights([param_i.numpy() for param_i in tf.saved_model.load('results/{}/{}/tf_rnn'.format(args.exp_name, args.env_name)).variables])
        # future versions of OpenAI gym need a dtype=np.float32 in the next line:
self.action_space = Box(low=-1.0, high=1.0, shape=())
obs_size = self.rnn.args.z_size + self.rnn.args.rnn_size * self.rnn.args.state_space
        # future versions of OpenAI gym need a dtype=np.float32 in the next line:
self.observation_space = Box(low=-50., high=50., shape=(obs_size,))
self.rnn_states = None
self.o = None
self._training=True
self.seed()
self.reset()
def _sample_init_z(self):
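        # Draw an initial latent via the reparameterization trick from one of the stored
        # (mu, logvar) pairs, which appear to be saved as integers scaled by 10000.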
idx = self.np_random.randint(low=0, high=self.initial_mu_logvar.shape[0])
init_mu, init_logvar = self.initial_mu_logvar[idx]
init_mu = init_mu / 10000.0
init_logvar = init_logvar / 10000.0
init_z = init_mu + np.exp(init_logvar/2.0) * self.np_random.randn(*init_logvar.shape)
return init_z
def reset(self):
self.rnn_states = rnn_init_state(self.rnn)
z = np.expand_dims(self._sample_init_z(), axis=0)
self.o = z
z_ch = tf.concat([z, self.rnn_states[1], self.rnn_states[0]], axis=-1)
return tf.squeeze(z_ch)
def seed(self, seed=None):
if seed:
tf.random.set_seed(seed)
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
rnn_states_p1, z_tp1, r_tp1, d_tp1 = rnn_sim(self.rnn, self.o, self.rnn_states, action, training=self._training)
self.rnn_states = rnn_states_p1
self.o = z_tp1
z_ch = tf.squeeze(tf.concat([z_tp1, self.rnn_states[1], self.rnn_states[0]], axis=-1))
return z_ch.numpy(), tf.squeeze(r_tp1), d_tp1.numpy(), {}
def close(self):
tf.keras.backend.clear_session()
gc.collect()
def render(self, mode):
pass
def make_env(args, dream_env=False, seed=-1, render_mode=False, full_episode=False, with_obs=False, load_model=True):
if args.env_name == 'DoomTakeCover-v0':
if dream_env:
print('making rnn doom environment')
env = DreamDoomTakeCoverMDNRNN(args=args, render_mode=render_mode, load_model=load_model)
else:
print('making real doom environment')
env = DoomTakeCoverMDNRNN(args=args, render_mode=render_mode, load_model=load_model, with_obs=with_obs)
else:
if dream_env:
raise ValueError('training in dreams for carracing is not yet supported')
else:
            print('making real CarRacing environment')
env = CarRacingMDNRNN(args=args, full_episode=full_episode, with_obs=with_obs, load_model=load_model)
if (seed >= 0):
env.seed(seed)
return env
|
tests/structures/test_rgbdimages.py | glebshevchukk/gradslam | 1,048 | 12621437 | import logging
import os
import unittest
import numpy as np
import torch
from tests.common import default_to_cpu_if_no_gpu, load_test_data
from tests.common_testing import TestCaseMixin
from gradslam.geometry.geometryutils import create_meshgrid
from gradslam.geometry.projutils import project_points
from gradslam.structures.rgbdimages import RGBDImages
class TestRGBDImages(TestCaseMixin, unittest.TestCase):
@staticmethod
def init_rgbdimages(
use_poses=True,
channels_first=False,
device: str = "cpu",
):
device = torch.device(device)
colors, depths, intrinsics, poses = load_test_data(channels_first)
if use_poses:
rgbdimages = RGBDImages(
colors.to(device),
depths.to(device),
intrinsics.to(device),
poses.to(device),
channels_first=channels_first,
)
else:
rgbdimages = RGBDImages(
colors.to(device),
depths.to(device),
intrinsics.to(device),
channels_first=channels_first,
)
return rgbdimages, colors, depths, intrinsics, poses
def test_simple(self):
device = default_to_cpu_if_no_gpu("cuda:0")
args = [(True, True), (True, False), (False, True), (False, False)]
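        # Each tuple is (use_poses, channels_first).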
for arg in args:
res_tuple = TestRGBDImages.init_rgbdimages(
use_poses=arg[0], channels_first=arg[1], device=device
)
rgbdimages, colors, depths, intrinsics, poses = res_tuple
self.assertEqual(rgbdimages.shape, (2, 3, 120, 160))
self.assertEqual(colors.shape, rgbdimages.rgb_image.shape)
self.assertEqual(depths.shape, rgbdimages.depth_image.shape)
self.assertEqual(intrinsics.shape, rgbdimages.intrinsics.shape)
self.assertEqual(colors.shape, rgbdimages.vertex_map.shape)
self.assertEqual(colors.shape, rgbdimages.normal_map.shape)
def test_vertex_map(self):
device = default_to_cpu_if_no_gpu("cuda:0")
scriptdir = os.path.dirname(os.path.realpath(__file__))
gt_vmap = np.load(os.path.join(scriptdir, "../data/msrd_b2s3/vertex_map.npy"))
gt_global_vmap = np.load(
os.path.join(scriptdir, "../data/msrd_b2s3/global_vertex_map.npy")
)
for use_poses in [False, True]:
for channels_first in [False, True]:
rgbdimages, *_ = TestRGBDImages.init_rgbdimages(
channels_first=channels_first, use_poses=use_poses, device=device
)
vertex_map = rgbdimages.vertex_map
global_vertex_map = rgbdimages.global_vertex_map
depth_image = rgbdimages.depth_image
intrinsics = rgbdimages.intrinsics
self.assertEqual(vertex_map.ndim, 5)
if channels_first:
vertex_map = vertex_map.permute(0, 1, 3, 4, 2).contiguous()
global_vertex_map = global_vertex_map.permute(
0, 1, 3, 4, 2
).contiguous()
depth_image = depth_image.permute(0, 1, 3, 4, 2).contiguous()
self.assertEqual(vertex_map.shape, (2, 3, 120, 160, 3))
self.assertEqual(global_vertex_map.shape, (2, 3, 120, 160, 3))
self.assertEqual(depth_image.shape, (2, 3, 120, 160, 1))
for b in range(2):
for s in range(3):
vmap = vertex_map[b, s]
dmap = depth_image[b, s]
K = intrinsics[b, 0]
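                        # Projecting the vertex map back through the intrinsics should recover
                        # the pixel-coordinate meshgrid wherever the depth is nonzero.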
test_unproj_res = project_points(vmap, K)
meshgrid = (
create_meshgrid(120, 160, False).squeeze(0).to(device)
)
meshgrid = torch.cat(
[
meshgrid[..., 1:],
meshgrid[..., 0:1],
],
-1,
)
correct_unproj_res = meshgrid * (dmap != 0).float()
# self.assertClose() fails here, probably because not close enough?
assert (test_unproj_res - correct_unproj_res).abs().max() < 1e-4
assert ((gt_vmap - vertex_map.cpu().numpy()) ** 2).sum() < 1e-2
if use_poses:
assert (
(gt_global_vmap - global_vertex_map.cpu().numpy()) ** 2
).sum() < 1e-2
else:
assert (
(gt_vmap - global_vertex_map.cpu().numpy()) ** 2
).sum() < 1e-2
def test_normal_map(self):
device = default_to_cpu_if_no_gpu("cuda:0")
def diff(x, y):
# normals on gpu give slightly different values at some pixels
return (((x - y) ** 2) < 1e-5).mean() > 0.99
scriptdir = os.path.dirname(os.path.realpath(__file__))
gt_nmap = np.load(os.path.join(scriptdir, "../data/msrd_b2s3/normal_map.npy"))
gt_global_nmap = np.load(
os.path.join(scriptdir, "../data/msrd_b2s3/global_normal_map.npy")
)
for use_poses in [False, True]:
for channels_first in [False, True]:
rgbdimages, *_ = TestRGBDImages.init_rgbdimages(
channels_first=channels_first, use_poses=use_poses, device=device
)
normal_map = rgbdimages.normal_map
global_normal_map = rgbdimages.global_normal_map
self.assertEqual(normal_map.ndim, 5)
self.assertEqual(global_normal_map.ndim, 5)
remove_missing = global_normal_map * rgbdimages.valid_depth_mask.to(
global_normal_map.dtype
)
assert ((global_normal_map - remove_missing) ** 2).sum().item() < 1e-5
if channels_first:
normal_map = normal_map.permute(0, 1, 3, 4, 2).contiguous()
global_normal_map = global_normal_map.permute(
0, 1, 3, 4, 2
).contiguous()
self.assertEqual(normal_map.shape, (2, 3, 120, 160, 3))
self.assertEqual(global_normal_map.shape, (2, 3, 120, 160, 3))
nmap = normal_map.detach().cpu().numpy()
global_nmap = global_normal_map.detach().cpu().numpy()
assert diff(gt_nmap, nmap)
if use_poses:
# # visualize normals
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots(2, 2)
# ax[0, 0].imshow((nmap[-1, -1] * 255).astype(np.uint8))
# ax[0, 1].imshow((gt_nmap[-1, -1] * 255).astype(np.uint8))
# ax[1, 0].imshow((global_nmap[-1, -1] * 255).astype(np.uint8))
# ax[1, 1].imshow((gt_global_nmap[-1, -1] * 255).astype(np.uint8))
# plt.show()
assert diff(gt_global_nmap, global_nmap)
else:
assert diff(gt_nmap, global_nmap)
def test_indexing(self):
device = default_to_cpu_if_no_gpu("cuda:0")
for channels_first in [False, True]:
# rgb_image
rgbdimages, *_ = TestRGBDImages.init_rgbdimages(
channels_first=channels_first, use_poses=True, device=device
)
self.assertClose(rgbdimages.rgb_image, rgbdimages.rgb_image)
self.assertClose(
rgbdimages[0:2, 0:2].rgb_image, rgbdimages.rgb_image[0:2, 0:2]
)
self.assertClose(
rgbdimages[1, 1].rgb_image.squeeze(0).squeeze(0),
rgbdimages.rgb_image[1, 1],
)
# depth_image
self.assertClose(rgbdimages.depth_image, rgbdimages.depth_image)
self.assertClose(
rgbdimages[0:2, 0:2].depth_image, rgbdimages.depth_image[0:2, 0:2]
)
self.assertClose(
rgbdimages[1, 1].depth_image.squeeze(0).squeeze(0),
rgbdimages.depth_image[1, 1],
)
# intrinsics
self.assertClose(rgbdimages.intrinsics, rgbdimages.intrinsics)
self.assertClose(
rgbdimages[0:2, 0:1].intrinsics, rgbdimages.intrinsics[0:2, 0:1]
)
self.assertClose(
rgbdimages[1, 0].intrinsics.squeeze(0).squeeze(0),
rgbdimages.intrinsics[1, 0],
)
# poses
self.assertClose(rgbdimages.poses, rgbdimages.poses)
self.assertClose(rgbdimages[0:2, 0:2].poses, rgbdimages.poses[0:2, 0:2])
self.assertClose(
rgbdimages[1, 1].poses.squeeze(0).squeeze(0), rgbdimages.poses[1, 1]
)
# vertex_map
self.assertClose(rgbdimages.vertex_map, rgbdimages.vertex_map)
self.assertClose(
rgbdimages[0:2, 0:2].vertex_map, rgbdimages.vertex_map[0:2, 0:2]
)
self.assertClose(
rgbdimages[1, 1].vertex_map.squeeze(0).squeeze(0),
rgbdimages.vertex_map[1, 1],
)
# normal_map
self.assertClose(rgbdimages.normal_map, rgbdimages.normal_map)
self.assertClose(
rgbdimages[0:2, 0:2].normal_map, rgbdimages.normal_map[0:2, 0:2]
)
self.assertClose(
rgbdimages[1, 1].normal_map.squeeze(0).squeeze(0),
rgbdimages.normal_map[1, 1],
)
if __name__ == "__main__":
logging.basicConfig()
logger = logging.getLogger("config")
logger.setLevel(logging.DEBUG)
unittest.main()
|
tools/SeeDot/seedot/compiler/codegen/arduino.py | Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML | 719 | 12621441 | <gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
'''
Arduino backend handles the automatic Arduino sketch generation.
It adds the appropriate header files to the sketch and makes it easy to 'compile and upload' the sketch to a device.
Most of the routines in the base class CodegenBase are unchanged.
'''
import numpy as np
import os
from seedot.compiler.codegen.codegenBase import CodegenBase
import seedot.compiler.ir.ir as IR
import seedot.compiler.ir.irUtil as IRUtil
import seedot.compiler.type as Type
from seedot.util import *
from seedot.writer import Writer
import functools
import operator
class Arduino(CodegenBase):
def __init__(self, outputDir, decls, localDecls, scales, intvs, cnsts, expTables, globalVars, internalVars, floatConstants, substitutions, demotedVarsOffsets, varsForBitwidth, varLiveIntervals, notScratch, coLocatedVariables):
super().__init__(decls, localDecls, scales, intvs, cnsts, expTables, globalVars, internalVars, floatConstants, substitutions, demotedVarsOffsets, varsForBitwidth, varLiveIntervals, notScratch, coLocatedVariables)
outputFile = os.path.join(outputDir, "predict.cpp")
self.outputDir = outputDir
self.out = Writer(outputFile)
def printCompilerConfig(self):
configFile = os.path.join(self.outputDir, "compileConfig.h")
with open(configFile, "w") as file:
file.write("// The datatype of the fixed-point representation is specified below\n")
file.write("#define INT%d\n" % config.wordLength)
if forFloat():
file.write("#define XFLOAT\n")
else:
if config.vbwEnabled:
file.write("#define XINT%d\n" % self.varsForBitwidth['X'])
else:
file.write("#define XINT%d\n" % config.wordLength)
def printPrefix(self):
self.printCompilerConfig()
self.printArduinoIncludes()
self.printExpTables()
self.printArduinoHeader()
        self.computeScratchLocationsFirstFitPriority()  # alternatives: computeScratchLocations, computeScratchLocationsFirstFit, computeScratchLocationsDLX
self.printVarDecls()
self.printConstDecls()
self.out.printf('\n')
def printArduinoIncludes(self):
self.out.printf('#include <Arduino.h>\n\n', indent=True)
self.out.printf('#include "config.h"\n', indent=True)
self.out.printf('#include "predict.h"\n', indent=True)
self.out.printf('#include "library.h"\n', indent=True)
self.out.printf('#include "model.h"\n\n', indent=True)
self.out.printf('using namespace model;\n\n', indent=True)
# Dumps the generated look-up table for computing exponentials.
def printExpTables(self):
for exp, [table, [tableVarA, tableVarB]] in self.expTables.items():
self.printExpTable(table[0], tableVarA)
self.printExpTable(table[1], tableVarB)
self.out.printf('\n')
def printExpTable(self, table_row, var):
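        # The table is emitted as a PROGMEM array so it lives in the Arduino's flash
        # memory rather than its much smaller SRAM.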
self.out.printf('const PROGMEM MYINT %s[%d] = {\n' % (
var.idf, len(table_row)), indent=True)
self.out.increaseIndent()
self.out.printf('', indent=True)
for i in range(len(table_row)):
self.out.printf('%d, ' % table_row[i])
self.out.decreaseIndent()
self.out.printf('\n};\n')
def printArduinoHeader(self):
self.out.printf('int predict() {\n', indent=True)
self.out.increaseIndent()
# Generate the appropriate return expression.
# If integer, return the integer.
# If tensor of size 0, convert the fixed-point integer to float and return the float value.
# If tensor of size >0, convert the tensor to fixed-point integer, print
# it to the serial port, and return void.
def printSuffix(self, expr: IR.Expr):
self.out.printf('\n')
type = self.decls[expr.idf]
if Type.isInt(type):
self.out.printf('return ', indent=True)
self.print(expr)
self.out.printf(';\n')
elif Type.isTensor(type):
idfr = expr.idf
exponent = self.scales[expr.idf]
num = 2 ** exponent
if type.dim == 0:
self.out.printf('Serial.println(', indent=True)
self.out.printf('float(' + idfr + ')*' + str(num))
self.out.printf(', 6);\n')
else:
iters = []
for i in range(type.dim):
s = chr(ord('i') + i)
tempVar = IR.Var(s)
iters.append(tempVar)
expr_1 = IRUtil.addIndex(expr, iters)
cmds = IRUtil.loop(type.shape, iters, [
IR.PrintAsFloat(expr_1, exponent)])
self.print(IR.Prog(cmds))
else:
assert False
self.out.decreaseIndent()
self.out.printf('}\n', indent=True)
self.out.close()
'''
Below functions are overriding their corresponding definitions in codegenBase.py.
These function have arduino-specific print functions.
'''
# Print the variable with pragmas.
def printVar(self, ir):
# Model parameters are read from RAM, hence they are read differently.
if ir.inputVar:
if config.wordLength == 8:
self.out.printf('((MYINT) pgm_read_byte_near(&')
            elif config.wordLength == 16:
self.out.printf('((MYINT) pgm_read_word_near(&')
elif config.wordLength == 32:
self.out.printf('((MYINT) pgm_read_dword_near(&')
else:
assert False
self.out.printf('%s', ir.idf)
for e in ir.idx:
self.out.printf('[')
self.print(e)
self.out.printf(']')
if ir.inputVar:
self.out.printf('))')
def printFor(self, ir):
self.printForHeader(ir)
self.out.increaseIndent()
# The following is used for memory management within a for loop only.
# All the variables stored in ir.varDecls are those variables with scope limited to within the for loop.
# Currently, ir.varDecls is never populated as alternate memory management mechanism in codegen.py is used.
varToLiveRange = []
for var in ir.varDecls.keys():
size = np.prod(self.localDecls[var].shape)
varToLiveRange.append((self.varLiveIntervals[var], var, size, self.varsForBitwidth[var]))
varToLiveRange.sort()
usedSpaceMap = {}
totalScratchSize = -1
listOfDimensions = []
for ([_,_], var, size, atomSize) in varToLiveRange:
listOfDimensions.append(size)
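        # Use the most frequently occurring buffer size (the "mode") as the base
        # granularity for carving up the scratch space.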
        mode = np.bincount(listOfDimensions).argmax() if len(listOfDimensions) > 0 else None
for ([startIns, endIns], var, size, atomSize) in varToLiveRange:
if var in self.notScratch:
continue
spaceNeeded = size * atomSize // 8
varsToKill = []
for activeVar in usedSpaceMap.keys():
endingIns = usedSpaceMap[activeVar][0]
if endingIns < startIns:
varsToKill.append(activeVar)
for tbk in varsToKill:
del usedSpaceMap[tbk]
i = 0
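            # Round the block size to a power-of-two multiple (or fraction) of the mode,
            # presumably so allocations of different sizes tile the scratch buffer cleanly.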
if spaceNeeded >= mode:
blockSize = int(2**np.ceil(np.log2(spaceNeeded / mode))) * mode
else:
blockSize = mode / int(2**np.floor(np.log2(mode // spaceNeeded)))
breakOutOfWhile = True
while True:
potentialStart = int(blockSize * i)
potentialEnd = int(blockSize * (i+1)) - 1
for activeVar in usedSpaceMap.keys():
(locationOccupiedStart, locationOccupiedEnd) = usedSpaceMap[activeVar][1]
if not (locationOccupiedStart > potentialEnd or locationOccupiedEnd < potentialStart):
i += 1
breakOutOfWhile = False
break
else:
breakOutOfWhile = True
continue
if breakOutOfWhile:
break
usedSpaceMap[var] = (endIns, (potentialStart, potentialEnd))
totalScratchSize = max(totalScratchSize, potentialEnd)
self.scratchSubs[var] = potentialStart
self.out.printf("char scratch[%d];\n"%(totalScratchSize+1), indent=True)
self.printLocalVarDecls(ir)
for cmd in ir.cmd_l:
self.print(cmd)
self.out.decreaseIndent()
self.out.printf('}\n', indent=True)
# The variable X is used to define the data point.
# It is either read from the serial port or from the device's memory based on the operating mode.
# The getIntFeature() function reads the appropriate value of X based on the mode.
def printAssn(self, ir):
if isinstance(ir.e, IR.Var) and ir.e.idf == "X":
self.out.printf("", indent=True)
self.print(ir.var)
indices = [index.idf for index in ir.e.idx]
sizes = self.localDecls[ir.e.idf].shape if ir.e.idf in self.localDecls else self.decls[ir.e.idf].shape
assert len(indices) == len(sizes), "Illegal state"
prod = functools.reduce(operator.mul, sizes)
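            # Build a row-major flattened index expression (i0*stride0 + i1*stride1 + ...)
            # so the feature vector X can be read element-wise by getIntFeature()/getFloatFeature().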
dereferenceString = ""
for i in range(len(indices)):
prod = prod // sizes[i]
dereferenceString += ("%s * %d + " % (indices[i], prod))
dereferenceString = dereferenceString[:-3]
if forFixed():
self.out.printf(" = getIntFeature(%s);\n"%(dereferenceString))
else:
self.out.printf(" = getFloatFeature(%s);\n"%(dereferenceString))
else:
super().printAssn(ir)
def printFuncCall(self, ir):
self.out.printf("{\n", indent=True)
self.out.increaseIndent()
self.printLocalVarDecls(ir)
self.out.printf("%s(" % ir.name, indent=True)
keys = list(ir.argList)
for i in range(len(keys)):
arg = keys[i]
# Do not print the 'X' variable as it will be read from the getIntFeature() function.
if isinstance(arg, IR.Var) and arg.idf == 'X':
continue
# The value of x in the below code is the number of special characters (& and []) around the variable in the function call.
# This number depends on the shape of the variable.
# Example: A[10][10] is written as &A[0][0]. The value of x in this case is 2.
# x is 0 for constants.
# x is -1 for integer variables where only & is printed and not [].
if isinstance(arg, IR.Var) and (arg.idf in self.decls.keys() or arg.idf in self.localDecls.keys()) and not arg.idf == 'X':
type = self.decls[arg.idf] if arg.idf in self.decls else self.localDecls[arg.idf]
if isinstance(type, Type.Tensor):
if type.dim == 0:
x = -1
else:
x = type.dim - len(arg.idx)
else:
x = -1
else:
x = 0
if forFixed():
typeCast = "(int%d_t*)" % self.varsForBitwidth[arg.idf] if x > 0 else ""
self.out.printf(typeCast)
if not (isinstance(arg, IR.Var) and arg.idf in self.scratchSubs):
if x != 0:
self.out.printf("&")
self.print(arg)
if x != 0 and x != -1:
self.out.printf("[0]" * x)
else:
self.out.printf("(scratch + %d)"%(self.scratchSubs[arg.idf]))
if i != len(keys) - 1:
self.out.printf(", ")
self.out.printf(");\n")
self.out.decreaseIndent()
self.out.printf("}\n", indent=True)
def printPrint(self, ir):
self.out.printf('Serial.println(', indent=True)
self.print(ir.expr)
self.out.printf(');\n')
def printPrintAsFloat(self, ir):
self.out.printf('Serial.println(float(', indent=True)
self.print(ir.expr)
self.out.printf(') * ' + str(2 ** ir.expnt) + ', 6);')
|
irc3/plugins/quakenet.py | gtmanfred/irc3 | 178 | 12621449 | <reponame>gtmanfred/irc3
import hashlib
import hmac
import irc3
__doc__ = '''
====================================================
:mod:`irc3.plugins.quakenet` QuakeNet authorization
====================================================
Plugin supports both simple and
`challenge based <https://www.quakenet.org/development/challengeauth>`_
authorization. Challenge based auth is used by default, since it is more
secure than simple. Also, plugin can hide your IP after authorization
by applying ``+x`` mode.
..
>>> from irc3.testing import IrcBot
>>> from irc3.testing import ini2config
Usage::
>>> config = ini2config("""
... [bot]
... includes =
... irc3.plugins.quakenet
... [quakenet]
... user = login
... password = <PASSWORD>
... # optional, false by default
... hidehost = true
... # optional, true by default
... challenge_auth = true
... """)
>>> bot = IrcBot(**config)
'''
Q_NICK = "<EMAIL>"
CHALLENGE_PATTERN = ("^:Q![a-zA-Z]+<EMAIL> NOTICE "
"(?P<nick>\S+) :CHALLENGE (?P<challenge>[a-z0-9]+) ")
def get_digest(digest):
if not isinstance(digest, str): # pragma: no cover
raise ValueError("Wrong type of digest")
digest = digest.lower()
if digest in ("sha256", "sha1", "md5"):
return getattr(hashlib, digest)
else: # pragma: no cover
raise ValueError("Wrong value for digest")
def challenge_auth(username, password, challenge, lower, digest='sha256'):
"""Calculates quakenet's challenge auth hash
.. code-block:: python
>>> challenge_auth("mooking", "0000000000",
... "12345678901234567890123456789012", str.lower, "md5")
'2ed1a1f1d2cd5<PASSWORD>'
"""
def hdig(x):
return fdigest(x).hexdigest()
fdigest = get_digest(digest)
luser = lower(username)
tpass = password[:10].encode("ascii")
hvalue = hdig("{0}:{1}".format(luser, hdig(tpass)).encode("ascii"))
bhvalue = hvalue.encode("ascii")
bchallenge = challenge.encode("ascii")
return hmac.HMAC(bhvalue, bchallenge, digestmod=fdigest).hexdigest()
@irc3.plugin
class QuakeNet(object):
requires = [
'irc3.plugins.core',
'irc3.plugins.casefold'
]
def __init__(self, bot):
self.bot = bot
self.config = bot.config.get("quakenet", {})
self.user = self.config.get('user', None)
self.password = self.config.get('password', None)
self.hidehost = bool(self.config.get('hidehost', False))
# secure by default
self.challenge_auth = bool(self.config.get('challenge_auth', True))
self.pending_auth = False
def server_ready(self):
self.auth()
def auth(self):
if self.user and self.password:
if self.challenge_auth:
self.bot.log.info("Requesting challenge")
self.bot.privmsg(Q_NICK, 'CHALLENGE')
self.pending_auth = True
else:
self.bot.log.info("Sending login information to QuakeNet")
self.bot.send_line("AUTH {user} {password}".format(
user=self.user, password=self.password))
self.after_auth()
def after_auth(self):
if self.hidehost:
self.bot.mode(self.bot.nick, "+x")
@irc3.event(CHALLENGE_PATTERN)
def get_challenge(self, nick, challenge, **kwargs):
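        # Only answer challenges addressed to our own nick, and only if we actually
        # requested one via the CHALLENGE command.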
if nick == self.bot.nick and self.pending_auth:
hauth = challenge_auth(self.user, self.password, challenge,
self.bot.casefold, "sha256")
cmd = 'CHALLENGEAUTH {user} {response} {algo}'. \
format(user=self.user, response=hauth, algo="HMAC-SHA-256")
self.bot.log.info("Performing challenge authorization on QuakeNet")
self.bot.privmsg(Q_NICK, cmd)
self.after_auth()
|