max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
dmb/visualization/stereo/sparsification_plot.py | jiaw-z/DenseMatchingBenchmark | 160 | 11197215 |
import warnings
import numpy as np
import torch
def mask_to_neg(x, mask):
    # where mask == 1, keep x; where mask == 0, set x to -1
x = x * mask + (mask - 1)
return x
def norm(x):
x = x / (x.max() - x.min())
    # rescale x for counting convenience; it doesn't influence the final result
x = x * 0.9 + 0.05
return x
def sparsification_plot(est_disp=None, gt_disp=None, est_conf=None, bins=10, lb=None, ub=None):
"""
Refer to paper: Uncertainty estimates and multi-hypotheses networks for optical flow
Args:
est_disp (Tensor): in (..., Height, Width) layout
gt_disp (Tensor): in (..., Height, Width) layout
        est_conf (Tensor): in (..., Height, Width) layout; it will be normalized to [0, 1] for convenience
        bins (int): divide all pixels into `bins` fractions, i.e. each fraction is (100/bins)%
        lb (scalar): the lower bound of valid disparity; pixels at or below it are masked out
        ub (scalar): the upper bound of valid disparity; pixels at or above it are masked out
    Output:
        dict: the average error (EPE) as the pixels with the lowest confidence are gradually removed;
            ideally, the error should decrease monotonically
    """
    assert isinstance(bins, int) and (100 % bins == 0), \
        "bins must evenly divide 100 and be an int, but got {} of type {}".format(bins, type(bins))
error_dict = {}
percentages = []
part = 100 // bins
for i in range(bins + 1):
percentages.append(part * i)
error_dict['est_{}'.format(part * i)] = torch.Tensor([0.])
error_dict['oracle_{}'.format(part * i)] = torch.Tensor([0.])
error_dict['random_{}'.format(part * i)] = torch.Tensor([0.])
    err_msg = '{} is supposed to be torch.Tensor; found {}'
if not isinstance(est_disp, torch.Tensor):
warnings.warn(err_msg.format('Estimated disparity map', type(est_disp)))
if not isinstance(gt_disp, torch.Tensor):
warnings.warn(err_msg.format('Ground truth disparity map', type(gt_disp)))
if not isinstance(est_conf, torch.Tensor):
warnings.warn(err_msg.format('Estimated confidence map', type(est_conf)))
if any([not isinstance(est_disp, torch.Tensor), not isinstance(gt_disp, torch.Tensor),
not isinstance(est_conf, torch.Tensor)]):
        warnings.warn('Input maps contain None or non-Tensor values; torch.Tensor expected')
return error_dict
    if not est_disp.shape == gt_disp.shape:
        warnings.warn('Estimated and ground truth disparity maps should have the same shape')
    if not est_disp.shape == est_conf.shape:
        warnings.warn('Estimated disparity and confidence maps should have the same shape')
if any([not (est_disp.shape == gt_disp.shape), not (est_disp.shape == est_conf.shape)]):
return error_dict
est_disp = est_disp.clone().cpu()
gt_disp = gt_disp.clone().cpu()
est_conf = est_conf.clone().cpu()
mask = torch.ones(gt_disp.shape, dtype=torch.uint8)
if lb is not None:
mask = mask & (gt_disp > lb)
if ub is not None:
mask = mask & (gt_disp < ub)
mask.detach_()
total_valid_num = mask.sum()
if total_valid_num < bins:
return error_dict
mask = mask.float()
est_disp = est_disp * mask
gt_disp = gt_disp * mask
abs_error = torch.abs(gt_disp - est_disp)
# normalize confidence map and error map
est_conf = norm(est_conf)
    # lower error is better, while higher confidence is better
neg_norm_abs_error = 1.0 - norm(abs_error)
# random remove map
randRemove = torch.rand_like(est_conf)
randRemove = norm(randRemove)
    # set invalid pixels to -1
neg_norm_abs_error = mask_to_neg(neg_norm_abs_error, mask)
est_conf = mask_to_neg(est_conf, mask)
randRemove = mask_to_neg(randRemove, mask)
# flatten
flat_neg_norm_abs_error, _ = neg_norm_abs_error.view(-1).sort()
flat_est_conf, _ = est_conf.view(-1).sort()
flat_randRemove, _ = randRemove.view(-1).sort()
assert (flat_neg_norm_abs_error <= 0).sum() == (flat_est_conf <= 0).sum(), \
'The number of invalid confidence and disparity should be the same'
assert (flat_neg_norm_abs_error <= 0).sum() == (flat_randRemove <= 0).sum(), \
'The number of invalid random map and disparity should be the same'
start_pointer = (flat_neg_norm_abs_error <= 0).sum()
part = (total_valid_num - start_pointer - 1) // bins
pointer_edges = [start_pointer + part * i for i in range(bins + 1)]
conf_edges = []
error_edges = []
rand_edges = []
for pointer in pointer_edges:
conf_edges.append(flat_est_conf[pointer])
error_edges.append(flat_neg_norm_abs_error[pointer])
rand_edges.append(flat_randRemove[pointer])
for i in range(bins):
        # kick out the lowest percentages[i]% confidence pixels and evaluate the rest
conf_mask = (est_conf >= conf_edges[i]).float()
        # kick out the largest percentages[i]% error pixels and evaluate the rest
        # for absolute error, lower is better, unlike the confidence value
error_mask = (neg_norm_abs_error >= error_edges[i]).float()
        # kick out percentages[i]% of the randomly generated values
rand_mask = (randRemove >= rand_edges[i]).float()
error_dict['est_{}'.format(percentages[i])] = (abs_error * conf_mask).sum() / (conf_mask.sum())
error_dict['oracle_{}'.format(percentages[i])] = (abs_error * error_mask).sum() / (error_mask.sum())
error_dict['random_{}'.format(percentages[i])] = (abs_error * rand_mask).sum() / (rand_mask.sum())
return error_dict
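# Minimal usage sketch (illustrative only): the tensor shapes, disparity range
# and lb/ub bounds below are assumptions, not values prescribed by the benchmark.
if __name__ == '__main__':
    _est = torch.rand(1, 64, 128) * 192.0   # fake estimated disparity
    _gt = torch.rand(1, 64, 128) * 192.0    # fake ground truth disparity
    _conf = torch.rand(1, 64, 128)          # fake confidence map
    _curves = sparsification_plot(_est, _gt, _conf, bins=10, lb=0.0, ub=192.0)
    # each 'est_p' entry is the mean absolute error after removing the p% of
    # pixels with the lowest confidence; 'oracle_p' and 'random_p' are the
    # oracle and random-removal baselines
    for _key in sorted(_curves):
        print(_key, float(_curves[_key]))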
|
alipay/aop/api/domain/AlipayEcoMycarMaintainAftersaleSyncModel.py | snowxmas/alipay-sdk-python-all | 213 | 11197233 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayEcoMycarMaintainAftersaleSyncModel(object):
def __init__(self):
self._aftersale_no = None
self._refuse_reason = None
self._status = None
@property
def aftersale_no(self):
return self._aftersale_no
@aftersale_no.setter
def aftersale_no(self, value):
self._aftersale_no = value
@property
def refuse_reason(self):
return self._refuse_reason
@refuse_reason.setter
def refuse_reason(self, value):
self._refuse_reason = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def to_alipay_dict(self):
params = dict()
if self.aftersale_no:
if hasattr(self.aftersale_no, 'to_alipay_dict'):
params['aftersale_no'] = self.aftersale_no.to_alipay_dict()
else:
params['aftersale_no'] = self.aftersale_no
if self.refuse_reason:
if hasattr(self.refuse_reason, 'to_alipay_dict'):
params['refuse_reason'] = self.refuse_reason.to_alipay_dict()
else:
params['refuse_reason'] = self.refuse_reason
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoMycarMaintainAftersaleSyncModel()
if 'aftersale_no' in d:
o.aftersale_no = d['aftersale_no']
if 'refuse_reason' in d:
o.refuse_reason = d['refuse_reason']
if 'status' in d:
o.status = d['status']
return o
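# Minimal usage sketch: round-trip an instance through to_alipay_dict /
# from_alipay_dict (the field values below are illustrative, not real data).
if __name__ == '__main__':
    model = AlipayEcoMycarMaintainAftersaleSyncModel()
    model.aftersale_no = '20200101000001'
    model.refuse_reason = 'out of warranty'
    model.status = 'REFUSED'
    params = model.to_alipay_dict()
    restored = AlipayEcoMycarMaintainAftersaleSyncModel.from_alipay_dict(params)
    print(json.dumps(params), restored.status)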
|
objectModel/Python/cdm/persistence/modeljson/argument_persistence.py | rt112000/CDM | 884 | 11197237 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from typing import Optional, TYPE_CHECKING
from cdm.enums import CdmObjectType
from .types import Annotation
if TYPE_CHECKING:
from cdm.objectmodel import CdmArgumentDefinition, CdmCorpusContext
from cdm.utilities import CopyOptions, ResolveOptions
class ArgumentPersistence:
@staticmethod
async def from_data(ctx: 'CdmCorpusContext', obj: 'Annotation') -> 'CdmArgumentDefinition':
arg = ctx.corpus.make_object(CdmObjectType.ARGUMENT_DEF, obj.name)
arg.name = obj.name
arg.value = obj.value
return arg
@staticmethod
async def to_data(instance: 'CdmArgumentDefinition', res_opt: 'ResolveOptions', options: 'CopyOptions') -> Optional['Annotation']:
if isinstance(instance.value, str):
annotation = Annotation()
annotation.name = instance.name
annotation.value = instance.value
return annotation
|
compiler_gym/bin/validate.py | sahirgomez1/CompilerGym | 562 | 11197256 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Validate environment states.
Example usage:
.. code-block::
$ cat << EOF |
benchmark,reward,walltime,commandline
cbench-v1/crc32,0,1.2,opt input.bc -o output.bc
EOF
python -m compiler_gym.bin.validate --env=llvm-ic-v0 -
Use this script to validate environment states. Environment states are read from
stdin as a comma-separated list of benchmark names, walltimes, episode rewards,
and commandlines. Each state is validated by replaying the commandline and
validating that the reward matches the expected value. Further, some benchmarks
allow for validation of program semantics. When available, those additional
checks will be automatically run.
Input Format
------------
The correct format for generating input states can be generated using
:class:`CompilerEnvStateWriter <compiler_gym.CompilerEnvStateWriter>`. For
example:
>>> env = gym.make("llvm-autophase-ic-v0")
>>> env.reset()
>>> env.step(env.action_space.sample())
>>> with CompilerEnvStateWriter(open("results.csv", "wb")) as writer:
... writer.write_state(env.state)
Output Format
-------------
This script prints one line per input state. The order of input states is not
preserved. A successfully validated state has the format:
.. code-block::
✅ <benchmark_name> <reproduced_reward>
Else if validation fails, the output is:
.. code-block::
❌ <benchmark_name> <error_details>
"""
import json
import re
import sys
import numpy as np
from absl import app, flags
import compiler_gym.util.flags.nproc # noqa Flag definition.
from compiler_gym.compiler_env_state import CompilerEnvState, CompilerEnvStateReader
from compiler_gym.util.flags.env_from_flags import env_from_flags
from compiler_gym.util.shell_format import emph, plural
from compiler_gym.util.statistics import arithmetic_mean, geometric_mean, stdev
from compiler_gym.validate import ValidationResult, validate_states
flags.DEFINE_boolean(
"inorder",
False,
"Whether to print results in the order they are provided. "
"The default is to print results as soon as they are available.",
)
flags.DEFINE_string(
"reward_aggregation",
"geomean",
"The aggregation method to use for rewards. Allowed values are 'mean' for "
"arithmetic mean and 'geomean' for geometric mean.",
)
flags.DEFINE_boolean(
"debug_force_valid",
False,
"Debugging flags. Skips the validation and prints output as if all states "
"were succesfully validated.",
)
flags.DEFINE_boolean(
"summary_only",
False,
"Do not print individual validation results, print only the summary at the " "end.",
)
flags.DEFINE_string(
"validation_logfile",
"validation.log.json",
"The path of a file to write a JSON validation log to.",
)
FLAGS = flags.FLAGS
def state_name(state: CompilerEnvState) -> str:
"""Get the string name for a state."""
return re.sub(r"^benchmark://", "", state.benchmark)
def to_string(result: ValidationResult, name_col_width: int) -> str:
"""Format a validation result for printing."""
name = state_name(result.state)
if not result.okay():
msg = ", ".join(result.error_details.strip().split("\n"))
return f"❌ {name} {msg}"
elif result.state.reward is None:
return f"✅ {name}"
else:
return f"✅ {name:<{name_col_width}} {result.state.reward:9.4f}"
def main(argv):
"""Main entry point."""
try:
states = list(CompilerEnvStateReader.read_paths(argv[1:]))
except ValueError as e:
print(e, file=sys.stderr)
sys.exit(1)
if not states:
print(
"No inputs to validate. Pass a CSV file path as an argument, or "
"use - to read from stdin.",
file=sys.stderr,
)
sys.exit(1)
# Send the states off for validation
if FLAGS.debug_force_valid:
validation_results = (
ValidationResult(
state=state,
reward_validated=True,
actions_replay_failed=False,
reward_validation_failed=False,
benchmark_semantics_validated=False,
benchmark_semantics_validation_failed=False,
walltime=0,
)
for state in states
)
else:
validation_results = validate_states(
env_from_flags,
states,
nproc=FLAGS.nproc,
inorder=FLAGS.inorder,
)
# Determine the name of the reward space.
with env_from_flags() as env:
if FLAGS.reward_aggregation == "geomean":
def reward_aggregation(a):
return geometric_mean(np.clip(a, 0, None))
reward_aggregation_name = "Geometric mean"
elif FLAGS.reward_aggregation == "mean":
reward_aggregation = arithmetic_mean
reward_aggregation_name = "Mean"
else:
raise app.UsageError(
f"Unknown aggregation type: '{FLAGS.reward_aggregation}'"
)
if env.reward_space:
reward_name = f"{reward_aggregation_name} {env.reward_space.id}"
else:
reward_name = ""
# Determine the maximum column width required for printing tabular output.
max_state_name_length = max(
len(s)
for s in [state_name(s) for s in states]
+ [
"Mean inference walltime",
reward_name,
]
)
name_col_width = min(max_state_name_length + 2, 78)
error_count = 0
rewards = []
walltimes = []
if FLAGS.summary_only:
def intermediate_print(*args, **kwargs):
del args
del kwargs
else:
intermediate_print = print
def progress_message(i):
intermediate_print(
f"{i} remaining {plural(i, 'state', 'states')} to validate ... ",
end="",
flush=True,
)
progress_message(len(states))
result_dicts = []
    def dump_result_dicts_to_json():
with open(FLAGS.validation_logfile, "w") as f:
json.dump(result_dicts, f)
for i, result in enumerate(validation_results, start=1):
intermediate_print("\r\033[K", to_string(result, name_col_width), sep="")
progress_message(len(states) - i)
result_dicts.append(result.dict())
if not result.okay():
error_count += 1
elif result.reward_validated and not result.reward_validation_failed:
rewards.append(result.state.reward)
walltimes.append(result.state.walltime)
if not i % 10:
            dump_result_dicts_to_json()
    dump_result_dicts_to_json()
# Print a summary footer.
intermediate_print("\r\033[K----", "-" * name_col_width, "-----------", sep="")
print(f"Number of validated results: {emph(len(walltimes))} of {len(states)}")
walltime_mean = f"{arithmetic_mean(walltimes):.3f}s"
walltime_std = f"{stdev(walltimes):.3f}s"
print(
f"Mean walltime per benchmark: {emph(walltime_mean)} "
f"(std: {emph(walltime_std)})"
)
reward = f"{reward_aggregation(rewards):.3f}"
reward_std = f"{stdev(rewards):.3f}"
print(f"{reward_name}: {emph(reward)} " f"(std: {emph(reward_std)})")
if error_count:
sys.exit(1)
if __name__ == "__main__":
app.run(main)
|
seahub/group/urls.py | weimens/seahub | 420 | 11197257 |
# Copyright (c) 2012-2016 Seafile Ltd.
from django.conf.urls import url
from .views import group_remove
urlpatterns = [
url(r'^(?P<group_id>\d+)/remove/$', group_remove, name='group_remove'),
]
|
tests/conftest.py | vishalbelsare/voila-gridstack | 181 | 11197267 |
import os
import voila.app
import pytest
BASE_DIR = os.path.dirname(__file__)
class VoilaTest(voila.app.Voila):
def listen(self):
pass # the ioloop is taken care of by the pytest-tornado framework
@pytest.fixture
def voila_app(voila_args, voila_config):
voila_app = VoilaTest.instance()
voila_app.initialize(voila_args + ['--no-browser', '--template=gridstack'])
voila_config(voila_app)
voila_app.start()
yield voila_app
voila_app.stop()
voila_app.clear_instance()
@pytest.fixture
def base_url():
return "/"
@pytest.fixture
def app(voila_app):
return voila_app.app
@pytest.fixture
def voila_config():
return lambda app: None
@pytest.fixture
def voila_args():
nb_path = os.path.join(BASE_DIR, 'nb.ipynb')
return [nb_path, '--VoilaTest.config_file_paths=[]']
|
convlab/modules/usr/multiwoz/vhus_usr/usermodule.py | ngduyanhece/ConvLab | 405 | 11197282 |
# Modified by Microsoft Corporation.
# Licensed under the MIT license.
# -*- coding: utf-8 -*-
from allennlp.modules import Attention
import random
import numpy as np
import torch
import torch.nn as nn
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def reparameterize(mu, logvar):
std = (0.5*logvar).exp()
eps = torch.randn_like(std)
return eps.mul(std) + mu
def batch_gather_3_1(inputs, dim):
"""
Args:
inputs (batchsz, sen_len, embed_dim)
dim (batchsz)
Returns:
output (batch, embed_dim)
"""
a = torch.arange(dim.shape[0])
b = dim.view(-1) - 1
output = inputs[a, b, :]
return output
def batch_gather_4_2(inputs, dim):
"""
Args:
inputs (batchsz, sen_len, word_len, embed_dim)
dim (batchsz, sen_len)
Returns:
output (batch, sen_len, embed_dim)
"""
a = torch.arange(dim.shape[0])
a = a.unsqueeze(1).expand(-1, dim.shape[1]).contiguous().view(-1)
b = torch.arange(dim.shape[1])
b = b.unsqueeze(0).expand(dim.shape[0], -1).contiguous().view(-1)
c = dim.view(-1) - 1
output = inputs[a, b, c, :].view(dim.shape[0], dim.shape[1], -1)
return output
class VHUS(nn.Module):
def __init__(self, cfg, voc_goal_size, voc_usr_size, voc_sys_size):
super(VHUS, self).__init__()
self.goal_encoder = Encoder(voc_goal_size, cfg.eu_dim, cfg.hu_dim)
self.sys_encoder = Encoder(voc_sys_size, cfg.eu_dim, cfg.hu_dim)
self.context_encoder = nn.GRU(cfg.hu_dim, cfg.hu_dim, batch_first=True)
self.mu_net = nn.Linear(cfg.hu_dim, cfg.hu_dim)
self.logvar_net = nn.Linear(cfg.hu_dim, cfg.hu_dim)
self.mu_net_last = nn.Linear(cfg.hu_dim, cfg.hu_dim)
self.logvar_net_last = nn.Linear(cfg.hu_dim, cfg.hu_dim)
self.concat_net = nn.Linear(cfg.hu_dim*2, cfg.hu_dim)
self.terminal_net = nn.Sequential(nn.Linear(cfg.hu_dim, cfg.hu_dim),
nn.ReLU(),
nn.Linear(cfg.hu_dim, 1))
self.usr_decoder = Decoder(voc_usr_size, cfg.max_ulen, cfg.eu_dim, cfg.hu_dim)
def forward(self, goals, goals_length, posts, posts_length, origin_responses=None):
goal_output, _ = self.goal_encoder(goals) # [B, G, H]
goal_h = batch_gather_3_1(goal_output, goals_length) # [B, H]
batchsz, max_sen, max_word = posts.shape
post_flat = posts.view(batchsz*max_sen, max_word)
post_output_flat, _ = self.sys_encoder(post_flat)
post_output = post_output_flat.view(batchsz, max_sen, max_word, -1) # [B, S, P, H]
post_h = batch_gather_4_2(post_output, posts_length) # [B, S, H]
context_output, _ = self.context_encoder(post_h, goal_h.unsqueeze(0)) # [B, S, H]
posts_sen_length = posts_length.gt(0).sum(1) # [B]
context = batch_gather_3_1(context_output, posts_sen_length) # [B, H]
mu, logvar = self.mu_net(context), self.logvar_net(context)
last_context = batch_gather_3_1(context_output, posts_sen_length-1)
mu_last, logvar_last = self.mu_net_last(last_context), self.logvar_net_last(last_context)
z = reparameterize(mu_last, logvar_last)
hidden = self.concat_net(torch.cat([context, z], dim=1))
teacher = 1 if origin_responses is not None else 0
a_weights, _, _ = self.usr_decoder(inputs=origin_responses, encoder_hidden=hidden.unsqueeze(0), \
teacher_forcing_ratio=teacher)
t_weights = self.terminal_net(context).squeeze(1)
return a_weights, t_weights, (mu_last, logvar_last, mu, logvar)
def select_action(self, goal, goal_length, post, post_length):
"""
:param goal: [goal_len]
:param goal_length: []
:param post: [sen_len, word_len]
:param post_length: [sen_len]
:return: [act_len], [1]
"""
goal, goal_length, post, post_length = goal.to(device=DEVICE).unsqueeze(0), \
goal_length.to(device=DEVICE).unsqueeze(0), post.to(device=DEVICE).unsqueeze(0), \
post_length.to(device=DEVICE).unsqueeze(0)
a_weights, t_weights, _ = self.forward(goal, goal_length, post, post_length)
usr_a = []
for a_weight in a_weights:
a = a_weight.argmax(1).item()
if a == self.usr_decoder.eos_id:
break
usr_a.append(a)
terminal = t_weights.ge(0).item()
return usr_a, terminal
class Encoder(nn.Module):
def __init__(self, vocab_size, embed_size, hidden_size, input_dropout_p=0, dropout_p=0, n_layers=1,
rnn_cell='GRU', variable_lengths=False, embedding=None, update_embedding=True):
super(Encoder, self).__init__()
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.n_layers = n_layers
self.input_dropout = nn.Dropout(p=input_dropout_p)
if rnn_cell == 'LSTM':
self.rnn_cell = nn.LSTM
elif rnn_cell == 'GRU':
self.rnn_cell = nn.GRU
else:
raise ValueError("Unsupported RNN Cell: {0}".format(rnn_cell))
self.variable_lengths = variable_lengths
self.embedding = nn.Embedding(vocab_size, embed_size)
if embedding is not None:
self.embedding.weight = nn.Parameter(embedding)
self.embedding.weight.requires_grad = update_embedding
self.rnn = self.rnn_cell(embed_size, hidden_size, n_layers, batch_first=True, dropout=dropout_p)
def forward(self, input_var, input_lengths=None):
"""
Applies a multi-layer RNN to an input sequence.
Args:
input_var (batch, seq_len): tensor containing the features of the input sequence.
input_lengths (list of int, optional): A list that contains the lengths of sequences
in the mini-batch
Returns: output, hidden
- **output** (batch, seq_len, hidden_size): variable containing the encoded features of
the input sequence
- **hidden** (num_layers * num_directions, batch, hidden_size): variable containing the
features in the hidden state h
"""
embedded = self.embedding(input_var)
embedded = self.input_dropout(embedded)
if self.variable_lengths:
embedded = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths, batch_first=True)
output, hidden = self.rnn(embedded)
if self.variable_lengths:
output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)
return output, hidden
class Decoder(nn.Module):
KEY_ATTN_SCORE = 'attention_score'
KEY_LENGTH = 'length'
KEY_SEQUENCE = 'sequence'
def __init__(self, vocab_size, max_len, embed_size, hidden_size, sos_id=2, eos_id=3, n_layers=1, rnn_cell='GRU',
input_dropout_p=0, dropout_p=0, use_attention=False):
super(Decoder, self).__init__()
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.n_layers = n_layers
self.input_dropout = nn.Dropout(p=input_dropout_p)
if rnn_cell == 'LSTM':
self.rnn_cell = nn.LSTM
elif rnn_cell == 'GRU':
self.rnn_cell = nn.GRU
else:
raise ValueError("Unsupported RNN Cell: {0}".format(rnn_cell))
self.rnn = self.rnn_cell(embed_size, hidden_size, n_layers, batch_first=True, dropout=dropout_p)
self.output_size = vocab_size
self.max_length = max_len
self.use_attention = use_attention
self.eos_id = eos_id
self.sos_id = sos_id
self.init_input = None
self.embedding = nn.Embedding(self.output_size, embed_size)
if use_attention:
self.attention = Attention(self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward_step(self, input_var, hidden, encoder_outputs, function):
batch_size = input_var.size(0)
output_size = input_var.size(1)
embedded = self.embedding(input_var)
embedded = self.input_dropout(embedded)
output, hidden = self.rnn(embedded, hidden)
attn = None
if self.use_attention:
output, attn = self.attention(output, encoder_outputs)
predicted_softmax = function(self.out(output.contiguous().view(-1, self.hidden_size)), dim=1).view(batch_size, output_size, -1)
return predicted_softmax, hidden, attn
def forward(self, inputs=None, encoder_hidden=None, encoder_outputs=None,
function=torch.log_softmax, teacher_forcing_ratio=0):
ret_dict = dict()
if self.use_attention:
ret_dict[Decoder.KEY_ATTN_SCORE] = list()
inputs, batch_size, max_length = self._validate_args(inputs, encoder_hidden, encoder_outputs,
function, teacher_forcing_ratio)
decoder_hidden = encoder_hidden
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
decoder_outputs = []
sequence_symbols = []
lengths = np.array([max_length] * batch_size)
def decode(step, step_output, step_attn, infer=False):
decoder_outputs.append(step_output)
if self.use_attention:
ret_dict[Decoder.KEY_ATTN_SCORE].append(step_attn)
symbols = decoder_outputs[-1].topk(1)[1]
if infer and not step:
symbols = torch.cat((decoder_outputs[-1][:, :self.eos_id],
decoder_outputs[-1][:, (self.eos_id+1):]), 1).topk(1)[1]
symbols.add_(symbols.ge(self.eos_id).long())
sequence_symbols.append(symbols)
eos_batches = symbols.data.eq(self.eos_id)
if eos_batches.dim() > 0:
eos_batches = eos_batches.cpu().view(-1).numpy()
update_idx = ((lengths > step) & eos_batches) != 0
lengths[update_idx] = len(sequence_symbols)
return symbols
# Manual unrolling is used to support random teacher forcing.
# If teacher_forcing_ratio is True or False instead of a probability, the unrolling can be done in graph
if use_teacher_forcing:
decoder_input = inputs[:, :-1]
decoder_output, decoder_hidden, attn = self.forward_step(decoder_input, decoder_hidden, encoder_outputs,
function=function)
for di in range(decoder_output.size(1)):
step_output = decoder_output[:, di, :]
if attn is not None:
step_attn = attn[:, di, :]
else:
step_attn = None
decode(di, step_output, step_attn)
else:
decoder_input = inputs[:, 0].unsqueeze(1)
for di in range(max_length):
decoder_output, decoder_hidden, step_attn = self.forward_step(decoder_input, decoder_hidden, encoder_outputs,
function=function)
step_output = decoder_output.squeeze(1)
symbols = decode(di, step_output, step_attn, infer=True)
decoder_input = symbols
ret_dict[Decoder.KEY_SEQUENCE] = sequence_symbols
ret_dict[Decoder.KEY_LENGTH] = lengths.tolist()
return decoder_outputs, decoder_hidden, ret_dict # NLLLoss
def _validate_args(self, inputs, encoder_hidden, encoder_outputs, function, teacher_forcing_ratio):
if self.use_attention:
if encoder_outputs is None:
raise ValueError("Argument encoder_outputs cannot be None when attention is used.")
# inference batch size
if inputs is None and encoder_hidden is None:
batch_size = 1
else:
if inputs is not None:
batch_size = inputs.size(0)
else:
if self.rnn_cell is nn.LSTM:
batch_size = encoder_hidden[0].size(1)
elif self.rnn_cell is nn.GRU:
batch_size = encoder_hidden.size(1)
# set default input and max decoding length
if inputs is None:
if teacher_forcing_ratio > 0:
raise ValueError("Teacher forcing has to be disabled (set 0) when no inputs is provided.")
inputs = torch.LongTensor([self.sos_id] * batch_size).view(batch_size, 1)
if torch.cuda.is_available():
inputs = inputs.cuda()
max_length = self.max_length
else:
max_length = inputs.size(1) - 1 # minus the start of sequence symbol
return inputs, batch_size, max_length
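# Minimal sanity sketch for the gather helpers and the Encoder above
# (toy shapes and a tiny vocabulary; all values are illustrative assumptions).
if __name__ == '__main__':
    _x3 = torch.arange(24.).view(2, 3, 4)       # (batchsz, sen_len, embed_dim)
    _len3 = torch.tensor([2, 3])
    print(batch_gather_3_1(_x3, _len3).shape)   # torch.Size([2, 4])
    _x4 = torch.arange(120.).view(2, 3, 4, 5)   # (batchsz, sen_len, word_len, embed_dim)
    _len4 = torch.tensor([[1, 2, 4], [3, 1, 2]])
    print(batch_gather_4_2(_x4, _len4).shape)   # torch.Size([2, 3, 5])
    _enc = Encoder(vocab_size=10, embed_size=8, hidden_size=16)
    _out, _hidden = _enc(torch.randint(0, 10, (2, 5)))
    print(_out.shape, _hidden.shape)            # (2, 5, 16) and (1, 2, 16)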
|
angr/procedures/win32/system_paths.py | Kyle-Kyle/angr | 6,132 | 11197292 | import angr
import claripy
class GetTempPathA(angr.SimProcedure):
RESULT = claripy.BVV(b"C:\\Temp\\")
def run(self, nBufferLength, lpBuffer):
try:
length = self.state.solver.eval_one(nBufferLength)
except angr.errors.SimValueError:
raise angr.errors.SimProcedureError("Can't handle symbolic nBufferLength in GetTempPath")
copy_len = min(self.RESULT.length//8, length - 1)
self.state.memory.store(lpBuffer, self.RESULT[self.RESULT.length - 1 : self.RESULT.length - copy_len*8].concat(claripy.BVV(0, 8)))
return self.RESULT.length // 8
class GetWindowsDirectoryA(angr.SimProcedure):
RESULT = claripy.BVV(b"C:\\Windows")
def run(self, lpBuffer, uSize):
try:
length = self.state.solver.eval_one(uSize)
except angr.errors.SimValueError:
raise angr.errors.SimProcedureError("Can't handle symbolic uSize in GetWindowsDirectory")
copy_len = min(self.RESULT.length//8, length - 1)
self.state.memory.store(lpBuffer, self.RESULT[self.RESULT.length - 1 : self.RESULT.length - copy_len*8].concat(claripy.BVV(0, 8)))
return self.RESULT.length // 8
|
inter/GetQueueCountAsync.py | middleprince/12306 | 33,601 | 11197308 |
import TickerConfig
# coding=utf-8
import datetime
import sys
import time
from collections import OrderedDict
import wrapcache
from inter.ConfirmSingleForQueueAsys import confirmSingleForQueueAsys
class getQueueCountAsync:
"""
    Ticket queueing.
"""
def __init__(self,
session,
train_no,
stationTrainCode,
fromStationTelecode,
toStationTelecode,
leftTicket,
set_type,
users,
station_dates,
passengerTicketStr,
oldPassengerStr,
result,
ifShowPassCodeTime):
self.train_no = train_no
self.session = session
self.stationTrainCode = stationTrainCode
self.fromStationTelecode = fromStationTelecode
self.toStationTelecode = toStationTelecode
self.set_type = set_type
self.leftTicket = leftTicket
self.users = users
self.station_dates = station_dates
self.passengerTicketStr = passengerTicketStr
self.oldPassengerStr = oldPassengerStr
self.result = result
self.ifShowPassCodeTime=ifShowPassCodeTime
def data_par(self):
"""
        - Field descriptions
        - train_date: travel date
        - train_no: train number, returned by the query API
        - stationTrainCode: train code
        - seatType: the corresponding seat class
        - fromStationTelecode: departure city
        - toStationTelecode: arrival city
        - leftTicket: returned by the query API
        - purpose_codes: student or adult
        - _json_att: not really useful, but include it anyway
:return:
"""
        if sys.version_info.major == 2:
new_train_date = filter(None, str(time.asctime(time.strptime(self.station_dates, "%Y-%m-%d"))).split(" "))
else:
new_train_date = list(filter(None, str(time.asctime(time.strptime(self.station_dates, "%Y-%m-%d"))).split(" ")))
data = OrderedDict()
data['train_date'] = "{0} {1} {2} {3} 00:00:00 GMT+0800 (中国标准时间)".format(
new_train_date[0],
new_train_date[1],
            new_train_date[2] if len(new_train_date[2]) == 2 else f"0{new_train_date[2]}",
new_train_date[4],
time.strftime("%H:%M:%S", time.localtime(time.time()))
),
data["train_no"] = self.train_no
data["stationTrainCode"] = self.stationTrainCode
data["seatType"] = self.set_type
data["fromStationTelecode"] = self.fromStationTelecode
data["toStationTelecode"] = self.toStationTelecode
data["leftTicket"] = self.leftTicket
data["purpose_codes"] = "ADULT"
data["_json_att"] = ""
return data
def conversion_int(self, str):
return int(str)
def sendGetQueueCountAsync(self):
"""
        Request the queue-count (queueing) endpoint.
:return:
"""
urls = self.session.urls["getQueueCountAsync"]
data = self.data_par()
getQueueCountAsyncResult = self.session.httpClint.send(urls, data)
if getQueueCountAsyncResult.get("status", False) and getQueueCountAsyncResult.get("data", False):
if "status" in getQueueCountAsyncResult and getQueueCountAsyncResult["status"] is True:
if "countT" in getQueueCountAsyncResult["data"]:
ticket_data = getQueueCountAsyncResult["data"]["ticket"]
ticket_split = sum(map(self.conversion_int, ticket_data.split(","))) if ticket_data.find(
",") != -1 else ticket_data
                    if int(ticket_split) == 0:
                        # when the remaining ticket count is 0, add this train to the blacklist
wrapcache.set(key=self.train_no, value=datetime.datetime.now(),
timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60)
print(f"排队失败,当前余票数为{ticket_split}张")
return
print(u"排队成功, 当前余票还剩余: {0} 张".format(ticket_split))
c = confirmSingleForQueueAsys(session=self.session,
passengerTicketStr=self.passengerTicketStr,
oldPassengerStr=self.oldPassengerStr,
result=self.result,)
print(u"验证码提交安全期,等待{}MS".format(self.ifShowPassCodeTime))
time.sleep(self.ifShowPassCodeTime)
c.sendConfirmSingleForQueueAsys()
else:
print(u"排队发现未知错误{0},将此列车 {1}加入小黑屋".format(getQueueCountAsyncResult, self.train_no))
wrapcache.set(key=self.train_no, value=datetime.datetime.now(),
timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60)
elif "messages" in getQueueCountAsyncResult and getQueueCountAsyncResult["messages"]:
print(u"排队异常,错误信息:{0}, 将此列车 {1}加入小黑屋".format(getQueueCountAsyncResult["messages"][0], self.train_no))
wrapcache.set(key=self.train_no, value=datetime.datetime.now(),
timeout=TickerConfig.TICKET_BLACK_LIST_TIME * 60)
else:
if "validateMessages" in getQueueCountAsyncResult and getQueueCountAsyncResult["validateMessages"]:
print(str(getQueueCountAsyncResult["validateMessages"]))
|
Bio/PopGen/GenePop/EasyController.py | lukasz-kozlowski/biopython | 2,856 | 11197321 |
# Copyright 2009 by <NAME> <<EMAIL>>. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Control GenePop through an easier interface.
This interface is less efficient than the standard GenePopController.
"""
from .Controller import GenePopController
from Bio.PopGen import GenePop
class EasyController:
"""Define a class for an easier interface with the GenePop program."""
def __init__(self, fname, genepop_dir=None):
"""Initialize the controller.
genepop_dir is the directory where GenePop is.
The binary should be called Genepop (capital G)
"""
self._fname = fname
self._controller = GenePopController(genepop_dir)
self.__fst_pair_locus = {} # More caches like this needed!
self.__allele_frequency = {} # More caches like this needed!
def get_basic_info(self):
"""Obtain the population list and loci list from the file."""
with open(self._fname) as f:
rec = GenePop.read(f)
return rec.pop_list, rec.loci_list
# 1.3
def test_hw_pop(self, pop_pos, test_type="probability"):
"""Perform Hardy-Weinberg test on the given position."""
if test_type == "deficiency":
hw_res = self._controller.test_pop_hz_deficiency(self._fname)
elif test_type == "excess":
hw_res = self._controller.test_pop_hz_excess(self._fname)
else:
loci_res, hw_res, fisher_full = self._controller.test_pop_hz_prob(
self._fname, ".P"
)
for i in range(pop_pos - 1):
next(hw_res)
return next(hw_res)
# 1.4
def test_hw_global(
self,
test_type="deficiency",
enum_test=True,
dememorization=10000,
batches=20,
iterations=5000,
):
"""Perform Hardy-Weinberg global Heterozygote test."""
if test_type == "deficiency":
pop_res, loc_res, all = self._controller.test_global_hz_deficiency(
self._fname, enum_test, dememorization, batches, iterations
)
else:
pop_res, loc_res, all = self._controller.test_global_hz_excess(
self._fname, enum_test, dememorization, batches, iterations
)
return list(pop_res), list(loc_res), all
# 2.1
def test_ld_all_pair(
self, locus1, locus2, dememorization=10000, batches=20, iterations=5000
):
"""Test for linkage disequilibrium for each pair of loci in each population."""
all_ld = self._controller.test_ld(
self._fname, dememorization, batches, iterations
)[1]
for ld_case in all_ld:
(l1, l2), result = ld_case
if (l1 == locus1 and l2 == locus2) or (l1 == locus2 and l2 == locus1):
return result
def estimate_nm(self):
"""Estimate Nm. Just a simple bridge."""
return self._controller.estimate_nm(self._fname)
def get_heterozygosity_info(self, pop_pos, locus_name):
"""Return the heterozygosity info for a certain locus on a population.
Returns (Expected homozygotes, observed homozygotes,
Expected heterozygotes, observed heterozygotes)
"""
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
pops = list(pop_iter)
return pops[pop_pos][1][locus_name][1]
def get_genotype_count(self, pop_pos, locus_name):
"""Return the genotype counts for a certain population and locus."""
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
pop_iter = list(pop_iter)
return pop_iter[pop_pos][1][locus_name][0]
def get_fis(self, pop_pos, locus_name):
"""Return the Fis for a certain population and locus.
Below CW means Cockerham and Weir and RH means Robertson and Hill.
Returns a pair:
- dictionary [allele] = (repetition count, frequency, Fis CW )
with information for each allele
- a triple with total number of alleles, Fis CW, Fis RH
"""
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
pops = list(pop_iter)
return pops[pop_pos][1][locus_name][2:]
def get_alleles(self, pop_pos, locus_name):
"""Return the alleles for a certain population and locus."""
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
pop_iter = list(pop_iter)
return list(pop_iter[pop_pos][1][locus_name][2].keys())
def get_alleles_all_pops(self, locus_name):
"""Return the alleles for a certain population and locus."""
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
for locus_info in loc_iter:
if locus_info[0] == locus_name:
return locus_info[1]
def get_allele_frequency(self, pop_pos, locus_name):
"""Calculate the allele frequency for a certain locus on a population."""
if len(self.__allele_frequency) == 0:
geno_freqs = self._controller.calc_allele_genotype_freqs(self._fname)
pop_iter, loc_iter = geno_freqs
for locus_info in loc_iter:
if locus_info[0] is None:
self.__allele_frequency[locus_info[0]] = None, None
else:
self.__allele_frequency[locus_info[0]] = locus_info[1:]
info = self.__allele_frequency[locus_name]
pop_name, freqs, total = info[1][pop_pos]
allele_freq = {}
alleles = info[0]
for i, allele in enumerate(alleles):
allele_freq[allele] = freqs[i]
return total, allele_freq
def get_multilocus_f_stats(self):
"""Return the multilocus F stats.
Explain averaging.
Returns Fis(CW), Fst, Fit
"""
return self._controller.calc_fst_all(self._fname)[0]
def get_f_stats(self, locus_name):
"""Return F stats for a locus.
Returns Fis(CW), Fst, Fit, Qintra, Qinter
"""
loci_iter = self._controller.calc_fst_all(self._fname)[1]
for name, fis, fst, fit, qintra, qinter in loci_iter:
if name == locus_name:
return fis, fst, fit, qintra, qinter
def get_avg_fis(self):
"""Calculate identity-base average Fis."""
return self._controller.calc_diversities_fis_with_identity(self._fname)[1]
def get_avg_fst_pair(self):
"""Calculate Allele size-base average Fis for all population pairs."""
return self._controller.calc_fst_pair(self._fname)[1]
def get_avg_fst_pair_locus(self, locus):
"""Calculate Allele size-base average Fis for all population pairs of the given locus."""
if len(self.__fst_pair_locus) == 0:
iter = self._controller.calc_fst_pair(self._fname)[0]
for locus_info in iter:
self.__fst_pair_locus[locus_info[0]] = locus_info[1]
return self.__fst_pair_locus[locus]
def calc_ibd(self, is_diplo=True, stat="a", scale="Log", min_dist=0.00001):
"""Calculate isolation by distance statistics for Diploid or Haploid."""
if is_diplo:
return self._controller.calc_ibd_diplo(self._fname, stat, scale, min_dist)
else:
return self._controller.calc_ibd_haplo(self._fname, stat, scale, min_dist)
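# Minimal usage sketch (assumes a GenePop-format input file "example.gen" and a
# locally installed GenePop binary; both names are illustrative assumptions).
if __name__ == "__main__":
    ctrl = EasyController("example.gen")
    pops, loci = ctrl.get_basic_info()
    print("Populations:", pops)
    print("Loci:", loci)
    print("Multilocus F-stats (Fis, Fst, Fit):", ctrl.get_multilocus_f_stats())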
|
neo/rawio/neuralynxrawio/ncssections.py | yger/python-neo | 199 | 11197387 |
import math
class NcsSections:
"""
Contains information regarding the contiguous sections of records in an Ncs file.
Methods of NcsSectionsFactory perform parsing of this information from an Ncs file and
produce these where the sections are discontiguous in time and in temporal order.
TODO: This class will likely need __eq__, __ne__, and __hash__ to be useful in
more sophisticated segment construction algorithms.
"""
def __init__(self):
self.sects = []
self.sampFreqUsed = 0 # actual sampling frequency of samples
self.microsPerSampUsed = 0 # microseconds per sample
class NcsSection:
"""
Information regarding a single contiguous section or group of records in an Ncs file.
Model is that times are closed on the left and open on the right. Record
numbers are closed on both left and right, that is, inclusive of the last record.
endTime should never be set less than startTime for comparison functions to work
properly, though this is not enforced.
"""
_RECORD_SIZE = 512 # nb sample per signal record
    def __init__(self, sb=-1, st=-1, eb=-1, et=-1):
        self.startRec = sb  # index of starting record
        self.startTime = st  # start time of first record
        self.endRec = eb  # index of last record (inclusive)
        self.endTime = et  # end time of last record, that is, the end time of the last
        # sampling period contained in the last record of the section
def before_time(self, rhb):
"""
Determine if this section is completely before another section in time.
"""
return self.endTime < rhb.startTime
def overlaps_time(self, rhb):
"""
Determine if this section overlaps another in time.
"""
return self.startTime <= rhb.endTime and self.endTime >= rhb.startTime
def after_time(self, rhb):
"""
Determine if this section is completely after another section in time.
"""
return self.startTime >= rhb.endTime
class NcsSectionsFactory:
"""
Class for factory methods which perform parsing of contiguous sections of records
in Ncs files.
Model for times is that times are rounded to nearest microsecond. Times
from start of a sample until just before the next sample are included,
that is, closed lower bound and open upper bound on intervals. A
channel with no samples is empty and contains no time intervals.
Moved here since algorithm covering all 3 header styles and types used is
more complicated.
"""
_maxGapSampFrac = 0.2 # maximum fraction of a sampling interval between predicted
# and actual record timestamps still considered within one section
@staticmethod
def get_freq_for_micros_per_samp(micros):
"""
Compute fractional sampling frequency, given microseconds per sample.
"""
return 1e6 / micros
@staticmethod
def get_micros_per_samp_for_freq(sampFr):
"""
Calculate fractional microseconds per sample, given the sampling frequency (Hz).
"""
return 1e6 / sampFr
@staticmethod
def calc_sample_time(sampFr, startTime, posn):
"""
Calculate time rounded to microseconds for sample given frequency,
start time, and sample position.
"""
return round(startTime + NcsSectionsFactory.get_micros_per_samp_for_freq(sampFr) * posn)
@staticmethod
def _parseGivenActualFrequency(ncsMemMap, ncsSects, chanNum, reqFreq, blkOnePredTime):
"""
Parse sections in memory mapped file when microsPerSampUsed and sampFreqUsed are known,
filling in an NcsSections object.
PARAMETERS
ncsMemMap:
memmap of Ncs file
ncsSections:
NcsSections with actual sampFreqUsed correct, first NcsSection with proper startSect
and startTime already added.
chanNum:
channel number that should be present in all records
reqFreq:
rounded frequency that all records should contain
blkOnePredTime:
predicted starting time of second record in block
RETURN
NcsSections object with block locations marked
"""
startBlockPredTime = blkOnePredTime
blkLen = 0
curBlock = ncsSects.sects[0]
for recn in range(1, ncsMemMap.shape[0]):
if ncsMemMap['channel_id'][recn] != chanNum or \
ncsMemMap['sample_rate'][recn] != reqFreq:
raise IOError('Channel number or sampling frequency changed in ' +
'records within file')
predTime = NcsSectionsFactory.calc_sample_time(ncsSects.sampFreqUsed,
startBlockPredTime, blkLen)
ts = ncsMemMap['timestamp'][recn]
nValidSamps = ncsMemMap['nb_valid'][recn]
if ts != predTime:
curBlock.endRec = recn - 1
curBlock.endTime = predTime
curBlock = NcsSection(recn, ts, -1, -1)
ncsSects.sects.append(curBlock)
startBlockPredTime = NcsSectionsFactory.calc_sample_time(
ncsSects.sampFreqUsed, ts, nValidSamps)
blkLen = 0
else:
blkLen += nValidSamps
curBlock.endRec = ncsMemMap.shape[0] - 1
endTime = NcsSectionsFactory.calc_sample_time(ncsSects.sampFreqUsed,
startBlockPredTime,
blkLen)
curBlock.endTime = endTime
return ncsSects
@staticmethod
def _buildGivenActualFrequency(ncsMemMap, actualSampFreq, reqFreq):
"""
Build NcsSections object for file given actual sampling frequency.
Requires that frequency in each record agrees with requested frequency. This is
normally obtained by rounding the header frequency; however, this value may be different
from the rounded actual frequency used in the recording, since the underlying
requirement in older Ncs files was that the number of microseconds per sample in the
records is the inverse of the sampling frequency stated in the header truncated to
whole microseconds.
PARAMETERS
ncsMemMap:
memmap of Ncs file
actualSampFreq:
actual sampling frequency used
reqFreq:
frequency to require in records
RETURN:
NcsSections object
"""
# check frequency in first record
if ncsMemMap['sample_rate'][0] != reqFreq:
raise IOError("Sampling frequency in first record doesn't agree with header.")
chanNum = ncsMemMap['channel_id'][0]
nb = NcsSections()
nb.sampFreqUsed = actualSampFreq
nb.microsPerSampUsed = NcsSectionsFactory.get_micros_per_samp_for_freq(actualSampFreq)
# check if file is one block of records, which is often the case, and avoid full parse
lastBlkI = ncsMemMap.shape[0] - 1
ts0 = ncsMemMap['timestamp'][0]
nb0 = ncsMemMap['nb_valid'][0]
predLastBlockStartTime = NcsSectionsFactory.calc_sample_time(actualSampFreq, ts0,
NcsSection._RECORD_SIZE *
lastBlkI)
lts = ncsMemMap['timestamp'][lastBlkI]
lnb = ncsMemMap['nb_valid'][lastBlkI]
if ncsMemMap['channel_id'][lastBlkI] == chanNum and \
ncsMemMap['sample_rate'][lastBlkI] == reqFreq and \
lts == predLastBlockStartTime:
lastBlkEndTime = NcsSectionsFactory.calc_sample_time(actualSampFreq, lts, lnb)
curBlock = NcsSection(0, ts0, lastBlkI, lastBlkEndTime)
nb.sects.append(curBlock)
return nb
# otherwise need to scan looking for breaks
else:
blkOnePredTime = NcsSectionsFactory.calc_sample_time(actualSampFreq, ts0, nb0)
curBlock = NcsSection(0, ts0, -1, -1)
nb.sects.append(curBlock)
return NcsSectionsFactory._parseGivenActualFrequency(ncsMemMap, nb, chanNum, reqFreq,
blkOnePredTime)
@staticmethod
def _parseForMaxGap(ncsMemMap, ncsSects, maxGapLen):
"""
Parse blocks of records from file, allowing a maximum gap in timestamps between records
in sections. Estimates frequency being used based on timestamps.
PARAMETERS
ncsMemMap:
memmap of Ncs file
ncsSects:
NcsSections object with sampFreqUsed set to nominal frequency to use in computing time
for samples (Hz)
maxGapLen:
maximum difference within a block between predicted time of start of record and
recorded time
RETURN:
NcsSections object with sampFreqUsed and microsPerSamp set based on estimate from
largest block
"""
# track frequency of each block and use estimate with longest block
maxBlkLen = 0
maxBlkFreqEstimate = 0
# Parse the record sequence, finding blocks of continuous time with no more than
# maxGapLength and same channel number
chanNum = ncsMemMap['channel_id'][0]
startBlockTime = ncsMemMap['timestamp'][0]
blkLen = ncsMemMap['nb_valid'][0]
lastRecTime = startBlockTime
lastRecNumSamps = blkLen
recFreq = ncsMemMap['sample_rate'][0]
curBlock = NcsSection(0, startBlockTime, -1, -1)
ncsSects.sects.append(curBlock)
for recn in range(1, ncsMemMap.shape[0]):
if ncsMemMap['channel_id'][recn] != chanNum or \
ncsMemMap['sample_rate'][recn] != recFreq:
raise IOError('Channel number or sampling frequency changed in ' +
'records within file')
predTime = NcsSectionsFactory.calc_sample_time(ncsSects.sampFreqUsed, lastRecTime,
lastRecNumSamps)
ts = ncsMemMap['timestamp'][recn]
nb = ncsMemMap['nb_valid'][recn]
if abs(ts - predTime) > maxGapLen:
curBlock.endRec = recn - 1
curBlock.endTime = predTime
curBlock = NcsSection(recn, ts, -1, -1)
ncsSects.sects.append(curBlock)
if blkLen > maxBlkLen:
maxBlkLen = blkLen
maxBlkFreqEstimate = (blkLen - lastRecNumSamps) * 1e6 / \
(lastRecTime - startBlockTime)
startBlockTime = ts
blkLen = nb
else:
blkLen += nb
lastRecTime = ts
lastRecNumSamps = nb
if blkLen > maxBlkLen:
maxBlkFreqEstimate = (blkLen - lastRecNumSamps) * 1e6 / \
(lastRecTime - startBlockTime)
curBlock.endRec = ncsMemMap.shape[0] - 1
endTime = NcsSectionsFactory.calc_sample_time(ncsSects.sampFreqUsed, lastRecTime,
lastRecNumSamps)
curBlock.endTime = endTime
ncsSects.sampFreqUsed = maxBlkFreqEstimate
ncsSects.microsPerSampUsed = NcsSectionsFactory.get_micros_per_samp_for_freq(
maxBlkFreqEstimate)
return ncsSects
@staticmethod
def _buildForMaxGap(ncsMemMap, nomFreq):
"""
Determine sections of records in memory mapped Ncs file given a nominal frequency of
the file, using the default values of frequency tolerance and maximum gap between blocks.
PARAMETERS
ncsMemMap:
memmap of Ncs file
nomFreq:
nominal sampling frequency used, normally from header of file
RETURN:
NcsSections object
"""
nb = NcsSections()
numRecs = ncsMemMap.shape[0]
if numRecs < 1:
return nb
chanNum = ncsMemMap['channel_id'][0]
ts0 = ncsMemMap['timestamp'][0]
lastBlkI = numRecs - 1
lts = ncsMemMap['timestamp'][lastBlkI]
lcid = ncsMemMap['channel_id'][lastBlkI]
lnb = ncsMemMap['nb_valid'][lastBlkI]
lsr = ncsMemMap['sample_rate'][lastBlkI]
# check if file is one block of records, with exact timestamp match, which may be the case
numSampsForPred = NcsSection._RECORD_SIZE * lastBlkI
predLastBlockStartTime = NcsSectionsFactory.calc_sample_time(nomFreq, ts0, numSampsForPred)
freqInFile = math.floor(nomFreq)
if lts - predLastBlockStartTime == 0 and lcid == chanNum and lsr == freqInFile:
endTime = NcsSectionsFactory.calc_sample_time(nomFreq, lts, lnb)
curBlock = NcsSection(0, ts0, lastBlkI, endTime)
nb.sects.append(curBlock)
nb.sampFreqUsed = numSampsForPred / (lts - ts0) * 1e6
nb.microsPerSampUsed = NcsSectionsFactory.get_micros_per_samp_for_freq(nb.sampFreqUsed)
# otherwise parse records to determine blocks using default maximum gap length
else:
nb.sampFreqUsed = nomFreq
nb.microsPerSampUsed = NcsSectionsFactory.get_micros_per_samp_for_freq(nb.sampFreqUsed)
maxGapToAllow = round(NcsSectionsFactory._maxGapSampFrac * 1e6 / nomFreq)
nb = NcsSectionsFactory._parseForMaxGap(ncsMemMap, nb, maxGapToAllow)
return nb
@staticmethod
def build_for_ncs_file(ncsMemMap, nlxHdr):
"""
Build an NcsSections object for an NcsFile, given as a memmap and NlxHeader,
handling gap detection appropriately given the file type as specified by the header.
PARAMETERS
ncsMemMap:
memory map of file
nlxHdr:
NlxHeader from corresponding file.
RETURNS
An NcsSections corresponding to the provided ncsMemMap and nlxHdr
"""
acqType = nlxHdr.type_of_recording()
# Old Neuralynx style with truncated whole microseconds for actual sampling. This
# restriction arose from the sampling being based on a master 1 MHz clock.
if acqType == "PRE4":
freq = nlxHdr['sampling_rate']
microsPerSampUsed = math.floor(NcsSectionsFactory.get_micros_per_samp_for_freq(freq))
sampFreqUsed = NcsSectionsFactory.get_freq_for_micros_per_samp(microsPerSampUsed)
nb = NcsSectionsFactory._buildGivenActualFrequency(ncsMemMap, sampFreqUsed,
math.floor(freq))
nb.sampFreqUsed = sampFreqUsed
nb.microsPerSampUsed = microsPerSampUsed
# digital lynx style with fractional frequency and micros per samp determined from
# block times
elif acqType == "DIGITALLYNX" or acqType == "DIGITALLYNXSX":
nomFreq = nlxHdr['sampling_rate']
nb = NcsSectionsFactory._buildForMaxGap(ncsMemMap, nomFreq)
# BML style with fractional frequency and micros per samp
elif acqType == "BML":
sampFreqUsed = nlxHdr['sampling_rate']
nb = NcsSectionsFactory._buildGivenActualFrequency(ncsMemMap, sampFreqUsed,
math.floor(sampFreqUsed))
else:
raise TypeError("Unknown Ncs file type from header.")
return nb
@staticmethod
def _verifySectionsStructure(ncsMemMap, ncsSects):
"""
Check that the record structure and timestamps for the ncsMemMap
agrees with that in ncsSects.
Provides a more rapid verification of structure than building a new NcsSections
and checking equality.
PARAMETERS
ncsMemMap:
memmap of file to be checked
ncsSects
existing block structure to be checked
RETURN:
true if all timestamps and block record starts and stops agree, otherwise false.
"""
for blki in range(0, len(ncsSects.sects)):
if ncsMemMap['timestamp'][ncsSects.sects[blki].startRec] != \
ncsSects.sects[blki].startTime:
return False
ets = ncsMemMap['timestamp'][ncsSects.sects[blki].endRec]
enb = ncsMemMap['nb_valid'][ncsSects.sects[blki].endRec]
endTime = NcsSectionsFactory.calc_sample_time(ncsSects.sampFreqUsed, ets, enb)
if endTime != ncsSects.sects[blki].endTime:
return False
return True
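# Minimal sanity sketch for the timing helpers and NcsSection comparisons above
# (the 32 kHz sampling frequency and the timestamps are illustrative assumptions).
if __name__ == "__main__":
    freq = 32000.0
    print(NcsSectionsFactory.get_micros_per_samp_for_freq(freq))   # 31.25
    print(NcsSectionsFactory.calc_sample_time(freq, 100, 512))     # 16100
    sect_a = NcsSection(0, 0, 9, 16000)
    sect_b = NcsSection(10, 16000, 19, 32000)
    print(sect_a.before_time(sect_b), sect_a.overlaps_time(sect_b))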
|
graphgallery/nn/models/pytorch/graphat/utils.py | EdisonLeeeee/GraphGallery | 300 | 11197442 |
import torch.nn.functional as F
def get_normalized_vector(d):
d = d / (1e-12 + d.abs().max(dim=1).values.view(-1, 1))
d = d / (1e-6 + d.pow(2.0).sum(dim=1).view(-1, 1))
return d
def kld_with_logits(logit_q, logit_p):
q = F.softmax(logit_q, dim=-1)
cross_entropy = softmax_cross_entropy_with_logits(logits=logit_p, labels=q)
entropy = softmax_cross_entropy_with_logits(logits=logit_q, labels=q)
return (cross_entropy - entropy).mean()
def neighbor_kld_with_logit(neighbor_logits, p_logit):
dist = 0.
for neighbor_logit in neighbor_logits:
dist += kld_with_logits(neighbor_logit, p_logit)
return dist
def softmax_cross_entropy_with_logits(labels, logits, dim=-1):
return (-labels * F.log_softmax(logits, dim=dim)).sum(dim=dim)
def l2_normalize(d):
return F.normalize(d, p=2, dim=1)
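# Minimal sanity sketch for the helpers above (random tensors; the shapes are
# illustrative assumptions). torch is imported here because the module itself
# only imports torch.nn.functional.
if __name__ == "__main__":
    import torch
    logit_q = torch.randn(4, 3)
    logit_p = torch.randn(4, 3)
    print(kld_with_logits(logit_q, logit_p))                   # scalar divergence
    print(neighbor_kld_with_logit([logit_q, logit_q], logit_p))
    print(l2_normalize(torch.randn(4, 3)).norm(dim=1))         # rows have norm ~ 1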
|
cbox/__main__.py | shmuelamar/cbox | 164 | 11197445 |
import argparse
import ast
import sys
from sys import stdin, stdout, stderr
import cbox
from cbox import concurrency
__all__ = ('get_inline_func', 'main', )
def _inline2func(inline_str, inline_globals, **stream_kwargs):
if stream_kwargs.get('worker_type') != concurrency.ASYNCIO:
@cbox.stream(**stream_kwargs)
def inline(s):
return eval(inline_str, inline_globals, locals())
else:
@cbox.stream(**stream_kwargs)
async def inline(s):
return eval(inline_str, inline_globals, locals())
return inline
def _import_inline_modules(modules=None):
inline_globals = globals()
if not modules:
return inline_globals
for m in modules.split(','):
inline_globals[m] = __import__(m, inline_globals)
return inline_globals
def _is_compilable(s):
"""returns True if the string is compilable, False otherwise"""
try:
ast.parse(s)
return True
except Exception:
return False
def _parse_args(argv=None):
parser = argparse.ArgumentParser(
description='runs the inline statement using eval() for each input on '
'stdin and outputs the results to stdout',
)
parser.add_argument('inline')
parser.add_argument(
'-m', '--modules', default=None,
help='comma separated list of modules to import',
)
parser.add_argument(
'-t', '--input-type', default='lines',
help='defines how the input stream is split',
choices=('lines', 'chars', 'raw')
)
parser.add_argument(
'-w', '--worker-type', default='simple',
choices=('simple', 'thread', 'asyncio'),
help='worker type to use for concurrency',
)
parser.add_argument(
'-c', '--max-workers', default=1, type=int,
help='how many max workers (i.e. threads) to run in parallel. '
'only affect if --worker-type is thread',
)
parser.add_argument(
'--workers-window', default=100, type=int,
help='how many tasks to execute in parallel before waiting for them '
'to be completed. only affect if --worker-type is not simple.',
)
return parser.parse_args(argv)
def get_inline_func(inline_str, modules=None, **stream_kwargs):
"""returns a function decorated by `cbox.stream` decorator.
:param str inline_str: the inline function to execute,
can use `s` - local variable as the input line/char/raw
(according to `input_type` param).
:param str modules: comma separated list of modules to import before
running the inline function.
:param dict stream_kwargs: optional arguments to `cbox.stream` decorator
:rtype: callable
"""
if not _is_compilable(inline_str):
raise ValueError(
'cannot compile the inline expression - "%s"' % inline_str
)
inline_globals = _import_inline_modules(modules)
func = _inline2func(inline_str, inline_globals, **stream_kwargs)
return func
def main(argv=None, input_stream=stdin, output_stream=stdout,
error_stream=stderr):
"""runs inline function - more info run `cbox --help`"""
args = _parse_args(argv)
args_dict = args.__dict__.copy()
inline_str = args_dict.pop('inline')
modules = args_dict.pop('modules')
func = get_inline_func(inline_str, modules, **args_dict)
return cbox.main(
func=func, argv=[], input_stream=input_stream,
output_stream=output_stream, error_stream=error_stream, exit=False,
)
if __name__ == '__main__': # pragma: nocover
sys.exit(main())
|
idaes/generic_models/properties/core/eos/tests/test_enrtl_verification_2_solvent.py | carldlaird/idaes-pse | 112 | 11197451 |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for eNRTL methods
Reference:
[1] <NAME>. and <NAME>., Symmetric Electrolyte Nonrandom Two-Liquid Activity
Coefficient Model, Ind. Eng. Chem. Res., 2009, Vol. 48, pgs. 7788–7797
Figures digitized using WebPlotDigitizer, https://apps.automeris.io/wpd/,
May 2021
Author: <NAME>
"""
import pytest
from math import log
from pyomo.environ import (ConcreteModel,
units as pyunits,
value)
from idaes.core import (AqueousPhase,
Solvent,
Apparent,
Anion,
Cation)
from idaes.generic_models.properties.core.eos.enrtl import ENRTL
from idaes.generic_models.properties.core.generic.generic_property import (
GenericParameterBlock, StateIndex)
from idaes.generic_models.properties.core.state_definitions import FTPx
from idaes.generic_models.properties.core.pure.electrolyte import \
relative_permittivity_constant
def rho_H2O(b, *args, **kwargs):
return 1000/18e-3*pyunits.mol/pyunits.m**3
def rho_MeOH(b, *args, **kwargs):
return 792/32e-3*pyunits.mol/pyunits.m**3
def rho_EtOH(b, *args, **kwargs):
return 789.45/46e-3*pyunits.mol/pyunits.m**3
configuration = {
"components": {
"H2O": {"type": Solvent,
"dens_mol_liq_comp": rho_H2O,
"relative_permittivity_liq_comp":
relative_permittivity_constant,
"parameter_data": {
"mw": (18E-3, pyunits.kg/pyunits.mol),
"relative_permittivity_liq_comp": 78.54}},
"MeOH": {"type": Solvent,
"dens_mol_liq_comp": rho_MeOH,
"relative_permittivity_liq_comp":
relative_permittivity_constant,
"parameter_data": {
"mw": (32E-3, pyunits.kg/pyunits.mol),
"relative_permittivity_liq_comp": 32.6146}},
"EtOH": {"type": Solvent,
"dens_mol_liq_comp": rho_EtOH,
"relative_permittivity_liq_comp":
relative_permittivity_constant,
"parameter_data": {
"mw": (46E-3, pyunits.kg/pyunits.mol),
"relative_permittivity_liq_comp": 24.113}},
"NaBr": {"type": Apparent,
"dissociation_species": {"Na+": 1, "Br-": 1}},
"Na+": {"type": Cation,
"charge": +1},
"Br-": {"type": Anion,
"charge": -1}},
"phases": {
"Liq": {"type": AqueousPhase,
"equation_of_state": ENRTL}},
"base_units": {"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K},
"state_definition": FTPx,
"state_components": StateIndex.true,
"pressure_ref": 1e5,
"temperature_ref": 300,
"parameter_data": {
"Liq_alpha": {
("H2O", "EtOH"): 0.3031,
("H2O", "MeOH"): 0.2994,
("MeOH", "EtOH"): 0.3356,
("H2O", "Na+, Br-"): 0.2,
("MeOH", "Na+, Br-"): 0.2,
("EtOH", "Na+, Br-"): 0.1},
"Liq_tau": {
("H2O", "MeOH"): 1.4265,
("MeOH", "H2O"): -0.42864,
("H2O", "EtOH"): 2.2485,
("EtOH", "H2O"): -0.18514,
("MeOH", "EtOH"): -0.04394,
("EtOH", "MeOH"): 0.02147,
("H2O", "Na+, Br-"): 9.527,
("Na+, Br-", "H2O"): -4.790,
("MeOH", "Na+, Br-"): 5.910,
("Na+, Br-", "MeOH"): -3.863,
("EtOH", "Na+, Br-"): 6.118,
("Na+, Br-", "EtOH"): -4.450}}}
class Test_H2O_MeOH_EtOH(object):
# Test case for having parameters for a second salt with 0 concentration
# Results should be the same as for the single salt case
@pytest.fixture(scope="class")
def model(self):
m = ConcreteModel()
m.params = GenericParameterBlock(default=configuration)
m.state = m.params.build_state_block([1])
# Need to set a value of T for checking expressions later
m.state[1].temperature.set_value(298.15)
return m
@pytest.mark.unit
def test_H2O_EtOH(self, model):
# Using 0 results in division by zero errors
for k in model.state[1].mole_frac_phase_comp:
model.state[1].mole_frac_phase_comp[k].set_value(1e-12)
# Data digitized from Fig 5 [1]
# Form %H2O in solvent by mass, %NaBr in mix by mass
data = {1.90023164606316: 4.07279483333809,
2.52553840508245: 4.60341051665865,
3.07268181922432: 5.10150067253983,
3.6198252333662: 5.70745038179749,
4.3232953372629: 6.59686802660166,
5.18309213091442: 7.50180397873668,
6.35554230407559: 8.65548586720136,
10.420036237701: 11.7202417243546,
12.2959565147588: 12.9721116679189,
14.0155501020619: 14.2110461502489,
15.9696337239972: 15.5348240281863,
18.0018806908098: 16.9196471596141,
20.1122910024999: 18.3011439277624,
21.9658788953071: 19.4438459565312,
23.4174838716019: 20.2934710794969,
25.0365817297768: 20.9436527661739,
26.9906653517121: 21.8041782373378,
29.1792390082796: 23.0611962099913,
30.7425059058279: 23.7038456814246,
32.8529162175179: 24.7168170838022,
35.1196532189629: 25.7943190088345,
37.230063530653: 26.6777599200545,
39.4968005320979: 27.3055144261466,
40.9037407398913: 27.8418581569274,
42.8578243618266: 28.7814740254471,
45.1245613632715: 29.6986205793095,
47.2349716749616: 30.2248062217835,
53.01905919589: 32.5290639405499,
55.2076328524575: 33.3573486018869,
57.3180431641476: 33.9680033246278,
62.8520079814683: 36.2484067677242,
65.0562143070113: 36.6504595612375,
66.9929282967516: 37.5059452940427,
73.0288754845073: 39.062432020278,
75.1392857961974: 39.8922616615831,
76.780716038623: 40.8064682302478,
83.7372537327126: 42.7720496832428,
85.4568473200156: 43.2448153854284,
87.1764409073187: 43.3361172522717,
88.1144010458476: 43.9206762302395,
92.6478750487374: 44.9451625065632,
94.3674686360405: 45.3523294496401,
96.0870622233435: 45.6278737343614,
97.8066558106466: 45.9958029050737}
for x, g in data.items():
n_salt = g/102.9e-3 # MW of NaBr
n_H2O = (100-g)*(x/100)/18e-3
n_EtOH = (100-g)*((100-x)/100)/46e-3
n_total = n_H2O + n_EtOH + 2*n_salt
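            # Conversion sketch: x is wt% H2O in the salt-free solvent and g is
            # wt% NaBr in the whole mixture, so per 100 g of mixture there are
            # g/0.1029 mol NaBr, (100-g)*(x/100)/0.018 mol H2O and
            # (100-g)*((100-x)/100)/0.046 mol EtOH; n_total counts the salt
            # twice because each NaBr dissociates into one Na+ and one Br-.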
model.state[1].mole_frac_phase_comp["Liq", "H2O"].set_value(
n_H2O/n_total)
model.state[1].mole_frac_phase_comp["Liq", "EtOH"].set_value(
n_EtOH/n_total)
model.state[1].mole_frac_phase_comp["Liq", "Na+"].set_value(
n_salt/n_total)
model.state[1].mole_frac_phase_comp["Liq", "Br-"].set_value(
n_salt/n_total)
# Check NaBr solubility product
# Note the Ksp given in [1] is actually ln(Ksp)
ln_Ksp = value(
model.state[1].Liq_log_gamma["Na+"] +
log(value(model.state[1].mole_frac_phase_comp["Liq", "Na+"])) +
model.state[1].Liq_log_gamma["Br-"] +
log(value(model.state[1].mole_frac_phase_comp["Liq", "Br-"])))
assert pytest.approx(-7.157, rel=2e-2) == ln_Ksp
@pytest.mark.unit
def test_H2O_MeOH(self, model):
# Using 0 results in division by zero errors
for k in model.state[1].mole_frac_phase_comp:
model.state[1].mole_frac_phase_comp[k].set_value(1e-12)
# Data digitized from Fig 5 [1]
# Form %H2O in solvent by mass, %NaBr in mix by mass
data = {6.82452237334007: 19.1520757612024,
8.59622485722806: 19.9360728637851,
10.2637095479462: 20.7022143982515,
11.9833031352492: 21.984026817603,
13.7028967225522: 22.8450059962131,
15.4224903098553: 23.6114376640682,
17.1420838971583: 24.3195929462338,
18.8616774844614: 24.9062851989822,
20.5812710717644: 25.4301958788082,
22.3008646590675: 26.3135810224415,
24.0204582463705: 26.9298015887213,
25.7400518336735: 27.5604530219357,
27.4596454209766: 28.261495638838,
29.1792390082796: 28.7765060560814,
30.8988325955827: 29.4697504108841,
32.6184261828857: 30.1301893048218,
34.3380197701888: 30.7447999546189,
36.0576133574918: 31.3102035876584,
37.7772069447949: 31.91742945333,
39.4968005320979: 32.732522595915,
41.2163941194009: 32.803533475634,
42.935987706704: 33.4957645464418,
44.655581294007: 33.9441655334417,
46.3751748813101: 34.4833831285068,
48.0947684686131: 34.9794442095881,
49.8143620559162: 36.0640561473066,
51.5339556432192: 35.8727337255362,
53.2535492305222: 36.3672811244236,
54.9731428178253: 36.8359756714104,
56.6927364051283: 37.3474811618958,
58.4123299924314: 37.6614971193476,
60.1319235797344: 38.6004592649369,
61.8515171670375: 38.5624551461233,
63.5711107543405: 38.9441861952893,
65.2907043416436: 39.2909739234897,
67.0102979289466: 39.6603783446311,
68.7298915162496: 40.0924508332419,
70.4494851035527: 40.890132583986,
72.1690786908557: 40.6091546532659,
73.6541822435265: 40.9707684873752,
81.8613334556547: 42.0336203709758,
83.5809270429577: 42.4706249979091,
85.3005206302608: 42.7855500518895,
87.0201142175638: 43.2516691324203,
88.7397078048669: 43.78176630887,
92.178894979473: 44.5269393858065,
93.898488566776: 44.7467835048108,
95.618082154079: 45.0120297029858,
97.3376757413821: 45.4128499806034,
98.9009426389303: 45.5991226308649}
for x, g in data.items():
n_salt = g/102.9e-3 # MW of NaBr
n_H2O = (100-g)*(x/100)/18e-3
n_MeOH = (100-g)*((100-x)/100)/32e-3
n_total = n_H2O + n_MeOH + 2*n_salt
model.state[1].mole_frac_phase_comp["Liq", "H2O"].set_value(
n_H2O/n_total)
model.state[1].mole_frac_phase_comp["Liq", "MeOH"].set_value(
n_MeOH/n_total)
model.state[1].mole_frac_phase_comp["Liq", "Na+"].set_value(
n_salt/n_total)
model.state[1].mole_frac_phase_comp["Liq", "Br-"].set_value(
n_salt/n_total)
# Check NaBr solubility product
# Note the Ksp given in [1] is actually ln(Ksp)
ln_Ksp = value(
model.state[1].Liq_log_gamma["Na+"] +
log(value(model.state[1].mole_frac_phase_comp["Liq", "Na+"])) +
model.state[1].Liq_log_gamma["Br-"] +
log(value(model.state[1].mole_frac_phase_comp["Liq", "Br-"])))
assert pytest.approx(-7.157, rel=2.5e-2) == ln_Ksp
@pytest.mark.unit
def test_MeOH_EtOH(self, model):
# Using 0 results in division by zero errors
for k in model.state[1].mole_frac_phase_comp:
model.state[1].mole_frac_phase_comp[k].set_value(1e-12)
# Data digitized from Fig 5 [1]
# Form %MeOH in solvent by mass, %NaBr in mix by mass
data = {2.71536367121331: 2.73554829623076,
4.28421366482421: 2.86945960942034,
6.08755369306734: 3.02744863943992,
7.84064585674641: 3.18919593486594,
13.1036444118254: 3.73345296717809,
15.2214988515991: 3.93266625747611,
17.3765739317906: 4.14197504888717,
23.0824981078416: 4.76850518869678,
25.2264069959077: 4.97587135803405,
26.9125020068347: 5.17220110227576,
28.1631155248733: 5.35033381027391,
33.2883977104064: 5.88583802085873,
35.6146877365198: 6.19031092340952,
37.7325421762935: 6.44369463450957,
43.293305854715: 7.09823866325609,
45.5488766640346: 7.40118664900166,
47.4381963716429: 7.60676829681603,
53.2088844620209: 8.35096922885007,
55.3974581185884: 8.66033648961119,
57.7423584649107: 8.94647646211022,
64.8217242723791: 9.94790728412982,
67.4792779982111: 10.3476934868586,
73.0623740608833: 11.102759854227,
74.7484690718103: 11.3459914299202,
76.6578879252442: 11.5676617376356,
83.0672822051919: 12.4888362818425,
84.7533772161189: 12.7303712917151,
86.9084522963104: 13.0824620221641,
93.3178465762581: 13.9936939870009,
95.5734173855777: 14.3668192656925,
97.3376757413821: 14.6361763117594}
for x, g in data.items():
n_salt = g/102.9e-3 # MW of NaBr
n_MeOH = (100-g)*(x/100)/32e-3
n_EtOH = (100-g)*((100-x)/100)/46e-3
n_total = n_MeOH + n_EtOH + 2*n_salt
model.state[1].mole_frac_phase_comp["Liq", "MeOH"].set_value(
n_MeOH/n_total)
model.state[1].mole_frac_phase_comp["Liq", "EtOH"].set_value(
n_EtOH/n_total)
model.state[1].mole_frac_phase_comp["Liq", "Na+"].set_value(
n_salt/n_total)
model.state[1].mole_frac_phase_comp["Liq", "Br-"].set_value(
n_salt/n_total)
# Check NaBr solubility product
# Note the Ksp given in [1] is actually ln(Ksp)
ln_Ksp = value(
model.state[1].Liq_log_gamma["Na+"] +
log(value(model.state[1].mole_frac_phase_comp["Liq", "Na+"])) +
model.state[1].Liq_log_gamma["Br-"] +
log(value(model.state[1].mole_frac_phase_comp["Liq", "Br-"])))
assert pytest.approx(-7.157, rel=2.3e-2) == ln_Ksp
|
kivy/tools/packaging/cython_cfg.py | Galland/kivy | 13,889 | 11197453 | import configparser
from os.path import join, dirname
import textwrap
__all__ = ('get_cython_versions', 'get_cython_msg')
def get_cython_versions(setup_cfg=''):
_cython_config = configparser.ConfigParser()
if setup_cfg:
_cython_config.read(setup_cfg)
else:
_cython_config.read(
join(dirname(__file__), '..', '..', '..', 'setup.cfg'))
cython_min = _cython_config['kivy']['cython_min']
cython_max = _cython_config['kivy']['cython_max']
cython_unsupported = _cython_config['kivy']['cython_exclude'].split(',')
# ref https://github.com/cython/cython/issues/1968
cython_requires = (
'cython>={min_version},<={max_version},{exclusion}'.format(
min_version=cython_min,
max_version=cython_max,
exclusion=','.join('!=%s' % excl for excl in cython_unsupported),
)
)
return cython_requires, cython_min, cython_max, cython_unsupported
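# Illustrative example (hypothetical setup.cfg values, not read from the file):
# with cython_min='0.24', cython_max='0.29.21' and cython_exclude='0.27,0.27.2',
# the first returned value would be 'cython>=0.24,<=0.29.21,!=0.27,!=0.27.2'.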
def get_cython_msg():
cython_requires, cython_min, cython_max, cython_unsupported = \
get_cython_versions()
cython_unsupported_append = '''
Please note that the following versions of Cython are not supported
at all: {}'''.format(', '.join(map(str, cython_unsupported)))
cython_min_msg = textwrap.dedent('''
This version of Cython is not compatible with Kivy. Please upgrade to
at least version {0}, preferably the newest supported version {1}.
If your platform provides a Cython package, make sure you have upgraded
to the newest version. If the newest version available is still too low,
please remove it and install the newest supported Cython via pip:
pip install -I "{3}"{2}
'''.format(cython_min, cython_max,
cython_unsupported_append if cython_unsupported else '',
cython_requires))
cython_max_msg = textwrap.dedent('''
This version of Cython is untested with Kivy. While this version may
work perfectly fine, it is possible that you may experience issues.
Please downgrade to a supported version, or update cython_max in
setup.cfg to your version of Cython. It is best to use the newest
supported version, {1}, but the minimum supported version is {0}.
If your platform provides a Cython package, check if you can downgrade
to a supported version. Otherwise, uninstall the platform package and
install Cython via pip:
pip install -I "{3}"{2}
'''.format(cython_min, cython_max,
cython_unsupported_append if cython_unsupported else '',
cython_requires))
cython_unsupported_msg = textwrap.dedent('''
This version of Cython suffers from known bugs and is unsupported.
Please install the newest supported version, {1}, if possible, but
the minimum supported version is {0}.
If your platform provides a Cython package, check if you can install
a supported version. Otherwise, uninstall the platform package and
install Cython via pip:
pip install -I "{3}"{2}
'''.format(cython_min, cython_max, cython_unsupported_append,
cython_requires))
return cython_min_msg, cython_max_msg, cython_unsupported_msg
|
astropy/modeling/setup_package.py | jayvdb/astropy | 445 | 11197459 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from os.path import join
from distutils.core import Extension
from distutils import log
from astropy_helpers import setup_helpers, utils
from astropy_helpers.version_helpers import get_pkg_version_module
wcs_setup_package = utils.import_file(join('astropy', 'wcs', 'setup_package.py'))
MODELING_ROOT = os.path.relpath(os.path.dirname(__file__))
MODELING_SRC = join(MODELING_ROOT, 'src')
SRC_FILES = [join(MODELING_SRC, 'projections.c.templ'),
__file__]
GEN_FILES = [join(MODELING_SRC, 'projections.c')]
# This defines the set of projection functions that we want to wrap.
# The key is the projection name, and the value is the number of
# parameters.
# (These are in the order that they appear in the WCS coordinate
# systems paper).
projections = {
'azp': 2,
'szp': 3,
'tan': 0,
'stg': 0,
'sin': 2,
'arc': 0,
'zea': 0,
'air': 1,
'cyp': 2,
'cea': 1,
'mer': 0,
'sfl': 0,
'par': 0,
'mol': 0,
'ait': 0,
'cop': 2,
'coe': 2,
'cod': 2,
'coo': 2,
'bon': 1,
'pco': 0,
'tsc': 0,
'csc': 0,
'qsc': 0,
'hpx': 2,
'xph': 0,
}
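# For example, the entry 'azp': 2 means the generated AZP wrapper exposes two
# projection parameters, while 'tan': 0 takes none; the projections.c template
# below is rendered once with this whole dict.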
def pre_build_py_hook(cmd_obj):
preprocess_source()
def pre_build_ext_hook(cmd_obj):
preprocess_source()
def pre_sdist_hook(cmd_obj):
preprocess_source()
def preprocess_source():
# TODO: Move this to setup_helpers
    # Generating the projection wrappers should only be done if needed. This
    # also ensures that it is not done for any release tarball since those
    # will include the pregenerated projections.c.
if all(os.path.exists(filename) for filename in GEN_FILES):
# Determine modification times
src_mtime = max(os.path.getmtime(filename) for filename in SRC_FILES)
gen_mtime = min(os.path.getmtime(filename) for filename in GEN_FILES)
version = get_pkg_version_module('astropy')
if gen_mtime > src_mtime:
# If generated source is recent enough, don't update
return
elif version.release:
# or, if we're on a release, issue a warning, but go ahead and use
# the wrappers anyway
log.warn('WARNING: The autogenerated wrappers in '
'astropy.modeling._projections seem to be older '
'than the source templates used to create '
'them. Because this is a release version we will '
'use them anyway, but this might be a sign of '
'some sort of version mismatch or other '
'tampering. Or it might just mean you moved '
'some files around or otherwise accidentally '
'changed timestamps.')
return
# otherwise rebuild the autogenerated files
# If jinja2 isn't present, then print a warning and use existing files
try:
import jinja2 # pylint: disable=W0611
except ImportError:
log.warn("WARNING: jinja2 could not be imported, so the existing "
"modeling _projections.c file will be used")
return
from jinja2 import Environment, FileSystemLoader
# Prepare the jinja2 templating environment
env = Environment(loader=FileSystemLoader(MODELING_SRC))
c_in = env.get_template('projections.c.templ')
c_out = c_in.render(projections=projections)
with open(join(MODELING_SRC, 'projections.c'), 'w') as fd:
fd.write(c_out)
def get_extensions():
wcslib_files = [ # List of wcslib files to compile
'prj.c',
'wcserr.c',
'wcsprintf.c',
'wcsutil.c'
]
wcslib_config_paths = [
join(MODELING_SRC, 'wcsconfig.h')
]
cfg = setup_helpers.DistutilsExtensionArgs()
wcs_setup_package.get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths)
cfg['include_dirs'].append(MODELING_SRC)
astropy_files = [ # List of astropy.modeling files to compile
'projections.c'
]
cfg['sources'].extend(join(MODELING_SRC, x) for x in astropy_files)
cfg['sources'] = [str(x) for x in cfg['sources']]
cfg = dict((str(key), val) for key, val in cfg.items())
return [Extension('astropy.modeling._projections', **cfg)]
|
external/deep-object-reid/tests/sc_input_params_validation/test_ote_inference_task_input_params_validation.py | opencv/openvino_training_extensions | 775 | 11197483 | import pytest
from ote_sdk.configuration.configurable_parameters import ConfigurableParameters
from ote_sdk.entities.datasets import DatasetEntity
from ote_sdk.entities.inference_parameters import InferenceParameters
from ote_sdk.entities.label_schema import LabelSchemaEntity
from ote_sdk.entities.model import ModelConfiguration, ModelEntity
from ote_sdk.entities.resultset import ResultSetEntity
from ote_sdk.test_suite.e2e_test_system import e2e_pytest_unit
from ote_sdk.tests.parameters_validation.validation_helper import (
check_value_error_exception_raised,
)
from ote_sdk.usecases.tasks.interfaces.export_interface import ExportType
from torchreid_tasks.inference_task import OTEClassificationInferenceTask
class MockClassificationInferenceTask(OTEClassificationInferenceTask):
def __init__(self):
pass
class TestOTEClassificationInferenceTaskInputParamsValidation:
@staticmethod
def model():
model_configuration = ModelConfiguration(
configurable_parameters=ConfigurableParameters(
header="header", description="description"
),
label_schema=LabelSchemaEntity(),
)
return ModelEntity(
train_dataset=DatasetEntity(), configuration=model_configuration
)
@e2e_pytest_unit
def test_ote_classification_inference_task_init_params_validation(self):
"""
<b>Description:</b>
Check OTEClassificationInferenceTask object initialization parameters validation
<b>Input data:</b>
OTEClassificationInferenceTask object initialization parameters with unexpected type
<b>Expected results:</b>
Test passes if ValueError exception is raised when unexpected type object is specified as
OTEClassificationInferenceTask initialization parameter
"""
with pytest.raises(ValueError):
OTEClassificationInferenceTask(task_environment="unexpected string") # type: ignore
@e2e_pytest_unit
def test_ote_classification_inference_task_infer_params_validation(self):
"""
<b>Description:</b>
Check OTEClassificationInferenceTask object "infer" method input parameters validation
<b>Input data:</b>
OTEClassificationInferenceTask object. "infer" method unexpected-type input parameters
<b>Expected results:</b>
Test passes if ValueError exception is raised when unexpected type object is specified as
input parameter for "infer" method
"""
task = MockClassificationInferenceTask()
correct_values_dict = {
"dataset": DatasetEntity(),
"inference_parameters": InferenceParameters(),
}
unexpected_str = "unexpected string"
unexpected_values = [
# Unexpected string is specified as "dataset" parameter
("dataset", unexpected_str),
# Unexpected string is specified as "inference_parameters" parameter
("inference_parameters", unexpected_str),
]
check_value_error_exception_raised(
correct_parameters=correct_values_dict,
unexpected_values=unexpected_values,
class_or_function=task.infer,
)
@e2e_pytest_unit
def test_ote_classification_inference_task_evaluate_params_validation(self):
"""
<b>Description:</b>
Check OTEClassificationInferenceTask object "evaluate" method input parameters validation
<b>Input data:</b>
OTEClassificationInferenceTask object. "evaluate" method unexpected-type input parameters
<b>Expected results:</b>
Test passes if ValueError exception is raised when unexpected type object is specified as
input parameter for "evaluate" method
"""
task = MockClassificationInferenceTask()
model = self.model()
result_set = ResultSetEntity(
model=model,
ground_truth_dataset=DatasetEntity(),
prediction_dataset=DatasetEntity(),
)
correct_values_dict = {
"output_resultset": result_set,
"evaluation_metric": "metric",
}
unexpected_int = 1
unexpected_values = [
# Unexpected integer is specified as "output_resultset" parameter
("output_resultset", unexpected_int),
# Unexpected integer is specified as "evaluation_metric" parameter
("evaluation_metric", unexpected_int),
]
check_value_error_exception_raised(
correct_parameters=correct_values_dict,
unexpected_values=unexpected_values,
class_or_function=task.evaluate,
)
@e2e_pytest_unit
def test_ote_classification_inference_task_export_params_validation(self):
"""
<b>Description:</b>
Check OTEClassificationInferenceTask object "export" method input parameters validation
<b>Input data:</b>
OTEClassificationInferenceTask object. "export" method unexpected-type input parameters
<b>Expected results:</b>
Test passes if ValueError exception is raised when unexpected type object is specified as
input parameter for "export" method
"""
task = MockClassificationInferenceTask()
model = self.model()
correct_values_dict = {
"export_type": ExportType.OPENVINO,
"output_model": model,
}
unexpected_str = "unexpected string"
unexpected_values = [
# Unexpected string is specified as "export_type" parameter
("export_type", unexpected_str),
# Unexpected string is specified as "output_model" parameter
("output_model", unexpected_str),
]
check_value_error_exception_raised(
correct_parameters=correct_values_dict,
unexpected_values=unexpected_values,
class_or_function=task.export,
)
|
heron/tools/explorer/src/python/physicalplan.py | pjfanning/incubator-heron | 3,348 | 11197487 |
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' physicalplan.py '''
import sys
from typing import Optional
import requests
from tabulate import tabulate
from heron.common.src.python.utils.log import Log
from heron.tools.common.src.python.clients import tracker
def to_table(metrics):
""" normalize raw metrics API result to table """
all_queries = tracker.metric_queries()
m = tracker.queries_map()
header = ['container id'] + [m[k] for k in all_queries if k in metrics.keys()]
stats = []
if not metrics:
return stats, header
names = list(metrics.values())[0].keys()
for n in names:
info = [n]
for field in all_queries:
try:
info.append(str(metrics[field][n]))
except KeyError:
pass
stats.append(info)
return stats, header
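# Illustrative shape (assumed example values, not taken from a live tracker):
# metrics ~ {"__emit-count": {"container_1_word_1": "123", ...}, ...} yields
# rows like ["container_1_word_1", "123", ...] paired with a header such as
# ["container id", "emit-count", ...].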
def run_metrics(
cluster: str,
role: str,
environment: str,
topology: str,
component: Optional[str],
) -> None:
"""Render a table of metrics."""
try:
result = tracker.get_topology_info(cluster, environment, topology, role)
except requests.ConnectionError as e:
Log.error(f"Fail to connect to tracker: {e}")
sys.exit(1)
all_components = sorted(result['physical_plan']['components'].keys())
if component:
if component not in all_components:
Log.error(f"Unknown component: {component!r}")
sys.exit(1)
components = [component]
else:
components = all_components
all_queries = tracker.metric_queries()
for i, comp in enumerate(components):
try:
result = tracker.get_comp_metrics(
cluster, environment, topology, comp, [], all_queries, [0, -1], role,
)
except requests.ConnectionError as e:
Log.error(f"Fail to connect to tracker: {e}")
sys.exit(1)
stat, header = to_table(result["metrics"])
if i != 0:
print('')
print(f"{comp!r} metrics:")
print(tabulate(stat, headers=header))
def run_containers(
cluster: str,
role: str,
environment: str,
topology: str,
container_id: str,
) -> None:
"""Render a table of container information."""
try:
result = tracker.get_topology_info(cluster, environment, topology, role)
except requests.ConnectionError as e:
Log.error(f"Fail to connect to tracker: {e}")
sys.exit(1)
containers = result['physical_plan']['stmgrs']
all_bolts, all_spouts = set(), set()
for bolts in result['physical_plan']['bolts'].values():
all_bolts |= set(bolts)
for spouts in result['physical_plan']['spouts'].values():
all_spouts |= set(spouts)
stmgrs = sorted(containers.keys())
if container_id is not None:
stmgrs = [stmgrs[container_id]]
table = []
for cid, name in enumerate(stmgrs, (container_id + 1 if container_id else 1)):
instances = containers[name]["instance_ids"]
table.append([
cid,
containers[name]["host"],
containers[name]["port"],
containers[name]["pid"],
len([1 for instance in instances if instance in all_bolts]),
len([1 for instance in instances if instance in all_spouts]),
len(instances),
])
headers = ["container", "host", "port", "pid", "#bolt", "#spout", "#instance"]
print(tabulate(table, headers=headers))
|
neuralprophet/configure.py | yasirroni/neural_prophet | 2,144 | 11197502 |
from collections import OrderedDict
from dataclasses import dataclass, field
import numpy as np
import pandas as pd
import logging
import inspect
import torch
import math
from neuralprophet import utils_torch, utils
log = logging.getLogger("NP.config")
def from_kwargs(cls, kwargs):
return cls(**{k: v for k, v in kwargs.items() if k in inspect.signature(cls).parameters})
@dataclass
class Model:
num_hidden_layers: int
d_hidden: int
@dataclass
class Train:
learning_rate: (float, None)
epochs: (int, None)
batch_size: (int, None)
loss_func: (str, torch.nn.modules.loss._Loss, "typing.Callable")
optimizer: (str, torch.optim.Optimizer)
train_speed: (int, float, None)
ar_sparsity: (float, None)
reg_delay_pct: float = 0.5
reg_lambda_trend: float = None
trend_reg_threshold: (bool, float) = None
reg_lambda_season: float = None
n_data: int = field(init=False)
def __post_init__(self):
if type(self.loss_func) == str:
if self.loss_func.lower() in ["huber", "smoothl1", "smoothl1loss"]:
self.loss_func = torch.nn.SmoothL1Loss()
elif self.loss_func.lower() in ["mae", "l1", "l1loss"]:
self.loss_func = torch.nn.L1Loss()
elif self.loss_func.lower() in ["mse", "mseloss", "l2", "l2loss"]:
self.loss_func = torch.nn.MSELoss()
else:
raise NotImplementedError("Loss function {} name not defined".format(self.loss_func))
elif callable(self.loss_func):
pass
elif issubclass(self.loss_func.__class__, torch.nn.modules.loss._Loss):
pass
else:
raise NotImplementedError("Loss function {} not found".format(self.loss_func))
def set_auto_batch_epoch(
self,
n_data: int,
min_batch: int = 16,
max_batch: int = 256,
min_epoch: int = 50,
max_epoch: int = 500,
):
assert n_data >= 1
self.n_data = n_data
if self.batch_size is None:
self.batch_size = int(2 ** (2 + int(np.log10(n_data))))
self.batch_size = min(max_batch, max(min_batch, self.batch_size))
self.batch_size = min(self.n_data, self.batch_size)
log.info("Auto-set batch_size to {}".format(self.batch_size))
if self.epochs is None:
self.epochs = int((2 ** (2.5 * np.log10(n_data))) / (n_data / 1000.0))
self.epochs = min(max_epoch, max(min_epoch, self.epochs))
log.info("Auto-set epochs to {}".format(self.epochs))
# also set lambda_delay:
self.lambda_delay = int(self.reg_delay_pct * self.epochs)
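    # Worked example of the heuristics above (assuming n_data=10000 and no
    # user-supplied values): batch_size = 2 ** (2 + int(log10(10000))) = 64 and
    # epochs = int(2 ** (2.5 * 4) / (10000 / 1000.0)) = 102, both within the
    # clamping ranges.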
def apply_train_speed(self, batch=False, epoch=False, lr=False):
if self.train_speed is not None and not math.isclose(self.train_speed, 0):
if batch:
self.batch_size = max(1, int(self.batch_size * 2 ** self.train_speed))
self.batch_size = min(self.n_data, self.batch_size)
log.info(
"train_speed-{} {}creased batch_size to {}".format(
self.train_speed, ["in", "de"][int(self.train_speed < 0)], self.batch_size
)
)
if epoch:
self.epochs = max(1, int(self.epochs * 2 ** -self.train_speed))
log.info(
"train_speed-{} {}creased epochs to {}".format(
self.train_speed, ["in", "de"][int(self.train_speed > 0)], self.epochs
)
)
if lr:
self.learning_rate = self.learning_rate * 2 ** self.train_speed
log.info(
"train_speed-{} {}creased learning_rate to {}".format(
self.train_speed, ["in", "de"][int(self.train_speed < 0)], self.learning_rate
)
)
def apply_train_speed_all(self):
if self.train_speed is not None and not math.isclose(self.train_speed, 0):
self.apply_train_speed(batch=True, epoch=True, lr=True)
def get_optimizer(self, model_parameters):
return utils_torch.create_optimizer_from_config(self.optimizer, model_parameters, self.learning_rate)
def get_scheduler(self, optimizer, steps_per_epoch):
return torch.optim.lr_scheduler.OneCycleLR(
optimizer,
max_lr=self.learning_rate,
epochs=self.epochs,
steps_per_epoch=steps_per_epoch,
pct_start=0.3,
anneal_strategy="cos",
div_factor=100.0,
final_div_factor=5000.0,
)
def get_reg_delay_weight(self, e, iter_progress, reg_start_pct: float = 0.5, reg_full_pct: float = 1.0):
progress = (e + iter_progress) / float(self.epochs)
if reg_start_pct == reg_full_pct:
reg_progress = float(progress > reg_start_pct)
else:
reg_progress = (progress - reg_start_pct) / (reg_full_pct - reg_start_pct)
if reg_progress <= 0:
delay_weight = 0
elif reg_progress < 1:
delay_weight = 1 - (1 + np.cos(np.pi * reg_progress)) / 2.0
else:
delay_weight = 1
return delay_weight
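    # Sketch of the ramp above: halfway between reg_start_pct and reg_full_pct,
    # reg_progress = 0.5 and delay_weight = 1 - (1 + cos(pi * 0.5)) / 2 = 0.5;
    # the weight is 0 before reg_start_pct and saturates at 1 at reg_full_pct.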
def find_learning_rate(self, model, dataset, repeat: int = 3):
lrs = []
for i in range(repeat):
lr = utils_torch.lr_range_test(
model,
dataset,
loss_func=self.loss_func,
optimizer=self.optimizer,
batch_size=self.batch_size,
)
lrs.append(lr)
lrs_log10_mean = sum([np.log10(x) for x in lrs]) / repeat
learning_rate = 10 ** lrs_log10_mean
return learning_rate
@dataclass
class Trend:
growth: str
changepoints: list
n_changepoints: int
changepoints_range: float
trend_reg: float
trend_reg_threshold: (bool, float)
def __post_init__(self):
if self.growth not in ["off", "linear", "discontinuous"]:
log.error("Invalid trend growth '{}'. Set to 'linear'".format(self.growth))
self.growth = "linear"
if self.growth == "off":
self.changepoints = None
self.n_changepoints = 0
if self.changepoints is not None:
self.n_changepoints = len(self.changepoints)
self.changepoints = pd.to_datetime(self.changepoints).values
if type(self.trend_reg_threshold) == bool:
if self.trend_reg_threshold:
self.trend_reg_threshold = 3.0 / (3.0 + (1.0 + self.trend_reg) * np.sqrt(self.n_changepoints))
log.debug("Trend reg threshold automatically set to: {}".format(self.trend_reg_threshold))
else:
self.trend_reg_threshold = None
elif self.trend_reg_threshold < 0:
log.warning("Negative trend reg threshold set to zero.")
self.trend_reg_threshold = None
elif math.isclose(self.trend_reg_threshold, 0):
self.trend_reg_threshold = None
if self.trend_reg < 0:
log.warning("Negative trend reg lambda set to zero.")
self.trend_reg = 0
if self.trend_reg > 0:
if self.n_changepoints > 0:
log.info("Note: Trend changepoint regularization is experimental.")
self.trend_reg = 0.001 * self.trend_reg
else:
log.info("Trend reg lambda ignored due to no changepoints.")
self.trend_reg = 0
if self.trend_reg_threshold > 0:
log.info("Trend reg threshold ignored due to no changepoints.")
else:
if self.trend_reg_threshold is not None and self.trend_reg_threshold > 0:
log.info("Trend reg threshold ignored due to reg lambda <= 0.")
@dataclass
class Season:
resolution: int
period: float
arg: str
@dataclass
class AllSeason:
mode: str = "additive"
computation: str = "fourier"
reg_lambda: float = 0
yearly_arg: (str, bool, int) = "auto"
weekly_arg: (str, bool, int) = "auto"
daily_arg: (str, bool, int) = "auto"
periods: OrderedDict = field(init=False) # contains SeasonConfig objects
def __post_init__(self):
if self.reg_lambda > 0 and self.computation == "fourier":
log.info("Note: Fourier-based seasonality regularization is experimental.")
self.reg_lambda = 0.01 * self.reg_lambda
self.periods = OrderedDict(
{
"yearly": Season(resolution=6, period=365.25, arg=self.yearly_arg),
"weekly": Season(resolution=3, period=7, arg=self.weekly_arg),
"daily": Season(resolution=6, period=1, arg=self.daily_arg),
}
)
def append(self, name, period, resolution, arg):
self.periods[name] = Season(resolution=resolution, period=period, arg=arg)
@dataclass
class AR:
n_lags: int
ar_sparsity: float
def __post_init__(self):
if self.ar_sparsity is not None and self.ar_sparsity < 1:
assert self.ar_sparsity > 0
self.reg_lambda = 0.001 * (1.0 / (1e-6 + self.ar_sparsity) - 1.00)
else:
self.reg_lambda = None
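    # Example of the mapping above: ar_sparsity=0.1 gives
    # reg_lambda = 0.001 * (1 / (1e-6 + 0.1) - 1) ≈ 0.009, so smaller sparsity
    # targets produce stronger regularization.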
def regularize(self, weights, original=False):
"""Regularization of AR coefficients
Args:
weights (torch tensor): Model weights to be regularized towards zero
Returns:
regularization loss, scalar
"""
if original:
reg = torch.div(2.0, 1.0 + torch.exp(-2 * (1e-9 + torch.abs(weights)).pow(1 / 2.0))) - 1.0
else:
reg = utils_torch.penalize_nonzero(weights, eagerness=3, acceptance=1.0)
return reg
@dataclass
class Covar:
reg_lambda: float
as_scalar: bool
normalize: (bool, str)
def __post_init__(self):
if self.reg_lambda is not None:
if self.reg_lambda < 0:
raise ValueError("regularization must be >= 0")
@dataclass
class Regressor:
reg_lambda: float
normalize: str
mode: str
@dataclass
class Event:
lower_window: int
upper_window: int
reg_lambda: float
mode: str
@dataclass
class Holidays:
country: str
lower_window: int
upper_window: int
mode: str = "additive"
reg_lambda: float = None
holiday_names: set = field(init=False)
def init_holidays(self, df=None):
self.holiday_names = utils.get_holidays_from_country(self.country, df)
|
fastrunner/utils/parser.py | FuxiongYang/faster | 227 | 11197526 |
import datetime
import json
import json5
import time
import requests
from tornado import ioloop, httpclient
from enum import Enum
from loguru import logger
from fastrunner import models
from fastrunner.utils.tree import get_tree_max_id, get_all_ycatid, get_tree_ycatid_mapping
class FileType(Enum):
"""
    File type enumeration
"""
string = 1
int = 2
float = 3
bool = 4
list = 5
dict = 6
file = 7
class Format(object):
"""
    Parse a standard HttpRunner script: frontend -> backend
"""
def __init__(self, body, level='test'):
"""
body => {
header: header -> [{key:'', value:'', desc:''},],
request: request -> {
form: formData - > [{key: '', value: '', type: 1, desc: ''},],
json: jsonData -> {},-
params: paramsData -> [{key: '', value: '', type: 1, desc: ''},]
files: files -> {"fields","binary"}
},
extract: extract -> [{key:'', value:'', desc:''}],
validate: validate -> [{expect: '', actual: '', comparator: 'equals', type: 1},],
variables: variables -> [{key: '', value: '', type: 1, desc: ''},],
hooks: hooks -> [{setup: '', teardown: ''},],
url: url -> string
method: method -> string
name: name -> string
}
"""
try:
self.name = body.pop('name')
self.__headers = body['header'].pop('header')
self.__params = body['request']['params'].pop('params')
self.__data = body['request']['form'].pop('data')
self.__json = body['request'].pop('json')
self.__files = body['request']['files'].pop('files')
self.__variables = body['variables'].pop('variables')
self.__setup_hooks = body['hooks'].pop('setup_hooks')
self.__teardown_hooks = body['hooks'].pop('teardown_hooks')
self.__desc = {
"header": body['header'].pop('desc'),
"data": body['request']['form'].pop('desc'),
"files": body['request']['files'].pop('desc'),
"params": body['request']['params'].pop('desc'),
"variables": body['variables'].pop('desc'),
}
if level == 'test':
self.url = body.pop('url')
self.method = body.pop('method')
self.__times = body.pop('times')
self.__extract = body['extract'].pop('extract')
self.__validate = body.pop('validate').pop('validate')
self.__desc['extract'] = body['extract'].pop('desc')
elif level == 'config':
self.base_url = body.pop('base_url')
self.is_default = body.pop('is_default')
self.__parameters = body['parameters'].pop('parameters')
self.__desc["parameters"] = body['parameters'].pop('desc')
self.__level = level
self.testcase = None
self.project = body.pop('project')
self.relation = body.pop('nodeId')
            # FastRunner APIs may not have a rig_id field, so stay compatible
self.rig_id = body['rig_id'] if body.get('rig_id') else None
self.rig_env = body['rig_env'] if body.get('rig_env') else 0
except KeyError:
# project or relation
pass
def parse(self):
"""
        Return the normalized HttpRunner structure; the "desc" field must be removed before running
"""
if not hasattr(self, 'rig_id'):
self.rig_id = None
if not hasattr(self, 'rig_env'):
self.rig_env = 0
if self.__level == 'test':
test = {
"name": self.name,
"rig_id": self.rig_id,
"times": self.__times,
"request": {
"url": self.url,
"method": self.method,
"verify": False
},
"desc": self.__desc
}
if self.__extract:
test["extract"] = self.__extract
if self.__validate:
test['validate'] = self.__validate
elif self.__level == 'config':
test = {
"name": self.name,
"request": {
"base_url": self.base_url,
},
"desc": self.__desc
}
if self.__parameters:
test['parameters'] = self.__parameters
if self.__headers:
test["request"]["headers"] = self.__headers
if self.__params:
test["request"]["params"] = self.__params
if self.__data:
test["request"]["data"] = self.__data
if self.__json:
test["request"]["json"] = self.__json
        # some endpoints need an empty JSON body, so keep {} as-is
if self.__json == {}:
test["request"]["json"] = {}
if self.__files:
test["request"]["files"] = self.__files
if self.__variables:
test["variables"] = self.__variables
if self.__setup_hooks:
test['setup_hooks'] = self.__setup_hooks
if self.__teardown_hooks:
test['teardown_hooks'] = self.__teardown_hooks
self.testcase = test
class Parse(object):
"""
    Parse a standard HttpRunner script for the frontend: backend -> frontend
"""
def __init__(self, body, level='test'):
"""
body: => {
"name": "get token with $user_agent, $os_platform, $app_version",
"request": {
"url": "/api/get-token",
"method": "POST",
"headers": {
"app_version": "$app_version",
"os_platform": "$os_platform",
"user_agent": "$user_agent"
},
"json": {
"sign": "${get_sign($user_agent, $device_sn, $os_platform, $app_version)}"
},
"extract": [
{"token": "content.token"}
],
"validate": [
{"eq": ["status_code", 200]},
{"eq": ["headers.Content-Type", "application/json"]},
{"eq": ["content.success", true]}
],
"setup_hooks": [],
"teardown_hooks": []
}
"""
self.name = body.get('name')
self.__request = body.get('request') # header files params json data
self.__variables = body.get('variables')
self.__setup_hooks = body.get('setup_hooks', [])
self.__teardown_hooks = body.get('teardown_hooks', [])
self.__desc = body.get('desc')
if level == 'test':
            self.__times = body.get('times', 1)  # default to 1 if the imported script has no times
self.__extract = body.get('extract')
self.__validate = body.get('validate')
elif level == "config":
self.__parameters = body.get("parameters")
self.__level = level
self.testcase = None
@staticmethod
def __get_type(content):
"""
        Return the data type code; defaults to string
"""
var_type = {
"str": 1,
"int": 2,
"float": 3,
"bool": 4,
"list": 5,
"dict": 6,
}
key = str(type(content).__name__)
        # hack: handle values that are ints but passed as $int variable references
if key == 'str' and '$int' in content:
return var_type['int'], content
if key in ["list", "dict"]:
content = json.dumps(content, ensure_ascii=False)
else:
content = str(content)
return var_type[key], content
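    # Illustrative return values of __get_type (derived from the mapping above):
    #   __get_type(3)         -> (2, '3')
    #   __get_type('$int_id') -> (2, '$int_id')  # the '$int' compatibility branch
    #   __get_type({'a': 1})  -> (6, '{"a": 1}')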
def parse_http(self):
"""
        Build the standard frontend script format
"""
init = [{
"key": "",
"value": "",
"desc": ""
}]
init_p = [{
"key": "",
"value": "",
"desc": "",
"type": 1
}]
        # initialize the test structure
test = {
"name": self.name,
"header": init,
"request": {
"data": init_p,
"params": init_p,
"json_data": ''
},
"variables": init_p,
"hooks": [{
"setup": "",
"teardown": ""
}]
}
if self.__level == 'test':
test["times"] = self.__times
test["method"] = self.__request['method']
test["url"] = self.__request['url']
test["validate"] = [{
"expect": "",
"actual": "",
"comparator": "equals",
"type": 1
}]
test["extract"] = init
if self.__extract:
test["extract"] = []
for content in self.__extract:
for key, value in content.items():
test['extract'].append({
"key": key,
"value": value,
"desc": self.__desc["extract"][key]
})
if self.__validate:
test["validate"] = []
for content in self.__validate:
for key, value in content.items():
obj = Parse.__get_type(value[1])
test["validate"].append({
"expect": obj[1],
"actual": value[0],
"comparator": key,
"type": obj[0]
})
elif self.__level == "config":
test["base_url"] = self.__request["base_url"]
test["parameters"] = init
if self.__parameters:
test["parameters"] = []
for content in self.__parameters:
for key, value in content.items():
test["parameters"].append({
"key": key,
"value": Parse.__get_type(value)[1],
"desc": self.__desc["parameters"][key]
})
if self.__request.get('headers'):
test["header"] = []
for key, value in self.__request.pop('headers').items():
test['header'].append({
"key": key,
"value": value,
"desc": self.__desc["header"][key]
})
if self.__request.get('data'):
test["request"]["data"] = []
for key, value in self.__request.pop('data').items():
obj = Parse.__get_type(value)
test['request']['data'].append({
"key": key,
"value": obj[1],
"type": obj[0],
"desc": self.__desc["data"][key]
})
# if self.__request.get('files'):
# for key, value in self.__request.pop("files").items():
# size = FileBinary.objects.get(name=value).size
# test['request']['data'].append({
# "key": key,
# "value": value,
# "size": size,
# "type": 5,
# "desc": self.__desc["files"][key]
# })
if self.__request.get('params'):
test["request"]["params"] = []
for key, value in self.__request.pop('params').items():
test['request']['params'].append({
"key": key,
"value": value,
"type": 1,
"desc": self.__desc["params"][key]
})
if self.__request.get('json'):
test["request"]["json_data"] = \
json.dumps(self.__request.pop("json"), indent=4,
separators=(',', ': '), ensure_ascii=False)
if self.__variables:
test["variables"] = []
for content in self.__variables:
for key, value in content.items():
obj = Parse.__get_type(value)
test["variables"].append({
"key": key,
"value": obj[1],
"desc": self.__desc["variables"][key],
"type": obj[0]
})
if self.__setup_hooks or self.__teardown_hooks:
test["hooks"] = []
if len(self.__setup_hooks) > len(self.__teardown_hooks):
for index in range(0, len(self.__setup_hooks)):
teardown = ""
if index < len(self.__teardown_hooks):
teardown = self.__teardown_hooks[index]
test["hooks"].append({
"setup": self.__setup_hooks[index],
"teardown": teardown
})
else:
for index in range(0, len(self.__teardown_hooks)):
setup = ""
if index < len(self.__setup_hooks):
setup = self.__setup_hooks[index]
test["hooks"].append({
"setup": setup,
"teardown": self.__teardown_hooks[index]
})
self.testcase = test
def format_json(value):
try:
return json.dumps(
value, indent=4, separators=(
',', ': '), ensure_ascii=False)
except BaseException:
return value
def yapi_properties2json(properties, req_json={}, variables=[], desc={}):
for field_name, field_value in properties.items():
value_type = field_value['type']
if not (value_type == 'array' or value_type == 'object'):
req_json[field_name] = f'${field_name}'
variables.append({field_name: field_value.get('default', '')})
desc[field_name] = field_value['description']
if value_type == 'array':
pass
pass
def format_summary_to_ding(msg_type, summary, report_name=None):
rows_count = summary['stat']['testsRun']
pass_count = summary['stat']['successes']
fail_count = summary['stat']['failures']
error_count = summary['stat']['errors']
try:
        # use the report_url configured for the running environment
base_url = summary['details'][0]['in_out']['in']['report_url']
except KeyError:
base_url = summary['details'][0]['base_url']
env_name = '测试' if 'test' in base_url else '生产'
    case_suite_name = summary['details'][0]['name']  # test suite name
    # report name when triggered by celery
if report_name:
case_suite_name = report_name
start_at = time.strftime(
'%Y-%m-%d %H:%M:%S',
time.localtime(
summary['time']['start_at']))
duration = '%.2fs' % summary['time']['duration']
    # number of executed test cases
executed = rows_count
title = '''自动化测试报告: \n开始执行时间:{2} \n消耗时间:{3} \n环境:{0} \nHOST:{1} \n用例集:{4}'''.format(
env_name, base_url, start_at, duration, case_suite_name)
    # pass rate
pass_rate = '{:.2%}'.format(pass_count / executed)
    # failure rate
fail_rate = '{:.2%}'.format(fail_count / executed)
fail_count_list = []
    # failure details
if fail_count == 0:
fail_detail = ''
else:
details = summary['details']
print(details)
for detail in details:
for record in detail['records']:
print(record['meta_data']['validators'])
if record['status'] != 'failure':
continue
else:
response_message = record['meta_data']['response']['json']['info']['message']
response_error = record['meta_data']['response']['json']['info']['error']
request_url = record['meta_data']['request']['url']
case_name = record['name']
expect = []
check_value = []
for validator in record['meta_data']['validators']:
expect.append(validator['expect'])
check_value.append(validator['check_value'])
fail_count_list.append(
{
'case_name': case_name,
'request_url': request_url,
'fail_message': f'{response_error} - {response_message}'})
fail_detail = '失败的接口是:\n'
for i in fail_count_list:
s = '用例名:{0}\n PATH:{1}\n \n'.format(
i["case_name"], i["fail_message"])
fail_detail += s
if msg_type == 'markdown':
fail_detail_markdown = ''
report_id = models.Report.objects.last().id
report_url = f'http://10.0.3.57:8000/api/fastrunner/reports/{report_id}/'
for item in fail_count_list:
case_name_and_fail_message = f'> - **{item["case_name"]} - {item["request_url"]} - {item["fail_message"]}**\n'
fail_detail_markdown += case_name_and_fail_message
msg_markdown = f"""
## FasterRunner自动化测试报告
### 用例集: {case_suite_name}
### 耗时: {duration}
### 成功用例: {pass_count}个
### 异常用例: {error_count}个
### 失败用例: {fail_count}个
{fail_detail_markdown}
### 失败率: {fail_rate}
### [查看详情]({report_url})"""
else:
msg = '''{0}
总用例{1}共条,执行了{2}条,异常{3}条.
通过{4}条,通过率{5}.
失败{6}条,失败率{7}.
{8}'''.format(title, rows_count, executed, error_count, pass_count, pass_rate, fail_count, fail_rate,
fail_detail)
    return (msg_markdown, fail_count) if msg_type == 'markdown' else (msg, fail_count)
# special handling for the "conditions" field
def set_customized_variable(api_info_template, items):
if items['type'] == 'object':
properties: dict = items['properties']
attr_name: dict = properties['attributeName']
attribute_name_enum: list = attr_name.get('enum', [''])
if len(attribute_name_enum) == 0:
attribute_name_enum = ['']
target_value: list = [f'${value}' for value in attribute_name_enum]
        # default template for the query condition field
api_info_template['request']['json']['conditions'] = {
'attributeName': f'${attribute_name_enum[0]}',
"rangeType": "$rangeType",
"targetValue": target_value
}
for attr in attribute_name_enum:
api_info_template['variables']['variables'].append({attr: ''})
api_info_template['variables']['desc'][attr] = attr_name.get('description', '')
        # comparison type for the query condition
range_type: dict = properties['rangeType']
range_type_enum: list = range_type.get('enum', [''])
api_info_template['variables']['variables'].append({'rangeType': range_type_enum[0]})
api_info_template['variables']['desc']['rangeType'] = f'条件匹配方式: {",".join(range_type_enum)}'
        # default sort order
api_info_template['request']['json']['orderBy'] = [
{
"attributeName": f'${attribute_name_enum[0]}',
"rankType": "DESC"
}
]
class Yapi:
def __init__(
self,
yapi_base_url: str,
token: str,
faster_project_id: int):
self.__yapi_base_url = yapi_base_url
self.__token = token
self.fast_project_id = faster_project_id
self.api_info: list = []
self.api_ids: list = []
# self.category_info: list = []
        # basic api info, without the request payload
self.api_list_url = self.__yapi_base_url + '/api/interface/list'
        # api detail, including the full request payload
self.api_detail_url = self.__yapi_base_url + '/api/interface/get'
        # all api category menus, which also contain basic api info
self.category_info_url = self.__yapi_base_url + '/api/interface/list_menu'
def get_category_info(self):
try:
res = requests.get(self.category_info_url, params={'token': self.__token}).json()
        except Exception as e:
            logger.error(f"获取yapi的目录失败: {e}")
            res = {"errcode": 1, "errmsg": str(e), "data": []}
finally:
# {
# "errcode": 0,
# "errmsg": "成功!",
# "data": [
# {
# "index": 0,
# "_id": 3945,
# "name": "机台区域管理",
# "project_id": 458,
# "desc": "",
# "uid": 950,
# "add_time": 1588490260,
# "up_time": 1588490260,
# "__v": 0,
# "list": [
# {
# "edit_uid": 0,
# "status": "done",
# "index": 0,
# "tag": [],
# "_id": 31573,
# "method": "GET",
# "catid": 3945,
# "title": "查询列表",
# "path": "/woven/base/equipmentArea/query",
# "project_id": 458,
# "uid": 950,
# "add_time": 1588490282,
# "up_time": 1588490541
# }
# ]
# }
# ]
# }
            if res['errcode'] == 0:
                return res
            else:
                return {
                    "errcode": 1,
                    "errmsg": res.get('errmsg', ''),
                    "data": []
                }
def get_api_uptime_mapping(self):
"""
        Mapping of update times for all yapi apis: {api_id: api_up_time}
"""
category_info_list = self.get_category_info()
mapping = {}
for category_info in category_info_list['data']:
category_detail = category_info.get('list', [])
for category in category_detail:
api_id = category['_id']
up_time = category['up_time']
mapping[api_id] = up_time
return mapping
def get_category_id_name_mapping(self):
"""
        Get the yapi category mapping: {category_id: category_name}
"""
try:
res = self.get_category_info()
if res['errcode'] == 0:
"""
{
"errcode": 0,
"errmsg": "成功!",
"data": [
{
"_id": 8409,
"name": "布行小程序",
"project_id": 395,
"desc": 'null',
"add_time": 1595317970,
"up_time": 1595317970,
"list": [
{
"edit_uid": 0,
"status": "undone",
"index": 0,
"tag": [],
"_id": 48205,
"title": "查询用户布行信息",
"catid": 8409,
"path": "/mes/bh/user/listMyFabricStore",
"method": "POST",
"project_id": 395,
"uid": 246,
"add_time": 1595317919,
"up_time": 1608537377
}]
}
]
}
"""
# {'category_id': 'category_name'}
category_id_name_mapping = {}
for category_info in res['data']:
                    # skip empty categories
if category_info.get('list'):
category_name = category_info.get('name')
category_id = category_info.get('_id')
category_id_name_mapping[category_id] = category_name
return category_id_name_mapping
except Exception as e:
logger.error(f"获取yapi的目录失败: {e}")
def get_api_info_list(self):
"""
        Fetch the api list data
"""
try:
res = requests.get(
self.api_list_url,
params={
'token': self.__token,
'page': 1,
'limit': 100000}).json()
if res['errcode'] == 0:
"""
{
"errcode": 0,
"errmsg": "成功!",
"data": [
'list': [
{
"_id": 4444,
"project_id": 299,
"catid": 1376,
"title": "/api/group/del",
"path": "/api/group/del",
"method": "POST",
"uid": 11,
"add_time": 1511431246,
"up_time": 1511751531,
"status": "undone",
"edit_uid": 0
}
]
]
}
"""
return res
except Exception as e:
logger.error(f"获取api list失败: {e}")
def get_api_ids(self):
"""
        Get all yapi api ids
"""
api_list = self.get_api_info_list()
return [api['_id'] for api in api_list['data']['list']]
def get_batch_api_detail(self, api_ids):
"""
        Get detailed info for all yapi apis
"""
api_info = []
token = self.__token
i = 0
        # detail of a single yapi api
"""
{'query_path': {'path': '/mes/common/customer/retreive',
'params': []},
'edit_uid': 0,
'status': 'undone',
'type': 'static',
'req_body_is_json_schema': False,
'res_body_is_json_schema': True,
'api_opened': False, 'index': 0, 'tag': [],
'_id': 8850,
'method': 'POST',
'catid': 948,
'title': '查询客户详情',
'path': '/mes/common/customer/retreive',
'project_id': 395,
'res_body_type': 'json',
'desc': '', 'markdown': '', 'req_body_other': '',
'req_body_type': 'raw',
'res_body': '{"$schema":"http://json-schema.org/draft-04/schema#","type":"object","properties":{"result":{"type":"object","properties":{"customerNo":{"type":"string"},"factoryNo":{"type":"string"},"fullName":{"type":"string"},"abbrName":{"type":"string"},"province":{"type":"string"},"city":{"type":"string"},"area":{"type":"string"},"address":{"type":"string"},"contactName":{"type":"string"},"contactMobile":{"type":"string"},"description":{"type":"string"},"createTime":{"type":"string"},"createUser":{"type":"string"},"createSystem":{"type":"string"}}},"successful":{"type":"boolean"}}}',
'uid': 36,
'add_time': 1560820025,
'up_time': 1560820411,
'req_body_form': [], 'req_params': [],
'req_headers': [{'required': '1', '_id': '5d083abb0bdee900010a98b3', 'value': 'application/x-www-form-urlencoded', 'name': 'Content-Type'}, {'required': '1', '_id': '5d083abb0bdee900010a98b2', 'desc': '', 'example': '', 'value': '88F13DF0B2AA4E1188B38E1A5E909AF1', 'name': 'clientId'}, {'required': '1', '_id': '5d083abb0bdee900010a98b1', 'desc': '', 'example': '', 'value': 'AF4649FFA4674ADB873F0C92E7B00227', 'name': 'accessToken'}, {'required': '1', '_id': '5d083abb0bdee900010a98b0', 'desc': '', 'example': '', 'value': 'V2', 'name': 'authen-type'}, {'required': '1', '_id': '5d083abb0bdee900010a98af', 'desc': '', 'example': '', 'value': '74BDB6DA54524D8BAE9C34C04A476019', 'name': 'userId'}],
'req_query': [{'required': '1', '_id': '5d083abb0bdee900010a98ae', 'desc': '客户编号', 'name': 'customerNo'}], '__v': 0, 'username': 'liucanwen'}
"""
api_info = []
err_info = set()
def handle_request(response):
try:
res = json.loads(response.body, encoding='utf-8')
api_info.append(res['data'])
except Exception as e:
err_info.add(e)
nonlocal i
i -= 1
if i <= 0:
ioloop.IOLoop.instance().stop()
http_client = httpclient.AsyncHTTPClient()
for api_id in api_ids:
i += 1
http_client.fetch(
f'{self.api_detail_url}?token={token}&id={api_id}',
handle_request,
method='GET')
ioloop.IOLoop.instance().start()
if len(err_info) > 0:
for err in err_info:
logger.error(f'err message: {err}')
return api_info
def get_variable_default_value(self, variable_type, variable_value):
if isinstance(variable_value, dict) is False:
return ''
variable_type = variable_type.lower()
if variable_type in ('integer', 'number', 'bigdecimal'):
return variable_value.get('default', 0)
elif variable_type == "date":
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
elif variable_type == "string":
return ""
return ""
def create_relation_id(self, project_id):
category_id_name_mapping: dict = self.get_category_id_name_mapping()
obj = models.Relation.objects.get(project_id=project_id, type=1)
eval_tree: list = eval(obj.tree)
yapi_catids: list = [yapi_catid for yapi_catid in get_all_ycatid(eval_tree, [])]
if category_id_name_mapping is None:
return
for cat_id, cat_name in category_id_name_mapping.items():
if cat_id not in yapi_catids:
tree_id = get_tree_max_id(eval_tree)
base_tree_node = {
"id": tree_id + 1,
"yapi_catid": cat_id,
"label": cat_name,
"children": []
}
eval_tree.append(base_tree_node)
obj.tree = json.dumps(eval_tree, ensure_ascii=False)
obj.save()
def yapi2faster(self, source_api_info):
"""
        Convert a single yapi api into the faster format
"""
api_info_template = {
"header": {
"header": {},
"desc": {}
},
"request": {
"form": {
"data": {},
"desc": {}
},
"json": {},
"params": {
"params": {},
"desc": {}
},
"files": {
"files": {},
"desc": {}
}
}, "extract": {
"extract": [],
"desc": {}
}, "validate": {
"validate": []
}, "variables": {
"variables": [],
"desc": {}
}, "hooks": {
"setup_hooks": [],
"teardown_hooks": []
}, "url": "", "method": "", "name": "", "times": 1, "nodeId": 0, "project": self.fast_project_id,
}
default_header = {"accessToken": "$accessToken"}
default_header_desc = {"accessToken": "登录token"}
api_info_template['header']['header'].update(default_header)
api_info_template['header']['desc'].update(default_header_desc)
default_validator = {'equals': ['content.successful', True]}
api_info_template['validate']['validate'].append(default_validator)
api_info_template['name'] = source_api_info['title']
        # replace {var} in the path with the $var format
api_info_template['url'] = source_api_info['path'].replace('{', '$').replace('}', '')
api_info_template['method'] = source_api_info['method']
        # yapi category id
api_info_template['yapi_catid'] = source_api_info['catid']
api_info_template['yapi_id'] = source_api_info['_id']
        # 10-digit unix timestamp
api_info_template['ypai_add_time'] = source_api_info.get("add_time", "")
api_info_template['ypai_up_time'] = source_api_info.get("up_time", "")
        # original yapi author name
api_info_template['ypai_username'] = source_api_info.get("username", "")
req_body_type = source_api_info.get('req_body_type')
req_body_other = source_api_info.get('req_body_other', '')
if req_body_type == 'json' and req_body_other != '':
try:
req_body = json.loads(req_body_other, encoding='utf8')
except json.decoder.JSONDecodeError:
            # parse JSON that contains comments
req_body = json5.loads(req_body_other, encoding='utf8')
except Exception as e:
logger.error(
f'yapi: {source_api_info["_id"]}, req_body json loads failed: {source_api_info.get("req_body_other", e)}')
else:
            # TODO: recursively traverse all nodes of properties
if isinstance(req_body, dict):
req_body_properties = req_body.get('properties')
if isinstance(req_body_properties, dict):
for field_name, field_value in req_body_properties.items():
field_type = field_value['type']
if not (field_type == 'array' or field_type == 'object'):
self.set_ordinary_variable(api_info_template, field_name, field_type,
field_value)
if field_type == 'array':
items: dict = field_value['items']
                            # special-case handling for the generic query conditions
if field_name == 'conditions':
set_customized_variable(api_info_template, items)
else:
if items['type'] != 'array' and items['type'] != 'object':
self.set_ordinary_variable(api_info_template, field_name, field_type,
field_value)
if field_type == 'object':
properties: dict = field_value.get('properties')
if properties and isinstance(properties, dict):
for property_name, property_value in properties.items():
field_type = property_value['type']
if not (field_type == 'array' or field_type == 'object'):
self.set_ordinary_variable(api_info_template, property_name, field_type,
property_value)
req_query: list = source_api_info.get('req_query', [])
if req_query:
for param in req_query:
param_name = param['name']
param_desc = param.get('desc', '')
api_info_template['request']['params']['params'][param_name] = f"${param_name}"
api_info_template['request']['params']['desc'][param_name] = param_desc
api_info_template['variables']['variables'].append({param_name: ''})
api_info_template['variables']['desc'][param_name] = param_desc
req_params: list = source_api_info.get('req_params', [])
if req_params:
for param in req_params:
# {
# "_id": "600155566e7043643b6f1ae2",
# "name": "namespace",
# "example": "abc123",
                #     "desc": "namespace"
# }
param_name = param['name']
param_desc = param.get('desc', '')
param_example = param.get('example', '')
api_info_template['variables']['variables'].append({param_name: param_example})
api_info_template['variables']['desc'][param_name] = param_desc
return api_info_template
def set_ordinary_variable(self, api_info_template, field_name, field_type, field_value):
api_info_template['request']['json'][field_name] = f'${field_name}'
api_info_template['variables']['variables'].append(
{field_name: self.get_variable_default_value(field_type, field_value)})
api_info_template['variables']['desc'][field_name] = field_value.get(
'description', '')
def get_parsed_apis(self, api_info):
"""
        Build faster API objects in batch
"""
apis = [self.yapi2faster(api) for api in api_info]
proj = models.Project.objects.get(id=self.fast_project_id)
obj = models.Relation.objects.get(project_id=self.fast_project_id, type=1)
eval_tree: list = eval(obj.tree)
tree_ycatid_mapping = get_tree_ycatid_mapping(eval_tree)
parsed_api = []
for api in apis:
format_api = Format(api)
format_api.parse()
yapi_catid: int = api['yapi_catid']
api_body = {
'name': format_api.name,
'body': format_api.testcase,
'url': format_api.url,
'method': format_api.method,
'project': proj,
'relation': tree_ycatid_mapping.get(yapi_catid, 0),
                # taken directly from the original yapi API
'yapi_catid': yapi_catid,
'yapi_id': api['yapi_id'],
'ypai_add_time': api['ypai_add_time'],
'ypai_up_time': api['ypai_up_time'],
'ypai_username': api['ypai_username'],
                # creator defaults to the yapi user
'creator': 'yapi'
}
parsed_api.append(models.API(**api_body))
return parsed_api
def merge_api(self, parsed_apis, imported_apis):
"""
        Merge the APIs fetched from yapi into the APIs already imported into the test platform.
        Two cases:
        1. parsed_api.yapi_id does not yet exist in the test platform
        2. the yapi id already exists in the test platform and the newly fetched
           parsed_api.ypai_up_time > imported_api.ypai_up_time
"""
imported_apis_mapping = {api.yapi_id: api.ypai_up_time for api in imported_apis}
imported_apis_index = {api.yapi_id: index for index, api in enumerate(imported_apis)}
new_apis = []
update_apis = []
imported_apis_ids = set(imported_apis_mapping.keys())
for api in parsed_apis:
yapi_id = api.yapi_id
            # case 1
if yapi_id not in imported_apis_ids:
new_apis.append(api)
else:
                # case 2
imported_ypai_up_time = imported_apis_mapping[yapi_id]
if api.ypai_up_time > int(imported_ypai_up_time):
index = imported_apis_index[yapi_id]
imported_api = imported_apis[index]
imported_api.method = api.method
imported_api.name = api.name
imported_api.url = api.url
imported_api.body = api.body
imported_api.ypai_up_time = api.ypai_up_time
update_apis.append(imported_api)
return update_apis, new_apis
def get_create_or_update_apis(self, imported_apis_mapping):
"""
        Return the api ids that need to be created and those that need to be updated.
        imported_apis_mapping: {yapi_id: ypai_up_time}
        create:
            yapi_id is not present in the platform's imported_apis_mapping
        update:
            yapi_id is present in imported_apis_mapping and its up_time is newer than the platform's
"""
api_uptime_mapping: dict = self.get_api_uptime_mapping()
create_ids = []
update_ids = []
for yapi_id, yapi_up_time in api_uptime_mapping.items():
imported_ypai_up_time = imported_apis_mapping.get(yapi_id)
if not imported_ypai_up_time:
                # create
create_ids.append(yapi_id)
elif yapi_up_time > int(imported_ypai_up_time):
                # update
update_ids.append(yapi_id)
return create_ids, update_ids
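# Illustrative sketch only (added for clarity, not part of the original module): one way a
# caller might persist the two lists returned by merge_api(). "importer" stands in for an
# instance of the surrounding class, and the field list passed to bulk_update is an
# assumption made for this example.
def _example_save_merged_apis(importer, parsed_apis, imported_apis):
    update_apis, new_apis = importer.merge_api(parsed_apis, imported_apis)
    # brand-new yapi ids become new rows, changed ones are bulk-updated in place
    models.API.objects.bulk_create(new_apis)
    models.API.objects.bulk_update(update_apis, fields=['name', 'method', 'url', 'body', 'ypai_up_time'])
    return len(new_apis), len(update_apis)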
|
tests/samplers/test_frequency.py | heureka-labs/pyRDF2Vec | 154 | 11197531 | <reponame>heureka-labs/pyRDF2Vec<gh_stars>100-1000
import itertools
import pytest
from pyrdf2vec.graphs import KG, Vertex
from pyrdf2vec.samplers import ( # isort: skip
ObjFreqSampler,
ObjPredFreqSampler,
PredFreqSampler,
)
LOOP = [
["Alice", "knows", "Bob"],
["Alice", "knows", "Dean"],
["Bob", "knows", "Dean"],
["Dean", "loves", "Alice"],
]
LONG_CHAIN = [
["Alice", "knows", "Bob"],
["Alice", "knows", "Dean"],
["Bob", "knows", "Mathilde"],
["Mathilde", "knows", "Alfy"],
["Alfy", "knows", "Stephane"],
["Stephane", "knows", "Alfred"],
["Alfred", "knows", "Emma"],
["Emma", "knows", "Julio"],
]
URL = "http://pyRDF2Vec"
KG_LOOP = KG()
KG_CHAIN = KG()
FREQ_SAMPLERS = [ObjFreqSampler, ObjPredFreqSampler, PredFreqSampler]
IS_INVERSE = [False, True]
IS_REVERSE = [False, True]
IS_SPLIT = [False, True]
KGS = [KG_LOOP, KG_CHAIN]
ROOTS_WITHOUT_URL = ["Alice", "Bob", "Dean"]
class TestFreqSampler:
@pytest.fixture(scope="session")
def setup(self):
for i, graph in enumerate([LOOP, LONG_CHAIN]):
for row in graph:
subj = Vertex(f"{URL}#{row[0]}")
obj = Vertex((f"{URL}#{row[2]}"))
pred = Vertex(
(f"{URL}#{row[1]}"), predicate=True, vprev=subj, vnext=obj
)
if i == 0:
KG_LOOP.add_walk(subj, pred, obj)
else:
KG_CHAIN.add_walk(subj, pred, obj)
@pytest.mark.parametrize(
"sampler, is_inverse, is_split",
list(
itertools.product(
FREQ_SAMPLERS,
IS_INVERSE,
IS_SPLIT,
)
),
)
def test_invalid_weight(self, sampler, is_inverse, is_split):
with pytest.raises(ValueError):
sampler(is_inverse, is_split).get_weight(None)
@pytest.mark.parametrize(
"kg, sampler, is_inverse, is_split",
list(
itertools.product(
KGS,
FREQ_SAMPLERS,
IS_INVERSE,
IS_SPLIT,
)
),
)
def test_fit(self, setup, kg, sampler, is_inverse, is_split):
sampler = sampler(is_inverse, is_split)
assert len(sampler._counts) == 0
sampler.fit(kg)
if isinstance(sampler, ObjFreqSampler):
if kg == KG_LOOP:
assert len(sampler._counts) == 3
else:
assert len(sampler._counts) == 9
elif isinstance(sampler, ObjPredFreqSampler):
if kg == KG_LOOP:
assert len(sampler._counts) == 3
else:
assert len(sampler._counts) == 8
else:
if kg == KG_LOOP:
assert len(sampler._counts) == 2
else:
assert len(sampler._counts) == 1
@pytest.mark.parametrize(
"kg, root, is_reverse, sampler, is_inverse, is_split",
list(
itertools.product(
KGS,
ROOTS_WITHOUT_URL,
IS_REVERSE,
FREQ_SAMPLERS,
IS_INVERSE,
IS_SPLIT,
)
),
)
def test_weight(
self, setup, kg, root, is_reverse, sampler, is_inverse, is_split
):
sampler = sampler(is_inverse, is_split)
sampler.fit(kg)
for hop in kg.get_hops(Vertex(f"{URL}#{root}"), is_reverse=is_reverse):
if isinstance(sampler, ObjFreqSampler):
assert sampler.get_weight(hop) <= 4
|
src/MoveNode.py | L4xus/termux-chess | 302 | 11197546 | class MoveNode:
def __init__(self, move, children, parent):
self.move = move
self.children = children
self.parent = parent
self.pointAdvantage = None
self.depth = 1
def __str__(self):
stringRep = "Move : " + str(self.move) + \
" Point advantage : " + str(self.pointAdvantage) + \
" Checkmate : " + str(self.move.checkmate)
stringRep += "\n"
for child in self.children:
stringRep += " " * self.getDepth() * 4
stringRep += str(child)
return stringRep
def __gt__(self, other):
if self.move.checkmate and not other.move.checkmate:
return True
if not self.move.checkmate and other.move.checkmate:
return False
if self.move.checkmate and other.move.checkmate:
return False
return self.pointAdvantage > other.pointAdvantage
def __lt__(self, other):
if self.move.checkmate and not other.move.checkmate:
return False
if not self.move.checkmate and other.move.checkmate:
return True
if self.move.stalemate and other.move.stalemate:
return False
return self.pointAdvantage < other.pointAdvantage
def __eq__(self, other):
if self.move.checkmate and other.move.checkmate:
return True
return self.pointAdvantage == other.pointAdvantage
def getHighestNode(self):
highestNode = self
while True:
if highestNode.parent is not None:
highestNode = highestNode.parent
else:
return highestNode
def getDepth(self):
depth = 1
highestNode = self
while True:
if highestNode.parent is not None:
highestNode = highestNode.parent
depth += 1
else:
return depth
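# Minimal usage sketch (not part of the original module): the tree-walking helpers exercised
# below never touch the move object, so None is passed for it purely for illustration.
if __name__ == "__main__":
    root = MoveNode(move=None, children=[], parent=None)
    child = MoveNode(move=None, children=[], parent=root)
    root.children.append(child)
    leaf = MoveNode(move=None, children=[], parent=child)
    child.children.append(leaf)
    print(leaf.getDepth())                # 3
    print(leaf.getHighestNode() is root)  # True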
|
tests/layer_tests/onnx_tests/test_transpose.py | pazamelin/openvino | 2,406 | 11197574 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import itertools
import numpy as np
import pytest
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
class TestTranspose(Caffe2OnnxLayerTest):
def create_net(self, shape, perm, ir_version):
"""
ONNX net IR net
Input->Transpose->Sigmoid->Output => Input->Permute->sigmoid
"""
#
# Create ONNX model
#
from onnx import helper
from onnx import TensorProto
output_shape = np.transpose(np.ones(shape), perm).shape
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
args = dict()
if perm:
args['perm'] = perm
node_def = helper.make_node(
'Transpose',
inputs=['input'],
outputs=['transpose'],
**args
)
sigmoid_def = helper.make_node(
'Sigmoid',
inputs=['transpose'],
outputs=['output']
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[node_def, sigmoid_def],
'test_model',
[input],
[output],
)
# Create the model (ModelProto)
onnx_net = helper.make_model(graph_def, producer_name='test_model')
#
# Create reference IR net
#
ref_net = None
if not perm:
perm = list(reversed(range(len(shape))))
return onnx_net, ref_net
def create_net_const(self, shape, perm, ir_version):
"""
ONNX net IR net
Input->Concat(+transposed const)->Output => Input->Concat(+const)
"""
#
# Create ONNX model
#
from onnx import helper
from onnx import TensorProto
        constant = np.random.randint(-127, 127, shape).astype(float)
constant_transposed = np.transpose(constant, perm)
concat_axis = 0
input_shape = list(constant_transposed.shape)
output_shape = input_shape.copy()
output_shape[concat_axis] *= 2
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)
node_const_def = helper.make_node(
'Constant',
inputs=[],
outputs=['const1'],
value=helper.make_tensor(
name='const_tensor',
data_type=TensorProto.FLOAT,
dims=constant.shape,
vals=constant.flatten(),
),
)
args = dict()
if perm:
args['perm'] = perm
node_def = helper.make_node(
'Transpose',
inputs=['const1'],
outputs=['transpose'],
**args
)
node_concat_def = helper.make_node(
'Concat',
inputs=['input', 'transpose'],
outputs=['output'],
axis=concat_axis
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[node_const_def, node_def, node_concat_def],
'test_model',
[input],
[output],
)
# Create the model (ModelProto)
onnx_net = helper.make_model(graph_def, producer_name='test_model')
#
# Create reference IR net
#
ref_net = None
return onnx_net, ref_net
test_data_precommit = [dict(shape=[4, 6, 8, 10, 12], perm=None),
dict(shape=[8, 10, 12], perm=[2, 1, 0]),
dict(shape=[6, 8, 10, 12], perm=[0, 3, 1, 2]),
dict(shape=[4, 6, 8, 10, 12], perm=[1, 0, 4, 3, 2])]
test_data = [dict(shape=[10, 12], perm=None),
dict(shape=[8, 10, 12], perm=None),
dict(shape=[6, 8, 10, 12], perm=None),
dict(shape=[4, 6, 8, 10, 12], perm=None)]
for shape in [[10, 12], [8, 10, 12], [6, 8, 10, 12], [4, 6, 8, 10, 12]]:
for perm in itertools.permutations(np.arange(len(shape))):
test_data.append(dict(shape=shape, perm=list(perm)))
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_transpose_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_transpose(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.nightly
def test_transpose_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_transpose_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
|
test-framework/test-suites/integration/tests/list/test_list_host_attr.py | sammeidinger/stack | 123 | 11197580 | import json
from operator import itemgetter
import os
from itertools import groupby
class TestListHostAttr:
def test_invalid(self, host):
result = host.run('stack list host attr test')
assert result.rc == 255
assert result.stderr.startswith('error - ')
def test_no_args_frontend_only(self, host):
result = host.run('stack list host attr output-format=json')
assert result.rc == 0
attr_obj = json.loads(result.stdout)
# with no other hosts in the db, these commands produce identical output
result = host.run('stack list host attr localhost output-format=json')
assert result.rc == 0
		assert json.loads(result.stdout) == attr_obj
# test appliance selector, too
result = host.run('stack list host attr a:frontend output-format=json')
assert result.rc == 0
		assert json.loads(result.stdout) == attr_obj
# there should be exactly one host
assert len({row['host'] for row in attr_obj}) == 1
def test_with_backend(self, host, add_host):
result = host.run('stack list host attr output-format=json')
assert result.rc == 0
attr_obj = json.loads(result.stdout)
# with other hosts in the db, this will be different
result = host.run('stack list host attr localhost output-format=json')
assert result.rc == 0
		assert json.loads(result.stdout) != attr_obj
# test appliance selector, too
result = host.run('stack list host attr a:frontend output-format=json')
assert result.rc == 0
		assert json.loads(result.stdout) != attr_obj
# both selectors should work together, though
result = host.run('stack list host attr a:frontend a:backend output-format=json')
assert result.rc == 0
		assert json.loads(result.stdout) == attr_obj
# both hostnames specified should be the same as none by default
result = host.run('stack list host attr localhost backend-0-0 output-format=json')
assert result.rc == 0
		assert json.loads(result.stdout) == attr_obj
# there should be exactly two hosts
assert len({row['host'] for row in attr_obj}) == 2
def test_common_with_only_frontend(self, host):
result = host.run('stack list host attr display=common output-format=json')
assert result.rc == 0
attr_obj = json.loads(result.stdout)
# there should be one "host" called '_common_'
assert len({row['host'] for row in attr_obj}) == 1
assert attr_obj[0]['host'] == '_common_'
def test_distinct_with_multiple_hosts(self, host, add_host):
result = host.run('stack list host attr display=distinct output-format=json')
assert result.rc == 0
attr_obj = json.loads(result.stdout)
host_attrs = {
k: {i['attr']: i['value'] for i in v}
for k, v in groupby(attr_obj, itemgetter('host'))
}
# don't hardcode FE hostname
fe_hostname = [h for h in host_attrs if h != 'backend-0-0'].pop()
assert len(host_attrs) == 2
assert {'backend-0-0', fe_hostname} == set(host_attrs)
# some keys will only be in common (by default)
assert 'Kickstart_PrivateRootPassword' not in host_attrs[fe_hostname]
assert 'Kickstart_PrivateRootPassword' not in host_attrs['backend-0-0']
# some keys will always be distinct
assert 'hostname' in host_attrs['backend-0-0']
# backend doesn't have a hostaddr here
assert 'hostaddr' in host_attrs[fe_hostname]
result = host.run('stack add host attr backend-0-0 attr=foo value=bar')
assert result.rc == 0
result = host.run('stack list host attr display=distinct output-format=json')
assert result.rc == 0
new_attr_obj = json.loads(result.stdout)
new_host_attrs = {
k: {i['attr']: i['value'] for i in v}
for k, v in groupby(new_attr_obj, itemgetter('host'))
}
assert len(new_host_attrs['backend-0-0']) == len(host_attrs['backend-0-0']) + 1
assert len(new_host_attrs[fe_hostname]) == len(host_attrs[fe_hostname])
result = host.run('stack list host attr display=common output-format=json')
assert result.rc == 0
common_attr_obj = json.loads(result.stdout)
common_host_attrs = {
k: {i['attr']: i['value'] for i in v}
for k, v in groupby(common_attr_obj, itemgetter('host'))
}
# the set of common attrs and distinct attrs should never overlap
assert set(common_host_attrs['_common_']).isdisjoint(new_host_attrs['backend-0-0'])
def test_common_with_multiple_hosts_single_attr_param(self, host, add_host):
result = host.run('stack list host attr display=distinct attr=hostname output-format=json')
assert result.rc == 0
attr_obj = json.loads(result.stdout)
# only two hosts, no common attrs here
assert len({row['host'] for row in attr_obj}) == 2
result = host.run('stack list host attr display=common attr=rank output-format=json')
assert result.rc == 0
attr_obj = json.loads(result.stdout)
# by default these will resolve to the same, so only common will be listed
assert {row['host'] for row in attr_obj} == {'_common_'}
def test_scope_resolving(self, host, add_host, add_environment, host_os, test_file):
# Add our host to the test environment
result = host.run('stack set host environment backend-0-0 environment=test')
assert result.rc == 0
# Add a bunch of attrs to get applied to the host, in different scopes
result = host.run(
'stack add attr attr=test.global value=test_1'
)
assert result.rc == 0
result = host.run(
'stack add appliance attr backend attr=test.appliance value=test_2'
)
assert result.rc == 0
result = host.run(
f'stack add os attr {host_os} attr=test.os value=test_3'
)
assert result.rc == 0
result = host.run(
'stack add environment attr test attr=test.environment value=test_4'
)
assert result.rc == 0
result = host.run(
'stack add host attr backend-0-0 attr=test.host value=test_5'
)
assert result.rc == 0
# Add a bunch of attrs that will be overridden to just one output
result = host.run(
'stack add attr attr=test.override value=test_6'
)
assert result.rc == 0
result = host.run(
'stack add appliance attr backend attr=test.override value=test_7'
)
assert result.rc == 0
result = host.run(
f'stack add os attr {host_os} attr=test.override value=test_8'
)
assert result.rc == 0
result = host.run(
'stack add environment attr test attr=test.override value=test_9'
)
assert result.rc == 0
result = host.run(
'stack add host attr backend-0-0 attr=test.override value=test_10'
)
assert result.rc == 0
# Now list all the host attrs and see if they match what we expect
result = host.run(
"stack list host attr backend-0-0 attr='test.*' output-format=json"
)
assert result.rc == 0
with open(test_file('list/host_attr_scope_resolving.json')) as output:
assert json.loads(result.stdout) == json.loads(output.read())
def test_scope_no_enviroment(self, host, add_host, test_file):
# Create some more hosts
add_host('backend-0-1', '0', '1', 'backend')
add_host('backend-0-2', '0', '2', 'backend')
# Add a route to each host
result = host.run(
'stack add host attr backend-0-0 attr=test.backend_0 value=test_0'
)
assert result.rc == 0
result = host.run(
'stack add host attr backend-0-1 attr=test.backend_1 value=test_1'
)
assert result.rc == 0
result = host.run(
'stack add host attr backend-0-2 attr=test.backend_2 value=test_2'
)
assert result.rc == 0
# Now list all the host attrs and see if they match what we expect
result = host.run(
"stack list host attr backend-0-0 attr='test.*' output-format=json"
)
assert result.rc == 0
with open(test_file('list/host_attr_scope_no_enviroment.json')) as output:
assert json.loads(result.stdout) == json.loads(output.read())
def test_normal_user_no_shadow(self, host, add_host):
# Add a shadow attr
result = host.run('stack set host attr backend-0-0 attr=test value=True shadow=True')
assert result.rc == 0
# Make sure it got there
result = host.run('stack list host attr backend-0-0 attr=test shadow=True output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [{
'attr': 'test',
'host': 'backend-0-0',
'scope': 'host',
'type': 'shadow',
'value': 'True'
}]
# Give the vagrant user access to list commands
result = host.run("stack set access command='list *' group=vagrant")
assert result.rc == 0
# Now make sure a normal user can't see it.
with host.sudo("vagrant"):
result = host.run(
'/opt/stack/bin/stack list host attr backend-0-0 attr=test shadow=True output-format=json'
)
assert result.rc == 0
assert result.stdout == ""
def test_const_overwrite(self, host, add_host):
result = host.run('stack set host attr backend-0-0 attr=const_overwrite value=False')
assert result.rc == 0
# Now overwrite the os attribute
result = host.run('stack set host attr backend-0-0 attr=os value=test')
assert result.rc == 0
# Confirm we have overwritten it
result = host.run('stack list host attr backend-0-0 attr=os output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [{
'attr': 'os',
'host': 'backend-0-0',
'scope': 'host',
'type': 'var',
'value': 'test'
}]
# A non-overwritten const should return as normal
result = host.run('stack list host attr backend-0-0 attr=rack output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [{
'attr': 'rack',
'host': 'backend-0-0',
'scope': 'host',
'type': 'const',
'value': '0'
}]
def test_regression_shadow_glob_interaction(self, host, add_host):
# Create another backend
add_host('backend-0-1', '0', '1', 'backend')
# Add a shadow appliance attr
result = host.run(
'stack add appliance attr backend attr=test value=test shadow=true'
)
assert result.rc == 0
# Now add a non-shadow attr at the host level
result = host.run(
'stack add host attr backend-0-0 attr=test value=foo'
)
assert result.rc == 0
# List a non-related glob pattern and make sure it doesn't traceback
result = host.run(
'stack list host attr a:backend attr=time.* output-format=json'
)
assert result.rc == 0
def test_const_determined_correctly(self, host):
result = host.run('stack list host attr localhost attr=os* output-format=json')
assert result.rc == 0
attrs = json.loads(result.stdout)
stacki_os_attributes = {}
for this_attr in attrs:
if this_attr['attr'] == 'os':
stacki_os_attributes['os'] = this_attr['value']
elif this_attr['attr'] == 'os.version':
stacki_os_attributes['os.version'] = this_attr['value']
elif this_attr['attr'] == 'os.minor_version':
stacki_os_attributes['os.minor_version'] = this_attr['value']
expected_keys = ('os', 'os.version', 'os.minor_version')
assert set(expected_keys) == set(stacki_os_attributes.keys())
expected_values = itemgetter(*expected_keys)(stacki_os_attributes)
distro, rel = host.system_info.distribution, host.system_info.release
if (distro, rel) == ('sles', '12.3'):
assert ('sles', '12.x', 'sp3') == expected_values
if (distro, rel) == ('sles', '15.1'):
assert ('sles', '15.x', 'sp1') == expected_values
elif (distro, rel) == ('centos', '7'):
assert ('redhat', '7.x', '6') == expected_values
# TODO Ubuntu
|
torchkit/version.py | kevinzakka/torchkit | 144 | 11197582 | """Version file will be exec'd by setup.py and imported by __init__.py"""
# Semantic versioning.
_MAJOR_VERSION = "0"
_MINOR_VERSION = "0"
_PATCH_VERSION = "3"
__version__ = ".".join([_MAJOR_VERSION, _MINOR_VERSION, _PATCH_VERSION])
|
tests/framework/MCMC/likelihoods/likelihood_10D.py | rinelson456/raven | 159 | 11197593 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import scipy.stats as st
import numpy as np
def initialize(self, runInfoDict, inputFiles):
"""
Method to generate the observed data
@ In, runInfoDict, dict, the dictionary containing the runInfo
@ In, inputFiles, list, the list of input files
@ Out, None
"""
self.dim = 10
seed = 1086
np.random.seed(seed)
self.cov = 10**(np.random.randn(self.dim)*1.5)
self.mu = st.norm(loc=0, scale=10).rvs(self.dim)
def run(self, inputDict):
"""
Method required by RAVEN to run this as an external model.
log likelihood function
@ In, self, object, object to store members on
@ In, inputDict, dict, dictionary containing inputs from RAVEN
@ Out, None
"""
vars = ['x1','x2','x3','x4','x5','x6','x7','x8','x9','x10']
xin = []
for var in vars:
xin.extend(inputDict[var])
xin = np.asarray(xin)
if np.all(xin < 500) and np.all(xin > -500):
zout = st.multivariate_normal(mean=self.mu, cov=self.cov).logpdf(xin)
else:
zout = -1.0E6
self.zout = np.atleast_1d(zout)
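# Standalone smoke-test sketch (not part of the RAVEN test input): a plain object is assumed
# to be an acceptable stand-in for the external-model instance RAVEN would normally pass in.
if __name__ == "__main__":
    from types import SimpleNamespace
    ext_model = SimpleNamespace()
    initialize(ext_model, {}, [])
    run(ext_model, {'x%d' % i: [0.0] for i in range(1, 11)})
    print(ext_model.zout)  # log-likelihood of the all-zero sample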
|
examples/ports/multi_receive.py | fooker/mido | 658 | 11197609 | <filename>examples/ports/multi_receive.py
#!/usr/bin/env python
"""
Receive messages from multiple ports.
"""
import mido
from mido.ports import multi_receive
# Open all available inputs.
ports = [mido.open_input(name) for name in mido.get_input_names()]
for port in ports:
print('Using {}'.format(port))
print('Waiting for messages...')
try:
for message in multi_receive(ports):
print('Received {}'.format(message))
except KeyboardInterrupt:
pass
|
dfirtrack_main/tests/note/test_note_views.py | stuhli/dfirtrack | 273 | 11197630 | <reponame>stuhli/dfirtrack<filename>dfirtrack_main/tests/note/test_note_views.py
import urllib.parse
from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_main.models import (
Case,
Casepriority,
Casestatus,
Note,
Notestatus,
Tag,
Tagcolor,
)
class NoteViewTestCase(TestCase):
"""note view tests"""
@classmethod
def setUpTestData(cls):
# create user
test_user = User.objects.create_user(
username='testuser_note', password='<PASSWORD>'
)
# create object
notestatus_1 = Notestatus.objects.create(notestatus_name='notestatus_1')
# create object
Note.objects.create(
note_title='note_1',
note_content='lorem ipsum',
notestatus=notestatus_1,
note_created_by_user_id=test_user,
note_modified_by_user_id=test_user,
)
# create object
tagcolor_1 = Tagcolor.objects.create(tagcolor_name='tagcolor_1')
# create object
Tag.objects.create(tag_name='tag_1', tagcolor=tagcolor_1)
# create objects
casepriority_1 = Casepriority.objects.create(casepriority_name='casepriority_1')
casestatus_1 = Casestatus.objects.create(casestatus_name='casestatus_1')
# create object
Case.objects.create(
case_name='case_1',
case_is_incident=True,
case_created_by_user_id=test_user,
casepriority=casepriority_1,
casestatus=casestatus_1,
)
def test_note_list_not_logged_in(self):
"""test list view"""
# create url
destination = '/login/?next=' + urllib.parse.quote('/note/', safe='')
# get response
response = self.client.get('/note/', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_note_list_logged_in(self):
"""test list view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/')
# compare
self.assertEqual(response.status_code, 200)
def test_note_list_template(self):
"""test list view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/note/note_list.html')
def test_note_list_get_user_context(self):
"""test list view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_note')
def test_note_list_redirect(self):
"""test list view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/note/', safe='/')
# get response
response = self.client.get('/note', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
def test_note_detail_not_logged_in(self):
"""test detail view"""
# get object
note_1 = Note.objects.get(note_title='note_1')
# create url
destination = '/login/?next=' + urllib.parse.quote(
'/note/' + str(note_1.note_id) + '/', safe=''
)
# get response
response = self.client.get('/note/' + str(note_1.note_id) + '/', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_note_detail_logged_in(self):
"""test detail view"""
# get object
note_1 = Note.objects.get(note_title='note_1')
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/' + str(note_1.note_id) + '/')
# compare
self.assertEqual(response.status_code, 200)
def test_note_detail_template(self):
"""test detail view"""
# get object
note_1 = Note.objects.get(note_title='note_1')
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/' + str(note_1.note_id) + '/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/note/note_detail.html')
def test_note_detail_get_user_context(self):
"""test detail view"""
# get object
note_1 = Note.objects.get(note_title='note_1')
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/' + str(note_1.note_id) + '/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_note')
def test_note_detail_redirect(self):
"""test detail view"""
# get object
note_1 = Note.objects.get(note_title='note_1')
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/note/' + str(note_1.note_id) + '/', safe='/')
# get response
response = self.client.get('/note/' + str(note_1.note_id), follow=True)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
def test_note_add_not_logged_in(self):
"""test add view"""
# create url
destination = '/login/?next=' + urllib.parse.quote('/note/add/', safe='')
# get response
response = self.client.get('/note/add/', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_note_add_logged_in(self):
"""test add view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/add/')
# compare
self.assertEqual(response.status_code, 200)
def test_note_add_template(self):
"""test add view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/add/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/note/note_generic_form.html')
def test_note_add_get_user_context(self):
"""test add view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/add/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_note')
def test_note_add_redirect(self):
"""test add view"""
# login testuser
self.client.login(username='testuser_note', password='oh8Szsuk8BpbEJ1RRL21')
# create url
destination = urllib.parse.quote('/note/add/', safe='/')
# get response
response = self.client.get('/note/add', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
def test_note_add_post_redirect(self):
"""test add view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get objects
notestatus_id = Notestatus.objects.get(
notestatus_name='notestatus_1'
).notestatus_id
# create post data
data_dict = {
'note_title': 'note_add_post_test',
'note_content': 'lorem ipsum',
'notestatus': notestatus_id,
}
# get response
response = self.client.post('/note/add/', data_dict)
# get object
note_add_post_test = Note.objects.get(note_title='note_add_post_test')
# create url
destination = urllib.parse.quote(
'/note/' + str(note_add_post_test.note_id) + '/', safe='/'
)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
self.assertEqual(note_add_post_test.note_version, 1)
def test_note_add_post_invalid_reload(self):
"""test add view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# create post data
data_dict = {}
# get response
response = self.client.post('/note/add/', data_dict)
# compare
self.assertEqual(response.status_code, 200)
def test_note_add_post_invalid_template(self):
"""test add view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# create post data
data_dict = {}
# get response
response = self.client.post('/note/add/', data_dict)
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/note/note_generic_form.html')
def test_note_edit_not_logged_in(self):
"""test edit view"""
# get object
note_1 = Note.objects.get(note_title='note_1')
# create url
destination = '/login/?next=' + urllib.parse.quote(
'/note/' + str(note_1.note_id) + '/edit/', safe=''
)
# get response
response = self.client.get(
'/note/' + str(note_1.note_id) + '/edit/', follow=True
)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_note_edit_logged_in(self):
"""test edit view"""
# get object
note_1 = Note.objects.get(note_title='note_1')
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/' + str(note_1.note_id) + '/edit/')
# compare
self.assertEqual(response.status_code, 200)
def test_note_edit_template(self):
"""test edit view"""
# get object
note_1 = Note.objects.get(note_title='note_1')
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/' + str(note_1.note_id) + '/edit/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/note/note_generic_form.html')
def test_note_edit_get_user_context(self):
"""test edit view"""
# get object
note_1 = Note.objects.get(note_title='note_1')
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get response
response = self.client.get('/note/' + str(note_1.note_id) + '/edit/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_note')
def test_note_edit_redirect(self):
"""test edit view"""
# get object
note_1 = Note.objects.get(note_title='note_1')
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# create url
destination = urllib.parse.quote(
'/note/' + str(note_1.note_id) + '/edit/', safe='/'
)
# get response
response = self.client.get(
'/note/' + str(note_1.note_id) + '/edit', follow=True
)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
def test_note_edit_post_redirect(self):
"""test edit view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get user
test_user = User.objects.get(username='testuser_note')
# get objects
notestatus_1 = Notestatus.objects.get(notestatus_name='notestatus_1')
# create object
note_1 = Note.objects.create(
note_title='note_edit_post_test_1',
note_content='lorem ipsum',
notestatus=notestatus_1,
note_created_by_user_id=test_user,
note_modified_by_user_id=test_user,
)
# get note version because it needs to be (hidden) part of the form
note_version = Note.objects.get(note_title='note_edit_post_test_1').note_version
# compare
self.assertEqual(note_1.note_version, 1)
# create post data
data_dict = {
'note_title': 'note_edit_post_test_2',
'note_content': 'lorem ipsum',
'note_version': note_version,
'notestatus': notestatus_1.notestatus_id,
}
# get response
response = self.client.post(
'/note/' + str(note_1.note_id) + '/edit/', data_dict
)
# create url
destination = urllib.parse.quote('/note/' + str(note_1.note_id) + '/', safe='/')
# get object
note_edit_post_test_2 = Note.objects.get(note_title='note_edit_post_test_2')
# compare
self.assertEqual(note_edit_post_test_2.note_version, 2)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_note_edit_post_invalid_reload(self):
"""test edit view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get object
note_id = Note.objects.get(note_title='note_1').note_id
# create post data
data_dict = {}
# get response
response = self.client.post('/note/' + str(note_id) + '/edit/', data_dict)
# compare
self.assertEqual(response.status_code, 200)
def test_note_edit_post_invalid_template(self):
"""test edit view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get object
note_id = Note.objects.get(note_title='note_1').note_id
# create post data
data_dict = {}
# get response
response = self.client.post('/note/' + str(note_id) + '/edit/', data_dict)
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/note/note_generic_form.html')
def test_note_edit_documentation_redirect(self):
"""test note edit documentation redirect"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get object
note_1 = Note.objects.get(note_title='note_1')
notestatus_1 = Notestatus.objects.get(notestatus_name='notestatus_1')
# create post data
        data_dict = {
'note_title': note_1.note_title,
'note_content': 'lorem ipsum',
'note_version': note_1.note_version,
'notestatus': notestatus_1.notestatus_id,
}
# get response
response = self.client.post(
'/note/' + str(note_1.note_id) + '/edit/?documentation', data_dict
)
# create url
destination = urllib.parse.quote(
f'/documentation/#note_id_{note_1.note_id}', safe='/#'
)
# check
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_note_add_documentation_redirect(self):
"""test note add documentation redirect"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get object
notestatus_1 = Notestatus.objects.get(notestatus_name='notestatus_1')
# create post data
        data_dict = {
'note_title': "test add note redirect",
'note_content': 'lorem ipsum',
'notestatus': notestatus_1.notestatus_id,
}
# get response
response = self.client.post('/note/add/?documentation', data_dict)
# get latest note
new_note = Note.objects.latest('note_create_time')
# create url
destination = urllib.parse.quote(
f'/documentation/#note_id_{new_note.note_id}', safe='/#'
)
# check
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_note_edit_valid_tag(self):
"""test edit view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get user
test_user = User.objects.get(username='testuser_note')
# get objects
notestatus_1 = Notestatus.objects.get(notestatus_name='notestatus_1')
tag_1 = Tag.objects.get(tag_name='tag_1')
# create object
note_1 = Note.objects.create(
note_title='tag_note_edit_post_test_1',
note_content='lorem ipsum',
notestatus=notestatus_1,
note_created_by_user_id=test_user,
note_modified_by_user_id=test_user,
)
# create post data
data_dict = {
'note_title': 'tag_note_edit_post_test_2',
'note_content': 'lorem ipsum',
'note_version': note_1.note_version,
'notestatus': notestatus_1.notestatus_id,
'tag': [
tag_1.tag_id,
],
}
# get response
response = self.client.post(
'/note/' + str(note_1.note_id) + '/edit/', data_dict
)
# create url
destination = urllib.parse.quote('/note/' + str(note_1.note_id) + '/', safe='/')
# get object
note_edit_post_test_2 = Note.objects.get(note_title='tag_note_edit_post_test_2')
# compare
self.assertEqual(len(note_edit_post_test_2.tag.all()), 1)
self.assertEqual(note_edit_post_test_2.tag.all()[0].tag_name, 'tag_1')
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_note_edit_valid_case(self):
"""test edit view"""
# login testuser
self.client.login(username='testuser_note', password='<PASSWORD>')
# get user
test_user = User.objects.get(username='testuser_note')
# get objects
notestatus_1 = Notestatus.objects.get(notestatus_name='notestatus_1')
case_1 = Case.objects.get(case_name='case_1')
# create object
note_1 = Note.objects.create(
note_title='case_note_edit_post_test_1',
note_content='lorem ipsum',
notestatus=notestatus_1,
note_created_by_user_id=test_user,
note_modified_by_user_id=test_user,
)
# create post data
data_dict = {
'note_title': 'case_note_edit_post_test_2',
'note_content': 'lorem ipsum',
'note_version': note_1.note_version,
'notestatus': notestatus_1.notestatus_id,
'case': [
case_1.case_id,
],
}
# get response
response = self.client.post(
'/note/' + str(note_1.note_id) + '/edit/', data_dict
)
# create url
destination = urllib.parse.quote('/note/' + str(note_1.note_id) + '/', safe='/')
# get object
note_edit_post_test_2 = Note.objects.get(
note_title='case_note_edit_post_test_2'
)
# compare
self.assertEqual(note_edit_post_test_2.case.case_name, 'case_1')
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
|
RecoJets/JetAnalyzers/test/runL2L3JetCorrectionOnTheFly_cfg.py | ckamtsikis/cmssw | 852 | 11197644 | # PYTHON configuration file for class: JetCorExample
# Description: Example of simple EDAnalyzer for correcting jets on the fly.
# Author: <NAME>
# Date: 02 - September - 2009
import FWCore.ParameterSet.Config as cms
process = cms.Process("Ana")
process.load("FWCore.MessageService.MessageLogger_cfi")
############# Set the number of events #############
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
############# Define the source file ###############
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('/store/relval/CMSSW_3_1_2/RelValQCD_FlatPt_15_3000/GEN-SIM-RECO/MC_31X_V3-v1/0007/9E83A122-E978-DE11-9D04-001D09F23C73.root')
)
############# Include the jet corrections ##########
process.load("JetMETCorrections.Configuration.L2L3Corrections_Summer09_cff")
# set the record's IOV. Must be defined once. Choose ANY correction service. #
process.prefer("L2L3JetCorrectorSC5Calo")
############# Correct Calo Jets on the fly #########
process.calo = cms.EDAnalyzer("CaloJetCorExample",
JetAlgorithm = cms.string('sisCone5CaloJets'),
HistoFileName = cms.string('CaloJetCorOnTheFlyExample_SC5Calo.root'),
JetCorrectionService = cms.string('L2L3JetCorrectorSC5Calo')
)
############# Correct PF Jets on the fly #########
process.calo = cms.EDAnalyzer("PFJetCorExample",
JetAlgorithm = cms.string('sisCone5PFJets'),
HistoFileName = cms.string('PFJetCorOnTheFlyExample_SC5PF.root'),
JetCorrectionService = cms.string('L2L3JetCorrectorSC5PF')
)
############# Path ###########################
process.p = cms.Path(process.calo * process.pf)
############# Format MessageLogger #################
process.MessageLogger.cerr.FwkReport.reportEvery = 10
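############# How to run (usage note) ##############
# Assumption, not taken from the original file: a CMSSW configuration like this is
# normally executed with cmsRun, e.g.
#   cmsRun runL2L3JetCorrectionOnTheFly_cfg.py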
|
casbin/persist/adapters/update_adapter.py | abichinger/pycasbin | 915 | 11197651 | <gh_stars>100-1000
class UpdateAdapter:
"""UpdateAdapter is the interface for Casbin adapters with add update policy function."""
def update_policy(self, sec, ptype, old_rule, new_policy):
"""
update_policy updates a policy rule from storage.
This is part of the Auto-Save feature.
"""
pass
def update_policies(self, sec, ptype, old_rules, new_rules):
"""
UpdatePolicies updates some policy rules to storage, like db, redis.
"""
pass
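# Illustrative sketch only (not part of pycasbin): a minimal in-memory adapter showing how
# the two hooks might be implemented; the row layout [sec, ptype, *rule] is an assumption
# made for this example.
class InMemoryUpdateAdapter(UpdateAdapter):
    def __init__(self):
        self.rules = []  # each entry: [sec, ptype, *rule]
    def update_policy(self, sec, ptype, old_rule, new_policy):
        for i, row in enumerate(self.rules):
            if row[0] == sec and row[1] == ptype and row[2:] == list(old_rule):
                self.rules[i] = [sec, ptype] + list(new_policy)
                return True
        return False
    def update_policies(self, sec, ptype, old_rules, new_rules):
        return all(
            self.update_policy(sec, ptype, old, new)
            for old, new in zip(old_rules, new_rules)
        )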
|
leetcode.com/python/681_Next_Closest_Time.py | vansh-tiwari/coding-interview-gym | 713 | 11197652 | <filename>leetcode.com/python/681_Next_Closest_Time.py<gh_stars>100-1000
import bisect
# My initial solution. Not correct solution
class Solution(object):
def nextClosestTime(self, time):
"""
:type time: str
:rtype: str
"""
digits = list(time)
digits.pop(2)
sortedDigits = sorted(digits)
for idx in range(len(digits) - 1, -1, -1):
digit = digits[idx]
nextPossibleDigitIdx = bisect.bisect_right(sortedDigits, digit)
if nextPossibleDigitIdx >= len(digits):
continue
if idx == 3:
digits[3] = sortedDigits[nextPossibleDigitIdx]
break
elif idx == 2 and int(sortedDigits[nextPossibleDigitIdx]) < 6:
digits[2] = sortedDigits[nextPossibleDigitIdx]
break
elif idx == 1:
if int(digits[0]) < 2:
digits[1] = sortedDigits[nextPossibleDigitIdx]
break
elif int(digits[0]) == 2 and int(sortedDigits[nextPossibleDigitIdx]) < 4:
digits[1] = sortedDigits[nextPossibleDigitIdx]
break
elif idx == 0:
if int(sortedDigits[nextPossibleDigitIdx]) < 3:
digits[0] = sortedDigits[nextPossibleDigitIdx]
break
else:
digits[1] = digits[0]
digits[2] = digits[0]
digits[3] = digits[0]
hours = digits[0:2]
minuites = digits[2:]
return "".join(hours) + ":" + "".join(minuites)
# https://tinyurl.com/vupwnhw
class Solution(object):
def nextClosestTime(self, time):
"""
:type time: str
:rtype: str
"""
hour, minuite = time.split(":")
# Generate all possible 2 digit values
# There are at most 16 sorted values here
digits = sorted(set(hour + minuite))
twoDigitValues = [a+b for a in digits for b in digits]
# Check if the next valid minute is within the hour
minuiteIndex = twoDigitValues.index(minuite)
if minuiteIndex + 1 < len(twoDigitValues) and twoDigitValues[minuiteIndex + 1] < "60":
return hour + ":" + twoDigitValues[minuiteIndex + 1]
# Check if the next valid hour is within the day
hourIndex = twoDigitValues.index(hour)
if hourIndex + 1 < len(twoDigitValues) and twoDigitValues[hourIndex + 1] < "24":
return twoDigitValues[hourIndex + 1] + ":" + twoDigitValues[0]
# Return the earliest time of the next day
return twoDigitValues[0] + ":" + twoDigitValues[0]
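# Quick usage sketch (not part of the original file): exercises the second, corrected
# Solution on two classic inputs.
if __name__ == "__main__":
    solver = Solution()
    print(solver.nextClosestTime("19:34"))  # expected "19:39"
    print(solver.nextClosestTime("23:59"))  # expected "22:22"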
|
ansible/roles/lib_gcloud/build/ansible/gcloud_dm_deployments.py | fahlmant/openshift-tools | 164 | 11197683 | <reponame>fahlmant/openshift-tools<gh_stars>100-1000
# pylint: skip-file
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
''' ansible module for gcloud deployment-manager deployments '''
module = AnsibleModule(
argument_spec=dict(
# credentials
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
name=dict(default=None, type='str'),
config=dict(default=None, type='dict'),
config_path=dict(default=None, type='str'),
opts=dict(default=None, type='dict'),
),
supports_check_mode=True,
required_one_of=[['config', 'config_path']],
)
config = None
if module.params['config'] != None:
config = module.params['config']
else:
config = module.params['config_path']
gconfig = GcloudDeploymentManager(module.params['name'],
config,
module.params['opts'])
state = module.params['state']
api_rval = gconfig.list_deployments()
#####
# Get
#####
if state == 'list':
module.exit_json(changed=False, results=api_rval['results'], state="list")
########
# Delete
########
if state == 'absent':
if gconfig.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = gconfig.delete()
module.exit_json(changed=True, results=api_rval, state="absent")
module.exit_json(changed=False, state="absent")
if state == 'present':
########
# Create
########
if not gconfig.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
# Create it here
api_rval = gconfig.create_deployment()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
########
# Update
########
api_rval = gconfig.update_deployment()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
#if __name__ == '__main__':
# gcloud = GcloudDeploymentManager('optestgcp')
# print gcloud.list_deployments()
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
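# Usage sketch (assumption, not taken from the role): a playbook task would typically call
# this module with the parameters declared in argument_spec above, e.g.
#   - gcloud_dm_deployments:
#       name: my-deployment
#       config_path: /path/to/deployment-config.yaml
#       state: present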
|
tests/test_download.py | pmaciel/climetlab | 182 | 11197701 | #!/usr/bin/env python3
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
import pathlib
import time
from climetlab import settings
from climetlab.utils import download_and_cache
def path_to_url(path):
return pathlib.Path(os.path.abspath(path)).as_uri()
def test_download_1():
url = "https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib?_=%s" % (
time.time(),
)
download_and_cache(url)
def test_download_2():
url = "https://github.com/ecmwf/climetlab/raw/main/docs/examples/test.grib"
download_and_cache(url)
def test_download_3():
with settings.temporary("download-out-of-date-urls", True):
url = "https://get.ecmwf.int/test-data/climetlab/input/test.txt"
download_and_cache(url)
def test_download_4():
url = "https://get.ecmwf.int/test-data/climetlab/input/missing.txt"
r = download_and_cache(url, return_none_on_404=True)
assert r is None, r
if __name__ == "__main__":
from climetlab.testing import main
main(__file__)
|
orttraining/tools/scripts/pipeline_model_split.py | mszhanyi/onnxruntime | 669 | 11197706 | <reponame>mszhanyi/onnxruntime<filename>orttraining/tools/scripts/pipeline_model_split.py
import sys
import os
import onnx
from onnx import helper
from onnx import TensorProto
from onnx import OperatorSetIdProto
# Edge that needs to be cut for the split.
# If the edge is feeding into more than one nodes, and not all the nodes belong to the same cut,
# specify those consuming nodes that need to be cut
class CutEdge:
def __init__(self, edgeId, consumingNodes=None):
self.edgeId = edgeId
self.consumingNodes = consumingNodes
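# Illustrative only (not part of the original script): how a caller might describe two cuts.
# The edge names and consumer names below are hypothetical ONNX tensor names; per the CutEdge
# comment above, the second entry also lists the specific consumers of that edge to be cut.
example_cut_0 = [CutEdge("layer3_output")]
example_cut_1 = [CutEdge("layer7_output", {"attention_1_input", "ffn_1_input"})]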
def add_expand_type(model, name, type):
expand_edge = model.graph.value_info.add()
expand_edge.name = name
expand_edge.type.CopyFrom(type)
# Add wait/record/send/recv nodes and split the graph into disconnected subgraphs
def split_graph(model, split_edge_groups):
ms_domain = "com.microsoft"
new_send_nodes = []
new_recv_nodes = []
for cut_index in range(len(split_edge_groups)):
edgeIds = split_edge_groups[cut_index]
# split the graph based on edgeIds
upstream_nodes = []
upstream_nodes_output_index = []
output_shapes = []
element_types = []
for id in edgeIds:
for node in model.graph.node:
if len(node.output) >= 1:
for i, j in enumerate(node.output):
if j == id:
upstream_nodes.append(node)
upstream_nodes_output_index.append(i)
# assuming all tensors are of type float
element_types.append(1)
for info in model.graph.value_info:
if info.name == id:
output_shapes.append(info.type)
send_input_signal_name = "send_input_signal" + str(cut_index)
send_signal = model.graph.input.add()
send_signal.CopyFrom(helper.make_tensor_value_info(send_input_signal_name, onnx.TensorProto.BOOL, None))
send_signal = helper.make_tensor(send_input_signal_name, TensorProto.BOOL, (), (True,))
model.graph.initializer.extend([send_signal])
recv_input_signal_name = "recv_input_signal" + str(cut_index)
recv_signal = model.graph.input.add()
recv_signal.CopyFrom(helper.make_tensor_value_info(recv_input_signal_name, onnx.TensorProto.BOOL, None))
recv_signal = helper.make_tensor(recv_input_signal_name, TensorProto.BOOL, (), (True,))
model.graph.initializer.extend([recv_signal])
send_dst_rank_name = "send_dst_rank" + str(cut_index)
send_dst_rank = model.graph.input.add()
send_dst_rank.CopyFrom(helper.make_tensor_value_info(send_dst_rank_name, onnx.TensorProto.INT64, None))
send_dst_rank = helper.make_tensor(send_dst_rank_name, TensorProto.INT64, (), (cut_index + 1,))
model.graph.initializer.extend([send_dst_rank])
recv_src_rank_name = "recv_src_rank" + str(cut_index)
recv_src_rank = model.graph.input.add()
recv_src_rank.CopyFrom(helper.make_tensor_value_info(recv_src_rank_name, onnx.TensorProto.INT64, None))
recv_src_rank = helper.make_tensor(recv_src_rank_name, TensorProto.INT64, (), (cut_index,))
model.graph.initializer.extend([recv_src_rank])
# output signal from send after cut
send_output_signal = model.graph.output.add()
send_output_signal.CopyFrom(
helper.make_tensor_value_info("send_output_signal" + str(cut_index), onnx.TensorProto.BOOL, None)
)
# output signal from receive after cut
receive_output_signal = model.graph.output.add()
receive_output_signal.CopyFrom(
helper.make_tensor_value_info("receive_output_signal" + str(cut_index), onnx.TensorProto.BOOL, None)
)
new_send = model.graph.node.add()
new_send.CopyFrom(
helper.make_node(
"Send",
inputs=[send_input_signal_name, send_dst_rank_name],
outputs=["send_output_signal" + str(cut_index)],
tag=0,
domain=ms_domain,
element_types=element_types,
name="send",
)
)
new_receive = model.graph.node.add()
new_receive.CopyFrom(
helper.make_node(
"Recv",
inputs=[recv_input_signal_name, recv_src_rank_name],
outputs=["receive_output_signal" + str(cut_index)],
tag=0,
domain=ms_domain,
element_types=element_types,
name="receive",
)
)
for i in range(len(upstream_nodes)):
n = upstream_nodes[i]
idx = upstream_nodes_output_index[i]
output_type = output_shapes[i]
output_edge_name = n.output[idx]
output_nodes = find_all_output_nodes_by_edge(model, output_edge_name)
# deal with shape inference for newly added edge
new_send_input_name = output_edge_name + "_send" + str(cut_index)
add_expand_type(model, new_send_input_name, output_type)
new_receive_output_name = output_edge_name + "_recv" + str(cut_index)
add_expand_type(model, new_receive_output_name, output_type)
# the order of data flow is: node-output -> record -> send -> recv -> wait -> node-input
new_send.input.extend([output_edge_name])
new_receive.output.extend([new_receive_output_name])
for output_node in output_nodes:
for i in range(len(output_node.input)):
for edgeId in edgeIds:
if output_node.input[i] == edgeId:
output_node.input[i] = new_receive_output_name
new_send_nodes.append(new_send)
new_recv_nodes.append(new_receive)
model = onnx.shape_inference.infer_shapes(model)
return new_send_nodes, new_recv_nodes
def find_all_input_nodes(model, node):
nodes = []
inputs = []
if node:
for inputId in node.input:
nodes.extend([n for n in model.graph.node if inputId in n.output])
inputs.extend([n for n in model.graph.input if inputId in n.name])
return nodes, inputs
def find_all_output_nodes(model, node):
nodes = []
outputs = []
if node:
for outputId in node.output:
nodes.extend([n for n in model.graph.node if outputId in n.input])
outputs.extend([n for n in model.graph.output if outputId in n.name])
return nodes, outputs
def find_all_output_nodes_by_edge(model, arg):
result = [n for n in model.graph.node if arg in n.input]
return result
# Insert identity nodes to separate same output edge which feeds into different sub-graph.
def add_identity(model, cuttingEdge, newEdgeIdName):
output_nodes = None
edgeId = cuttingEdge.edgeId
for node in model.graph.node:
if len(node.output) >= 1:
for output in node.output:
if output == edgeId:
output_nodes = find_all_output_nodes_by_edge(model, output)
break
assert output_nodes, "no output node"
new_identity = model.graph.node.add()
new_identity.op_type = "Identity"
new_identity.input.extend([edgeId])
new_identity.output.extend([newEdgeIdName])
for i in range(len(output_nodes)):
for output in output_nodes[i].output:
if output in cuttingEdge.consumingNodes:
for j in range(len(output_nodes[i].input)):
if output_nodes[i].input[j] == edgeId:
output_nodes[i].input[j] = newEdgeIdName
return new_identity
def insert_identity(model, all_cut_inputs):
count = 0
updated_edges = {}
new_added_identity = []
split_edge_groups = []
need_shape_inference = False
# Sweep the cut edge to see if there are edges feeding into nodes from two sub-graphs. If so,
# insert identity node after those edges with a new ID to distinguish the rest.
for cut_input in all_cut_inputs:
split_edges = []
for i in cut_input:
if i.consumingNodes:
# if this edge has previously been modified, update its edgeId before inserting new identity
if i.edgeId in updated_edges:
i.edgeId = updated_edges[i.edgeId]
new_edge_name = "identity_output_" + str(count)
new_added_identity.append(add_identity(model, i, new_edge_name))
count += 1
split_edges.append(new_edge_name)
updated_edges[i.edgeId] = new_edge_name
need_shape_inference = True
else:
split_edges.append(i.edgeId)
split_edge_groups.append(split_edges)
return split_edge_groups, new_added_identity, need_shape_inference
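# Illustrative sketch (not part of the original script): suppose edge "71" is produced by
# node A and consumed by node B in the producing stage and by node C in a later stage
# (names are hypothetical). insert_identity rewires only the cross-stage consumer:
#
#   before:  A --"71"--> B        A --"71"--> C
#   after:   A --"71"--> B        A --"71"--> Identity --"identity_output_0"--> C
#
# so the Send/Recv pair added later can attach to "identity_output_0" without touching
# consumers that stay in the producing stage.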
# After the graph is split, remove the added identity nodes because the Identity op is not registered in the gradient builder.
def remove_identity(model, new_added_identity):
for node in new_added_identity:
assert node.op_type == "Identity"
output_nodes = [n for n in model.graph.node if node.output[0] in n.input]
for output_node in output_nodes:
for i in range(len(output_node.input)):
if output_node.input[i] == node.output[0]:
output_node.input[i] = node.input[0]
def find_all_connected_nodes(model, node):
nodes0, inputs = find_all_input_nodes(model, node)
nodes1, outputs = find_all_output_nodes(model, node)
connected_nodes = nodes0 + nodes1
return connected_nodes, inputs, outputs
def get_index(node_list, node):
found = [i for i, n in enumerate(node_list) if n == node]
return found[0] if found else None
def get_identity_index_for_deleting(node_list, node):
for i, n in enumerate(node_list):
# The node's input name has been changed during send/recv insertion,
# but it is sufficient to just compare the type and outputs.
if n.op_type == "Identity" and n.output == node.output:
return i
return None
# Traverse the graph, group connected nodes, and generate the sub-graphs.
def generate_subgraph(model, start_nodes, identity_node_list):
subgraphs = []
main_graph = onnx.ModelProto()
main_graph.CopyFrom(model)
# remove added identity node before copy to subgraph
identity_node_index = []
for n in identity_node_list:
identity_node_index.append(get_identity_index_for_deleting(main_graph.graph.node, n))
identity_node_index.sort(reverse=True)
for i in reversed(range(len(main_graph.graph.node))):
try:
if i in identity_node_index:
del main_graph.graph.node[i]
except:
print("error deleting identity node", i)
all_visited_nodes = []
model_count = len(start_nodes)
for start in reversed(start_nodes):
stack0 = [start]
visited0 = []
tranversed_node = 0
inputs0 = []
outputs0 = []
while stack0:
node = stack0.pop()
if not node in visited0:
tranversed_node += 1
visited0.append(node)
all_visited_nodes.append(node)
connected_nodes, inputs, outputs = find_all_connected_nodes(main_graph, node)
stack0 = stack0 + connected_nodes
inputs0 = inputs0 + inputs
outputs0 = outputs0 + outputs
subgraph = onnx.ModelProto()
subgraph.CopyFrom(main_graph)
# gather visited nodes
visited_nodes = []
for n in visited0:
visited_nodes.append(get_index(main_graph.graph.node, n))
visited_nodes.sort(reverse=True)
# gather visited inputs
visited_inputs = []
for n in inputs0:
visited_inputs.append(get_index(main_graph.graph.input, n))
visited_inputs.sort(reverse=True)
# gather visited outputs
visited_outputs = []
for n in outputs0:
visited_outputs.append(get_index(main_graph.graph.output, n))
visited_outputs.sort(reverse=True)
for i in reversed(range(len(main_graph.graph.node))):
try:
if i not in visited_nodes:
del subgraph.graph.node[i]
else:
del main_graph.graph.node[i]
except:
print("error deleting node", i)
for i in reversed(range(len(main_graph.graph.input))):
try:
if i not in visited_inputs:
del subgraph.graph.input[i]
else:
del main_graph.graph.input[i]
except:
print("error deleting inputs", i)
for i in reversed(range(len(main_graph.graph.output))):
try:
if i not in visited_outputs:
del subgraph.graph.output[i]
else:
del main_graph.graph.output[i]
except:
print("error deleting outputs ", i)
print("model", str(model_count), " length ", len(subgraph.graph.node))
subgraphs.append(subgraph)
model_count -= 1
print("model", str(model_count), " length ", len(main_graph.graph.node))
subgraphs.append(main_graph)
    # As the subgraphs were added in reverse order (the last split is added first), reverse the order back before returning.
subgraphs.reverse()
return subgraphs
def main():
    # Temporarily hard-code the cut-edge structure.
# TODO: move this info to a file (json?) and load the data from there.
input_model_name = "bert-tiny-uncased_L_3_H_128_A_2_V_30528_S_512_Dp_0.1.onnx"
stage_count = 3
cut0_input = {CutEdge("186"), CutEdge("71", {"273", "395"})}
cut1_input = {CutEdge("308"), CutEdge("71", {"395"})}
all_cut_inputs = [cut0_input, cut1_input]
model = onnx.load(input_model_name)
if len(model.graph.value_info) == 0:
model = onnx.shape_inference.infer_shapes(model)
print("original model length ", len(model.graph.node))
output_model_names = [os.path.splitext(input_model_name)[0] + "_" + str(i) + ".onnx" for i in range(stage_count)]
split_edge_groups, new_identity, need_shape_inference = insert_identity(model, all_cut_inputs)
    # New edges were added, so shape inference needs to be re-run.
if need_shape_inference:
model = onnx.shape_inference.infer_shapes(model)
    # After all edges that need to be cut are identified, split the graph.
new_sends, new_receives = split_graph(model, split_edge_groups)
remove_identity(model, new_identity)
sub_graphs = generate_subgraph(model, new_receives, new_identity)
for i in range(stage_count):
sub_graphs[i] = onnx.shape_inference.infer_shapes(sub_graphs[i])
onnx.save(sub_graphs[i], output_model_names[i])
print("save to file: ", output_model_names[i])
if __name__ == "__main__":
main()
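# Illustrative note (not part of the original script): with the hard-coded cuts above,
# stage_count = 3, so a successful run would write three files named
# bert-tiny-uncased_L_3_H_128_A_2_V_30528_S_512_Dp_0.1_0.onnx, ..._1.onnx and ..._2.onnx,
# one per pipeline stage, connected by the inserted Send/Recv pairs.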
|
bowtie/_app.py | timgates42/bowtie | 813 | 11197714 | <filename>bowtie/_app.py
"""Defines the App class."""
from typing import ( # pylint: disable=unused-import
Any,
Callable,
Generator,
List,
Optional,
Set,
Tuple,
Union,
Dict,
Sequence,
)
import os
import json
import itertools
import shutil
from collections import namedtuple, defaultdict
from subprocess import Popen, PIPE, STDOUT, check_output
from pathlib import Path
import secrets
import socket
import warnings
import traceback
import eventlet
import msgpack
import flask
from flask import (
Flask,
render_template,
make_response,
copy_current_request_context,
jsonify,
request,
)
from flask_socketio import SocketIO
from jinja2 import Environment, FileSystemLoader, ChoiceLoader
from bowtie._component import Event, Component, COMPONENT_REGISTRY
from bowtie.pager import Pager
from bowtie.exceptions import (
GridIndexError,
NoSidebarError,
NotStatefulEvent,
NoUnusedCellsError,
SpanOverlapError,
SizeError,
WebpackError,
YarnError,
)
eventlet.monkey_patch(time=True)
Route = namedtuple('Route', ['view', 'path', 'exact'])
_Import = namedtuple('_Import', ['module', 'component'])
_DIRECTORY = Path('build')
_WEBPACK = './node_modules/.bin/webpack'
_MIN_NODE_VERSION = 8, 10, 0
class Scheduler:
"""Run scheduled tasks."""
def __init__(self, app, seconds, func):
"""Create a scheduled function."""
self.app = app
self.seconds = seconds
self.func = func
self.thread = None
def context(self, func):
"""Provide flask context to function."""
# docstyle
def wrap():
with self.app.app_context():
func()
return wrap
def start(self):
"""Start the scheduled task."""
self.thread = eventlet.spawn(self.run)
def run(self):
"""Invoke the function repeatedly on a timer."""
ret = eventlet.spawn(self.context(self.func))
eventlet.sleep(self.seconds)
try:
ret.wait()
except Exception: # pylint: disable=broad-except
traceback.print_exc()
self.thread = eventlet.spawn(self.run)
def stop(self):
"""Stop the scheduled task."""
if self.thread:
self.thread.cancel()
def raise_not_number(x: float) -> None:
"""Raise ``SizeError`` if ``x`` is not a number``."""
try:
float(x)
except ValueError:
raise SizeError('Must pass a number, received {}'.format(x))
class Span:
"""Define the location of a widget."""
def __init__(
self,
row_start: int,
column_start: int,
row_end: Optional[int] = None,
column_end: Optional[int] = None,
) -> None:
"""Create a span for a widget.
        Indexing starts at 0. Start is inclusive and end is exclusive.
        CSS Grid indexing starts at 1 and is [inclusive, exclusive).
        Note: `_start` and `_end` follow the CSS Grid naming convention.
Parameters
----------
row_start : int
column_start : int
row_end : int, optional
column_end : int, optional
"""
self.row_start = row_start
self.column_start = column_start
        # add 1 to the ends because they start counting from 1
if row_end is None:
self.row_end = self.row_start + 1
else:
self.row_end = row_end
if column_end is None:
self.column_end = self.column_start + 1
else:
self.column_end = column_end
@property
def _key(self) -> Tuple[int, int, int, int]:
return self.row_start, self.column_start, self.row_end, self.column_end
def __hash__(self) -> int:
"""Hash for dict."""
return hash(self._key)
def __eq__(self, other) -> bool:
"""Compare eq for dict."""
# pylint: disable=protected-access
return isinstance(other, type(self)) and self._key == other._key
def __repr__(self) -> str:
"""Show the starting and ending points.
This is used as a key in javascript.
"""
return '{},{},{},{}'.format(
self.row_start + 1, self.column_start + 1, self.row_end + 1, self.column_end + 1
)
def overlap(self, other: 'Span'):
"""Detect if two spans overlap."""
return not (
# if one rectangle is left of other
other.column_end <= self.column_start
or self.column_end <= other.column_start
# if one rectangle is above other
or other.row_end <= self.row_start
or self.row_end <= other.row_start
)
@property
def cells(self) -> Generator[Tuple[int, int], None, None]:
"""Generate cells in span."""
yield from itertools.product(
range(self.row_start, self.row_end), range(self.column_start, self.column_end)
)
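# Illustrative sketch (not part of the original module): how Span maps to the CSS Grid
# representation used in __repr__, assuming only the class defined above.
#   >>> repr(Span(0, 0))
#   '1,1,2,2'
#   >>> Span(0, 0).overlap(Span(0, 1))
#   False
#   >>> Span(0, 0, 2, 2).overlap(Span(1, 1))
#   True
#   >>> sorted(Span(0, 0, 1, 2).cells)
#   [(0, 0), (0, 1)]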
class Size:
"""Size of rows and columns in grid.
This is accessed through ``.rows`` and ``.columns`` from App and View instances.
This uses CSS's minmax function.
The minmax() CSS function defines a size range greater than or equal
to min and less than or equal to max. If max < min, then max is ignored
and minmax(min,max) is treated as min. As a maximum, a <flex> value
sets the flex factor of a grid track; it is invalid as a minimum.
Examples
--------
Laying out an app with the first row using 1/3 of the space
and the second row using 2/3 of the space.
>>> app = App(rows=2, columns=3)
>>> app.rows[0].fraction(1)
1fr
>>> app.rows[1].fraction(2)
2fr
"""
def __init__(self) -> None:
"""Create a default row or column size with fraction = 1."""
self.minimum: str = ''
self.maximum: str = ''
self.fraction(1)
def auto(self) -> 'Size':
"""Set the size to auto or content based."""
self.maximum = 'auto'
return self
def min_auto(self) -> 'Size':
"""Set the minimum size to auto or content based."""
self.minimum = 'auto'
return self
def pixels(self, value: float) -> 'Size':
"""Set the size in pixels."""
raise_not_number(value)
self.maximum = '{}px'.format(value)
return self
def min_pixels(self, value: float) -> 'Size':
"""Set the minimum size in pixels."""
raise_not_number(value)
self.minimum = '{}px'.format(value)
return self
def ems(self, value: float) -> 'Size':
"""Set the size in ems."""
raise_not_number(value)
self.maximum = '{}em'.format(value)
return self
def min_ems(self, value: float) -> 'Size':
"""Set the minimum size in ems."""
raise_not_number(value)
self.minimum = '{}em'.format(value)
return self
def fraction(self, value: float) -> 'Size':
"""Set the fraction of free space to use."""
raise_not_number(value)
self.maximum = '{}fr'.format(value)
return self
def percent(self, value: float) -> 'Size':
"""Set the percentage of free space to use."""
raise_not_number(value)
self.maximum = '{}%'.format(value)
return self
def min_percent(self, value: float) -> 'Size':
"""Set the minimum percentage of free space to use."""
raise_not_number(value)
self.minimum = '{}%'.format(value)
return self
def __repr__(self) -> str:
"""Represent the size to be inserted into a JSX template."""
if self.minimum:
return 'minmax({}, {})'.format(self.minimum, self.maximum)
return self.maximum
class Gap:
"""Margin between rows or columns of the grid.
This is accessed through ``.row_gap`` and ``.column_gap`` from App and View instances.
Examples
--------
Create a gap of 5 pixels between all rows.
>>> app = App()
>>> app.row_gap.pixels(5)
5px
"""
def __init__(self) -> None:
"""Create a default margin of zero."""
self.gap: str = ''
self.pixels(0)
def pixels(self, value: int) -> 'Gap':
"""Set the margin in pixels."""
raise_not_number(value)
self.gap = '{}px'.format(value)
return self
def ems(self, value: int) -> 'Gap':
"""Set the margin in ems."""
raise_not_number(value)
self.gap = '{}em'.format(value)
return self
def percent(self, value) -> 'Gap':
"""Set the margin as a percentage."""
raise_not_number(value)
self.gap = '{}%'.format(value)
return self
def __repr__(self) -> str:
"""Represent the margin to be inserted into a JSX template."""
return self.gap
def _check_index(value: int, length: int, bound: bool) -> int:
if not isinstance(value, int):
raise GridIndexError('Indices must be integers, found {}.'.format(value))
if value < 0:
value = value + length
if value < 0 + bound or value >= length + bound:
raise GridIndexError('Index out of range.')
return value
def _slice_to_start_end(slc: slice, length: int) -> Tuple[int, int]:
if slc.step is not None and slc.step != 1:
raise GridIndexError(
'slice step is not supported must be None or 1, was {}'.format(slc.step)
)
start = 0
if slc.start is not None:
start = slc.start
end = length
if slc.stop is not None:
end = slc.stop
return start, end
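# Illustrative sketch (not part of the original module): how the index helpers behave,
# assuming only the functions defined above.
#   >>> _check_index(-1, 4, False)   # negative indices wrap like normal Python indexing
#   3
#   >>> _slice_to_start_end(slice(None, None), 4)
#   (0, 4)
#   >>> _slice_to_start_end(slice(1, 3), 4)
#   (1, 3)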
class Components:
"""List like class for storing components to override iadd.
The purpose of this class is to override the `iadd` function.
I want to be able to support all the following
>>> from bowtie import App
>>> from bowtie.control import Button
>>> app = App()
>>> button = Button()
>>> app[0, 0] = button
>>> app[0, 0] = button, button
>>> app[0, 0] += button
>>> app[0, 0] += button, button
"""
TYPE_MSG: str = 'Must add a component or sequence of components, found {}.'
def __init__(self, component: Optional[Union[Component, Sequence[Component]]] = None) -> None:
"""Create a components list."""
self.data: List[Component]
if component is None:
self.data = []
elif isinstance(component, Component):
self.data = [component]
else:
self.data = list(component)
def __len__(self):
"""Count components."""
return self.data.__len__()
def append(self, component: Component):
"""Append component to the list."""
return self.data.append(component)
def __iter__(self):
"""Iterate over components."""
return self.data.__iter__()
def __getitem__(self, key):
"""Get item as a list."""
return self.data.__getitem__(key)
def _add(self, method, other: Union[Component, Sequence[Component]]) -> 'Components':
if isinstance(other, Component):
return method([other])
if isinstance(other, Sequence):
other = list(other)
            if not all(isinstance(x, Component) for x in other):
raise TypeError(self.TYPE_MSG.format(other))
return method(other)
raise TypeError(self.TYPE_MSG.format(other))
def __iadd__(self, other: Union[Component, Sequence[Component]]):
"""Append items to list when adding."""
return self._add(self.data.__iadd__, other)
def __add__(self, other: Union[Component, Sequence[Component]]):
"""Append items to list when adding."""
return self._add(self.data.__add__, other)
class View:
"""Grid of components."""
_NEXT_UUID = 0
@classmethod
def _next_uuid(cls) -> int:
cls._NEXT_UUID += 1
return cls._NEXT_UUID
def __init__(
self,
rows: int = 1,
columns: int = 1,
sidebar: bool = False,
background_color: str = 'White',
) -> None:
"""Create a new grid.
Parameters
----------
rows : int, optional
Number of rows in the grid.
columns : int, optional
Number of columns in the grid.
sidebar : bool, optional
Enable a sidebar for control components.
background_color : str, optional
Background color of the control pane.
"""
self._uuid = View._next_uuid()
self.column_gap = Gap()
self.row_gap = Gap()
self.border = Gap().pixels(7)
self.rows = [Size() for _ in range(rows)]
self.columns = [Size() for _ in range(columns)]
self.sidebar = sidebar
self.background_color = background_color
self.layout: Optional[Callable] = None
self._controllers: List[Component] = []
self._spans: Dict[Span, Components] = {}
def _all_components(self) -> Generator[Component, None, None]:
yield from self._controllers
yield from itertools.chain.from_iterable(self._spans.values())
@property
def _packages(self) -> Set[str]:
# pylint: disable=protected-access
packages = set(x._PACKAGE for x in self._all_components())
packages.discard(None)
return packages
@property
def _templates(self) -> Set[str]:
# pylint: disable=protected-access
return set(x._TEMPLATE for x in self._all_components())
@property
def _imports(self) -> Set[_Import]:
# pylint: disable=protected-access
return set(
_Import(component=x._COMPONENT, module=x._TEMPLATE[: x._TEMPLATE.find('.')])
for x in self._all_components()
)
@property
def _components(self) -> Set[Component]:
return set(self._all_components())
def _key_to_span(self, key: Any) -> Span:
# TODO spaghetti code cleanup needed!
if isinstance(key, Span):
return key
if isinstance(key, tuple):
if len(key) == 1:
return self._key_to_span(key[0])
try:
row_key, column_key = key
except ValueError:
raise GridIndexError('Index must be 1 or 2 values, found {}'.format(key))
if isinstance(row_key, int):
row_start = _check_index(row_key, len(self.rows), False)
row_end = row_start + 1
elif isinstance(row_key, slice):
row_start, row_end = _slice_to_start_end(row_key, len(self.rows))
row_start = _check_index(row_start, len(self.rows), False)
row_end = _check_index(row_end, len(self.rows), True)
else:
raise GridIndexError(
                    'Cannot index with {}, pass in an int or a slice.'.format(row_key)
)
if isinstance(column_key, int):
column_start = _check_index(column_key, len(self.columns), False)
column_end = column_start + 1
elif isinstance(column_key, slice):
column_start, column_end = _slice_to_start_end(column_key, len(self.columns))
column_start = _check_index(column_start, len(self.columns), False)
column_end = _check_index(column_end, len(self.columns), True)
else:
raise GridIndexError(
                    'Cannot index with {}, pass in an int or a slice.'.format(column_key)
)
rows_cols = row_start, column_start, row_end, column_end
elif isinstance(key, slice):
start, end = _slice_to_start_end(key, len(self.rows))
start = _check_index(start, len(self.rows), False)
end = _check_index(end, len(self.rows), True)
rows_cols = start, 0, end, len(self.columns)
elif isinstance(key, int):
row_start = _check_index(key, len(self.rows), False)
rows_cols = row_start, 0, row_start + 1, len(self.columns)
else:
raise GridIndexError('Invalid index {}'.format(key))
return Span(*rows_cols)
def __getitem__(self, key: Any) -> Components:
"""Get item from the view."""
span = self._key_to_span(key)
if span not in self._spans:
raise KeyError(f'Key {key} has not been used')
return self._spans[span]
def __setitem__(self, key: Any, component: Union[Component, Sequence[Component]]) -> None:
"""Add widget to the view."""
span = self._key_to_span(key)
for used_span in self._spans:
if span != used_span and span.overlap(used_span):
raise SpanOverlapError(
f'Spans {span} and {used_span} overlap. '
'This is not permitted. '
'If you want to do this please open an issue '
'and explain your use case. '
'https://github.com/jwkvam/bowtie/issues'
)
self._spans[span] = Components(component)
def add(self, component: Union[Component, Sequence[Component]]) -> None:
"""Add a widget to the grid in the next available cell.
Searches over columns then rows for available cells.
Parameters
----------
        component : bowtie._Component
A Bowtie widget instance.
"""
try:
self[Span(*self._available_cell())] = component
except NoUnusedCellsError:
span = list(self._spans.keys())[-1]
self._spans[span] += component
def _available_cell(self) -> Tuple[int, int]:
"""Find next available cell first by row then column.
First, construct a set containing all cells.
Then iterate over the spans and remove occupied cells.
"""
cells = set(itertools.product(range(len(self.rows)), range(len(self.columns))))
for span in self._spans:
for cell in span.cells:
cells.remove(cell)
if not cells:
raise NoUnusedCellsError('No available cells')
return min(cells)
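    # Illustrative sketch (not part of the original class): on a 2x2 view, successive
    # calls to add() fill cells in the order (0, 0), (0, 1), (1, 0), (1, 1), because
    # _available_cell returns the smallest unused (row, column) pair.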
def add_sidebar(self, component: Component) -> None:
"""Add a widget to the sidebar.
Parameters
----------
component : bowtie._Component
Add this component to the sidebar, it will be appended to the end.
"""
if not self.sidebar:
raise NoSidebarError('Set `sidebar=True` if you want to use the sidebar.')
if not isinstance(component, Component):
raise ValueError('component must be Component type, found {}'.format(component))
# self._track_widget(widget)
self._controllers.append(component) # pylint: disable=protected-access
@property
def _columns_sidebar(self):
columns = []
if self.sidebar:
columns.append(Size().ems(18))
columns += self.columns
return columns
class App:
"""Core class to layout, connect, build a Bowtie app."""
def __init__(
self,
name='__main__',
app=None,
rows: int = 1,
columns: int = 1,
sidebar: bool = False,
title: str = 'Bowtie App',
theme: Optional[str] = None,
background_color: str = 'White',
socketio: str = '',
debug: bool = False,
) -> None:
"""Create a Bowtie App.
Parameters
----------
name : str, optional
Use __name__ or leave as default if using a single module.
Consult the Flask docs on "import_name" for details on more
complex apps.
app : Flask app, optional
If you are defining your own Flask app, pass it in here.
You only need this if you are doing other stuff with Flask
outside of bowtie.
        rows : int, optional
Number of rows in the grid.
columns : int, optional
Number of columns in the grid.
sidebar : bool, optional
Enable a sidebar for control components.
title : str, optional
Title of the HTML.
theme : str, optional
Color for Ant Design components.
background_color : str, optional
Background color of the control pane.
socketio : string, optional
Socket.io path prefix, only change this for advanced deployments.
debug : bool, optional
Enable debugging in Flask. Disable in production!
"""
self.title = title
self.theme = theme
self._init: Optional[Callable] = None
self._socketio_path = socketio
self._schedules: List[Scheduler] = []
self._subscriptions: Dict[Event, List[Tuple[List[Event], Callable]]] = defaultdict(list)
self._pages: Dict[Pager, Callable] = {}
self._uploads: Dict[int, Callable] = {}
self._root = View(
rows=rows, columns=columns, sidebar=sidebar, background_color=background_color
)
self._routes: List[Route] = []
self._package_dir = Path(os.path.dirname(__file__))
self._jinjaenv = Environment(
loader=FileSystemLoader(str(self._package_dir / 'templates')),
trim_blocks=True,
lstrip_blocks=True,
)
if app is None:
self.app = Flask(name)
else:
self.app = app
self.app.debug = debug
self._socketio = SocketIO(self.app, binary=True, path=socketio + 'socket.io')
self.app.secret_key = secrets.token_bytes()
self.add_route(view=self._root, path='/', exact=True)
# https://buxty.com/b/2012/05/custom-template-folders-with-flask/
templates = Path(__file__).parent / 'templates'
self.app.jinja_loader = ChoiceLoader( # type: ignore
[self.app.jinja_loader, FileSystemLoader(str(templates))]
)
self._build_dir = self.app.root_path / _DIRECTORY # type: ignore
self.app.before_first_request(self._endpoints)
def wsgi_app(self, environ, start_response):
"""Support uwsgi and gunicorn."""
return self.app.wsgi_app(environ, start_response)
def __call__(self, environ, start_response):
"""Support uwsgi and gunicorn."""
return self.wsgi_app(environ, start_response)
def __getattr__(self, name: str):
"""Export attributes from root view."""
if name == 'columns':
return self._root.columns
if name == 'rows':
return self._root.rows
if name == 'column_gap':
return self._root.column_gap
if name == 'row_gap':
return self._root.row_gap
if name == 'border':
return self._root.border
if name == 'layout':
return self._root.layout
raise AttributeError(name)
def __setattr__(self, name, value):
"""Set layout function for root view."""
if name == 'layout':
return self._root.__setattr__(name, value)
return super().__setattr__(name, value)
def __getitem__(self, key: Any):
"""Get item from root view."""
return self._root.__getitem__(key)
def __setitem__(self, key: Any, value: Union[Component, Sequence[Component]]) -> None:
"""Add widget to the root view."""
self._root.__setitem__(key, value)
def add(self, component: Component) -> None:
"""Add a widget to the grid in the next available cell.
Searches over columns then rows for available cells.
Parameters
----------
component : bowtie._Component
A Bowtie component instance.
"""
self._root.add(component)
def add_sidebar(self, widget: Component) -> None:
"""Add a widget to the sidebar.
Parameters
----------
widget : bowtie._Component
Add this widget to the sidebar, it will be appended to the end.
"""
self._root.add_sidebar(widget)
def add_route(self, view: View, path: str, exact: bool = True) -> None:
"""Add a view to the app.
Parameters
----------
view : View
path : str
exact : bool, optional
"""
if path[0] != '/':
path = '/' + path
for route in self._routes:
assert path != route.path, 'Cannot use the same path twice'
self._routes.append(Route(view=view, path=path, exact=exact))
self.app.add_url_rule(
path, path[1:], lambda: render_template('bowtie.html', title=self.title)
)
def subscribe(self, *events: Union[Event, Pager]) -> Callable:
"""Call a function in response to an event.
If more than one event is given, `func` will be given
as many arguments as there are events.
If the pager calls notify, the decorated function will be called.
Parameters
----------
*event : event or pager
Bowtie event, must have at least one.
Examples
--------
Subscribing a function to multiple events.
>>> from bowtie.control import Dropdown, Slider
>>> app = App()
>>> dd = Dropdown()
>>> slide = Slider()
>>> @app.subscribe(dd.on_change, slide.on_change)
... def callback(dd_item, slide_value):
... pass
>>> @app.subscribe(dd.on_change)
... @app.subscribe(slide.on_change)
... def callback2(value):
... pass
Using the pager to run a callback function.
>>> from bowtie.pager import Pager
>>> app = App()
>>> pager = Pager()
>>> @app.subscribe(pager)
... def callback():
... pass
>>> def scheduledtask():
... pager.notify()
"""
try:
first_event = events[0]
except IndexError:
raise IndexError('Must subscribe to at least one event.')
if len(events) != len(set(events)):
raise ValueError(
'Subscribed to the same event multiple times. All events must be unique.'
)
if len(events) > 1:
# check if we are using any non stateful events
for event in events:
if isinstance(event, Pager):
                    raise NotStatefulEvent('A Pager must be subscribed by itself.')
if event.getter is None:
raise NotStatefulEvent(
f'{event.uuid}.on_{event.name} is not a stateful event. '
'It must be used alone.'
)
def decorator(func: Callable) -> Callable:
"""Handle three types of events: pages, uploads, and normal events."""
if isinstance(first_event, Pager):
self._pages[first_event] = func
elif first_event.name == 'upload':
if first_event.uuid in self._uploads:
warnings.warn(
(
'Overwriting function "{func1}" with function '
'"{func2}" for upload object "{obj}".'
).format(
func1=self._uploads[first_event.uuid],
func2=func.__name__,
obj=COMPONENT_REGISTRY[first_event.uuid],
),
Warning,
)
self._uploads[first_event.uuid] = func
else:
for event in events:
# need to have `events` here to maintain order of arguments
# not sure how to deal with mypy typing errors on events so ignoring
self._subscriptions[event].append((events, func)) # type: ignore
return func
return decorator
def load(self, func: Callable) -> Callable:
"""Call a function on page load.
Parameters
----------
func : callable
Function to be called.
"""
self._init = func
return func
def schedule(self, seconds: float):
"""Call a function periodically.
Parameters
----------
seconds : float
Minimum interval of function calls.
func : callable
Function to be called.
"""
# docstyle
def wrap(func: Callable):
self._schedules.append(Scheduler(self.app, seconds, func))
return wrap
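    # Illustrative sketch (not part of the original class): the decorator form, assuming
    # an ``app`` instance; ``refresh`` is a hypothetical callback.
    #   @app.schedule(5)
    #   def refresh():
    #       ...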
def _write_templates(self) -> Set[str]:
indexjsx = self._jinjaenv.get_template('index.jsx.j2')
componentsjs = self._jinjaenv.get_template('components.js.j2')
webpack = self._jinjaenv.get_template('webpack.common.js.j2')
src = self._create_jspath()
webpack_path = self._build_dir / webpack.name[:-3] # type: ignore
with webpack_path.open('w') as f:
f.write(webpack.render(color=self.theme))
# copy js modules that are always needed
for name in ['progress.jsx', 'view.jsx', 'utils.js']:
template_src = self._package_dir / 'src' / name
shutil.copy(template_src, src)
# Layout Design
#
# Dictionaries that are keyed by the components
#
# To layout this will need to look through all components that have a key of the route
#
# use cases
# 1. statically add items to controller in list
# 2. remove item from controller
# 3. add item back to controller
#
# issues:
# widget reordering
# order preserving operations
components: Set[Component] = set()
imports: Set[_Import] = set()
packages: Set[str] = set()
for route in self._routes:
if route.view.layout:
route.view.layout()
packages |= route.view._packages # pylint: disable=protected-access
imports |= route.view._imports # pylint: disable=protected-access
components |= route.view._components # pylint: disable=protected-access
for template in route.view._templates: # pylint: disable=protected-access
template_src = self._package_dir / 'src' / template
shutil.copy(template_src, src)
with (src / componentsjs.name[:-3]).open('w') as f: # type: ignore
f.write(
componentsjs.render(
imports=imports, socketio=self._socketio_path, components=components
)
)
with (src / indexjsx.name[:-3]).open('w') as f: # type: ignore
f.write(
indexjsx.render(
maxviewid=View._NEXT_UUID, # pylint: disable=protected-access
socketio=self._socketio_path,
pages=self._pages,
routes=self._routes,
)
)
return packages
def _build(self, notebook: Optional[str] = None) -> None:
"""Compile the Bowtie application."""
if node_version() < _MIN_NODE_VERSION:
raise WebpackError(
f'Webpack requires at least version {_MIN_NODE_VERSION} of Node, '
                f'found version {node_version()}.'
)
packages = self._write_templates()
for filename in ['package.json', 'webpack.prod.js', 'webpack.dev.js']:
if not (self._build_dir / filename).is_file():
sourcefile = self._package_dir / 'src' / filename
shutil.copy(sourcefile, self._build_dir)
if self._run(['yarn', '--ignore-engines', 'install'], notebook=notebook) > 1:
raise YarnError('Error installing node packages')
if packages:
installed = self._installed_packages()
new_packages = [x for x in packages if x.split('@')[0] not in installed]
if new_packages:
retval = self._run(
['yarn', '--ignore-engines', 'add'] + new_packages, notebook=notebook
)
if retval == 1:
print('Yarn error but trying to continue build')
elif retval > 1:
raise YarnError('Error installing node packages')
retval = self._run([_WEBPACK, '--config', 'webpack.dev.js'], notebook=notebook)
if retval != 0:
raise WebpackError('Error building with webpack')
def _endpoints(self) -> None:
def generate_sio_handler(main_event: Event, supports) -> Callable:
# get all events from all subscriptions associated with this event
uniq_events: Set[Event] = set()
for events, _ in supports:
uniq_events.update(events)
uniq_events.remove(main_event)
for event in uniq_events:
comp = COMPONENT_REGISTRY[event.uuid]
if event.getter is None:
raise AttributeError(
f'{comp} has no getter associated with event "on_{event.name}"'
)
def handler(*args):
def wrapuser():
event_data = {}
for event in uniq_events:
comp = COMPONENT_REGISTRY[event.uuid]
# we already checked that this component has a getter
event_data[event.signal] = getattr(comp, event.getter)()
# if there is no getter, then there is no data to unpack
# if there is a getter, then we need to unpack the data sent
main_getter = main_event.getter
if main_getter is not None:
comp = COMPONENT_REGISTRY[main_event.uuid]
event_data[main_event.signal] = getattr(comp, '_' + main_getter)(
msgpack.unpackb(args[0], encoding='utf8')
)
# gather the remaining data from the other events through their getter methods
for events, func in supports:
if main_getter is not None:
func(*(event_data[event.signal] for event in events))
else:
func()
# TODO replace with flask socketio start_background_task
eventlet.spawn(copy_current_request_context(wrapuser))
return handler
for event, supports in self._subscriptions.items():
self._socketio.on(event.signal)(generate_sio_handler(event, supports))
if self._init is not None:
self._socketio.on('INITIALIZE')(
lambda: eventlet.spawn(copy_current_request_context(self._init))
)
def gen_upload(func) -> Callable:
def upload():
upfile = request.files['file']
retval = func(upfile.filename, upfile.stream)
if retval:
return make_response(jsonify(), 400)
return make_response(jsonify(), 200)
return upload
for uuid, func in self._uploads.items():
self.app.add_url_rule(
f'/upload{uuid}', f'upload{uuid}', gen_upload(func), methods=['POST']
)
for page, func in self._pages.items():
# pylint: disable=protected-access
self._socketio.on(f'resp#{page._uuid}')(
                lambda func=func: eventlet.spawn(copy_current_request_context(func))  # bind func per iteration
)
# bundle route
@self.app.route('/bowtie/bundle.js')
def bowtiebundlejs(): # pylint: disable=unused-variable
bundle_path = self.app.root_path + '/build/bundle.js'
bundle_path_gz = bundle_path + '.gz'
try:
if os.path.getmtime(bundle_path) > os.path.getmtime(bundle_path_gz):
return open(bundle_path, 'r').read()
bundle = open(bundle_path_gz, 'rb').read()
response = flask.make_response(bundle)
response.headers['content-encoding'] = 'gzip'
response.headers['vary'] = 'accept-encoding'
response.headers['content-length'] = len(response.data)
return response
except FileNotFoundError:
if os.path.isfile(bundle_path_gz):
bundle = open(bundle_path_gz, 'rb').read()
response = flask.make_response(bundle)
response.headers['Content-Encoding'] = 'gzip'
response.headers['Vary'] = 'Accept-Encoding'
response.headers['Content-Length'] = len(response.data)
return response
return open(bundle_path, 'r').read()
for schedule in self._schedules:
schedule.start()
def _serve(self, host='0.0.0.0', port=9991) -> None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((host, port))
if result == 0:
raise Exception(f'Port {port} is unavailable on host {host}, aborting.')
self._socketio.run(self.app, host=host, port=port)
for schedule in self._schedules:
schedule.stop()
def _installed_packages(self) -> Generator[str, None, None]:
"""Extract installed packages as list from `package.json`."""
with (self._build_dir / 'package.json').open('r') as f:
packages = json.load(f)
yield from packages['dependencies'].keys()
def _create_jspath(self) -> Path:
"""Create the source directory for the build."""
src = self._build_dir / 'bowtiejs'
os.makedirs(src, exist_ok=True)
return src
def _run(self, command: List[str], notebook: Optional[str] = None) -> int:
"""Run command from terminal and notebook and view output from subprocess."""
if notebook is None:
return Popen(command, cwd=self._build_dir).wait()
cmd = Popen(command, cwd=self._build_dir, stdout=PIPE, stderr=STDOUT)
while True:
line = cmd.stdout.readline()
if line == b'' and cmd.poll() is not None:
return cmd.poll()
print(line.decode('utf-8'), end='')
raise Exception()
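# Illustrative usage sketch (not part of the original module); `my_plot` is a hypothetical
# widget instance and the callback body is elided:
#
#   from bowtie import App
#   from bowtie.control import Slider
#
#   app = App(rows=1, columns=2, sidebar=True)
#   slider = Slider()
#   app.add_sidebar(slider)
#   app[0, 0] = my_plot
#
#   @app.subscribe(slider.on_change)
#   def update(value):
#       ...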
def node_version() -> Tuple[int, ...]:
"""Get node version."""
version = check_output(('node', '--version'))
return tuple(int(x) for x in version.strip()[1:].split(b'.'))
|
snorkel/utils/lr_schedulers.py | melonwater211/snorkel | 2,323 | 11197719 | from snorkel.types import Config
class ExponentialLRSchedulerConfig(Config):
"""Settings for Exponential decay learning rate scheduler."""
gamma: float = 0.9
class StepLRSchedulerConfig(Config):
"""Settings for Step decay learning rate scheduler."""
gamma: float = 0.9
step_size: int = 5
class LRSchedulerConfig(Config):
"""Settings common to all LRSchedulers.
Parameters
----------
warmup_steps
The number of warmup_units over which to perform learning rate warmup (a linear
increase from 0 to the specified lr)
warmup_unit
The unit to use when counting warmup (one of ["batches", "epochs"])
warmup_percentage
The percentage of the training procedure to warm up over (ignored if
warmup_steps is non-zero)
min_lr
The minimum learning rate to use during training (the learning rate specified
by a learning rate scheduler will be rounded up to this if it is lower)
exponential_config
Extra settings for the ExponentialLRScheduler
step_config
Extra settings for the StepLRScheduler
"""
warmup_steps: float = 0 # warm up steps
warmup_unit: str = "batches" # [epochs, batches]
warmup_percentage: float = 0.0 # warm up percentage
min_lr: float = 0.0 # minimum learning rate
exponential_config: ExponentialLRSchedulerConfig = ExponentialLRSchedulerConfig() # type:ignore
step_config: StepLRSchedulerConfig = StepLRSchedulerConfig() # type:ignore
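# Illustrative sketch (not part of the original module), assuming Config behaves like a
# typed NamedTuple so fields can be overridden by keyword:
#   lr_scheduler_config = LRSchedulerConfig(
#       warmup_steps=100,
#       warmup_unit="batches",
#       step_config=StepLRSchedulerConfig(gamma=0.5, step_size=10),
#   )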
|
train/v4/vqa_data_provider_layer.py | minsuu/vqa_mcb | 214 | 11197752 | <reponame>minsuu/vqa_mcb
import caffe
import numpy as np
import re, json, random
import config
QID_KEY_SEPARATOR = '/'
class VQADataProvider:
def __init__(self, batchsize=64, max_length=15, mode='train'):
self.batchsize = batchsize
self.d_vocabulary = None
self.batch_index = None
self.batch_len = None
self.rev_adict = None
self.max_length = max_length
self.mode = mode
self.qdic, self.adic = VQADataProvider.load_data(mode)
with open('./result/vdict.json','r') as f:
self.vdict = json.load(f)
with open('./result/adict.json','r') as f:
self.adict = json.load(f)
self.n_ans_vocabulary = len(self.adict)
@staticmethod
def load_vqa_json(data_split):
"""
Parses the question and answer json files for the given data split.
Returns the question dictionary and the answer dictionary.
"""
qdic, adic = {}, {}
with open(config.DATA_PATHS[data_split]['ques_file'], 'r') as f:
qdata = json.load(f)['questions']
for q in qdata:
qdic[data_split + QID_KEY_SEPARATOR + str(q['question_id'])] = \
{'qstr': q['question'], 'iid': q['image_id']}
if 'test' not in data_split:
with open(config.DATA_PATHS[data_split]['ans_file'], 'r') as f:
adata = json.load(f)['annotations']
for a in adata:
adic[data_split + QID_KEY_SEPARATOR + str(a['question_id'])] = \
a['answers']
print 'parsed', len(qdic), 'questions for', data_split
return qdic, adic
@staticmethod
def load_genome_json():
"""
Parses the genome json file. Returns the question dictionary and the
answer dictionary.
"""
qdic, adic = {}, {}
with open(config.DATA_PATHS['genome']['genome_file'], 'r') as f:
qdata = json.load(f)
for q in qdata:
key = 'genome' + QID_KEY_SEPARATOR + str(q['id'])
qdic[key] = {'qstr': q['question'], 'iid': q['image']}
adic[key] = [{'answer': q['answer']}]
print 'parsed', len(qdic), 'questions for genome'
return qdic, adic
@staticmethod
def load_data(data_split_str):
all_qdic, all_adic = {}, {}
for data_split in data_split_str.split('+'):
assert data_split in config.DATA_PATHS.keys(), 'unknown data split'
if data_split == 'genome':
qdic, adic = VQADataProvider.load_genome_json()
all_qdic.update(qdic)
all_adic.update(adic)
else:
qdic, adic = VQADataProvider.load_vqa_json(data_split)
all_qdic.update(qdic)
all_adic.update(adic)
return all_qdic, all_adic
def getQuesIds(self):
return self.qdic.keys()
def getStrippedQuesId(self, qid):
return qid.split(QID_KEY_SEPARATOR)[1]
def getImgId(self,qid):
return self.qdic[qid]['iid']
def getQuesStr(self,qid):
return self.qdic[qid]['qstr']
def getAnsObj(self,qid):
if self.mode == 'test-dev' or self.mode == 'test':
return -1
return self.adic[qid]
@staticmethod
def seq_to_list(s):
t_str = s.lower()
for i in [r'\?',r'\!',r'\'',r'\"',r'\$',r'\:',r'\@',r'\(',r'\)',r'\,',r'\.',r'\;']:
t_str = re.sub( i, '', t_str)
for i in [r'\-',r'\/']:
t_str = re.sub( i, ' ', t_str)
q_list = re.sub(r'\?','',t_str.lower()).split(' ')
return q_list
def extract_answer(self,answer_obj):
""" Return the most popular answer in string."""
if self.mode == 'test-dev' or self.mode == 'test':
return -1
answer_list = [ answer_obj[i]['answer'] for i in xrange(10)]
dic = {}
for ans in answer_list:
if dic.has_key(ans):
dic[ans] +=1
else:
dic[ans] = 1
max_key = max((v,k) for (k,v) in dic.items())[1]
return max_key
def extract_answer_prob(self,answer_obj):
""" Return the most popular answer in string."""
if self.mode == 'test-dev' or self.mode == 'test':
return -1
answer_list = [ ans['answer'] for ans in answer_obj]
prob_answer_list = []
for ans in answer_list:
if self.adict.has_key(ans):
prob_answer_list.append(ans)
if len(prob_answer_list) == 0:
if self.mode == 'val' or self.mode == 'test-dev' or self.mode == 'test':
return 'hoge'
else:
raise Exception("This should not happen.")
else:
return random.choice(prob_answer_list)
def qlist_to_vec(self, max_length, q_list):
qvec = np.zeros(max_length)
cvec = np.zeros(max_length)
for i,_ in enumerate(xrange(max_length)):
if i < max_length - len(q_list):
cvec[i] = 0
elif i == max_length - len(q_list):
w = q_list[i-(max_length-len(q_list))]
# is the word in the vocabulary?
if self.vdict.has_key(w) is False:
w = ''
qvec[i] = self.vdict[w]
cvec[i] = 0
else:
w = q_list[i-(max_length-len(q_list))]
# is the word in the vocabulary?
if self.vdict.has_key(w) is False:
w = ''
qvec[i] = self.vdict[w]
cvec[i] = 1
return qvec, cvec
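    # Illustrative sketch (not part of the original class): with max_length=5 and
    # q_list=['what', 'is', 'this'], the first two slots stay as padding, the last three
    # hold vocabulary ids, and cvec comes out as [0, 0, 0, 1, 1] (the first real word
    # keeps cvec=0 to mark the start of the sequence).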
def answer_to_vec(self, ans_str):
""" Return answer id if the answer is included in vocabulary otherwise '' """
if self.mode =='test-dev' or self.mode == 'test':
return -1
if self.adict.has_key(ans_str):
ans = self.adict[ans_str]
else:
ans = self.adict['']
return ans
def vec_to_answer(self, ans_symbol):
""" Return answer id if the answer is included in vocabulary otherwise '' """
if self.rev_adict is None:
rev_adict = {}
for k,v in self.adict.items():
rev_adict[v] = k
self.rev_adict = rev_adict
return self.rev_adict[ans_symbol]
def create_batch(self,qid_list):
qvec = (np.zeros(self.batchsize*self.max_length)).reshape(self.batchsize,self.max_length)
cvec = (np.zeros(self.batchsize*self.max_length)).reshape(self.batchsize,self.max_length)
ivec = (np.zeros(self.batchsize*2048)).reshape(self.batchsize,2048)
avec = (np.zeros(self.batchsize)).reshape(self.batchsize)
for i,qid in enumerate(qid_list):
# load raw question information
q_str = self.getQuesStr(qid)
q_ans = self.getAnsObj(qid)
q_iid = self.getImgId(qid)
# convert question to vec
q_list = VQADataProvider.seq_to_list(q_str)
t_qvec, t_cvec = self.qlist_to_vec(self.max_length, q_list)
try:
qid_split = qid.split(QID_KEY_SEPARATOR)
data_split = qid_split[0]
if data_split == 'genome':
t_ivec = np.load(config.DATA_PATHS['genome']['features_prefix'] + str(q_iid) + '.jpg.npz')['x']
else:
t_ivec = np.load(config.DATA_PATHS[data_split]['features_prefix'] + str(q_iid).zfill(12) + '.jpg.npz')['x']
t_ivec = ( t_ivec / np.sqrt((t_ivec**2).sum()) )
except:
t_ivec = 0.
print 'data not found for qid : ', q_iid, self.mode
# convert answer to vec
if self.mode == 'val' or self.mode == 'test-dev' or self.mode == 'test':
q_ans_str = self.extract_answer(q_ans)
else:
q_ans_str = self.extract_answer_prob(q_ans)
t_avec = self.answer_to_vec(q_ans_str)
qvec[i,...] = t_qvec
cvec[i,...] = t_cvec
ivec[i,...] = t_ivec
avec[i,...] = t_avec
return qvec, cvec, ivec, avec
def get_batch_vec(self):
if self.batch_len is None:
self.n_skipped = 0
qid_list = self.getQuesIds()
random.shuffle(qid_list)
self.qid_list = qid_list
self.batch_len = len(qid_list)
self.batch_index = 0
self.epoch_counter = 0
def has_at_least_one_valid_answer(t_qid):
answer_obj = self.getAnsObj(t_qid)
answer_list = [ans['answer'] for ans in answer_obj]
for ans in answer_list:
if self.adict.has_key(ans):
return True
counter = 0
t_qid_list = []
t_iid_list = []
while counter < self.batchsize:
t_qid = self.qid_list[self.batch_index]
t_iid = self.getImgId(t_qid)
if self.mode == 'val' or self.mode == 'test-dev' or self.mode == 'test':
t_qid_list.append(t_qid)
t_iid_list.append(t_iid)
counter += 1
elif has_at_least_one_valid_answer(t_qid):
t_qid_list.append(t_qid)
t_iid_list.append(t_iid)
counter += 1
else:
self.n_skipped += 1
if self.batch_index < self.batch_len-1:
self.batch_index += 1
else:
self.epoch_counter += 1
qid_list = self.getQuesIds()
random.shuffle(qid_list)
self.qid_list = qid_list
self.batch_index = 0
print("%d questions were skipped in a single epoch" % self.n_skipped)
self.n_skipped = 0
t_batch = self.create_batch(t_qid_list)
return t_batch + (t_qid_list, t_iid_list, self.epoch_counter)
class VQADataProviderLayer(caffe.Layer):
"""
Provide input data for VQA.
"""
def setup(self, bottom, top):
self.batchsize = json.loads(self.param_str)['batchsize']
self.top_names = ['data','cont','feature','label']
top[0].reshape(15,self.batchsize)
top[1].reshape(15,self.batchsize)
top[2].reshape(self.batchsize,2048)
top[3].reshape(self.batchsize)
self.mode = json.loads(self.param_str)['mode']
if self.mode == 'val' or self.mode == 'test-dev' or self.mode == 'test':
pass
else:
self.dp = VQADataProvider(batchsize=self.batchsize, mode=self.mode)
def reshape(self, bottom, top):
pass
def forward(self, bottom, top):
if self.mode == 'val' or self.mode == 'test-dev' or self.mode == 'test':
pass
else:
word, cont, feature, answer, _, _, _ = self.dp.get_batch_vec()
top[0].data[...] = np.transpose(word,(1,0))
top[1].data[...] = np.transpose(cont,(1,0))
top[2].data[...] = feature
top[3].data[...] = answer
def backward(self, top, propagate_down, bottom):
pass
|
lib/bindings/samples/server/test/test_project_manager.py | tlalexander/stitchEm | 182 | 11197754 | <reponame>tlalexander/stitchEm
import unittest
import vs
from project_manager import ProjectManager
from utils.settings_manager import SETTINGS
TEST_PTV_FILEPATH = "./test_data/procedural-4K-nvenc.ptv"
TEST_PTV_SAVEPATH = "./test_data/project_manager_save.ptv"
class TestProjectManager(unittest.TestCase):
def setUp(self):
SETTINGS.current_audio_source = ProjectManager.AUDIO_NOAUDIO
self.project_manager = ProjectManager(TEST_PTV_FILEPATH)
def test_save(self):
exposure = self.project_manager.project_config.has("exposure")
exposure = exposure.clone()
exposure.push("updated", vs.Value.boolObject(True))
self.project_manager.update("exposure", exposure, auto_save=False)
self.project_manager.save(TEST_PTV_SAVEPATH)
updated_project = ProjectManager(TEST_PTV_SAVEPATH)
self.assertEqual(exposure.has("updated").getBool(), updated_project.project_config.has("exposure").has("updated").getBool())
|
modules/audio/asr/u2_conformer_librispeech/module.py | AK391/PaddleHub | 8,360 | 11197762 | <reponame>AK391/PaddleHub<filename>modules/audio/asr/u2_conformer_librispeech/module.py<gh_stars>1000+
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import sys
import numpy as np
from paddlehub.env import MODULE_HOME
from paddlehub.module.module import moduleinfo, serving
from paddlehub.utils.log import logger
import paddle
import soundfile as sf
# TODO: Remove system path when deepspeech can be installed via pip.
sys.path.append(os.path.join(MODULE_HOME, 'u2_conformer_librispeech'))
from deepspeech.exps.u2.config import get_cfg_defaults
from deepspeech.utils.utility import UpdateConfig
from .u2_conformer_tester import U2ConformerTester
@moduleinfo(
name="u2_conformer_librispeech", version="1.0.0", summary="", author="Baidu", author_email="", type="audio/asr")
class U2Conformer(paddle.nn.Layer):
def __init__(self):
super(U2Conformer, self).__init__()
# resource
res_dir = os.path.join(MODULE_HOME, 'u2_conformer_librispeech', 'assets')
conf_file = os.path.join(res_dir, 'conf/conformer.yaml')
checkpoint = os.path.join(res_dir, 'checkpoints/avg_30.pdparams')
# config
self.config = get_cfg_defaults()
self.config.merge_from_file(conf_file)
# TODO: Remove path updating snippet.
with UpdateConfig(self.config):
self.config.collator.vocab_filepath = os.path.join(res_dir, self.config.collator.vocab_filepath)
self.config.collator.spm_model_prefix = os.path.join(res_dir, self.config.collator.spm_model_prefix)
self.config.collator.augmentation_config = os.path.join(res_dir, self.config.collator.augmentation_config)
self.config.model.cmvn_file = os.path.join(res_dir, self.config.model.cmvn_file)
self.config.decoding.decoding_method = 'attention_rescoring'
self.config.decoding.batch_size = 1
# model
self.tester = U2ConformerTester(self.config)
self.tester.setup_model()
self.tester.resume(checkpoint)
@staticmethod
def check_audio(audio_file):
sig, sample_rate = sf.read(audio_file)
        assert sample_rate == 16000, 'Expecting sample rate of input audio to be 16000, but got {}'.format(sample_rate)
@serving
def speech_recognize(self, audio_file, device='cpu'):
assert os.path.isfile(audio_file), 'File not exists: {}'.format(audio_file)
self.check_audio(audio_file)
paddle.set_device(device)
return self.tester.test(audio_file)[0][0]
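# Illustrative usage sketch (not part of the original module); the wav path is a
# hypothetical placeholder for a 16 kHz mono recording:
#   import paddlehub as hub
#   model = hub.Module(name='u2_conformer_librispeech')
#   text = model.speech_recognize('/path/to/en.wav', device='cpu')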
|
boto3_type_annotations_with_docs/boto3_type_annotations/health/client.py | cowboygneox/boto3_type_annotations | 119 | 11197765 | <reponame>cowboygneox/boto3_type_annotations
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def describe_affected_entities(self, filter: Dict, locale: str = None, nextToken: str = None, maxResults: int = None) -> Dict:
"""
Returns a list of entities that have been affected by the specified events, based on the specified filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the AWS service. Events that have impact beyond that of the affected entities, or where the extent of impact is unknown, include at least one entity indicating this.
At least one event ARN is required. Results are sorted by the ``lastUpdatedTime`` of the entity, starting with the most recent.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/health-2016-08-04/DescribeAffectedEntities>`_
**Request Syntax**
::
response = client.describe_affected_entities(
filter={
'eventArns': [
'string',
],
'entityArns': [
'string',
],
'entityValues': [
'string',
],
'lastUpdatedTimes': [
{
'from': datetime(2015, 1, 1),
'to': datetime(2015, 1, 1)
},
],
'tags': [
{
'string': 'string'
},
],
'statusCodes': [
'IMPAIRED'|'UNIMPAIRED'|'UNKNOWN',
]
},
locale='string',
nextToken='string',
maxResults=123
)
**Response Syntax**
::
{
'entities': [
{
'entityArn': 'string',
'eventArn': 'string',
'entityValue': 'string',
'entityUrl': 'string',
'awsAccountId': 'string',
'lastUpdatedTime': datetime(2015, 1, 1),
'statusCode': 'IMPAIRED'|'UNIMPAIRED'|'UNKNOWN',
'tags': {
'string': 'string'
}
},
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **entities** *(list) --*
The entities that match the filter criteria.
- *(dict) --*
Information about an entity that is affected by a Health event.
- **entityArn** *(string) --*
The unique identifier for the entity. Format: ``arn:aws:health:*entity-region* :*aws-account* :entity/*entity-id* `` . Example: ``arn:aws:health:us-east-1:111222333444:entity/AVh5GGT7ul1arKr1sE1K``
- **eventArn** *(string) --*
The unique identifier for the event. Format: ``arn:aws:health:*event-region* ::event/*SERVICE* /*EVENT_TYPE_CODE* /*EVENT_TYPE_PLUS_ID* `` . Example: ``Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456``
- **entityValue** *(string) --*
The ID of the affected entity.
- **entityUrl** *(string) --*
- **awsAccountId** *(string) --*
The 12-digit AWS account number that contains the affected entity.
- **lastUpdatedTime** *(datetime) --*
The most recent time that the entity was updated.
- **statusCode** *(string) --*
The most recent status of the entity affected by the event. The possible values are ``IMPAIRED`` , ``UNIMPAIRED`` , and ``UNKNOWN`` .
- **tags** *(dict) --*
A map of entity tags attached to the affected entity.
- *(string) --*
- *(string) --*
- **nextToken** *(string) --*
If the results of a search are large, only a portion of the results are returned, and a ``nextToken`` pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.
:type filter: dict
:param filter: **[REQUIRED]**
Values to narrow the results returned. At least one event ARN is required.
- **eventArns** *(list) --* **[REQUIRED]**
A list of event ARNs (unique identifiers). For example: ``\"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456\", \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101\"``
- *(string) --*
- **entityArns** *(list) --*
A list of entity ARNs (unique identifiers).
- *(string) --*
- **entityValues** *(list) --*
A list of IDs for affected entities.
- *(string) --*
- **lastUpdatedTimes** *(list) --*
A list of the most recent dates and times that the entity was updated.
- *(dict) --*
A range of dates and times that is used by the EventFilter and EntityFilter objects. If ``from`` is set and ``to`` is set: match items where the timestamp (``startTime`` , ``endTime`` , or ``lastUpdatedTime`` ) is between ``from`` and ``to`` inclusive. If ``from`` is set and ``to`` is not set: match items where the timestamp value is equal to or after ``from`` . If ``from`` is not set and ``to`` is set: match items where the timestamp value is equal to or before ``to`` .
- **from** *(datetime) --*
The starting date and time of a time range.
- **to** *(datetime) --*
The ending date and time of a time range.
- **tags** *(list) --*
A map of entity tags attached to the affected entity.
- *(dict) --*
- *(string) --*
- *(string) --*
- **statusCodes** *(list) --*
A list of entity status codes (``IMPAIRED`` , ``UNIMPAIRED`` , or ``UNKNOWN`` ).
- *(string) --*
:type locale: string
:param locale:
The locale (language) to return information in. English (en) is the default and the only supported value at this time.
:type nextToken: string
:param nextToken:
If the results of a search are large, only a portion of the results are returned, and a ``nextToken`` pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.
:type maxResults: integer
:param maxResults:
The maximum number of items to return in one batch, between 10 and 100, inclusive.
:rtype: dict
:returns:
"""
pass
def describe_entity_aggregates(self, eventArns: List = None) -> Dict:
"""
Returns the number of entities that are affected by each of the specified events. If no events are specified, the counts of all affected entities are returned.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/health-2016-08-04/DescribeEntityAggregates>`_
**Request Syntax**
::
response = client.describe_entity_aggregates(
eventArns=[
'string',
]
)
**Response Syntax**
::
{
'entityAggregates': [
{
'eventArn': 'string',
'count': 123
},
]
}
**Response Structure**
- *(dict) --*
- **entityAggregates** *(list) --*
The number of entities that are affected by each of the specified events.
- *(dict) --*
The number of entities that are affected by one or more events. Returned by the DescribeEntityAggregates operation.
- **eventArn** *(string) --*
The unique identifier for the event. Format: ``arn:aws:health:*event-region* ::event/*SERVICE* /*EVENT_TYPE_CODE* /*EVENT_TYPE_PLUS_ID* `` . Example: ``Example: arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456``
- **count** *(integer) --*
The number entities that match the criteria for the specified events.
:type eventArns: list
:param eventArns:
A list of event ARNs (unique identifiers). For example: ``\"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456\", \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101\"``
- *(string) --*
:rtype: dict
:returns:
"""
pass
def describe_event_aggregates(self, aggregateField: str, filter: Dict = None, maxResults: int = None, nextToken: str = None) -> Dict:
"""
Returns the number of events of each event type (issue, scheduled change, and account notification). If no filter is specified, the counts of all events in each category are returned.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/health-2016-08-04/DescribeEventAggregates>`_
**Request Syntax**
::
response = client.describe_event_aggregates(
filter={
'eventArns': [
'string',
],
'eventTypeCodes': [
'string',
],
'services': [
'string',
],
'regions': [
'string',
],
'availabilityZones': [
'string',
],
'startTimes': [
{
'from': datetime(2015, 1, 1),
'to': datetime(2015, 1, 1)
},
],
'endTimes': [
{
'from': datetime(2015, 1, 1),
'to': datetime(2015, 1, 1)
},
],
'lastUpdatedTimes': [
{
'from': datetime(2015, 1, 1),
'to': datetime(2015, 1, 1)
},
],
'entityArns': [
'string',
],
'entityValues': [
'string',
],
'eventTypeCategories': [
'issue'|'accountNotification'|'scheduledChange',
],
'tags': [
{
'string': 'string'
},
],
'eventStatusCodes': [
'open'|'closed'|'upcoming',
]
},
aggregateField='eventTypeCategory',
maxResults=123,
nextToken='string'
)
**Response Syntax**
::
{
'eventAggregates': [
{
'aggregateValue': 'string',
'count': 123
},
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **eventAggregates** *(list) --*
The number of events in each category that meet the optional filter criteria.
- *(dict) --*
The number of events of each issue type. Returned by the DescribeEventAggregates operation.
- **aggregateValue** *(string) --*
The issue type for the associated count.
- **count** *(integer) --*
The number of events of the associated issue type.
- **nextToken** *(string) --*
If the results of a search are large, only a portion of the results are returned, and a ``nextToken`` pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.
:type filter: dict
:param filter:
Values to narrow the results returned.
- **eventArns** *(list) --*
A list of event ARNs (unique identifiers). For example: ``\"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456\", \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101\"``
- *(string) --*
- **eventTypeCodes** *(list) --*
A list of unique identifiers for event types. For example, ``\"AWS_EC2_SYSTEM_MAINTENANCE_EVENT\",\"AWS_RDS_MAINTENANCE_SCHEDULED\"``
- *(string) --*
- **services** *(list) --*
The AWS services associated with the event. For example, ``EC2`` , ``RDS`` .
- *(string) --*
- **regions** *(list) --*
A list of AWS regions.
- *(string) --*
- **availabilityZones** *(list) --*
A list of AWS availability zones.
- *(string) --*
- **startTimes** *(list) --*
A list of dates and times that the event began.
- *(dict) --*
A range of dates and times that is used by the EventFilter and EntityFilter objects. If ``from`` is set and ``to`` is set: match items where the timestamp (``startTime`` , ``endTime`` , or ``lastUpdatedTime`` ) is between ``from`` and ``to`` inclusive. If ``from`` is set and ``to`` is not set: match items where the timestamp value is equal to or after ``from`` . If ``from`` is not set and ``to`` is set: match items where the timestamp value is equal to or before ``to`` .
- **from** *(datetime) --*
The starting date and time of a time range.
- **to** *(datetime) --*
The ending date and time of a time range.
- **endTimes** *(list) --*
A list of dates and times that the event ended.
- *(dict) --*
A range of dates and times that is used by the EventFilter and EntityFilter objects. If ``from`` is set and ``to`` is set: match items where the timestamp (``startTime`` , ``endTime`` , or ``lastUpdatedTime`` ) is between ``from`` and ``to`` inclusive. If ``from`` is set and ``to`` is not set: match items where the timestamp value is equal to or after ``from`` . If ``from`` is not set and ``to`` is set: match items where the timestamp value is equal to or before ``to`` .
- **from** *(datetime) --*
The starting date and time of a time range.
- **to** *(datetime) --*
The ending date and time of a time range.
- **lastUpdatedTimes** *(list) --*
A list of dates and times that the event was last updated.
- *(dict) --*
A range of dates and times that is used by the EventFilter and EntityFilter objects. If ``from`` is set and ``to`` is set: match items where the timestamp (``startTime`` , ``endTime`` , or ``lastUpdatedTime`` ) is between ``from`` and ``to`` inclusive. If ``from`` is set and ``to`` is not set: match items where the timestamp value is equal to or after ``from`` . If ``from`` is not set and ``to`` is set: match items where the timestamp value is equal to or before ``to`` .
- **from** *(datetime) --*
The starting date and time of a time range.
- **to** *(datetime) --*
The ending date and time of a time range.
- **entityArns** *(list) --*
A list of entity ARNs (unique identifiers).
- *(string) --*
- **entityValues** *(list) --*
A list of entity identifiers, such as EC2 instance IDs (``i-34ab692e`` ) or EBS volumes (``vol-426ab23e`` ).
- *(string) --*
- **eventTypeCategories** *(list) --*
A list of event type category codes (``issue`` , ``scheduledChange`` , or ``accountNotification`` ).
- *(string) --*
- **tags** *(list) --*
A map of entity tags attached to the affected entity.
- *(dict) --*
- *(string) --*
- *(string) --*
- **eventStatusCodes** *(list) --*
A list of event status codes.
- *(string) --*
:type aggregateField: string
:param aggregateField: **[REQUIRED]**
The only currently supported value is ``eventTypeCategory`` .
:type maxResults: integer
:param maxResults:
The maximum number of items to return in one batch, between 10 and 100, inclusive.
:type nextToken: string
:param nextToken:
If the results of a search are large, only a portion of the results are returned, and a ``nextToken`` pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.
:rtype: dict
:returns:
"""
pass
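# Hedged usage sketch (not part of the generated stub): counting open events per
# category and paginating manually with nextToken. Assumes a boto3.client("health")
# client; the filter values are illustrative.
#
#   import boto3
#
#   health = boto3.client("health", region_name="us-east-1")
#   kwargs = {
#       "aggregateField": "eventTypeCategory",
#       "filter": {"eventStatusCodes": ["open"]},
#   }
#   while True:
#       page = health.describe_event_aggregates(**kwargs)
#       for aggregate in page["eventAggregates"]:
#           print(aggregate["aggregateValue"], aggregate["count"])
#       token = page.get("nextToken")
#       if not token:
#           break
#       kwargs["nextToken"] = token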
def describe_event_details(self, eventArns: List, locale: str = None) -> Dict:
"""
Returns detailed information about one or more specified events. Information includes standard event data (region, service, etc., as returned by DescribeEvents ), a detailed event description, and possible additional metadata that depends upon the nature of the event. Affected entities are not included; to retrieve those, use the DescribeAffectedEntities operation.
If a specified event cannot be retrieved, an error message is returned for that event.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/health-2016-08-04/DescribeEventDetails>`_
**Request Syntax**
::
response = client.describe_event_details(
eventArns=[
'string',
],
locale='string'
)
**Response Syntax**
::
{
'successfulSet': [
{
'event': {
'arn': 'string',
'service': 'string',
'eventTypeCode': 'string',
'eventTypeCategory': 'issue'|'accountNotification'|'scheduledChange',
'region': 'string',
'availabilityZone': 'string',
'startTime': datetime(2015, 1, 1),
'endTime': datetime(2015, 1, 1),
'lastUpdatedTime': datetime(2015, 1, 1),
'statusCode': 'open'|'closed'|'upcoming'
},
'eventDescription': {
'latestDescription': 'string'
},
'eventMetadata': {
'string': 'string'
}
},
],
'failedSet': [
{
'eventArn': 'string',
'errorName': 'string',
'errorMessage': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **successfulSet** *(list) --*
Information about the events that could be retrieved.
- *(dict) --*
Detailed information about an event. A combination of an Event object, an EventDescription object, and additional metadata about the event. Returned by the DescribeEventDetails operation.
- **event** *(dict) --*
Summary information about the event.
- **arn** *(string) --*
The unique identifier for the event. Format: ``arn:aws:health:*event-region* ::event/*SERVICE* /*EVENT_TYPE_CODE* /*EVENT_TYPE_PLUS_ID* `` . Example: ``arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456``
- **service** *(string) --*
The AWS service that is affected by the event. For example, ``EC2`` , ``RDS`` .
- **eventTypeCode** *(string) --*
The unique identifier for the event type. The format is ``AWS_*SERVICE* _*DESCRIPTION* `` ; for example, ``AWS_EC2_SYSTEM_MAINTENANCE_EVENT`` .
- **eventTypeCategory** *(string) --*
The category of the event. Possible values are ``issue`` , ``scheduledChange`` , and ``accountNotification`` .
- **region** *(string) --*
The AWS region name of the event.
- **availabilityZone** *(string) --*
The AWS Availability Zone of the event. For example, us-east-1a.
- **startTime** *(datetime) --*
The date and time that the event began.
- **endTime** *(datetime) --*
The date and time that the event ended.
- **lastUpdatedTime** *(datetime) --*
The most recent date and time that the event was updated.
- **statusCode** *(string) --*
The most recent status of the event. Possible values are ``open`` , ``closed`` , and ``upcoming`` .
- **eventDescription** *(dict) --*
The most recent description of the event.
- **latestDescription** *(string) --*
The most recent description of the event.
- **eventMetadata** *(dict) --*
Additional metadata about the event.
- *(string) --*
- *(string) --*
- **failedSet** *(list) --*
Error messages for any events that could not be retrieved.
- *(dict) --*
Error information returned when a DescribeEventDetails operation cannot find a specified event.
- **eventArn** *(string) --*
The unique identifier for the event. Format: ``arn:aws:health:*event-region* ::event/*SERVICE* /*EVENT_TYPE_CODE* /*EVENT_TYPE_PLUS_ID* `` . Example: ``arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456``
- **errorName** *(string) --*
The name of the error.
- **errorMessage** *(string) --*
A message that describes the error.
:type eventArns: list
:param eventArns: **[REQUIRED]**
A list of event ARNs (unique identifiers). For example: ``\"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456\", \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101\"``
- *(string) --*
:type locale: string
:param locale:
The locale (language) to return information in. English (en) is the default and the only supported value at this time.
:rtype: dict
:returns:
"""
pass
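# Hedged usage sketch (not part of the generated stub): fetching details for one
# event ARN and separating successes from failures. The ARN is illustrative.
#
#   import boto3
#
#   health = boto3.client("health", region_name="us-east-1")
#   details = health.describe_event_details(
#       eventArns=[
#           "arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456",
#       ]
#   )
#   for item in details["successfulSet"]:
#       print(item["event"]["arn"], item["eventDescription"]["latestDescription"])
#   for failure in details["failedSet"]:
#       print(failure["eventArn"], failure["errorName"], failure["errorMessage"])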
def describe_event_types(self, filter: Dict = None, locale: str = None, nextToken: str = None, maxResults: int = None) -> Dict:
"""
Returns the event types that meet the specified filter criteria. If no filter criteria are specified, all event types are returned, in no particular order.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/health-2016-08-04/DescribeEventTypes>`_
**Request Syntax**
::
response = client.describe_event_types(
filter={
'eventTypeCodes': [
'string',
],
'services': [
'string',
],
'eventTypeCategories': [
'issue'|'accountNotification'|'scheduledChange',
]
},
locale='string',
nextToken='string',
maxResults=123
)
**Response Syntax**
::
{
'eventTypes': [
{
'service': 'string',
'code': 'string',
'category': 'issue'|'accountNotification'|'scheduledChange'
},
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **eventTypes** *(list) --*
A list of event types that match the filter criteria. Event types have a category (``issue`` , ``accountNotification`` , or ``scheduledChange`` ), a service (for example, ``EC2`` , ``RDS`` , ``DATAPIPELINE`` , ``BILLING`` ), and a code (in the format ``AWS_*SERVICE* _*DESCRIPTION* `` ; for example, ``AWS_EC2_SYSTEM_MAINTENANCE_EVENT`` ).
- *(dict) --*
Metadata about a type of event that is reported by AWS Health. Data consists of the category (for example, ``issue`` ), the service (for example, ``EC2`` ), and the event type code (for example, ``AWS_EC2_SYSTEM_MAINTENANCE_EVENT`` ).
- **service** *(string) --*
The AWS service that is affected by the event. For example, ``EC2`` , ``RDS`` .
- **code** *(string) --*
The unique identifier for the event type. The format is ``AWS_*SERVICE* _*DESCRIPTION* `` ; for example, ``AWS_EC2_SYSTEM_MAINTENANCE_EVENT`` .
- **category** *(string) --*
A list of event type category codes (``issue`` , ``scheduledChange`` , or ``accountNotification`` ).
- **nextToken** *(string) --*
If the results of a search are large, only a portion of the results are returned, and a ``nextToken`` pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.
:type filter: dict
:param filter:
Values to narrow the results returned.
- **eventTypeCodes** *(list) --*
A list of event type codes.
- *(string) --*
- **services** *(list) --*
The AWS services associated with the event. For example, ``EC2`` , ``RDS`` .
- *(string) --*
- **eventTypeCategories** *(list) --*
A list of event type category codes (``issue`` , ``scheduledChange`` , or ``accountNotification`` ).
- *(string) --*
:type locale: string
:param locale:
The locale (language) to return information in. English (en) is the default and the only supported value at this time.
:type nextToken: string
:param nextToken:
If the results of a search are large, only a portion of the results are returned, and a ``nextToken`` pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.
:type maxResults: integer
:param maxResults:
The maximum number of items to return in one batch, between 10 and 100, inclusive.
:rtype: dict
:returns:
"""
pass
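# Hedged usage sketch (not part of the generated stub): listing the event type
# codes in the "issue" category, following the request/response syntax documented
# above. Assumes a boto3.client("health") client.
#
#   import boto3
#
#   health = boto3.client("health", region_name="us-east-1")
#   response = health.describe_event_types(
#       filter={"eventTypeCategories": ["issue"]}
#   )
#   for event_type in response["eventTypes"]:
#       print(event_type["service"], event_type["code"])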
def describe_events(self, filter: Dict = None, nextToken: str = None, maxResults: int = None, locale: str = None) -> Dict:
"""
Returns information about events that meet the specified filter criteria. Events are returned in a summary form and do not include the detailed description, any additional metadata that depends on the event type, or any affected resources. To retrieve that information, use the DescribeEventDetails and DescribeAffectedEntities operations.
If no filter criteria are specified, all events are returned. Results are sorted by ``lastModifiedTime`` , starting with the most recent.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/health-2016-08-04/DescribeEvents>`_
**Request Syntax**
::
response = client.describe_events(
filter={
'eventArns': [
'string',
],
'eventTypeCodes': [
'string',
],
'services': [
'string',
],
'regions': [
'string',
],
'availabilityZones': [
'string',
],
'startTimes': [
{
'from': datetime(2015, 1, 1),
'to': datetime(2015, 1, 1)
},
],
'endTimes': [
{
'from': datetime(2015, 1, 1),
'to': datetime(2015, 1, 1)
},
],
'lastUpdatedTimes': [
{
'from': datetime(2015, 1, 1),
'to': datetime(2015, 1, 1)
},
],
'entityArns': [
'string',
],
'entityValues': [
'string',
],
'eventTypeCategories': [
'issue'|'accountNotification'|'scheduledChange',
],
'tags': [
{
'string': 'string'
},
],
'eventStatusCodes': [
'open'|'closed'|'upcoming',
]
},
nextToken='string',
maxResults=123,
locale='string'
)
**Response Syntax**
::
{
'events': [
{
'arn': 'string',
'service': 'string',
'eventTypeCode': 'string',
'eventTypeCategory': 'issue'|'accountNotification'|'scheduledChange',
'region': 'string',
'availabilityZone': 'string',
'startTime': datetime(2015, 1, 1),
'endTime': datetime(2015, 1, 1),
'lastUpdatedTime': datetime(2015, 1, 1),
'statusCode': 'open'|'closed'|'upcoming'
},
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **events** *(list) --*
The events that match the specified filter criteria.
- *(dict) --*
Summary information about an event, returned by the DescribeEvents operation. The DescribeEventDetails operation also returns this information, as well as the EventDescription and additional event metadata.
- **arn** *(string) --*
The unique identifier for the event. Format: ``arn:aws:health:*event-region* ::event/*SERVICE* /*EVENT_TYPE_CODE* /*EVENT_TYPE_PLUS_ID* `` . Example: ``arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456``
- **service** *(string) --*
The AWS service that is affected by the event. For example, ``EC2`` , ``RDS`` .
- **eventTypeCode** *(string) --*
The unique identifier for the event type. The format is ``AWS_*SERVICE* _*DESCRIPTION* `` ; for example, ``AWS_EC2_SYSTEM_MAINTENANCE_EVENT`` .
- **eventTypeCategory** *(string) --*
The category of the event. Possible values are ``issue`` , ``scheduledChange`` , and ``accountNotification`` .
- **region** *(string) --*
The AWS region name of the event.
- **availabilityZone** *(string) --*
The AWS Availability Zone of the event. For example, us-east-1a.
- **startTime** *(datetime) --*
The date and time that the event began.
- **endTime** *(datetime) --*
The date and time that the event ended.
- **lastUpdatedTime** *(datetime) --*
The most recent date and time that the event was updated.
- **statusCode** *(string) --*
The most recent status of the event. Possible values are ``open`` , ``closed`` , and ``upcoming`` .
- **nextToken** *(string) --*
If the results of a search are large, only a portion of the results are returned, and a ``nextToken`` pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.
:type filter: dict
:param filter:
Values to narrow the results returned.
- **eventArns** *(list) --*
A list of event ARNs (unique identifiers). For example: ``\"arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-CDE456\", \"arn:aws:health:us-west-1::event/EBS/AWS_EBS_LOST_VOLUME/AWS_EBS_LOST_VOLUME_CHI789_JKL101\"``
- *(string) --*
- **eventTypeCodes** *(list) --*
A list of unique identifiers for event types. For example, ``\"AWS_EC2_SYSTEM_MAINTENANCE_EVENT\",\"AWS_RDS_MAINTENANCE_SCHEDULED\"``
- *(string) --*
- **services** *(list) --*
The AWS services associated with the event. For example, ``EC2`` , ``RDS`` .
- *(string) --*
- **regions** *(list) --*
A list of AWS regions.
- *(string) --*
- **availabilityZones** *(list) --*
A list of AWS availability zones.
- *(string) --*
- **startTimes** *(list) --*
A list of dates and times that the event began.
- *(dict) --*
A range of dates and times that is used by the EventFilter and EntityFilter objects. If ``from`` is set and ``to`` is set: match items where the timestamp (``startTime`` , ``endTime`` , or ``lastUpdatedTime`` ) is between ``from`` and ``to`` inclusive. If ``from`` is set and ``to`` is not set: match items where the timestamp value is equal to or after ``from`` . If ``from`` is not set and ``to`` is set: match items where the timestamp value is equal to or before ``to`` .
- **from** *(datetime) --*
The starting date and time of a time range.
- **to** *(datetime) --*
The ending date and time of a time range.
- **endTimes** *(list) --*
A list of dates and times that the event ended.
- *(dict) --*
A range of dates and times that is used by the EventFilter and EntityFilter objects. If ``from`` is set and ``to`` is set: match items where the timestamp (``startTime`` , ``endTime`` , or ``lastUpdatedTime`` ) is between ``from`` and ``to`` inclusive. If ``from`` is set and ``to`` is not set: match items where the timestamp value is equal to or after ``from`` . If ``from`` is not set and ``to`` is set: match items where the timestamp value is equal to or before ``to`` .
- **from** *(datetime) --*
The starting date and time of a time range.
- **to** *(datetime) --*
The ending date and time of a time range.
- **lastUpdatedTimes** *(list) --*
A list of dates and times that the event was last updated.
- *(dict) --*
A range of dates and times that is used by the EventFilter and EntityFilter objects. If ``from`` is set and ``to`` is set: match items where the timestamp (``startTime`` , ``endTime`` , or ``lastUpdatedTime`` ) is between ``from`` and ``to`` inclusive. If ``from`` is set and ``to`` is not set: match items where the timestamp value is equal to or after ``from`` . If ``from`` is not set and ``to`` is set: match items where the timestamp value is equal to or before ``to`` .
- **from** *(datetime) --*
The starting date and time of a time range.
- **to** *(datetime) --*
The ending date and time of a time range.
- **entityArns** *(list) --*
A list of entity ARNs (unique identifiers).
- *(string) --*
- **entityValues** *(list) --*
A list of entity identifiers, such as EC2 instance IDs (``i-34ab692e`` ) or EBS volumes (``vol-426ab23e`` ).
- *(string) --*
- **eventTypeCategories** *(list) --*
A list of event type category codes (``issue`` , ``scheduledChange`` , or ``accountNotification`` ).
- *(string) --*
- **tags** *(list) --*
A map of entity tags attached to the affected entity.
- *(dict) --*
- *(string) --*
- *(string) --*
- **eventStatusCodes** *(list) --*
A list of event status codes.
- *(string) --*
:type nextToken: string
:param nextToken:
If the results of a search are large, only a portion of the results are returned, and a ``nextToken`` pagination token is returned in the response. To retrieve the next batch of results, reissue the search request and include the returned token. When all results have been returned, the response does not contain a pagination token value.
:type maxResults: integer
:param maxResults:
The maximum number of items to return in one batch, between 10 and 100, inclusive.
:type locale: string
:param locale:
The locale (language) to return information in. English (en) is the default and the only supported value at this time.
:rtype: dict
:returns:
"""
pass
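# Hedged usage sketch (not part of the generated stub): listing open EC2 events,
# following nextToken until the response no longer returns one. The filter values
# are illustrative.
#
#   import boto3
#
#   health = boto3.client("health", region_name="us-east-1")
#   kwargs = {"filter": {"services": ["EC2"], "eventStatusCodes": ["open"]}}
#   while True:
#       page = health.describe_events(**kwargs)
#       for event in page["events"]:
#           print(event["arn"], event["eventTypeCode"], event["statusCode"])
#       token = page.get("nextToken")
#       if not token:
#           break
#       kwargs["nextToken"] = token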
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
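# Hedged usage sketch (not part of the generated stub): generate_presigned_url is
# the generic botocore client helper; assuming presigning is meaningful for the
# chosen operation, a URL could be produced like this (values illustrative).
#
#   import boto3
#
#   health = boto3.client("health", region_name="us-east-1")
#   url = health.generate_presigned_url(
#       ClientMethod="describe_events",
#       Params={"filter": {"eventStatusCodes": ["open"]}},
#       ExpiresIn=3600,
#   )
#   print(url)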
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
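# Hedged usage sketch (not part of the generated stub): the same kind of listing
# via a paginator, assuming the describe_events operation is pageable for this
# client (otherwise get_paginator raises OperationNotPageableError).
#
#   import boto3
#
#   health = boto3.client("health", region_name="us-east-1")
#   paginator = health.get_paginator("describe_events")
#   for page in paginator.paginate(filter={"eventStatusCodes": ["open"]}):
#       for event in page["events"]:
#           print(event["arn"], event["statusCode"])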
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
|
sphinxbase/src/libsphinxbase/util/clapack_scrub.py | ejuacel/pocketsphinx.js | 560 | 11197771 |
#!/usr/bin/env python2.4
import sys, os
from cStringIO import StringIO
import re
from Plex import *
from Plex.Traditional import re as Re
class MyScanner(Scanner):
def __init__(self, info, name='<default>'):
Scanner.__init__(self, self.lexicon, info, name)
def begin(self, state_name):
# if self.state_name == '':
# print '<default>'
# else:
# print self.state_name
Scanner.begin(self, state_name)
def sep_seq(sequence, sep):
pat = Str(sequence[0])
for s in sequence[1:]:
pat += sep + Str(s)
return pat
def runScanner(data, scanner_class, lexicon=None):
info = StringIO(data)
outfo = StringIO()
if lexicon is not None:
scanner = scanner_class(lexicon, info)
else:
scanner = scanner_class(info)
while 1:
value, text = scanner.read()
if value is None:
break
elif value is IGNORE:
pass
else:
outfo.write(value)
return outfo.getvalue(), scanner
class LenSubsScanner(MyScanner):
"""Following clapack, we remove ftnlen arguments, which f2c puts after
a char * argument to hold the length of the passed string. This is just
a nuisance in C.
"""
def __init__(self, info, name='<ftnlen>'):
MyScanner.__init__(self, info, name)
self.paren_count = 0
def beginArgs(self, text):
if self.paren_count == 0:
self.begin('args')
self.paren_count += 1
return text
def endArgs(self, text):
self.paren_count -= 1
if self.paren_count == 0:
self.begin('')
return text
digits = Re('[0-9]+')
iofun = Re(r'\([^;]*;')
decl = Re(r'\([^)]*\)[,;'+'\n]')
any = Re('[.]*')
S = Re('[ \t\n]*')
cS = Str(',') + S
len_ = Re('[a-z][a-z0-9]*_len')
iofunctions = Str("s_cat", "s_copy", "s_stop", "s_cmp",
"i_len", "do_fio", "do_lio") + iofun
# Routines to not scrub the ftnlen argument from
keep_ftnlen = (Str('ilaenv_') | Str('s_rnge')) + Str('(')
lexicon = Lexicon([
(iofunctions, TEXT),
(keep_ftnlen, beginArgs),
State('args', [
(Str(')'), endArgs),
(Str('('), beginArgs),
(AnyChar, TEXT),
]),
(cS+Re(r'[1-9][0-9]*L'), IGNORE),
(cS+Str('ftnlen')+Opt(S+len_), IGNORE),
(cS+sep_seq(['(', 'ftnlen', ')'], S)+S+digits, IGNORE),
(Bol+Str('ftnlen ')+len_+Str(';\n'), IGNORE),
(cS+len_, TEXT),
(AnyChar, TEXT),
])
def scrubFtnlen(source):
return runScanner(source, LenSubsScanner)[0]
def cleanSource(source):
# remove whitespace at end of lines
source = re.sub(r'[\t ]+\n', '\n', source)
# remove comments like .. Scalar Arguments ..
source = re.sub(r'(?m)^[\t ]*/\* *\.\. .*?\n', '', source)
# collapse blanks of more than two in-a-row to two
source = re.sub(r'\n\n\n\n+', r'\n\n\n', source)
return source
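# Hedged illustrative sketch (not part of the original script): cleanSource only
# depends on the re module imported above, so its effect can be checked in
# isolation on a small, made-up snippet of f2c output:
#
#   sample = "int x;   \n/*     .. Scalar Arguments .. */\n\n\n\n\nreturn 0;\n"
#   cleaned = cleanSource(sample)
#   # trailing blanks are stripped, the ".." comment line is removed, and runs of
#   # blank lines are collapsed to at most two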
class LineQueue(object):
def __init__(self):
object.__init__(self)
self._queue = []
def add(self, line):
self._queue.append(line)
def clear(self):
self._queue = []
def flushTo(self, other_queue):
for line in self._queue:
other_queue.add(line)
self.clear()
def getValue(self):
q = LineQueue()
self.flushTo(q)
s = ''.join(q._queue)
self.clear()
return s
class CommentQueue(LineQueue):
def __init__(self):
LineQueue.__init__(self)
def add(self, line):
if line.strip() == '':
LineQueue.add(self, '\n')
else:
line = ' ' + line[2:-3].rstrip() + '\n'
LineQueue.add(self, line)
def flushTo(self, other_queue):
if len(self._queue) == 0:
pass
elif len(self._queue) == 1:
other_queue.add('/*' + self._queue[0][2:].rstrip() + ' */\n')
else:
other_queue.add('/*\n')
LineQueue.flushTo(self, other_queue)
other_queue.add('*/\n')
self.clear()
# This really seems to be about 4x longer than it needs to be
def cleanComments(source):
lines = LineQueue()
comments = CommentQueue()
def isCommentLine(line):
return line.startswith('/*') and line.endswith('*/\n')
blanks = LineQueue()
def isBlank(line):
return line.strip() == ''
def SourceLines(line):
if isCommentLine(line):
comments.add(line)
return HaveCommentLines
else:
lines.add(line)
return SourceLines
def HaveCommentLines(line):
if isBlank(line):
blanks.add('\n')
return HaveBlankLines
elif isCommentLine(line):
comments.add(line)
return HaveCommentLines
else:
comments.flushTo(lines)
lines.add(line)
return SourceLines
def HaveBlankLines(line):
if isBlank(line):
blanks.add('\n')
return HaveBlankLines
elif isCommentLine(line):
blanks.flushTo(comments)
comments.add(line)
return HaveCommentLines
else:
comments.flushTo(lines)
blanks.flushTo(lines)
lines.add(line)
return SourceLines
state = SourceLines
for line in StringIO(source):
state = state(line)
comments.flushTo(lines)
return lines.getValue()
def removeHeader(source):
lines = LineQueue()
def LookingForHeader(line):
m = re.match(r'/\*[^\n]*-- translated', line)
if m:
return InHeader
else:
lines.add(line)
return LookingForHeader
def InHeader(line):
if line.startswith('*/'):
return OutOfHeader
else:
return InHeader
def OutOfHeader(line):
if line.startswith('#include "f2c.h"'):
pass
else:
lines.add(line)
return OutOfHeader
state = LookingForHeader
for line in StringIO(source):
state = state(line)
return lines.getValue()
def replaceSlamch(source):
"""Replace slamch_ calls with appropiate macros"""
def repl(m):
s = m.group(1)
return dict(E='EPSILON', P='PRECISION', S='SAFEMINIMUM',
B='BASE')[s[0]]
source = re.sub(r'slamch_\("(.*?)"\)', repl, source)
source = re.sub(r'^\s+extern.*? slamch_.*?;$(?m)', '', source)
return source
# do it
def scrubSource(source, nsteps=None, verbose=False):
steps = [
('scrubbing ftnlen', scrubFtnlen),
('remove header', removeHeader),
('clean source', cleanSource),
('clean comments', cleanComments),
('replace slamch_() calls', replaceSlamch),
]
if nsteps is not None:
steps = steps[:nsteps]
for msg, step in steps:
if verbose:
print msg
source = step(source)
return source
if __name__ == '__main__':
filename = sys.argv[1]
outfilename = os.path.join(sys.argv[2], os.path.basename(filename))
fo = open(filename, 'r')
source = fo.read()
fo.close()
if len(sys.argv) > 3:
nsteps = int(sys.argv[3])
else:
nsteps = None
    source = scrubSource(source, nsteps, verbose=True)
writefo = open(outfilename, 'w')
writefo.write(source)
writefo.close()
|
src/deutschland/nina/__init__.py | andreasbossard/deutschland | 445 | 11197777 | # flake8: noqa
"""
Bundesamt für Bevölkerungsschutz: NINA API
    Receive important civil protection warnings for hazard situations such as the spread of hazardous substances or severe weather via a programming interface.  # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
__version__ = "1.0.0"
# import ApiClient
from deutschland.nina.api_client import ApiClient
# import Configuration
from deutschland.nina.configuration import Configuration
# import exceptions
from deutschland.nina.exceptions import (
ApiAttributeError,
ApiException,
ApiKeyError,
ApiTypeError,
ApiValueError,
OpenApiException,
)
|
scripts/update_thanks.py | ThePrez/gunicorn | 6,851 | 11197798 |
#!/usr/bin/env python
# Usage: git log --format="%an <%ae>" | python update_thanks.py
# You will get a result.txt file, you can work with the file (update, remove, ...)
#
# Install
# =======
# pip install validate_email pyDNS
#
import sys
from validate_email import validate_email
from email.utils import parseaddr
import DNS.Base
addresses = set()
bad_addresses = set()
collection = []
lines = list(reversed(sys.stdin.readlines()))
for author in map(str.strip, lines):
realname, email_address = parseaddr(author)
if email_address not in addresses:
if email_address in bad_addresses:
continue
else:
try:
value = validate_email(email_address)
if value:
addresses.add(email_address)
collection.append(author)
else:
bad_addresses.add(email_address)
except DNS.Base.TimeoutError:
bad_addresses.add(email_address)
with open('result.txt', 'w') as output:
output.write('\n'.join(collection))
|
homeassistant/components/p1_monitor/diagnostics.py | MrDelik/core | 30,023 | 11197804 | """Diagnostics support for P1 Monitor."""
from __future__ import annotations
from dataclasses import asdict
from typing import Any
from homeassistant.components.diagnostics import async_redact_data
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant
from . import P1MonitorDataUpdateCoordinator
from .const import DOMAIN, SERVICE_PHASES, SERVICE_SETTINGS, SERVICE_SMARTMETER
TO_REDACT = {
CONF_HOST,
}
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: P1MonitorDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
return {
"entry": {
"title": entry.title,
"data": async_redact_data(entry.data, TO_REDACT),
},
"data": {
"smartmeter": asdict(coordinator.data[SERVICE_SMARTMETER]),
"phases": asdict(coordinator.data[SERVICE_PHASES]),
"settings": asdict(coordinator.data[SERVICE_SETTINGS]),
},
}
|
tests/test_volume.py | stanik137/xtreemfs | 270 | 11197830 |
# Copyright (c) 2009-2011 by <NAME>, <NAME>, Zuse Institute Berlin
# Licensed under the BSD License, see LICENSE file for details.
from time import sleep
import sys, os, subprocess, signal
DEBUG_LEVELS = [ 'EMERG', 'ALERT', 'CRIT', 'ERR', 'WARNING', 'NOTICE', 'INFO', 'DEBUG' ]
class Volume:
def __init__(self,
name,
mount_point_dir_path,
xtreemfs_dir,
debug_level,
mount_options,
mkfs_options,
mrc_uri,
dir_uri,
pkcs12_file_path,
pkcs12_passphrase,
stripe_width,
stripe_size,
rwr_policy,
rwr_factor,
ronly_factor):
self.__mount_point_dir_path = os.path.abspath(mount_point_dir_path)
self.__name = name
self.__debug_level = debug_level
self.__xtreemfs_dir = xtreemfs_dir
self.__mount_options = mount_options
self.__mkfs_options = mkfs_options
self.__mrc_uri = mrc_uri
if not mrc_uri.endswith("/"):
self.__mrc_uri += "/"
self.__dir_uri = dir_uri
self.__pkcs12_file_path = pkcs12_file_path
self.__pkcs12_passphrase = pkcs12_passphrase
self.__stripe_width = stripe_width
self.__stripe_size = stripe_size
self.__rwr_policy = rwr_policy
self.__rwr_factor = rwr_factor
self.__ronly_factor = ronly_factor
def create(self):
mkfs_xtreemfs_file_path = os.path.abspath(os.path.join(self.__xtreemfs_dir, "bin", "mkfs.xtreemfs"))
if not os.path.exists(mkfs_xtreemfs_file_path):
mkfs_xtreemfs_file_path = "mkfs.xtreemfs" # Assume it's in the global path
mkfs_xtreemfs_args = [mkfs_xtreemfs_file_path]
mkfs_xtreemfs_args.extend(("-d", DEBUG_LEVELS[int(self.__debug_level)]))
mkfs_xtreemfs_args.extend(("-p", 'RAID0'))
if self.__pkcs12_file_path is not None: mkfs_xtreemfs_args.extend(("--pkcs12-file-path", self.__pkcs12_file_path))
if self.__pkcs12_passphrase is not None: mkfs_xtreemfs_args.extend(("--pkcs12-passphrase", self.__pkcs12_passphrase))
mkfs_xtreemfs_args.extend(("-s", str(self.__stripe_size)))
mkfs_xtreemfs_args.extend(("-w", str(self.__stripe_width)))
mkfs_xtreemfs_args.extend(self.__mkfs_options)
mkfs_xtreemfs_args.append(self.__mrc_uri + self.__name)
mkfs_xtreemfs_args = " ".join(mkfs_xtreemfs_args)
print "xtestenv: creating volume", self.__name, "with", mkfs_xtreemfs_args
retcode = subprocess.call(mkfs_xtreemfs_args, shell=True)
if retcode != 0:
raise RuntimeError("Failed to create volume: " + self.__name + " You can use the option --clean-test-dir to clean previous data from the test dir. mkfs.xtreemfs return value: " + str(retcode) + " Executed command: " + mkfs_xtreemfs_args)
def get_mount_point_dir_path(self):
return self.__mount_point_dir_path
def get_name(self):
return self.__name
def mount(self, log_file_path):
xtfsutil_file_path = os.path.abspath(os.path.join(self.__xtreemfs_dir, "bin", "xtfsutil"))
if not os.path.exists(xtfsutil_file_path):
xtfsutil_file_path = "xtfsutil" # Assume it's in the global path
try: os.mkdir(self.__mount_point_dir_path)
except: pass
mount_xtreemfs_file_path = os.path.abspath(os.path.join(self.__xtreemfs_dir, "bin", "mount.xtreemfs"))
if not os.path.exists(mount_xtreemfs_file_path):
mount_xtreemfs_file_path = "mount.xtreemfs" # Assume it's in the global path
mount_xtreemfs_args = [mount_xtreemfs_file_path]
mount_xtreemfs_args.append("-f") # So we can redirect stdout and stderr
mount_xtreemfs_args.extend(("-d", DEBUG_LEVELS[int(self.__debug_level)]))
mount_xtreemfs_args.extend(self.__mount_options)
if self.__pkcs12_file_path is not None: mount_xtreemfs_args.extend(("--pkcs12-file-path", self.__pkcs12_file_path))
if self.__pkcs12_passphrase is not None: mount_xtreemfs_args.extend(("--pkcs12-passphrase", self.__pkcs12_passphrase))
volume_uri = self.__dir_uri
if not volume_uri.endswith("/"): volume_uri += "/"
volume_uri += self.__name
mount_xtreemfs_args.append(volume_uri)
mount_xtreemfs_args.append(self.get_mount_point_dir_path())
if log_file_path is None:
stdout = sys.stdout
stderr = sys.stderr
else:
stderr = stdout = open(log_file_path, "a")
print "xtestenv: mounting volume", self.__name, "at", self.get_mount_point_dir_path(), "with", " ".join(mount_xtreemfs_args)
# Use subprocess.Popen instead of subprocess.call to run in the background
p = subprocess.Popen(mount_xtreemfs_args, stderr=stderr, stdout=stdout)
sleep(1.0)
if p.returncode is not None:
raise RuntimeError("Failed to mount volume '" + self.__name + "' error: " + str(p.returncode))
# Use 'waitpid' to touch any zombies and ensure that these are cleaned up first before checking /proc/<pid>.
try: os.waitpid(p.pid, os.WNOHANG)
# We dont care about the actual result of waitpid.
except OSError: pass
if not os.path.exists("/proc/" + str(p.pid)):
raise RuntimeError("Failed to mount volume '" + self.__name + "' error: mount.xtreemfs did not successfully start.")
# enable replication
if self.__rwr_factor > 0:
command = (xtfsutil_file_path + " " +
"--set-drp " +
"--replication-policy="+self.__rwr_policy + " " +
"--replication-factor="+str(self.__rwr_factor) + " " +
self.__mount_point_dir_path)
retcode = subprocess.call(command, shell=True)
if retcode != 0:
raise RuntimeError("Failed to enable read-write replication on volume: " + self.__name
+ " xtfsutil return value: " + str(retcode)
+ " Executed command: " + command)
# enable replicate on close for ronly replication
if self.__ronly_factor > 0:
command = (xtfsutil_file_path + " " +
"--set-drp " +
"--replication-policy=readonly " +
"--replication-factor="+str(self.__rwr_factor) + " " +
self.__mount_point_dir_path)
retcode = subprocess.call(command, shell=True)
if retcode != 0:
raise RuntimeError("Failed to enable read/only replication on volume: " + self.__name
+ " xtfsutil return value: " + str(retcode)
+ " Executed command: " + command)
def unmount(self):
for mounts_line in open("/proc/mounts").readlines():
mounts_line_parts = mounts_line.split()
test_device = mounts_line_parts[0]
test_mount_point_dir_path = mounts_line_parts[1]
if test_mount_point_dir_path.endswith(self.get_mount_point_dir_path()):
fusermount_args = " ".join(["fusermount", "-u", "-z", self.get_mount_point_dir_path()])
print "xtestenv: unmounting volume", self.get_name(), "with", fusermount_args
retcode = subprocess.call(fusermount_args, shell=True)
if retcode != 0:
print("Failed to unmount volume: " + self.__name + " fusermount -u return value: " + str(retcode))
|
pyro/contrib/autoname/scoping.py | tianjuchen/pyro | 4,959 | 11197844 |
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
"""
``pyro.contrib.autoname.scoping`` contains the implementation of
:func:`pyro.contrib.autoname.scope`, a tool for automatically appending
a semantically meaningful prefix to names of sample sites.
"""
import functools
from pyro.poutine.messenger import Messenger
from pyro.poutine.runtime import effectful
class NameCountMessenger(Messenger):
"""
``NameCountMessenger`` is the implementation of :func:`pyro.contrib.autoname.name_count`
"""
def __enter__(self):
self._names = set()
return super().__enter__()
def _increment_name(self, name, label):
while (name, label) in self._names:
split_name = name.split("__")
if "__" in name and split_name[-1].isdigit():
counter = int(split_name[-1]) + 1
name = "__".join(split_name[:-1] + [str(counter)])
else:
name = name + "__1"
return name
def _pyro_sample(self, msg):
msg["name"] = self._increment_name(msg["name"], "sample")
def _pyro_post_sample(self, msg):
self._names.add((msg["name"], "sample"))
def _pyro_post_scope(self, msg):
self._names.add((msg["args"][0], "scope"))
def _pyro_scope(self, msg):
msg["args"] = (self._increment_name(msg["args"][0], "scope"),)
class ScopeMessenger(Messenger):
"""
``ScopeMessenger`` is the implementation of :func:`pyro.contrib.autoname.scope`
"""
def __init__(self, prefix=None, inner=None):
super().__init__()
self.prefix = prefix
self.inner = inner
@staticmethod
@effectful(type="scope")
def _collect_scope(prefixed_scope):
return prefixed_scope.split("/")[-1]
def __enter__(self):
if self.prefix is None:
raise ValueError("no prefix was provided")
if not self.inner:
# to accomplish adding a counter to duplicate scopes,
# we make ScopeMessenger.__enter__ effectful
# so that the same mechanism that adds counters to sample names
# can be used to add a counter to a scope name
self.prefix = self._collect_scope(self.prefix)
return super().__enter__()
def __call__(self, fn):
if self.prefix is None:
self.prefix = fn.__code__.co_name # fn.__name__
@functools.wraps(fn)
def _fn(*args, **kwargs):
with type(self)(prefix=self.prefix, inner=self.inner):
return fn(*args, **kwargs)
return _fn
def _pyro_scope(self, msg):
msg["args"] = ("{}/{}".format(self.prefix, msg["args"][0]),)
def _pyro_sample(self, msg):
msg["name"] = "{}/{}".format(self.prefix, msg["name"])
def scope(fn=None, prefix=None, inner=None):
"""
:param fn: a stochastic function (callable containing Pyro primitive calls)
:param prefix: a string to prepend to sample names (optional if ``fn`` is provided)
:param inner: switch to determine where duplicate name counters appear
:returns: ``fn`` decorated with a :class:`~pyro.contrib.autoname.scoping.ScopeMessenger`
``scope`` prepends a prefix followed by a ``/`` to the name at a Pyro sample site.
It works much like TensorFlow's ``name_scope`` and ``variable_scope``,
and can be used as a context manager, a decorator, or a higher-order function.
``scope`` is very useful for aligning compositional models with guides or data.
Example::
>>> @scope(prefix="a")
... def model():
... return pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "a/x" in poutine.trace(model).get_trace()
Example::
>>> def model():
... with scope(prefix="a"):
... return pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "a/x" in poutine.trace(model).get_trace()
Scopes compose as expected, with outer scopes appearing before inner scopes in names::
>>> @scope(prefix="b")
... def model():
... with scope(prefix="a"):
... return pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "b/a/x" in poutine.trace(model).get_trace()
When used as a decorator or higher-order function,
``scope`` will use the name of the input function as the prefix
if no user-specified prefix is provided.
Example::
>>> @scope
... def model():
... return pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "model/x" in poutine.trace(model).get_trace()
"""
msngr = ScopeMessenger(prefix=prefix, inner=inner)
return msngr(fn) if fn is not None else msngr
def name_count(fn=None):
"""
``name_count`` is a very simple autonaming scheme that simply appends a suffix `"__"`
    plus a counter to any name that appears multiple times in an execution.
Only duplicate instances of a name get a suffix; the first instance is not modified.
Example::
>>> @name_count
... def model():
... for i in range(3):
... pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "x" in poutine.trace(model).get_trace()
>>> assert "x__1" in poutine.trace(model).get_trace()
>>> assert "x__2" in poutine.trace(model).get_trace()
``name_count`` also composes with :func:`~pyro.contrib.autoname.scope`
by adding a suffix to duplicate scope entrances:
Example::
>>> @name_count
... def model():
... for i in range(3):
... with pyro.contrib.autoname.scope(prefix="a"):
... pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "a/x" in poutine.trace(model).get_trace()
>>> assert "a__1/x" in poutine.trace(model).get_trace()
>>> assert "a__2/x" in poutine.trace(model).get_trace()
Example::
>>> @name_count
... def model():
... with pyro.contrib.autoname.scope(prefix="a"):
... for i in range(3):
... pyro.sample("x", dist.Bernoulli(0.5))
...
>>> assert "a/x" in poutine.trace(model).get_trace()
>>> assert "a/x__1" in poutine.trace(model).get_trace()
>>> assert "a/x__2" in poutine.trace(model).get_trace()
"""
msngr = NameCountMessenger()
return msngr(fn) if fn is not None else msngr
|
lib/spack/spack/test/cmd/ci.py | BenWibking/spack | 2,360 | 11197856 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import filecmp
import json
import os
import shutil
import pytest
from jsonschema import ValidationError, validate
import spack
import spack.binary_distribution
import spack.ci as ci
import spack.compilers as compilers
import spack.config
import spack.environment as ev
import spack.hash_types as ht
import spack.main
import spack.paths as spack_paths
import spack.repo as repo
import spack.util.gpg
import spack.util.spack_json as sjson
import spack.util.spack_yaml as syaml
import spack.util.url as url_util
from spack.schema.buildcache_spec import schema as specfile_schema
from spack.schema.database_index import schema as db_idx_schema
from spack.schema.gitlab_ci import schema as gitlab_ci_schema
from spack.spec import CompilerSpec, Spec
from spack.util.mock_package import MockPackageMultiRepo
ci_cmd = spack.main.SpackCommand('ci')
env_cmd = spack.main.SpackCommand('env')
mirror_cmd = spack.main.SpackCommand('mirror')
gpg_cmd = spack.main.SpackCommand('gpg')
install_cmd = spack.main.SpackCommand('install')
uninstall_cmd = spack.main.SpackCommand('uninstall')
buildcache_cmd = spack.main.SpackCommand('buildcache')
pytestmark = pytest.mark.maybeslow
@pytest.fixture()
def project_dir_env():
def _set_project_dir(path):
os.environ['CI_PROJECT_DIR'] = path
yield _set_project_dir
if 'CI_PROJECT_DIR' in os.environ:
os.environ.pop('CI_PROJECT_DIR')
def set_env_var(key, val):
os.environ[key] = val
def test_specs_staging(config):
"""Make sure we achieve the best possible staging for the following
spec DAG::
a
/|
c b
|\
e d
|\
f g
In this case, we would expect 'c', 'e', 'f', and 'g' to be in the first stage,
and then 'd', 'b', and 'a' to be put in the next three stages, respectively.
"""
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
g = mock_repo.add_package('g', [], [])
f = mock_repo.add_package('f', [], [])
e = mock_repo.add_package('e', [], [])
d = mock_repo.add_package('d', [f, g], [default, default])
c = mock_repo.add_package('c', [], [])
b = mock_repo.add_package('b', [d, e], [default, default])
mock_repo.add_package('a', [b, c], [default, default])
with repo.use_repositories(mock_repo):
spec_a = Spec('a')
spec_a.concretize()
spec_a_label = ci.spec_deps_key(spec_a)
spec_b_label = ci.spec_deps_key(spec_a['b'])
spec_c_label = ci.spec_deps_key(spec_a['c'])
spec_d_label = ci.spec_deps_key(spec_a['d'])
spec_e_label = ci.spec_deps_key(spec_a['e'])
spec_f_label = ci.spec_deps_key(spec_a['f'])
spec_g_label = ci.spec_deps_key(spec_a['g'])
spec_labels, dependencies, stages = ci.stage_spec_jobs([spec_a])
assert (len(stages) == 4)
assert (len(stages[0]) == 4)
assert (spec_c_label in stages[0])
assert (spec_e_label in stages[0])
assert (spec_f_label in stages[0])
assert (spec_g_label in stages[0])
assert (len(stages[1]) == 1)
assert (spec_d_label in stages[1])
assert (len(stages[2]) == 1)
assert (spec_b_label in stages[2])
assert (len(stages[3]) == 1)
assert (spec_a_label in stages[3])
def test_ci_generate_with_env(tmpdir, mutable_mock_env_path,
install_mockery, mock_packages, project_dir_env,
mock_binary_index):
"""Make sure we can get a .gitlab-ci.yml from an environment file
which has the gitlab-ci, cdash, and mirrors sections."""
project_dir_env(tmpdir.strpath)
mirror_url = 'https://my.fake.mirror'
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
definitions:
- bootstrap:
- [email protected]
- old-gcc-pkgs:
- archive-files
- callpath
# specify ^openblas-with-lapack to ensure that builtin.mock repo flake8
# package (which can also provide lapack) is not chosen, as it violates
# a package-level check which requires exactly one fetch strategy (this
# is apparently not an issue for other tests that use it).
- [email protected] ^openblas-with-lapack
specs:
- matrix:
- [$old-gcc-pkgs]
mirrors:
some-mirror: {0}
gitlab-ci:
bootstrap:
- name: bootstrap
compiler-agnostic: true
mappings:
- match:
- arch=test-debian6-core2
runner-attributes:
tags:
- donotcare
image: donotcare
service-job-attributes:
image: donotcare
tags: [donotcare]
cdash:
build-group: Not important
url: https://my.fake.cdash
project: Not used
site: Nothing
""".format(mirror_url))
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
found_spec = False
for ci_key in yaml_contents.keys():
if '(bootstrap)' in ci_key:
found_spec = True
assert('cmake' in ci_key)
assert(found_spec)
assert('stages' in yaml_contents)
assert(len(yaml_contents['stages']) == 6)
assert(yaml_contents['stages'][0] == 'stage-0')
assert(yaml_contents['stages'][5] == 'stage-rebuild-index')
assert('rebuild-index' in yaml_contents)
rebuild_job = yaml_contents['rebuild-index']
expected = 'spack buildcache update-index --keys -d {0}'.format(
mirror_url)
assert(rebuild_job['script'][0] == expected)
assert('variables' in yaml_contents)
assert('SPACK_ARTIFACTS_ROOT' in yaml_contents['variables'])
artifacts_root = yaml_contents['variables']['SPACK_ARTIFACTS_ROOT']
assert(artifacts_root == 'jobs_scratch_dir')
def _validate_needs_graph(yaml_contents, needs_graph, artifacts):
for job_name, job_def in yaml_contents.items():
for needs_def_name, needs_list in needs_graph.items():
if job_name.startswith(needs_def_name):
# check job needs against the expected needs definition
j_needs = job_def['needs']
assert all([job_needs['job'][:job_needs['job'].index('/')]
in needs_list for job_needs in j_needs])
assert(all([nl in
[n['job'][:n['job'].index('/')] for n in j_needs]
for nl in needs_list]))
assert all([job_needs['artifacts'] == artifacts
for job_needs in j_needs])
break
def test_ci_generate_bootstrap_gcc(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, project_dir_env):
"""Test that we can bootstrap a compiler and use it as the
compiler for a spec in the environment"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
definitions:
- bootstrap:
- [email protected]
- [email protected]
specs:
- dyninst%[email protected]
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
bootstrap:
- name: bootstrap
compiler-agnostic: true
mappings:
- match:
- arch=test-debian6-x86_64
runner-attributes:
tags:
- donotcare
""")
needs_graph = {
'(bootstrap) conflict': [],
'(bootstrap) gcc': [
'(bootstrap) conflict',
],
'(specs) libelf': [
'(bootstrap) gcc',
],
'(specs) libdwarf': [
'(bootstrap) gcc',
'(specs) libelf',
],
'(specs) dyninst': [
'(bootstrap) gcc',
'(specs) libelf',
'(specs) libdwarf',
],
}
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
_validate_needs_graph(yaml_contents, needs_graph, False)
def test_ci_generate_bootstrap_artifacts_buildcache(tmpdir,
mutable_mock_env_path,
install_mockery,
mock_packages,
project_dir_env):
"""Test that we can bootstrap a compiler when artifacts buildcache
is turned on"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
definitions:
- bootstrap:
- [email protected]
specs:
- dyninst%[email protected]
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
bootstrap:
- name: bootstrap
compiler-agnostic: true
mappings:
- match:
- arch=test-debian6-x86_64
runner-attributes:
tags:
- donotcare
enable-artifacts-buildcache: True
""")
needs_graph = {
'(bootstrap) conflict': [],
'(bootstrap) gcc': [
'(bootstrap) conflict',
],
'(specs) libelf': [
'(bootstrap) gcc',
'(bootstrap) conflict',
],
'(specs) libdwarf': [
'(bootstrap) gcc',
'(bootstrap) conflict',
'(specs) libelf',
],
'(specs) dyninst': [
'(bootstrap) gcc',
'(bootstrap) conflict',
'(specs) libelf',
'(specs) libdwarf',
],
}
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
_validate_needs_graph(yaml_contents, needs_graph, True)
def test_ci_generate_with_env_missing_section(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, project_dir_env,
mock_binary_index):
"""Make sure we get a reasonable message if we omit gitlab-ci section"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
mirrors:
some-mirror: https://my.fake.mirror
""")
expect_out = 'Error: Environment yaml does not have "gitlab-ci" section'
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test'):
output = ci_cmd('generate', fail_on_error=False, output=str)
assert(expect_out in output)
def test_ci_generate_with_cdash_token(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, project_dir_env,
mock_binary_index):
"""Make sure we it doesn't break if we configure cdash"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
enable-artifacts-buildcache: True
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: donotcare
cdash:
build-group: Not important
url: https://my.fake.cdash
project: Not used
site: Nothing
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test'):
fake_token = '<PASSWORD>'
os.environ['SPACK_CDASH_AUTH_TOKEN'] = fake_token
copy_to_file = str(tmpdir.join('backup-ci.yml'))
try:
output = ci_cmd('generate', '--copy-to', copy_to_file, output=str)
finally:
del os.environ['SPACK_CDASH_AUTH_TOKEN']
# That fake token should still have resulted in being unable to
# register build group with cdash, but the workload should
# still have been generated.
expect = 'Problem populating buildgroup'
assert(expect in output)
dir_contents = os.listdir(tmpdir.strpath)
assert('backup-ci.yml' in dir_contents)
orig_file = str(tmpdir.join('.gitlab-ci.yml'))
assert(filecmp.cmp(orig_file, copy_to_file) is True)
def test_ci_generate_with_custom_scripts(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
project_dir_env, mock_binary_index):
"""Test use of user-provided scripts"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
variables:
ONE: plain-string-value
TWO: ${INTERP_ON_BUILD}
before_script:
- mkdir /some/path
- pushd /some/path
- git clone ${SPACK_REPO}
- cd spack
- git checkout ${SPACK_REF}
- popd
script:
- spack -d ci rebuild
after_script:
- rm -rf /some/path/spack
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
monkeypatch.setattr(spack.main, 'get_version', lambda: '0.15.3')
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
found_it = False
assert('variables' in yaml_contents)
global_vars = yaml_contents['variables']
assert('SPACK_VERSION' in global_vars)
assert(global_vars['SPACK_VERSION'] == '0.15.3')
assert('SPACK_CHECKOUT_VERSION' in global_vars)
assert(global_vars['SPACK_CHECKOUT_VERSION'] == 'v0.15.3')
for ci_key in yaml_contents.keys():
ci_obj = yaml_contents[ci_key]
if 'archive-files' in ci_key:
# Ensure we have variables, possibly interpolated
assert('variables' in ci_obj)
var_d = ci_obj['variables']
assert('ONE' in var_d)
assert(var_d['ONE'] == 'plain-string-value')
assert('TWO' in var_d)
assert(var_d['TWO'] == '${INTERP_ON_BUILD}')
# Ensure we have scripts verbatim
assert('before_script' in ci_obj)
before_script = ci_obj['before_script']
assert(before_script[0] == 'mkdir /some/path')
assert(before_script[1] == 'pushd /some/path')
assert(before_script[2] == 'git clone ${SPACK_REPO}')
assert(before_script[3] == 'cd spack')
assert(before_script[4] == 'git checkout ${SPACK_REF}')
assert(before_script[5] == 'popd')
assert('script' in ci_obj)
assert(ci_obj['script'][0] == 'spack -d ci rebuild')
assert('after_script' in ci_obj)
after_script = ci_obj['after_script'][0]
assert(after_script == 'rm -rf /some/path/spack')
found_it = True
assert(found_it)
def test_ci_generate_pkg_with_deps(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, project_dir_env):
"""Test pipeline generation for a package w/ dependencies"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- flatten-deps
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
enable-artifacts-buildcache: True
mappings:
- match:
- flatten-deps
runner-attributes:
tags:
- donotcare
- match:
- dependency-install
runner-attributes:
tags:
- donotcare
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
found = []
for ci_key in yaml_contents.keys():
ci_obj = yaml_contents[ci_key]
if 'dependency-install' in ci_key:
assert('stage' in ci_obj)
assert(ci_obj['stage'] == 'stage-0')
found.append('dependency-install')
if 'flatten-deps' in ci_key:
assert('stage' in ci_obj)
assert(ci_obj['stage'] == 'stage-1')
found.append('flatten-deps')
assert('flatten-deps' in found)
assert('dependency-install' in found)
def test_ci_generate_for_pr_pipeline(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
project_dir_env):
"""Test that PR pipelines do not include a final stage job for
rebuilding the mirror index, even if that job is specifically
configured"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- flatten-deps
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
enable-artifacts-buildcache: True
mappings:
- match:
- flatten-deps
runner-attributes:
tags:
- donotcare
- match:
- dependency-install
runner-attributes:
tags:
- donotcare
service-job-attributes:
image: donotcare
tags: [donotcare]
rebuild-index: False
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
os.environ['SPACK_PIPELINE_TYPE'] = 'spack_pull_request'
os.environ['SPACK_PR_BRANCH'] = 'fake-test-branch'
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
try:
ci_cmd('generate', '--output-file', outputfile)
finally:
del os.environ['SPACK_PIPELINE_TYPE']
del os.environ['SPACK_PR_BRANCH']
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
assert('rebuild-index' not in yaml_contents)
assert('variables' in yaml_contents)
pipeline_vars = yaml_contents['variables']
assert('SPACK_PIPELINE_TYPE' in pipeline_vars)
assert(pipeline_vars['SPACK_PIPELINE_TYPE'] == 'spack_pull_request')
def test_ci_generate_with_external_pkg(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
project_dir_env):
"""Make sure we do not generate jobs for external pkgs"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
- externaltest
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
mappings:
- match:
- archive-files
- externaltest
runner-attributes:
tags:
- donotcare
image: donotcare
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
yaml_contents = syaml.load(f)
# Check that the "externaltool" package was not erroneously staged
assert not any('externaltool' in key for key in yaml_contents)
@pytest.mark.xfail(reason='fails intermittently and covered by gitlab ci')
def test_ci_rebuild(tmpdir, mutable_mock_env_path,
install_mockery, mock_packages, monkeypatch,
mock_gnupghome, mock_fetch, project_dir_env,
mock_binary_index):
project_dir_env(tmpdir.strpath)
working_dir = tmpdir.join('working_dir')
log_dir = os.path.join(working_dir.strpath, 'logs')
repro_dir = os.path.join(working_dir.strpath, 'repro')
env_dir = working_dir.join('concrete_env')
mirror_dir = working_dir.join('mirror')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
broken_specs_path = os.path.join(working_dir.strpath, 'naughty-list')
broken_specs_url = url_util.join('file://', broken_specs_path)
temp_storage_url = 'file:///path/to/per/pipeline/storage'
ci_job_url = 'https://some.domain/group/project/-/jobs/42'
ci_pipeline_url = 'https://some.domain/group/project/-/pipelines/7'
signing_key_dir = spack_paths.mock_gpg_keys_path
signing_key_path = os.path.join(signing_key_dir, 'package-signing-key')
with open(signing_key_path) as fd:
signing_key = fd.read()
spack_yaml_contents = """
spack:
definitions:
- packages: [archive-files]
specs:
- $packages
mirrors:
test-mirror: {0}
gitlab-ci:
broken-specs-url: {1}
temporary-storage-url-prefix: {2}
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: donotcare
cdash:
build-group: Not important
url: https://my.fake.cdash
project: Not used
site: Nothing
""".format(mirror_url, broken_specs_url, temp_storage_url)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write(spack_yaml_contents)
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test') as env:
with env.write_transaction():
env.concretize()
env.write()
if not os.path.exists(env_dir.strpath):
os.makedirs(env_dir.strpath)
shutil.copyfile(env.manifest_path,
os.path.join(env_dir.strpath, 'spack.yaml'))
shutil.copyfile(env.lock_path,
os.path.join(env_dir.strpath, 'spack.lock'))
root_spec_build_hash = None
job_spec_dag_hash = None
job_spec_full_hash = None
for h, s in env.specs_by_hash.items():
if s.name == 'archive-files':
root_spec_build_hash = h
job_spec_dag_hash = s.dag_hash()
job_spec_full_hash = s.full_hash()
assert root_spec_build_hash
assert job_spec_dag_hash
assert job_spec_full_hash
def fake_cdash_register(build_name, base_url, project, site, track):
return ('fakebuildid', 'fakestamp')
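            # Stub cdash registration and replace the real install command with
            # 'notcommand' so the rebuild runs offline and the install step
            # deliberately fails with INSTALL_FAIL_CODE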
monkeypatch.setattr(ci, 'register_cdash_build', fake_cdash_register)
monkeypatch.setattr(spack.cmd.ci, 'CI_REBUILD_INSTALL_BASE_ARGS', [
'notcommand'
])
monkeypatch.setattr(spack.cmd.ci, 'INSTALL_FAIL_CODE', 127)
with env_dir.as_cwd():
env_cmd('activate', '--without-view', '--sh', '-d', '.')
# Create environment variables as gitlab would do it
set_env_var('SPACK_ARTIFACTS_ROOT', working_dir.strpath)
set_env_var('SPACK_JOB_LOG_DIR', log_dir)
set_env_var('SPACK_JOB_REPRO_DIR', repro_dir)
set_env_var('SPACK_LOCAL_MIRROR_DIR', mirror_dir.strpath)
set_env_var('SPACK_CONCRETE_ENV_DIR', env_dir.strpath)
set_env_var('CI_PIPELINE_ID', '7192')
set_env_var('SPACK_SIGNING_KEY', signing_key)
set_env_var('SPACK_ROOT_SPEC', root_spec_build_hash)
set_env_var('SPACK_JOB_SPEC_DAG_HASH', job_spec_dag_hash)
set_env_var('SPACK_JOB_SPEC_PKG_NAME', 'archive-files')
set_env_var('SPACK_COMPILER_ACTION', 'NONE')
set_env_var('SPACK_CDASH_BUILD_NAME', '(specs) archive-files')
set_env_var('SPACK_RELATED_BUILDS_CDASH', '')
set_env_var('SPACK_REMOTE_MIRROR_URL', mirror_url)
set_env_var('SPACK_PIPELINE_TYPE', 'spack_protected_branch')
set_env_var('CI_JOB_URL', ci_job_url)
set_env_var('CI_PIPELINE_URL', ci_pipeline_url)
ci_cmd('rebuild', fail_on_error=False)
expected_repro_files = [
'install.sh',
'root.yaml',
'archive-files.yaml',
'spack.yaml',
'spack.lock'
]
repro_files = os.listdir(repro_dir)
assert(all([f in repro_files for f in expected_repro_files]))
install_script_path = os.path.join(repro_dir, 'install.sh')
install_line = None
with open(install_script_path) as fd:
for line in fd:
if line.startswith('"notcommand"'):
install_line = line
assert(install_line)
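            # Each token in the generated install command is quoted; strip the
            # quotes and trailing newline before inspecting the arguments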
def mystrip(s):
return s.strip('"').rstrip('\n').rstrip('"')
install_parts = [mystrip(s) for s in install_line.split(' ')]
assert('--keep-stage' in install_parts)
assert('--require-full-hash-match' in install_parts)
assert('--no-check-signature' not in install_parts)
assert('--no-add' in install_parts)
assert('-f' in install_parts)
flag_index = install_parts.index('-f')
assert('archive-files.yaml' in install_parts[flag_index + 1])
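            # The failed (stubbed) install should have been reported to the
            # broken-specs url together with the job and pipeline urls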
broken_spec_file = os.path.join(broken_specs_path, job_spec_full_hash)
with open(broken_spec_file) as fd:
broken_spec_content = fd.read()
assert(ci_job_url in broken_spec_content)
            assert(ci_pipeline_url in broken_spec_content)
env_cmd('deactivate')
def test_ci_nothing_to_rebuild(tmpdir, mutable_mock_env_path,
install_mockery, mock_packages, monkeypatch,
mock_fetch, project_dir_env, mock_binary_index):
project_dir_env(tmpdir.strpath)
working_dir = tmpdir.join('working_dir')
mirror_dir = working_dir.join('mirror')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
spack_yaml_contents = """
spack:
definitions:
- packages: [archive-files]
specs:
- $packages
mirrors:
test-mirror: {0}
gitlab-ci:
enable-artifacts-buildcache: True
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: donotcare
""".format(mirror_url)
install_cmd('archive-files')
buildcache_cmd('create', '-a', '-f', '-u', '--mirror-url',
mirror_url, 'archive-files')
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write(spack_yaml_contents)
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test') as env:
env.concretize()
root_spec_build_hash = None
job_spec_dag_hash = None
for h, s in env.specs_by_hash.items():
if s.name == 'archive-files':
root_spec_build_hash = h
job_spec_dag_hash = s.dag_hash()
# Create environment variables as gitlab would do it
set_env_var('SPACK_ARTIFACTS_ROOT', working_dir.strpath)
set_env_var('SPACK_JOB_LOG_DIR', 'log_dir')
set_env_var('SPACK_JOB_REPRO_DIR', 'repro_dir')
set_env_var('SPACK_LOCAL_MIRROR_DIR', mirror_dir.strpath)
set_env_var('SPACK_CONCRETE_ENV_DIR', tmpdir.strpath)
set_env_var('SPACK_ROOT_SPEC', root_spec_build_hash)
set_env_var('SPACK_JOB_SPEC_DAG_HASH', job_spec_dag_hash)
set_env_var('SPACK_JOB_SPEC_PKG_NAME', 'archive-files')
set_env_var('SPACK_COMPILER_ACTION', 'NONE')
set_env_var('SPACK_REMOTE_MIRROR_URL', mirror_url)
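        # Stub the buildcache download so 'spack ci rebuild' can report there is
        # nothing to rebuild without touching a real mirror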
def fake_dl_method(spec, *args, **kwargs):
print('fake download buildcache {0}'.format(spec.name))
monkeypatch.setattr(
spack.binary_distribution, 'download_single_spec', fake_dl_method)
ci_out = ci_cmd('rebuild', output=str)
assert('No need to rebuild archive-files' in ci_out)
assert('fake download buildcache archive-files' in ci_out)
env_cmd('deactivate')
@pytest.mark.disable_clean_stage_check
def test_push_mirror_contents(tmpdir, mutable_mock_env_path,
install_mockery_mutable_config, mock_packages,
mock_fetch, mock_stage, mock_gnupghome,
project_dir_env):
project_dir_env(tmpdir.strpath)
working_dir = tmpdir.join('working_dir')
mirror_dir = working_dir.join('mirror')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
signing_key_dir = spack_paths.mock_gpg_keys_path
signing_key_path = os.path.join(signing_key_dir, 'package-signing-key')
with open(signing_key_path) as fd:
signing_key = fd.read()
ci.import_signing_key(signing_key)
spack_yaml_contents = """
spack:
definitions:
- packages: [patchelf]
specs:
- $packages
mirrors:
test-mirror: {0}
gitlab-ci:
enable-artifacts-buildcache: True
mappings:
- match:
- patchelf
runner-attributes:
tags:
- donotcare
image: donotcare
service-job-attributes:
tags:
- nonbuildtag
image: basicimage
""".format(mirror_url)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write(spack_yaml_contents)
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test') as env:
spec_map = ci.get_concrete_specs(
env, 'patchelf', 'patchelf', '', 'FIND_ANY')
concrete_spec = spec_map['patchelf']
spec_json = concrete_spec.to_json(hash=ht.build_hash)
json_path = str(tmpdir.join('spec.json'))
with open(json_path, 'w') as ypfd:
ypfd.write(spec_json)
install_cmd('--keep-stage', json_path)
            # Push the just-installed spec to the mirror: env, spec json path, mirror url, sign_binaries
ci.push_mirror_contents(env, json_path, mirror_url, True)
ci.write_cdashid_to_mirror('42', concrete_spec, mirror_url)
buildcache_path = os.path.join(mirror_dir.strpath, 'build_cache')
# Now test the --prune-dag (default) option of spack ci generate
mirror_cmd('add', 'test-ci', mirror_url)
outputfile_pruned = str(tmpdir.join('pruned_pipeline.yml'))
ci_cmd('generate', '--output-file', outputfile_pruned)
with open(outputfile_pruned) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
assert('no-specs-to-rebuild' in yaml_contents)
# Make sure there are no other spec jobs or rebuild-index
assert(len(yaml_contents.keys()) == 1)
the_elt = yaml_contents['no-specs-to-rebuild']
assert('tags' in the_elt)
assert('nonbuildtag' in the_elt['tags'])
assert('image' in the_elt)
assert(the_elt['image'] == 'basicimage')
outputfile_not_pruned = str(tmpdir.join('unpruned_pipeline.yml'))
ci_cmd('generate', '--no-prune-dag', '--output-file',
outputfile_not_pruned)
# Test the --no-prune-dag option of spack ci generate
with open(outputfile_not_pruned) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
found_spec_job = False
for ci_key in yaml_contents.keys():
if '(specs) patchelf' in ci_key:
the_elt = yaml_contents[ci_key]
assert('variables' in the_elt)
job_vars = the_elt['variables']
assert('SPACK_SPEC_NEEDS_REBUILD' in job_vars)
assert(job_vars['SPACK_SPEC_NEEDS_REBUILD'] == 'False')
found_spec_job = True
assert(found_spec_job)
mirror_cmd('rm', 'test-ci')
# Test generating buildcache index while we have bin mirror
buildcache_cmd('update-index', '--mirror-url', mirror_url)
index_path = os.path.join(buildcache_path, 'index.json')
with open(index_path) as idx_fd:
index_object = json.load(idx_fd)
validate(index_object, db_idx_schema)
# Now that index is regenerated, validate "buildcache list" output
buildcache_list_output = buildcache_cmd('list', output=str)
assert('patchelf' in buildcache_list_output)
# Also test buildcache_spec schema
bc_files_list = os.listdir(buildcache_path)
for file_name in bc_files_list:
if file_name.endswith('.spec.json'):
spec_json_path = os.path.join(buildcache_path, file_name)
with open(spec_json_path) as json_fd:
json_object = sjson.load(json_fd)
validate(json_object, specfile_schema)
logs_dir = working_dir.join('logs_dir')
if not os.path.exists(logs_dir.strpath):
os.makedirs(logs_dir.strpath)
ci.copy_stage_logs_to_artifacts(concrete_spec, logs_dir.strpath)
logs_dir_list = os.listdir(logs_dir.strpath)
assert('spack-build-out.txt' in logs_dir_list)
# Also just make sure that if something goes wrong with the
# stage logs copy, no exception is thrown
ci.copy_stage_logs_to_artifacts(None, logs_dir.strpath)
dl_dir = working_dir.join('download_dir')
if not os.path.exists(dl_dir.strpath):
os.makedirs(dl_dir.strpath)
buildcache_cmd('download', '--spec-file', json_path, '--path',
dl_dir.strpath, '--require-cdashid')
dl_dir_list = os.listdir(dl_dir.strpath)
assert(len(dl_dir_list) == 3)
def test_push_mirror_contents_exceptions(monkeypatch, capsys):
def failing_access(*args, **kwargs):
raise Exception('Error: Access Denied')
monkeypatch.setattr(spack.ci, '_push_mirror_contents', failing_access)
    # Input doesn't matter, as we are faking exceptional output
url = 'fakejunk'
ci.push_mirror_contents(None, None, url, None)
captured = capsys.readouterr()
std_out = captured[0]
expect_msg = 'Permission problem writing to {0}'.format(url)
assert expect_msg in std_out
def test_ci_generate_override_runner_attrs(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
project_dir_env):
"""Test that we get the behavior we want with respect to the provision
of runner attributes like tags, variables, and scripts, both when we
inherit them from the top level, as well as when we override one or
more at the runner level"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- flatten-deps
- a
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
tags:
- toplevel
variables:
ONE: toplevelvarone
TWO: toplevelvartwo
before_script:
- pre step one
- pre step two
script:
- main step
after_script:
- post step one
mappings:
- match:
- flatten-deps
runner-attributes:
tags:
- specific-one
variables:
THREE: specificvarthree
- match:
- dependency-install
- match:
- a
runner-attributes:
tags:
- specific-a
- toplevel
variables:
ONE: specificvarone
TWO: specificvartwo
before_script:
- custom pre step one
script:
- custom main step
after_script:
- custom post step one
service-job-attributes:
image: donotcare
tags: [donotcare]
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
monkeypatch.setattr(
spack.main, 'get_version', lambda: '0.15.3-416-12ad69eb1')
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
assert('variables' in yaml_contents)
global_vars = yaml_contents['variables']
assert('SPACK_VERSION' in global_vars)
assert(global_vars['SPACK_VERSION'] == '0.15.3-416-12ad69eb1')
assert('SPACK_CHECKOUT_VERSION' in global_vars)
assert(global_vars['SPACK_CHECKOUT_VERSION'] == '12ad69eb1')
for ci_key in yaml_contents.keys():
if '(specs) b' in ci_key:
assert(False)
if '(specs) a' in ci_key:
# Make sure a's attributes override variables, and all the
# scripts. Also, make sure the 'toplevel' tag doesn't
# appear twice, but that a's specific extra tag does appear
the_elt = yaml_contents[ci_key]
assert(the_elt['variables']['ONE'] == 'specificvarone')
assert(the_elt['variables']['TWO'] == 'specificvartwo')
assert('THREE' not in the_elt['variables'])
assert(len(the_elt['tags']) == 2)
assert('specific-a' in the_elt['tags'])
assert('toplevel' in the_elt['tags'])
assert(len(the_elt['before_script']) == 1)
assert(the_elt['before_script'][0] ==
'custom pre step one')
assert(len(the_elt['script']) == 1)
assert(the_elt['script'][0] == 'custom main step')
assert(len(the_elt['after_script']) == 1)
assert(the_elt['after_script'][0] ==
'custom post step one')
if '(specs) dependency-install' in ci_key:
# Since the dependency-install match omits any
# runner-attributes, make sure it inherited all the
# top-level attributes.
the_elt = yaml_contents[ci_key]
assert(the_elt['variables']['ONE'] == 'toplevelvarone')
assert(the_elt['variables']['TWO'] == 'toplevelvartwo')
assert('THREE' not in the_elt['variables'])
assert(len(the_elt['tags']) == 1)
assert(the_elt['tags'][0] == 'toplevel')
assert(len(the_elt['before_script']) == 2)
assert(the_elt['before_script'][0] == 'pre step one')
assert(the_elt['before_script'][1] == 'pre step two')
assert(len(the_elt['script']) == 1)
assert(the_elt['script'][0] == 'main step')
assert(len(the_elt['after_script']) == 1)
assert(the_elt['after_script'][0] == 'post step one')
if '(specs) flatten-deps' in ci_key:
# The flatten-deps match specifies that we keep the two
                # top level variables, but add a third specific one. It
# also adds a custom tag which should be combined with
# the top-level tag.
the_elt = yaml_contents[ci_key]
assert(the_elt['variables']['ONE'] == 'toplevelvarone')
assert(the_elt['variables']['TWO'] == 'toplevelvartwo')
assert(the_elt['variables']['THREE'] == 'specificvarthree')
assert(len(the_elt['tags']) == 2)
assert('specific-one' in the_elt['tags'])
assert('toplevel' in the_elt['tags'])
assert(len(the_elt['before_script']) == 2)
assert(the_elt['before_script'][0] == 'pre step one')
assert(the_elt['before_script'][1] == 'pre step two')
assert(len(the_elt['script']) == 1)
assert(the_elt['script'][0] == 'main step')
assert(len(the_elt['after_script']) == 1)
assert(the_elt['after_script'][0] == 'post step one')
def test_ci_generate_with_workarounds(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
project_dir_env):
"""Make sure the post-processing cli workarounds do what they should"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- callpath%[email protected]
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
mappings:
- match: ['%[email protected]']
runner-attributes:
tags:
- donotcare
image: donotcare
enable-artifacts-buildcache: true
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
ci_cmd('generate', '--output-file', outputfile, '--dependencies')
with open(outputfile) as f:
contents = f.read()
yaml_contents = syaml.load(contents)
found_one = False
for ci_key in yaml_contents.keys():
if ci_key.startswith('(specs) '):
found_one = True
job_obj = yaml_contents[ci_key]
assert('needs' not in job_obj)
assert('dependencies' in job_obj)
assert(found_one is True)
@pytest.mark.disable_clean_stage_check
def test_ci_rebuild_index(tmpdir, mutable_mock_env_path,
install_mockery, mock_packages, mock_fetch,
mock_stage):
working_dir = tmpdir.join('working_dir')
mirror_dir = working_dir.join('mirror')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
spack_yaml_contents = """
spack:
specs:
- callpath
mirrors:
test-mirror: {0}
gitlab-ci:
mappings:
- match:
- patchelf
runner-attributes:
tags:
- donotcare
image: donotcare
""".format(mirror_url)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write(spack_yaml_contents)
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test') as env:
spec_map = ci.get_concrete_specs(
env, 'callpath', 'callpath', '', 'FIND_ANY')
concrete_spec = spec_map['callpath']
spec_yaml = concrete_spec.to_yaml(hash=ht.build_hash)
yaml_path = str(tmpdir.join('spec.yaml'))
with open(yaml_path, 'w') as ypfd:
ypfd.write(spec_yaml)
install_cmd('--keep-stage', '-f', yaml_path)
buildcache_cmd('create', '-u', '-a', '-f', '--mirror-url',
mirror_url, 'callpath')
ci_cmd('rebuild-index')
buildcache_path = os.path.join(mirror_dir.strpath, 'build_cache')
index_path = os.path.join(buildcache_path, 'index.json')
with open(index_path) as idx_fd:
index_object = json.load(idx_fd)
validate(index_object, db_idx_schema)
def test_ci_generate_bootstrap_prune_dag(
install_mockery_mutable_config, mock_packages, mock_fetch,
mock_archive, mutable_config, monkeypatch, tmpdir,
mutable_mock_env_path, project_dir_env):
"""Test compiler bootstrapping with DAG pruning. Specifically, make
sure that if we detect the bootstrapped compiler needs to be rebuilt,
we ensure the spec we want to build with that compiler is scheduled
for rebuild as well."""
# Create a temp mirror directory for buildcache usage
project_dir_env(tmpdir.strpath)
mirror_dir = tmpdir.join('mirror_dir')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
# Install a compiler, because we want to put it in a buildcache
install_cmd('[email protected]%[email protected]')
# Put installed compiler in the buildcache
buildcache_cmd('create', '-u', '-a', '-f', '-d', mirror_dir.strpath,
'[email protected]%[email protected]')
# Now uninstall the compiler
uninstall_cmd('-y', '[email protected]%[email protected]')
monkeypatch.setattr(spack.concretize.Concretizer,
'check_for_compiler_existence', False)
spack.config.set('config:install_missing_compilers', True)
assert CompilerSpec('[email protected]') not in compilers.all_compiler_specs()
# Configure the mirror where we put that buildcache w/ the compiler
mirror_cmd('add', 'test-mirror', mirror_url)
install_cmd('--no-check-signature', 'a%[email protected]')
# Put spec built with installed compiler in the buildcache
buildcache_cmd('create', '-u', '-a', '-f', '-d', mirror_dir.strpath,
'a%[email protected]')
# Now uninstall the spec
uninstall_cmd('-y', 'a%[email protected]')
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
definitions:
- bootstrap:
- [email protected]%[email protected]
specs:
- a%[email protected]
mirrors:
atestm: {0}
gitlab-ci:
bootstrap:
- name: bootstrap
compiler-agnostic: true
mappings:
- match:
- arch=test-debian6-x86_64
runner-attributes:
tags:
- donotcare
- match:
- arch=test-debian6-core2
runner-attributes:
tags:
- meh
""".format(mirror_url))
# Without this monkeypatch, pipeline generation process would think that
# nothing in the environment needs rebuilding. With the monkeypatch, the
# process sees the compiler as needing a rebuild, which should then result
# in the specs built with that compiler needing a rebuild too.
def fake_get_mirrors_for_spec(spec=None, full_hash_match=False,
mirrors_to_check=None, index_only=False):
if spec.name == 'gcc':
return []
else:
return [{
'spec': spec,
'mirror_url': mirror_url,
}]
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as of:
yaml_contents = of.read()
original_yaml_contents = syaml.load(yaml_contents)
# without the monkeypatch, everything appears up to date and no
# rebuild jobs are generated.
assert(original_yaml_contents)
assert('no-specs-to-rebuild' in original_yaml_contents)
monkeypatch.setattr(spack.binary_distribution,
'get_mirrors_for_spec',
fake_get_mirrors_for_spec)
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as of:
yaml_contents = of.read()
new_yaml_contents = syaml.load(yaml_contents)
assert(new_yaml_contents)
# This 'needs' graph reflects that even though specs 'a' and 'b' do
# not otherwise need to be rebuilt (thanks to DAG pruning), they
# both end up in the generated pipeline because the compiler they
# depend on is bootstrapped, and *does* need to be rebuilt.
needs_graph = {
'(bootstrap) gcc': [],
'(specs) b': [
'(bootstrap) gcc',
],
'(specs) a': [
'(bootstrap) gcc',
'(specs) b',
],
}
_validate_needs_graph(new_yaml_contents, needs_graph, False)
def test_ci_subcommands_without_mirror(tmpdir, mutable_mock_env_path,
mock_packages,
install_mockery, project_dir_env,
mock_binary_index):
"""Make sure we catch if there is not a mirror and report an error"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
gitlab-ci:
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: donotcare
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
with ev.read('test'):
# Check the 'generate' subcommand
output = ci_cmd('generate', '--output-file', outputfile,
output=str, fail_on_error=False)
ex = 'spack ci generate requires an env containing a mirror'
assert(ex in output)
# Also check the 'rebuild-index' subcommand
output = ci_cmd('rebuild-index', output=str, fail_on_error=False)
ex = 'spack ci rebuild-index requires an env containing a mirror'
assert(ex in output)
def test_ensure_only_one_temporary_storage():
"""Make sure 'gitlab-ci' section of env does not allow specification of
both 'enable-artifacts-buildcache' and 'temporary-storage-url-prefix'."""
gitlab_ci_template = """
gitlab-ci:
{0}
mappings:
- match:
- notcheckedhere
runner-attributes:
tags:
- donotcare
"""
enable_artifacts = 'enable-artifacts-buildcache: True'
temp_storage = 'temporary-storage-url-prefix: file:///temp/mirror'
specify_both = """{0}
{1}
""".format(enable_artifacts, temp_storage)
specify_neither = ''
# User can specify "enable-artifacts-buildcache" (boolean)
yaml_obj = syaml.load(gitlab_ci_template.format(enable_artifacts))
validate(yaml_obj, gitlab_ci_schema)
# User can also specify "temporary-storage-url-prefix" (string)
yaml_obj = syaml.load(gitlab_ci_template.format(temp_storage))
validate(yaml_obj, gitlab_ci_schema)
# However, specifying both should fail to validate
yaml_obj = syaml.load(gitlab_ci_template.format(specify_both))
with pytest.raises(ValidationError):
validate(yaml_obj, gitlab_ci_schema)
# Specifying neither should be fine too, as neither of these properties
# should be required
yaml_obj = syaml.load(gitlab_ci_template.format(specify_neither))
validate(yaml_obj, gitlab_ci_schema)
def test_ci_generate_temp_storage_url(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
project_dir_env, mock_binary_index):
"""Verify correct behavior when using temporary-storage-url-prefix"""
project_dir_env(tmpdir.strpath)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- archive-files
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
temporary-storage-url-prefix: file:///work/temp/mirror
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: donotcare
""")
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
outputfile = str(tmpdir.join('.gitlab-ci.yml'))
monkeypatch.setattr(
ci, 'SPACK_PR_MIRRORS_ROOT_URL', r"file:///fake/mirror")
monkeypatch.setattr(
ci, 'SPACK_SHARED_PR_MIRROR_URL', r"file:///fake/mirror_two")
with ev.read('test'):
ci_cmd('generate', '--output-file', outputfile)
with open(outputfile) as of:
pipeline_doc = syaml.load(of.read())
assert('cleanup' in pipeline_doc)
cleanup_job = pipeline_doc['cleanup']
assert('script' in cleanup_job)
cleanup_task = cleanup_job['script'][0]
assert(cleanup_task.startswith('spack -d mirror destroy'))
assert('stages' in pipeline_doc)
stages = pipeline_doc['stages']
# Cleanup job should be 2nd to last, just before rebuild-index
assert('stage' in cleanup_job)
assert(cleanup_job['stage'] == stages[-2])
def test_ci_generate_read_broken_specs_url(tmpdir, mutable_mock_env_path,
install_mockery,
mock_packages, monkeypatch,
project_dir_env):
"""Verify that `broken-specs-url` works as intended"""
project_dir_env(tmpdir.strpath)
spec_a = Spec('a')
spec_a.concretize()
a_full_hash = spec_a.full_hash()
spec_flattendeps = Spec('flatten-deps')
spec_flattendeps.concretize()
flattendeps_full_hash = spec_flattendeps.full_hash()
# Mark 'a' as broken (but not 'flatten-deps')
broken_spec_a_path = str(tmpdir.join(a_full_hash))
with open(broken_spec_a_path, 'w') as bsf:
bsf.write('')
broken_specs_url = 'file://{0}'.format(tmpdir.strpath)
# Test that `spack ci generate` notices this broken spec and fails.
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write("""\
spack:
specs:
- flatten-deps
- a
mirrors:
some-mirror: https://my.fake.mirror
gitlab-ci:
broken-specs-url: "{0}"
mappings:
- match:
- a
- flatten-deps
- b
- dependency-install
runner-attributes:
tags:
- donotcare
image: donotcare
""".format(broken_specs_url))
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test'):
# Check output of the 'generate' subcommand
output = ci_cmd('generate', output=str, fail_on_error=False)
assert('known to be broken' in output)
ex = '({0})'.format(a_full_hash)
assert(ex in output)
ex = '({0})'.format(flattendeps_full_hash)
assert(ex not in output)
def test_ci_reproduce(tmpdir, mutable_mock_env_path,
install_mockery, mock_packages, monkeypatch,
last_two_git_commits, project_dir_env, mock_binary_index):
project_dir_env(tmpdir.strpath)
working_dir = tmpdir.join('repro_dir')
image_name = 'org/image:tag'
spack_yaml_contents = """
spack:
definitions:
- packages: [archive-files]
specs:
- $packages
mirrors:
test-mirror: file:///some/fake/mirror
gitlab-ci:
mappings:
- match:
- archive-files
runner-attributes:
tags:
- donotcare
image: {0}
""".format(image_name)
filename = str(tmpdir.join('spack.yaml'))
with open(filename, 'w') as f:
f.write(spack_yaml_contents)
with tmpdir.as_cwd():
env_cmd('create', 'test', './spack.yaml')
with ev.read('test') as env:
with env.write_transaction():
env.concretize()
env.write()
if not os.path.exists(working_dir.strpath):
os.makedirs(working_dir.strpath)
shutil.copyfile(env.manifest_path,
os.path.join(working_dir.strpath, 'spack.yaml'))
shutil.copyfile(env.lock_path,
os.path.join(working_dir.strpath, 'spack.lock'))
root_spec = None
job_spec = None
for h, s in env.specs_by_hash.items():
if s.name == 'archive-files':
root_spec = s
job_spec = s
job_spec_yaml_path = os.path.join(
working_dir.strpath, 'archivefiles.yaml')
with open(job_spec_yaml_path, 'w') as fd:
fd.write(job_spec.to_yaml(hash=ht.full_hash))
root_spec_yaml_path = os.path.join(
working_dir.strpath, 'root.yaml')
with open(root_spec_yaml_path, 'w') as fd:
fd.write(root_spec.to_yaml(hash=ht.full_hash))
artifacts_root = os.path.join(working_dir.strpath, 'scratch_dir')
pipeline_path = os.path.join(artifacts_root, 'pipeline.yml')
ci_cmd('generate', '--output-file', pipeline_path,
'--artifacts-root', artifacts_root)
job_name = ci.get_job_name(
'specs', False, job_spec, 'test-debian6-core2', None)
repro_file = os.path.join(working_dir.strpath, 'repro.json')
repro_details = {
'job_name': job_name,
'job_spec_yaml': 'archivefiles.yaml',
'root_spec_yaml': 'root.yaml',
'ci_project_dir': working_dir.strpath
}
with open(repro_file, 'w') as fd:
fd.write(json.dumps(repro_details))
install_script = os.path.join(working_dir.strpath, 'install.sh')
with open(install_script, 'w') as fd:
fd.write('#!/bin/bash\n\n#fake install\nspack install blah\n')
spack_info_file = os.path.join(
working_dir.strpath, 'spack_info.txt')
with open(spack_info_file, 'w') as fd:
fd.write('\nMerge {0} into {1}\n\n'.format(
last_two_git_commits[1], last_two_git_commits[0]))
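            # Pretend the job artifacts were already downloaded so
            # 'spack ci reproduce-build' only has to assemble the docker run
            # command checked below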
def fake_download_and_extract_artifacts(url, work_dir):
pass
monkeypatch.setattr(ci, 'download_and_extract_artifacts',
fake_download_and_extract_artifacts)
rep_out = ci_cmd('reproduce-build',
'https://some.domain/api/v1/projects/1/jobs/2/artifacts',
'--working-dir',
working_dir.strpath,
output=str)
expect_out = 'docker run --rm -v {0}:{0} -ti {1}'.format(
working_dir.strpath, image_name)
assert(expect_out in rep_out)
|
hb_quant/huobi/model/etf/unitprice.py | wenli135/Binance-volatility-trading-bot | 611 | 11197861 | class UnitPrice:
def __init__(self):
self.currency = ""
self.amount = 0.0
def print_object(self, format_data=""):
from huobi.utils.print_mix_object import PrintBasic
PrintBasic.print_basic(self.currency, format_data + "Currency")
PrintBasic.print_basic(self.amount, format_data + "Amount") |
tests/openbb_terminal/cryptocurrency/defi/test_terramoney_fcd_model.py | tehcoderer/GamestonkTerminal | 255 | 11197893 | <reponame>tehcoderer/GamestonkTerminal<filename>tests/openbb_terminal/cryptocurrency/defi/test_terramoney_fcd_model.py
# IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.cryptocurrency.defi import terramoney_fcd_model
@pytest.mark.vcr
def test_get_staking_account_info(recorder):
result_tuple = terramoney_fcd_model.get_staking_account_info(
address="terra1jvwelvs7rdk6j3mqdztq5tya99w8lxk6l9hcqg",
)
recorder.capture_list(result_tuple)
@pytest.mark.vcr
def test_get_validators(recorder):
df = terramoney_fcd_model.get_validators()
recorder.capture_list(df)
@pytest.mark.vcr
def test_get_proposals(recorder):
df = terramoney_fcd_model.get_proposals(status="Voting")
recorder.capture_list(df)
@pytest.mark.vcr
def test_get_account_growth(recorder):
df = terramoney_fcd_model.get_account_growth(cumulative=True)
recorder.capture_list(df)
@pytest.mark.vcr
def test_get_staking_ratio_history(recorder):
df = terramoney_fcd_model.get_staking_ratio_history()
recorder.capture_list(df)
@pytest.mark.vcr
def test_get_staking_returns_history(recorder):
df = terramoney_fcd_model.get_staking_returns_history()
recorder.capture_list(df)
|
examples/rmi.py | pitmanst/jpype | 531 | 11197894 | <filename>examples/rmi.py
# *****************************************************************************
# Copyright 2004-2008 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
# The Java classes used are defined in the test harness. The class jpype.rmi.ServerImpl must be started before this script can be run.
from jpype import *
import os.path
root = os.path.abspath(os.path.dirname(__file__))
startJVM(getDefaultJVMPath(), "-ea", "-Djava.class.path=%s/../test/classes" % root)
p = java.rmi.Naming.lookup("rmi://localhost:2004/server")
print(p, p.__class__)
p.callRemote()
shutdownJVM()
|
ptstat/core.py | timmyzhao/ptstat | 116 | 11197895 | <gh_stars>100-1000
import torch
from torch.autograd import Variable
# TODO:
# Remove Variable() everywhere when auto-promotion implemented.
# Make size() method indexable.
# Option to remove asserts.
# Rename log_pdf to log_p ?
def _to_v(x, size=None, cuda=False):
if isinstance(x, Variable):
y = x
elif torch.is_tensor(x):
y = Variable(x)
else:
y = Variable(torch.cuda.FloatTensor([x])) if cuda else Variable(torch.FloatTensor([x]))
if size:
assert y.size() == (1, ), str(y.size())
y = y.expand(size)
return y
# [batch_size] -> [batch_size, num_classes].
def to_1hot(label, num_classes):
assert len(label.size()) == 1, str(label.size())
if label.is_cuda:
y = torch.cuda.FloatTensor(label.size(0), num_classes).zero_()
else:
y = torch.zeros(label.size(0), num_classes)
y.scatter_(1, label.data.unsqueeze(1), 1)
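    # scatter_ writes a 1 into the column given by each label, yielding one one-hot row per sample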
return Variable(y)
# [batch_size, num_classes] -> [batch_size].
def to_label(one_hot):
assert len(one_hot.size()) == 2, str(one_hot.size())
    # torch.max returns (values, indices); keep the indices and drop the extra dim
    _, y = torch.max(one_hot, 1)
    return y.squeeze()
# [batch_size].
def label(batch, value, cuda):
if cuda:
return Variable(torch.cuda.LongTensor(batch).fill_(value))
else:
return Variable(torch.LongTensor(batch).fill_(value))
# First dimension is batch / independent samples.
# Second dimension is RV dimensionality (not identically distributed).
class RandomVariable:
def _size(self):
raise NotImplementedError("size is not implemented")
def _log_pdf(self, x):
raise NotImplementedError("log_pdf is not implemented")
def _sample(self):
raise NotImplementedError("sample is not implemented")
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
# [batch_size, rv_dimension]
def size(self):
return self._size()
# [batch_size]
def log_pdf(self, x):
assert self.size() == x.size(), str(self.size()) + " ~ " + str(x.size())
batch_log_pdf = self._log_pdf(x)
assert batch_log_pdf.size() == (self.size()[0], ), str(batch_log_pdf.size()) + " ~ " + str((self.size()[0], ))
return batch_log_pdf
# [batch_size, rv_dimension]
def sample(self):
batch_samples = self._sample()
assert self.size() == batch_samples.size(), str(self.size()) + " ~ " + str(batch_samples.size())
return batch_samples
# [batch_size]
def entropy(self):
batch_entropy = self._entropy()
assert batch_entropy.size() == (self.size()[0], ), str(batch_entropy.size()) + " ~ " + str((self.size()[0], ))
return batch_entropy
_kld_dispatch = {}
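# Presumably filled in elsewhere with closed-form KL divergence implementations keyed by (type(p), type(q)) pairs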
# [batch_size]
def kld(p, q):
assert p.size() == q.size()
batch_kld = _kld_dispatch[(type(p), type(q))](p, q)
assert batch_kld.size() == (p.size()[0], )
assert torch.min(batch_kld.data) >= 0
return batch_kld
|
configs/_base_/schedules/imagenet_bs2048_coslr.py | YuxinZou/mmclassification | 1,190 | 11197906 | # optimizer
optimizer = dict(
type='SGD', lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='CosineAnnealing',
min_lr=0,
warmup='linear',
warmup_iters=2500,
warmup_ratio=0.25)
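# 2500 linear warmup iterations starting at 0.25 * lr, then cosine annealing down to min_lr=0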
runner = dict(type='EpochBasedRunner', max_epochs=100)
|
wouso/core/scoring/tests.py | ruxandraS/wouso | 117 | 11197920 | <gh_stars>100-1000
from django.test import TestCase
from django.db.models.query import QuerySet
from django.contrib.auth.models import User
from wouso.core.config.models import IntegerListSetting
from wouso.core.game.models import Game
from wouso.core import scoring, signals
from wouso.core.tests import WousoTest
from wouso.core.user.models import Player
from models import Formula, Coin, History
from sm import FormulaParsingError, setup_scoring, CORE_POINTS, check_setup, update_points, calculate
class ScoringTestCase(TestCase):
def setUp(self):
self.user, new = User.objects.get_or_create(username='33')
self.game = Game.get_instance()
self.coin = Coin.add('_test')
def tearDown(self):
#self.user.delete()
self.game.delete()
self.coin.delete()
def testHistoryFor(self):
no_history = scoring.history_for(self.user, self.game, external_id=999)
self.assertEqual(len(no_history), 0)
def testScoreSimple(self):
scoring.score_simple(self.user.get_profile(), self.coin, game=self.game, external_id=2, amount=10)
multiple = scoring.history_for(self.user, self.game, external_id=2)
self.assertTrue(isinstance(multiple, QuerySet))
self.assertEqual(len(multiple), 1)
history = list(multiple)[0]
self.assertTrue(isinstance(history, History))
self.assertEqual(history.amount, 10)
def testCalculate(self):
formula = Formula.add('_test_formula',
expression='_test=5', owner=self.game)
# Call by name
ret = scoring.calculate('_test_formula')
self.assertTrue(isinstance(ret, dict))
# Call by object
ret = scoring.calculate(formula)
self.assertTrue(isinstance(ret, dict))
self.assertEqual(ret['_test'], 5)
formula2 = Formula.add('_test_formula2',
expression='_test=5*3', owner=self.game)
ret = scoring.calculate(formula2)
self.assertTrue(isinstance(ret, dict))
self.assertEqual(ret['_test'], 15)
# Multiple coins
formula2.expression = '_test=5*3; points=4'
ret = scoring.calculate(formula2)
self.assertTrue(isinstance(ret, dict))
self.assertEqual(ret['_test'], 15)
self.assertEqual(ret['points'], 4)
# Fail safe
formula2.expression = '_test=5*cucu'
try:
ret = scoring.calculate(formula2)
# no error? wtf
self.assertFalse(True)
except Exception as e:
self.assertTrue(isinstance(e, FormulaParsingError))
def testScore(self):
formula = Formula.add('_test_formula_sc',
expression='_test=13', owner=self.game)
scoring.score(self.user.get_profile(), self.game, formula,
external_id=3)
hs = scoring.history_for(self.user, self.game, external_id=3)
self.assertTrue(isinstance(hs, QuerySet))
history = list(hs)[0]
# check if specific coin has been updated
self.assertEqual(history.coin, self.coin)
self.assertEqual(history.amount, 13)
class UpdateScoringTest(WousoTest):
def test_update_points_level_upgrade_first_time(self):
level_up_points = 80
IntegerListSetting.get('level_limits').set_value(str(level_up_points))
Coin.add('points')
Coin.add('gold')
Formula.add('level-gold', expression='gold=10*{level}', owner=None)
# Upgrade player's level
player = self._get_player()
player.points = level_up_points + 1
player.level_no = 1
player.save()
update_points(player, None)
coins = History.user_coins(player.user)
self.assertEqual(coins['gold'], 10 * player.max_level)
def test_update_points_level_downgrade(self):
level_up_points = 80
IntegerListSetting.get('level_limits').set_value(str(level_up_points))
Coin.add('points')
Coin.add('gold')
Formula.add('level-gold', expression='gold=10*{level}', owner=None)
# Upgrade player's level
player = self._get_player()
player.points = level_up_points + 1
player.level_no = 1
player.save()
update_points(player, None)
# Downgrade player's level
player.points = level_up_points - 1
player.save()
update_points(player, None)
coins = History.user_coins(player.user)
self.assertEqual(coins['gold'], 10 * player.max_level)
def test_update_points_level_upgrade_back(self):
level_up_points = 80
IntegerListSetting.get('level_limits').set_value(str(level_up_points))
Coin.add('points')
Coin.add('gold')
Formula.add('level-gold', expression='gold=10*{level}', owner=None)
# Upgrade player's level
player = self._get_player()
player.points = level_up_points + 1
player.level_no = 1
player.save()
update_points(player, None)
# Downgrade player's level
player.points = level_up_points - 1
player.save()
update_points(player, None)
#Upgrade player's level back
player.points = level_up_points + 1
player.save()
update_points(player, None)
coins = History.user_coins(player.user)
self.assertEqual(coins['gold'], 10 * player.max_level)
class ScoringHistoryTest(WousoTest):
def test_user_coins(self):
Coin.add('points')
Coin.add('gold')
player = self._get_player()
scoring.score_simple(player, 'points', 10)
self.assertIn('points', History.user_coins(player.user))
def test_user_points(self):
coin = Coin.add('points')
player = self._get_player()
scoring.score_simple(player, 'points', 10)
up = History.user_points(user=player.user)
self.assertTrue('wouso' in up)
self.assertTrue(coin.name in up['wouso'])
self.assertEqual(up['wouso'][coin.name], 10)
def test_accessors(self):
player = self._get_player()
self.assertEqual(scoring.user_coins(player), scoring.user_coins(player.user))
def test_sync_methods(self):
player = self._get_player()
coin = Coin.add('points')
History.objects.create(user=player.user, coin=coin, amount=10)
self.assertEqual(player.points, 0)
scoring.sync_user(player)
self.assertEqual(player.points, 10)
History.objects.create(user=player.user, coin=coin, amount=10)
self.assertEqual(player.points, 10)
scoring.sync_all_user_points()
player = Player.objects.get(pk=player.pk)
self.assertEqual(player.points, 20)
class ScoringSetupTest(TestCase):
def test_check_setup(self):
setup_scoring()
self.assertTrue(check_setup())
def test_setup(self):
setup_scoring()
for c in CORE_POINTS:
self.assertTrue(Coin.get(c))
class ScoringFirstLogin(WousoTest):
def test_first_login_points(self):
f = Formula.add('start-points', expression='points=10')
Coin.add('points')
player = self._get_player()
self.assertEqual(player.points, 0)
# this won't work, since the activity is sent in our custom view
#self.client.login(username=player.user.username, password='<PASSWORD>')
# using this instead
signals.addActivity.send(sender=None, user_from=player, action="login", game=None, public=False)
player = Player.objects.get(pk=player.pk)
self.assertEqual(player.points, 10)
class ScoringTestFunctions(TestCase):
def test_fibbonaci_formula(self):
formula = Formula.add('test-fib', expression='points=fib(0)')
value = calculate(formula)['points']
self.assertEqual(value, 0)
formula.expression = 'points=fib(1)'
formula.save()
value = calculate(formula)['points']
self.assertEqual(value, 1)
formula.expression = 'points=fib(2)'
formula.save()
value = calculate(formula)['points']
self.assertEqual(value, 1)
formula.expression = 'points=fib(3)'
formula.save()
value = calculate(formula)['points']
self.assertEqual(value, 2)
formula.expression = 'points=fib(4)'
formula.save()
value = calculate(formula)['points']
self.assertEqual(value, 3)
|
examples/dummy_plugin/dummy_plugin/jobs.py | psmware-ltd/nautobot | 384 | 11197921 | <filename>examples/dummy_plugin/dummy_plugin/jobs.py
import time
from nautobot.extras.jobs import IntegerVar, Job
name = "DummyPlugin jobs"
class DummyJob(Job):
class Meta:
name = "Dummy job, does nothing"
class DummyLoggingJob(Job):
interval = IntegerVar(default=4, description="The time in seconds to sleep.")
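    # User-supplied number of seconds the job sleeps for, logging one message per second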
class Meta:
name = "Dummy logging job."
description = "I log stuff to demonstrate how UI logging works."
def run(self, data, commit):
interval = data["interval"]
self.log_debug(message=f"Running for {interval} seconds.")
for step in range(1, interval + 1):
time.sleep(1)
self.log_info(message=f"Step {step}")
self.log_success(obj=None)
return f"Ran for {interval} seconds"
jobs = (DummyJob, DummyLoggingJob)
|
wrappers/python/tests/did/test_set_endpoint_for_did.py | sklump/indy-sdk | 636 | 11197927 | <filename>wrappers/python/tests/did/test_set_endpoint_for_did.py
import pytest
from indy import did, error
@pytest.mark.asyncio
async def test_set_endpoint_for_did_works(wallet_handle, identity_trustee1, endpoint):
(_did, verkey) = identity_trustee1
await did.set_endpoint_for_did(wallet_handle, _did, endpoint, verkey)
@pytest.mark.asyncio
async def test_set_endpoint_for_did_works_for_invalid_did(wallet_handle, verkey_my1, endpoint):
with pytest.raises(error.CommonInvalidStructure):
await did.set_endpoint_for_did(wallet_handle, 'invalid_base58string', endpoint, verkey_my1)
|
tests/contrib/test_dictionary_storage.py | anleo1000/oauth2client | 5,079 | 11197935 | <gh_stars>1000+
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for oauth2client.contrib.dictionary_storage"""
import unittest
import oauth2client
from oauth2client import client
from oauth2client.contrib import dictionary_storage
def _generate_credentials(scopes=None):
return client.OAuth2Credentials(
'access_tokenz',
'client_idz',
'client_secretz',
'refresh_tokenz',
'3600',
oauth2client.GOOGLE_TOKEN_URI,
'Test',
id_token={
'sub': '123',
'email': '<EMAIL>'
},
scopes=scopes)
class DictionaryStorageTests(unittest.TestCase):
def test_constructor_defaults(self):
dictionary = {}
key = 'test-key'
storage = dictionary_storage.DictionaryStorage(dictionary, key)
self.assertEqual(dictionary, storage._dictionary)
self.assertEqual(key, storage._key)
self.assertIsNone(storage._lock)
def test_constructor_explicit(self):
dictionary = {}
key = 'test-key'
storage = dictionary_storage.DictionaryStorage(dictionary, key)
lock = object()
storage = dictionary_storage.DictionaryStorage(
dictionary, key, lock=lock)
self.assertEqual(storage._lock, lock)
def test_get(self):
credentials = _generate_credentials()
dictionary = {}
key = 'credentials'
storage = dictionary_storage.DictionaryStorage(dictionary, key)
self.assertIsNone(storage.get())
dictionary[key] = credentials.to_json()
returned = storage.get()
self.assertIsNotNone(returned)
self.assertEqual(returned.access_token, credentials.access_token)
self.assertEqual(returned.id_token, credentials.id_token)
self.assertEqual(returned.refresh_token, credentials.refresh_token)
self.assertEqual(returned.client_id, credentials.client_id)
def test_put(self):
credentials = _generate_credentials()
dictionary = {}
key = 'credentials'
storage = dictionary_storage.DictionaryStorage(dictionary, key)
storage.put(credentials)
returned = storage.get()
self.assertIn(key, dictionary)
self.assertIsNotNone(returned)
self.assertEqual(returned.access_token, credentials.access_token)
self.assertEqual(returned.id_token, credentials.id_token)
self.assertEqual(returned.refresh_token, credentials.refresh_token)
self.assertEqual(returned.client_id, credentials.client_id)
def test_delete(self):
credentials = _generate_credentials()
dictionary = {}
key = 'credentials'
storage = dictionary_storage.DictionaryStorage(dictionary, key)
storage.put(credentials)
self.assertIn(key, dictionary)
storage.delete()
self.assertNotIn(key, dictionary)
self.assertIsNone(storage.get())
|
chatbot_env/Lib/site-packages/sklearn/metrics/cluster/tests/test_unsupervised.py | rakmakan/Chatbot | 6,989 | 11197948 | import numpy as np
import scipy.sparse as sp
import pytest
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_warns_message
from sklearn.metrics.cluster import silhouette_score
from sklearn.metrics.cluster import silhouette_samples
from sklearn.metrics import pairwise_distances
from sklearn.metrics.cluster import calinski_harabasz_score
from sklearn.metrics.cluster import calinski_harabaz_score
from sklearn.metrics.cluster import davies_bouldin_score
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X_dense = dataset.data
X_csr = csr_matrix(X_dense)
X_dok = sp.dok_matrix(X_dense)
X_lil = sp.lil_matrix(X_dense)
y = dataset.target
for X in [X_dense, X_csr, X_dok, X_lil]:
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
score_precomputed = silhouette_score(D, y, metric='precomputed')
assert score_precomputed > 0
# Test without calculating D
score_euclidean = silhouette_score(X, y, metric='euclidean')
pytest.approx(score_precomputed, score_euclidean)
if X is X_dense:
score_dense_without_sampling = score_precomputed
else:
pytest.approx(score_euclidean,
score_dense_without_sampling)
# Test with sampling
score_precomputed = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
score_euclidean = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert score_precomputed > 0
assert score_euclidean > 0
pytest.approx(score_euclidean, score_precomputed)
if X is X_dense:
score_dense_with_sampling = score_precomputed
else:
pytest.approx(score_euclidean, score_dense_with_sampling)
def test_cluster_size_1():
# Assert Silhouette Coefficient == 0 when there is 1 sample in a cluster
# (cluster 0). We also test the case where there are identical samples
# as the only members of a cluster (cluster 2). To our knowledge, this case
# is not discussed in reference material, and we choose for it a sample
# score of 1.
X = [[0.], [1.], [1.], [2.], [3.], [3.]]
labels = np.array([0, 1, 1, 1, 2, 2])
# Cluster 0: 1 sample -> score of 0 by Rousseeuw's convention
# Cluster 1: intra-cluster = [.5, .5, 1]
# inter-cluster = [1, 1, 1]
# silhouette = [.5, .5, 0]
# Cluster 2: intra-cluster = [0, 0]
# inter-cluster = [arbitrary, arbitrary]
# silhouette = [1., 1.]
silhouette = silhouette_score(X, labels)
assert not np.isnan(silhouette)
ss = silhouette_samples(X, labels)
assert_array_equal(ss, [0, .5, .5, 0, 1, 1])
def test_silhouette_paper_example():
# Explicitly check per-sample results against Rousseeuw (1987)
# Data from Table 1
lower = [5.58,
7.00, 6.50,
7.08, 7.00, 3.83,
4.83, 5.08, 8.17, 5.83,
2.17, 5.75, 6.67, 6.92, 4.92,
6.42, 5.00, 5.58, 6.00, 4.67, 6.42,
3.42, 5.50, 6.42, 6.42, 5.00, 3.92, 6.17,
2.50, 4.92, 6.25, 7.33, 4.50, 2.25, 6.33, 2.75,
6.08, 6.67, 4.25, 2.67, 6.00, 6.17, 6.17, 6.92, 6.17,
5.25, 6.83, 4.50, 3.75, 5.75, 5.42, 6.08, 5.83, 6.67, 3.67,
4.75, 3.00, 6.08, 6.67, 5.00, 5.58, 4.83, 6.17, 5.67, 6.50, 6.92]
D = np.zeros((12, 12))
D[np.tril_indices(12, -1)] = lower
D += D.T
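    # D is now the full symmetric 12x12 dissimilarity matrix with a zero diagonal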
names = ['BEL', 'BRA', 'CHI', 'CUB', 'EGY', 'FRA', 'IND', 'ISR', 'USA',
'USS', 'YUG', 'ZAI']
# Data from Figure 2
labels1 = [1, 1, 2, 2, 1, 1, 2, 1, 1, 2, 2, 1]
expected1 = {'USA': .43, 'BEL': .39, 'FRA': .35, 'ISR': .30, 'BRA': .22,
'EGY': .20, 'ZAI': .19, 'CUB': .40, 'USS': .34, 'CHI': .33,
'YUG': .26, 'IND': -.04}
score1 = .28
# Data from Figure 3
labels2 = [1, 2, 3, 3, 1, 1, 2, 1, 1, 3, 3, 2]
expected2 = {'USA': .47, 'FRA': .44, 'BEL': .42, 'ISR': .37, 'EGY': .02,
'ZAI': .28, 'BRA': .25, 'IND': .17, 'CUB': .48, 'USS': .44,
'YUG': .31, 'CHI': .31}
score2 = .33
for labels, expected, score in [(labels1, expected1, score1),
(labels2, expected2, score2)]:
expected = [expected[name] for name in names]
# we check to 2dp because that's what's in the paper
pytest.approx(expected,
silhouette_samples(D, np.array(labels),
metric='precomputed'),
abs=1e-2)
pytest.approx(score,
silhouette_score(D, np.array(labels),
metric='precomputed'),
abs=1e-2)
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
err_msg = (r'Number of labels is %d\. Valid values are 2 '
r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)))
with pytest.raises(ValueError, match=err_msg):
silhouette_score(X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
err_msg = (r'Number of labels is %d\. Valid values are 2 '
r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)))
with pytest.raises(ValueError, match=err_msg):
silhouette_score(X, y)
def test_non_encoded_labels():
dataset = datasets.load_iris()
X = dataset.data
labels = dataset.target
assert (
silhouette_score(X, labels * 2 + 10) == silhouette_score(X, labels))
assert_array_equal(
silhouette_samples(X, labels * 2 + 10), silhouette_samples(X, labels))
def test_non_numpy_labels():
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
assert (
silhouette_score(list(X), list(y)) == silhouette_score(X, y))
@pytest.mark.parametrize('dtype', (np.float32, np.float64))
def test_silhouette_nonzero_diag(dtype):
# Make sure silhouette_samples requires diagonal to be zero.
# Non-regression test for #12178
# Construct a zero-diagonal matrix
dists = pairwise_distances(
np.array([[0.2, 0.1, 0.12, 1.34, 1.11, 1.6]], dtype=dtype).T)
labels = [0, 0, 0, 1, 1, 1]
# small values on the diagonal are OK
dists[2][2] = np.finfo(dists.dtype).eps * 10
silhouette_samples(dists, labels, metric='precomputed')
# values bigger than eps * 100 are not
dists[2][2] = np.finfo(dists.dtype).eps * 1000
with pytest.raises(ValueError, match='contains non-zero'):
silhouette_samples(dists, labels, metric='precomputed')
def assert_raises_on_only_one_label(func):
"""Assert message when there is only one label"""
rng = np.random.RandomState(seed=0)
with pytest.raises(ValueError, match="Number of labels is"):
func(rng.rand(10, 2), np.zeros(10))
def assert_raises_on_all_points_same_cluster(func):
"""Assert message when all point are in different clusters"""
rng = np.random.RandomState(seed=0)
with pytest.raises(ValueError, match="Number of labels is"):
func(rng.rand(10, 2), np.arange(10))
def test_calinski_harabasz_score():
assert_raises_on_only_one_label(calinski_harabasz_score)
assert_raises_on_all_points_same_cluster(calinski_harabasz_score)
    # Assert the value is 1. when all samples are equal
assert 1. == calinski_harabasz_score(np.ones((10, 2)),
[0] * 5 + [1] * 5)
    # Assert the value is 0. when all cluster means are equal
assert 0. == calinski_harabasz_score([[-1, -1], [1, 1]] * 10,
[0] * 10 + [1] * 10)
# General case (with non numpy arrays)
X = ([[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 +
[[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5)
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
    assert calinski_harabasz_score(X, labels) == pytest.approx(
        45 * (40 - 4) / (5 * (4 - 1)))
def test_deprecated_calinski_harabaz_score():
depr_message = ("Function 'calinski_harabaz_score' has been renamed "
"to 'calinski_harabasz_score' "
"and will be removed in version 0.23.")
assert_warns_message(FutureWarning, depr_message,
calinski_harabaz_score,
np.ones((10, 2)), [0] * 5 + [1] * 5)
def test_davies_bouldin_score():
assert_raises_on_only_one_label(davies_bouldin_score)
assert_raises_on_all_points_same_cluster(davies_bouldin_score)
    # Assert the value is 0. when all samples are equal
assert davies_bouldin_score(np.ones((10, 2)),
[0] * 5 + [1] * 5) == pytest.approx(0.0)
    # Assert the value is 0. when all cluster means are equal
assert davies_bouldin_score([[-1, -1], [1, 1]] * 10,
[0] * 10 + [1] * 10) == pytest.approx(0.0)
# General case (with non numpy arrays)
X = ([[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 +
[[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5)
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
    assert davies_bouldin_score(X, labels) == pytest.approx(2 * np.sqrt(0.5) / 3)
# Ensure divide by zero warning is not raised in general case
with pytest.warns(None) as record:
davies_bouldin_score(X, labels)
div_zero_warnings = [
warning for warning in record
if "divide by zero encountered" in warning.message.args[0]
]
assert len(div_zero_warnings) == 0
    # General case - some clusters have only one sample
X = ([[0, 0], [2, 2], [3, 3], [5, 5]])
labels = [0, 0, 1, 2]
    assert davies_bouldin_score(X, labels) == pytest.approx((5. / 4) / 3)
|
sympy/physics/mechanics/joint.py | shilpiprd/sympy | 8,323 | 11197955 | # coding=utf-8
from abc import ABC, abstractmethod
from sympy import pi
from sympy.physics.mechanics.body import Body
from sympy.physics.vector import Vector, dynamicsymbols, cross
from sympy.physics.vector.frame import ReferenceFrame
import warnings
__all__ = ['Joint', 'PinJoint', 'PrismaticJoint']
class Joint(ABC):
"""Abstract base class for all specific joints.
Explanation
===========
A joint subtracts degrees of freedom from a body. This is the base class
for all specific joints and holds all common methods acting as an interface
for all joints. Custom joint can be created by inheriting Joint class and
defining all abstract functions.
The abstract methods are:
- ``_generate_coordinates``
- ``_generate_speeds``
- ``_orient_frames``
- ``_set_angular_velocity``
    - ``_set_linear_velocity``
Parameters
==========
name : string
A unique name for the joint.
parent : Body
The parent body of joint.
child : Body
The child body of joint.
coordinates: List of dynamicsymbols, optional
Generalized coordinates of the joint.
speeds : List of dynamicsymbols, optional
Generalized speeds of joint.
parent_joint_pos : Vector, optional
Vector from the parent body's mass center to the point where the parent
and child are connected. The default value is the zero vector.
child_joint_pos : Vector, optional
Vector from the child body's mass center to the point where the parent
and child are connected. The default value is the zero vector.
parent_axis : Vector, optional
Axis fixed in the parent body which aligns with an axis fixed in the
child body. The default is x axis in parent's reference frame.
child_axis : Vector, optional
Axis fixed in the child body which aligns with an axis fixed in the
parent body. The default is x axis in child's reference frame.
Attributes
==========
name : string
The joint's name.
parent : Body
The joint's parent body.
child : Body
The joint's child body.
coordinates : list
List of the joint's generalized coordinates.
speeds : list
List of the joint's generalized speeds.
parent_point : Point
The point fixed in the parent body that represents the joint.
child_point : Point
The point fixed in the child body that represents the joint.
parent_axis : Vector
The axis fixed in the parent frame that represents the joint.
child_axis : Vector
The axis fixed in the child frame that represents the joint.
kdes : list
Kinematical differential equations of the joint.
Notes
=====
The direction cosine matrix between the child and parent is formed using a
simple rotation about an axis that is normal to both ``child_axis`` and
``parent_axis``. In general, the normal axis is formed by crossing the
``child_axis`` into the ``parent_axis`` except if the child and parent axes
are in exactly opposite directions. In that case the rotation vector is chosen
using the rules in the following table where ``C`` is the child reference
frame and ``P`` is the parent reference frame:
.. list-table::
:header-rows: 1
* - ``child_axis``
- ``parent_axis``
- ``rotation_axis``
* - ``-C.x``
- ``P.x``
- ``P.z``
* - ``-C.y``
- ``P.y``
- ``P.x``
* - ``-C.z``
- ``P.z``
- ``P.y``
* - ``-C.x-C.y``
- ``P.x+P.y``
- ``P.z``
* - ``-C.y-C.z``
- ``P.y+P.z``
- ``P.x``
* - ``-C.x-C.z``
- ``P.x+P.z``
- ``P.y``
* - ``-C.x-C.y-C.z``
- ``P.x+P.y+P.z``
- ``(P.x+P.y+P.z) × P.x``
"""
def __init__(self, name, parent, child, coordinates=None, speeds=None,
parent_joint_pos=None, child_joint_pos=None, parent_axis=None,
child_axis=None):
if not isinstance(name, str):
raise TypeError('Supply a valid name.')
self._name = name
if not isinstance(parent, Body):
raise TypeError('Parent must be an instance of Body.')
self._parent = parent
if not isinstance(child, Body):
            raise TypeError('Child must be an instance of Body.')
self._child = child
self._coordinates = self._generate_coordinates(coordinates)
self._speeds = self._generate_speeds(speeds)
self._kdes = self._generate_kdes()
self._parent_axis = self._axis(parent, parent_axis)
self._child_axis = self._axis(child, child_axis)
self._parent_point = self._locate_joint_pos(parent, parent_joint_pos)
self._child_point = self._locate_joint_pos(child, child_joint_pos)
self._orient_frames()
self._set_angular_velocity()
self._set_linear_velocity()
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
@property
def name(self):
return self._name
@property
def parent(self):
"""Parent body of Joint."""
return self._parent
@property
def child(self):
"""Child body of Joint."""
return self._child
@property
def coordinates(self):
"""List generalized coordinates of the joint."""
return self._coordinates
@property
def speeds(self):
"""List generalized coordinates of the joint.."""
return self._speeds
@property
def kdes(self):
"""Kinematical differential equations of the joint."""
return self._kdes
@property
def parent_axis(self):
"""The axis of parent frame."""
return self._parent_axis
@property
def child_axis(self):
"""The axis of child frame."""
return self._child_axis
@property
def parent_point(self):
"""The joint's point where parent body is connected to the joint."""
return self._parent_point
@property
def child_point(self):
"""The joint's point where child body is connected to the joint."""
return self._child_point
@abstractmethod
def _generate_coordinates(self, coordinates):
"""Generate list generalized coordinates of the joint."""
pass
@abstractmethod
def _generate_speeds(self, speeds):
"""Generate list generalized speeds of the joint."""
pass
@abstractmethod
def _orient_frames(self):
"""Orient frames as per the joint."""
pass
@abstractmethod
def _set_angular_velocity(self):
pass
@abstractmethod
def _set_linear_velocity(self):
pass
def _generate_kdes(self):
kdes = []
t = dynamicsymbols._t
for i in range(len(self.coordinates)):
kdes.append(-self.coordinates[i].diff(t) + self.speeds[i])
return kdes
def _axis(self, body, ax):
if ax is None:
ax = body.frame.x
return ax
if not isinstance(ax, Vector):
raise TypeError("Axis must be of type Vector.")
if not ax.dt(body.frame) == 0:
msg = ('Axis cannot be time-varying when viewed from the '
'associated body.')
raise ValueError(msg)
return ax
def _locate_joint_pos(self, body, joint_pos):
if joint_pos is None:
joint_pos = Vector(0)
if not isinstance(joint_pos, Vector):
raise ValueError('Joint Position must be supplied as Vector.')
if not joint_pos.dt(body.frame) == 0:
msg = ('Position Vector cannot be time-varying when viewed from '
'the associated body.')
raise ValueError(msg)
point_name = self._name + '_' + body.name + '_joint'
return body.masscenter.locatenew(point_name, joint_pos)
def _alignment_rotation(self, parent, child):
# Returns the axis and angle between two axis(vectors).
angle = parent.angle_between(child)
axis = cross(child, parent).normalize()
return angle, axis
    def _generate_vector(self):
        # Pick a rotation axis for the case where child_axis is exactly
        # opposite to parent_axis, following the table in the Joint docstring.
        parent_frame = self.parent.frame
        components = self.parent_axis.to_matrix(parent_frame)
        x, y, z = components[0], components[1], components[2]
        if x != 0:
            if y != 0:
                if z != 0:
                    # parent_axis has x, y and z components
                    return cross(self.parent_axis,
                                 parent_frame.x)
            if z != 0:
                # parent_axis lies in the x-z plane
                return parent_frame.y
            return parent_frame.z
        if x == 0:
            if y != 0:
                if z != 0:
                    # parent_axis lies in the y-z plane
                    return parent_frame.x
                return parent_frame.x
            return parent_frame.y
def _set_orientation(self):
        # Helper method used by _orient_frames() to align parent_axis and child_axis.
self.child.frame.orient_axis(self.parent.frame, self.parent_axis, 0)
angle, axis = self._alignment_rotation(self.parent_axis,
self.child_axis)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
if axis != Vector(0) or angle == pi:
if angle == pi:
axis = self._generate_vector()
int_frame = ReferenceFrame('int_frame')
int_frame.orient_axis(self.child.frame, self.child_axis, 0)
int_frame.orient_axis(self.parent.frame, axis, angle)
return int_frame
return self.parent.frame
class PinJoint(Joint):
"""Pin (Revolute) Joint.
Explanation
===========
A pin joint is defined such that the joint rotation axis is fixed in both
the child and parent and the location of the joint is relative to the mass
center of each body. The child rotates an angle, θ, from the parent about
the rotation axis and has a simple angular speed, ω, relative to the
parent. The direction cosine matrix between the child and parent is formed
using a simple rotation about an axis that is normal to both ``child_axis``
and ``parent_axis``, see the Notes section for a detailed explanation of
this.
Parameters
==========
name : string
A unique name for the joint.
parent : Body
The parent body of joint.
child : Body
The child body of joint.
coordinates: dynamicsymbol, optional
Generalized coordinates of the joint.
speeds : dynamicsymbol, optional
Generalized speeds of joint.
parent_joint_pos : Vector, optional
Vector from the parent body's mass center to the point where the parent
and child are connected. The default value is the zero vector.
child_joint_pos : Vector, optional
Vector from the child body's mass center to the point where the parent
and child are connected. The default value is the zero vector.
parent_axis : Vector, optional
Axis fixed in the parent body which aligns with an axis fixed in the
child body. The default is x axis in parent's reference frame.
child_axis : Vector, optional
Axis fixed in the child body which aligns with an axis fixed in the
parent body. The default is x axis in child's reference frame.
Attributes
==========
name : string
The joint's name.
parent : Body
The joint's parent body.
child : Body
The joint's child body.
coordinates : list
List of the joint's generalized coordinates.
speeds : list
List of the joint's generalized speeds.
parent_point : Point
The point fixed in the parent body that represents the joint.
child_point : Point
The point fixed in the child body that represents the joint.
parent_axis : Vector
The axis fixed in the parent frame that represents the joint.
child_axis : Vector
The axis fixed in the child frame that represents the joint.
kdes : list
Kinematical differential equations of the joint.
Examples
=========
A single pin joint is created from two bodies and has the following basic
attributes:
>>> from sympy.physics.mechanics import Body, PinJoint
>>> parent = Body('P')
>>> parent
P
>>> child = Body('C')
>>> child
C
>>> joint = PinJoint('PC', parent, child)
>>> joint
PinJoint: PC parent: P child: C
>>> joint.name
'PC'
>>> joint.parent
P
>>> joint.child
C
>>> joint.parent_point
PC_P_joint
>>> joint.child_point
PC_C_joint
>>> joint.parent_axis
P_frame.x
>>> joint.child_axis
C_frame.x
>>> joint.coordinates
[theta_PC(t)]
>>> joint.speeds
[omega_PC(t)]
>>> joint.child.frame.ang_vel_in(joint.parent.frame)
omega_PC(t)*P_frame.x
>>> joint.child.frame.dcm(joint.parent.frame)
Matrix([
[1, 0, 0],
[0, cos(theta_PC(t)), sin(theta_PC(t))],
[0, -sin(theta_PC(t)), cos(theta_PC(t))]])
>>> joint.child_point.pos_from(joint.parent_point)
0
    To further demonstrate the use of the pin joint, the kinematics of a simple
double pendulum that rotates about the Z axis of each connected body can be
created as follows.
>>> from sympy import symbols, trigsimp
>>> from sympy.physics.mechanics import Body, PinJoint
>>> l1, l2 = symbols('l1 l2')
First create bodies to represent the fixed ceiling and one to represent
each pendulum bob.
>>> ceiling = Body('C')
>>> upper_bob = Body('U')
>>> lower_bob = Body('L')
The first joint will connect the upper bob to the ceiling by a distance of
``l1`` and the joint axis will be about the Z axis for each body.
>>> ceiling_joint = PinJoint('P1', ceiling, upper_bob,
... child_joint_pos=-l1*upper_bob.frame.x,
... parent_axis=ceiling.frame.z,
... child_axis=upper_bob.frame.z)
The second joint will connect the lower bob to the upper bob by a distance
of ``l2`` and the joint axis will also be about the Z axis for each body.
>>> pendulum_joint = PinJoint('P2', upper_bob, lower_bob,
... child_joint_pos=-l2*lower_bob.frame.x,
... parent_axis=upper_bob.frame.z,
... child_axis=lower_bob.frame.z)
Once the joints are established the kinematics of the connected bodies can
be accessed. First the direction cosine matrices of pendulum link relative
to the ceiling are found:
>>> upper_bob.frame.dcm(ceiling.frame)
Matrix([
[ cos(theta_P1(t)), sin(theta_P1(t)), 0],
[-sin(theta_P1(t)), cos(theta_P1(t)), 0],
[ 0, 0, 1]])
>>> trigsimp(lower_bob.frame.dcm(ceiling.frame))
Matrix([
[ cos(theta_P1(t) + theta_P2(t)), sin(theta_P1(t) + theta_P2(t)), 0],
[-sin(theta_P1(t) + theta_P2(t)), cos(theta_P1(t) + theta_P2(t)), 0],
[ 0, 0, 1]])
The position of the lower bob's masscenter is found with:
>>> lower_bob.masscenter.pos_from(ceiling.masscenter)
l1*U_frame.x + l2*L_frame.x
The angular velocities of the two pendulum links can be computed with
respect to the ceiling.
>>> upper_bob.frame.ang_vel_in(ceiling.frame)
omega_P1(t)*C_frame.z
>>> lower_bob.frame.ang_vel_in(ceiling.frame)
omega_P1(t)*C_frame.z + omega_P2(t)*U_frame.z
And finally, the linear velocities of the two pendulum bobs can be computed
with respect to the ceiling.
>>> upper_bob.masscenter.vel(ceiling.frame)
l1*omega_P1(t)*U_frame.y
>>> lower_bob.masscenter.vel(ceiling.frame)
l1*omega_P1(t)*U_frame.y + l2*(omega_P1(t) + omega_P2(t))*L_frame.y
"""
def __init__(self, name, parent, child, coordinates=None, speeds=None,
parent_joint_pos=None, child_joint_pos=None, parent_axis=None,
child_axis=None):
super().__init__(name, parent, child, coordinates, speeds,
parent_joint_pos, child_joint_pos, parent_axis,
child_axis)
def __str__(self):
return (f'PinJoint: {self.name} parent: {self.parent} '
f'child: {self.child}')
def _generate_coordinates(self, coordinate):
coordinates = []
if coordinate is None:
theta = dynamicsymbols('theta' + '_' + self._name)
coordinate = theta
coordinates.append(coordinate)
return coordinates
def _generate_speeds(self, speed):
speeds = []
if speed is None:
omega = dynamicsymbols('omega' + '_' + self._name)
speed = omega
speeds.append(speed)
return speeds
def _orient_frames(self):
frame = self._set_orientation()
self.child.frame.orient_axis(frame, self.parent_axis,
self.coordinates[0])
def _set_angular_velocity(self):
self.child.frame.set_ang_vel(self.parent.frame, self.speeds[0] *
self.parent_axis.normalize())
def _set_linear_velocity(self):
self.parent_point.set_vel(self.parent.frame, 0)
self.child_point.set_vel(self.parent.frame, 0)
self.child_point.set_pos(self.parent_point, 0)
self.child.masscenter.v2pt_theory(self.parent.masscenter,
self.parent.frame, self.child.frame)
class PrismaticJoint(Joint):
"""Prismatic (Sliding) Joint.
Explanation
===========
    It is defined such that the child body translates with respect to the parent
    body along the body-fixed parent axis. The location of the joint is defined
    by two points, one in each body, which coincide when the generalized
    coordinate is zero. The direction cosine matrix between the child and parent
    is formed using a simple rotation about an axis that is normal to both
    ``child_axis`` and ``parent_axis``; see the Notes section for a detailed
    explanation of this.
Parameters
==========
name : string
A unique name for the joint.
parent : Body
The parent body of joint.
child : Body
The child body of joint.
coordinates: dynamicsymbol, optional
Generalized coordinates of the joint.
speeds : dynamicsymbol, optional
Generalized speeds of joint.
parent_joint_pos : Vector, optional
Vector from the parent body's mass center to the point where the parent
and child are connected. The default value is the zero vector.
child_joint_pos : Vector, optional
Vector from the child body's mass center to the point where the parent
and child are connected. The default value is the zero vector.
parent_axis : Vector, optional
Axis fixed in the parent body which aligns with an axis fixed in the
child body. The default is x axis in parent's reference frame.
child_axis : Vector, optional
Axis fixed in the child body which aligns with an axis fixed in the
parent body. The default is x axis in child's reference frame.
Attributes
==========
name : string
The joint's name.
parent : Body
The joint's parent body.
child : Body
The joint's child body.
coordinates : list
List of the joint's generalized coordinates.
speeds : list
List of the joint's generalized speeds.
parent_point : Point
The point fixed in the parent body that represents the joint.
child_point : Point
The point fixed in the child body that represents the joint.
parent_axis : Vector
The axis fixed in the parent frame that represents the joint.
child_axis : Vector
The axis fixed in the child frame that represents the joint.
kdes : list
Kinematical differential equations of the joint.
Examples
=========
A single prismatic joint is created from two bodies and has the following basic
attributes:
>>> from sympy.physics.mechanics import Body, PrismaticJoint
>>> parent = Body('P')
>>> parent
P
>>> child = Body('C')
>>> child
C
>>> joint = PrismaticJoint('PC', parent, child)
>>> joint
PrismaticJoint: PC parent: P child: C
>>> joint.name
'PC'
>>> joint.parent
P
>>> joint.child
C
>>> joint.parent_point
PC_P_joint
>>> joint.child_point
PC_C_joint
>>> joint.parent_axis
P_frame.x
>>> joint.child_axis
C_frame.x
>>> joint.coordinates
[x_PC(t)]
>>> joint.speeds
[v_PC(t)]
>>> joint.child.frame.ang_vel_in(joint.parent.frame)
0
>>> joint.child.frame.dcm(joint.parent.frame)
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> joint.child_point.pos_from(joint.parent_point)
x_PC(t)*P_frame.x
    To further demonstrate the use of the prismatic joint, the kinematics of
    two masses sliding along the X axis of each connected body, one moving
    relative to a fixed body and the other relative to the moving body, can be
    created as follows.
>>> from sympy.physics.mechanics import PrismaticJoint, Body
    First create a body to represent the fixed wall and two bodies to represent
    the sliding particles.
>>> wall = Body('W')
>>> Part1 = Body('P1')
>>> Part2 = Body('P2')
    The first joint will connect the first particle to the wall and the
    joint axis will be about the X axis for each body.
>>> J1 = PrismaticJoint('J1', wall, Part1)
The second joint will connect the second particle to the first particle
and the joint axis will also be about the X axis for each body.
>>> J2 = PrismaticJoint('J2', Part1, Part2)
    Once the joints are established the kinematics of the connected bodies can
    be accessed. First the direction cosine matrices of the parts relative
    to the wall are found:
>>> Part1.dcm(wall)
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> Part2.dcm(wall)
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
    The positions of the particles' masscenters are found with:
>>> Part1.masscenter.pos_from(wall.masscenter)
x_J1(t)*W_frame.x
>>> Part2.masscenter.pos_from(wall.masscenter)
x_J1(t)*W_frame.x + x_J2(t)*P1_frame.x
    The angular velocities of the two particles can be computed with
    respect to the wall.
>>> Part1.ang_vel_in(wall)
0
>>> Part2.ang_vel_in(wall)
0
And finally, the linear velocities of the two particles can be computed
    with respect to the wall.
    >>> Part1.masscenter.vel(wall.frame)
v_J1(t)*W_frame.x
>>> Part2.masscenter.vel(wall.frame)
v_J1(t)*W_frame.x + v_J2(t)*P1_frame.x
"""
def __init__(self, name, parent, child, coordinates=None, speeds=None, parent_joint_pos=None,
child_joint_pos=None, parent_axis=None, child_axis=None):
super().__init__(name, parent, child, coordinates, speeds, parent_joint_pos,
child_joint_pos, parent_axis, child_axis)
def __str__(self):
return (f'PrismaticJoint: {self.name} parent: {self.parent} '
f'child: {self.child}')
def _generate_coordinates(self, coordinate):
coordinates = []
if coordinate is None:
x = dynamicsymbols('x' + '_' + self._name)
coordinate = x
coordinates.append(coordinate)
return coordinates
def _generate_speeds(self, speed):
speeds = []
if speed is None:
y = dynamicsymbols('v' + '_' + self._name)
speed = y
speeds.append(speed)
return speeds
def _orient_frames(self):
frame = self._set_orientation()
self.child.frame.orient_axis(frame, self.parent_axis, 0)
def _set_angular_velocity(self):
self.child.frame.set_ang_vel(self.parent.frame, 0)
def _set_linear_velocity(self):
self.parent_point.set_vel(self.parent.frame, 0)
self.child_point.set_vel(self.child.frame, 0)
self.child_point.set_pos(self.parent_point, self.coordinates[0] * self.parent_axis.normalize())
self.child_point.set_vel(self.parent.frame, self.speeds[0] * self.parent_axis.normalize())
self.child.masscenter.set_vel(self.parent.frame, self.speeds[0] * self.parent_axis.normalize())
|
scripts/automation/trex_control_plane/interactive/trex/emu/api.py | timgates42/trex-core | 956 | 11197964 | <filename>scripts/automation/trex_control_plane/interactive/trex/emu/api.py<gh_stars>100-1000
# some common data
from ..common.trex_exceptions import *
from ..common.trex_api_annotators import *
from ..common.trex_logger import Logger
# TRex Emu profile
from .trex_emu_profile import *
from .trex_emu_client import *
# TRex client misc
from trex.common.trex_api_annotators import *
from trex.utils.parsing_opts import *
from .trex_emu_conversions import *
|
py/testdir_hosts/notest_short.py | gigliovale/h2o | 882 | 11197977 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1,java_heap_GB=14)
@classmethod
def tearDownClass(cls):
# time.sleep(3600)
h2o.tear_down_cloud()
def test_short(self):
csvFilename = 'part-00000b'
### csvFilename = 'short'
print "this data is only on 0xdata machines"
importFolderPath = '/home/hduser/data'
csvPathname = importFolderPath + "/" + csvFilename
# FIX! does 'separator=' take ints or ?? hex format
# looks like it takes the hex string (two chars)
start = time.time()
# hardwire TAB as a separator, as opposed to white space (9)
parseResult = h2i.import_parse(path=csvPathname, schema='local', timeoutSecs=500, separator=9)
print "Parse of", parseResult['destination_key'], "took", time.time() - start, "seconds"
print "Parse result['destination_key']:", parseResult['destination_key']
start = time.time()
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=500)
print "Inspect:", parseResult['destination_key'], "took", time.time() - start, "seconds"
h2o_cmd.infoFromInspect(inspect, csvPathname)
# numRows = inspect['numRows']
# numCols = inspect['numCols']
keepPattern = "oly_|mt_|b_"
y = "is_purchase"
print "y:", y
# don't need the intermediate Dicts produced from columnInfoFromInspect
x = h2o_glm.goodXFromColumnInfo(y, keepPattern=keepPattern, key=parseResult['destination_key'], timeoutSecs=300)
print "x:", x
kwargs = {
'x': x,
'y': y,
# 'case_mode': '>',
# 'case': 0,
'family': 'binomial',
'lambda': 1.0E-5,
'alpha': 0.5,
'max_iter': 5,
'thresholds': 0.5,
'n_folds': 1,
'weight': 100,
'beta_epsilon': 1.0E-4,
}
timeoutSecs = 1800
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, pollTimeoutSecs=60, **kwargs)
elapsed = time.time() - start
print "glm completed in", elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
if __name__ == '__main__':
h2o.unit_main()
|
pyscf/adc/test/test_radc/test_dfadc_N2.py | QuESt-Calculator/pyscf | 501 | 11197984 | # Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import adc
from pyscf import df
r = 1.098
mol = gto.Mole()
mol.atom = [
['N', ( 0., 0. , -r/2 )],
['N', ( 0., 0. , r/2)],]
mol.basis = {'N':'cc-pvdz'}
mol.verbose = 0
mol.build()
mf = scf.RHF(mol).density_fit(auxbasis='cc-pvdz-jkfit')
mf.kernel()
myadc = adc.ADC(mf)
myadc = adc.ADC(mf).density_fit(auxbasis='cc-pvdz-ri')
def tearDownModule():
global mol,mf
del mol,mf
class KnownValues(unittest.TestCase):
def test_df_gs(self):
mf = scf.RHF(mol).run()
myadc.with_df = df.DF(mol, auxbasis='cc-pvdz-ri')
e, t_amp1, t_amp2 = myadc.kernel_gs()
self.assertAlmostEqual(e, -0.31081009625, 6)
def test_dfhf_dfadc_gs(self):
myadc.with_df = df.DF(mol, auxbasis='cc-pvdz-ri')
e, t_amp1, t_amp2 = myadc.kernel_gs()
self.assertAlmostEqual(e, -0.3108102956, 6)
def test_dfadc3_ip(self):
myadc = adc.ADC(mf).density_fit(auxbasis='cc-pvdz-ri')
myadc.max_memory = 2
myadc.method = "adc(3)"
myadc.method_type = "ip"
e,v,p,x = myadc.kernel(nroots=3)
e_corr = myadc.e_corr
self.assertAlmostEqual(e_corr, -0.3061165912 , 6)
self.assertAlmostEqual(e[0], 0.55609388, 6)
self.assertAlmostEqual(e[1], 0.60109239, 6)
self.assertAlmostEqual(e[2], 0.60109239, 6)
self.assertAlmostEqual(p[0], 1.83255357, 6)
self.assertAlmostEqual(p[1], 1.86389642, 6)
self.assertAlmostEqual(p[2], 1.86389642, 6)
def test_dfhf_dfadc2_ea(self):
myadc.max_memory = 20
myadc.method = "adc(2)"
myadc.method_type = "ea"
e,v,p,x = myadc.kernel(nroots=4)
myadc.analyze()
self.assertAlmostEqual(e[0], 0.14260766, 6)
self.assertAlmostEqual(e[1], 0.14260766, 6)
self.assertAlmostEqual(e[2], 0.55083845, 6)
self.assertAlmostEqual(e[3], 0.76736577, 6)
self.assertAlmostEqual(p[0], 1.86603796, 6)
self.assertAlmostEqual(p[1], 1.86603796, 6)
self.assertAlmostEqual(p[2], 1.92699634, 6)
self.assertAlmostEqual(p[3], 1.88366005, 6)
def test_hf_dfadc2_ea(self):
mf = scf.RHF(mol).run()
myadc = adc.ADC(mf).density_fit(auxbasis='cc-pvdz-ri')
myadc.max_memory = 20
myadc.method = "adc(2)"
myadc.method_type = "ea"
e,v,p,x = myadc.kernel(nroots=4)
self.assertAlmostEqual(e[0], 0.14265314, 6)
self.assertAlmostEqual(e[1], 0.14265314, 6)
self.assertAlmostEqual(e[2], 0.55092042, 6)
self.assertAlmostEqual(e[3], 0.76714415, 6)
self.assertAlmostEqual(p[0], 1.86604908, 6)
self.assertAlmostEqual(p[1], 1.86604908, 6)
self.assertAlmostEqual(p[2], 1.92697854, 6)
self.assertAlmostEqual(p[3], 1.88386011, 6)
if __name__ == "__main__":
print("DF-ADC calculations for different RADC methods for nitrogen molecule")
unittest.main()
|
vit/actions.py | kinifwyne/vit | 179 | 11197997 | class Actions(object):
def __init__(self, action_registry):
self.action_registry = action_registry
def register(self):
self.action_registrar = self.action_registry.get_registrar()
# Global.
self.action_registrar.register('QUIT', 'Quit the application')
self.action_registrar.register('QUIT_WITH_CONFIRM', 'Quit the application, after confirmation')
self.action_registrar.register('GLOBAL_ESCAPE', 'Top-level escape function')
self.action_registrar.register('REFRESH', 'Refresh the current report')
self.action_registrar.register('TASK_ADD', 'Add a task (supports tab completion)')
self.action_registrar.register('REPORT_FILTER', 'Filter current report using provided FILTER arguments (supports tab completion)')
self.action_registrar.register('TASK_UNDO', 'Undo last task change')
self.action_registrar.register('TASK_SYNC', 'Synchronize with configured taskd server')
self.action_registrar.register('COMMAND_BAR_EX', "Open the command bar in 'ex' mode")
self.action_registrar.register('COMMAND_BAR_EX_TASK_READ_WAIT', "Open the command bar in 'ex' mode with '!rw task ' appended")
self.action_registrar.register('COMMAND_BAR_SEARCH_FORWARD', 'Search forward for provided STRING')
self.action_registrar.register('COMMAND_BAR_SEARCH_REVERSE', 'Search reverse for provided STRING')
self.action_registrar.register('COMMAND_BAR_SEARCH_NEXT', 'Search next')
self.action_registrar.register('COMMAND_BAR_SEARCH_PREVIOUS', 'Search previous')
self.action_registrar.register('COMMAND_BAR_TASK_CONTEXT', 'Set task context')
self.action_registrar.register(self.action_registry.noop_action_name, 'Used to disable a default keybinding action')
# List.
self.action_registrar.register('LIST_UP', 'Move list focus up one entry')
self.action_registrar.register('LIST_DOWN', 'Move list focus down one entry')
self.action_registrar.register('LIST_PAGE_UP', 'Move list focus up one page')
self.action_registrar.register('LIST_PAGE_DOWN', 'Move list focus down one page')
self.action_registrar.register('LIST_HOME', 'Move list focus to top of the list')
self.action_registrar.register('LIST_END', 'Move list focus to bottom of the list')
self.action_registrar.register('LIST_SCREEN_TOP', 'Move list focus to top of the screen')
self.action_registrar.register('LIST_SCREEN_MIDDLE', 'Move list focus to middle of the screen')
self.action_registrar.register('LIST_SCREEN_BOTTOM', 'Move list focus to bottom of the screen')
self.action_registrar.register('LIST_FOCUS_VALIGN_CENTER', 'Move focused item to center of the screen')
# Task.
self.action_registrar.register('TASK_ANNOTATE', 'Add an annotation to a task')
self.action_registrar.register('TASK_DELETE', 'Delete task')
self.action_registrar.register('TASK_DENOTATE', 'Denotate a task')
self.action_registrar.register('TASK_MODIFY', 'Modify task (supports tab completion)')
self.action_registrar.register('TASK_START_STOP', 'Start/stop task')
self.action_registrar.register('TASK_DONE', 'Mark task done')
self.action_registrar.register('TASK_PRIORITY', 'Modify task priority')
self.action_registrar.register('TASK_PROJECT', 'Modify task project (supports tab completion)')
self.action_registrar.register('TASK_TAGS', 'Modify task tags (supports tab completion, +TAG adds, -TAG removes)')
self.action_registrar.register('TASK_WAIT', 'Wait a task')
self.action_registrar.register('TASK_EDIT', 'Edit a task via the default editor')
self.action_registrar.register('TASK_SHOW', 'Show task details')
def get(self):
return self.action_registry.actions
|
blender/arm/lightmapper/utility/denoiser/oidn.py | onelsonic/armory | 2,583 | 11198042 | import bpy, os, sys, re, platform, subprocess
import numpy as np
class TLM_OIDN_Denoise:
image_array = []
image_output_destination = ""
denoised_array = []
def __init__(self, oidnProperties, img_array, dirpath):
self.oidnProperties = oidnProperties
self.image_array = img_array
self.image_output_destination = dirpath
self.check_binary()
def check_binary(self):
oidnPath = self.oidnProperties.tlm_oidn_path
if oidnPath != "":
file = os.path.basename(os.path.realpath(oidnPath))
filename, file_extension = os.path.splitext(file)
if platform.system() == 'Windows':
if(file_extension == ".exe"):
pass
else:
self.oidnProperties.tlm_oidn_path = os.path.join(self.oidnProperties.tlm_oidn_path,"oidnDenoise.exe")
else:
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Please provide OIDN path")
def denoise(self):
for image in self.image_array:
if image not in self.denoised_array:
image_path = os.path.join(self.image_output_destination, image)
#Save to pfm
loaded_image = bpy.data.images.load(image_path, check_existing=False)
width = loaded_image.size[0]
height = loaded_image.size[1]
image_output_array = np.zeros([width, height, 3], dtype="float32")
image_output_array = np.array(loaded_image.pixels)
image_output_array = image_output_array.reshape(height, width, 4)
image_output_array = np.float32(image_output_array[:,:,:3])
image_output_denoise_destination = image_path[:-4] + ".pfm"
image_output_denoise_result_destination = image_path[:-4] + "_denoised.pfm"
with open(image_output_denoise_destination, "wb") as fileWritePFM:
self.save_pfm(fileWritePFM, image_output_array)
#Denoise
if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
print("Loaded image: " + str(loaded_image))
verbose = bpy.context.scene.TLM_SceneProperties.tlm_verbose
affinity = self.oidnProperties.tlm_oidn_affinity
if verbose:
print("Denoiser search: " + bpy.path.abspath(self.oidnProperties.tlm_oidn_path))
v = "3"
else:
v = "0"
if affinity:
a = "1"
else:
a = "0"
threads = str(self.oidnProperties.tlm_oidn_threads)
maxmem = str(self.oidnProperties.tlm_oidn_maxmem)
if platform.system() == 'Windows':
oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
pipePath = [oidnPath, '-f', 'RTLightmap', '-hdr', image_output_denoise_destination, '-o', image_output_denoise_result_destination, '-verbose', v, '-threads', threads, '-affinity', a, '-maxmem', maxmem]
elif platform.system() == 'Darwin':
oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
pipePath = [oidnPath + ' -f ' + ' RTLightmap ' + ' -hdr ' + image_output_denoise_destination + ' -o ' + image_output_denoise_result_destination + ' -verbose ' + v]
else:
oidnPath = bpy.path.abspath(self.oidnProperties.tlm_oidn_path)
oidnPath = oidnPath.replace(' ', '\\ ')
image_output_denoise_destination = image_output_denoise_destination.replace(' ', '\\ ')
image_output_denoise_result_destination = image_output_denoise_result_destination.replace(' ', '\\ ')
pipePath = [oidnPath + ' -f ' + ' RTLightmap ' + ' -hdr ' + image_output_denoise_destination + ' -o ' + image_output_denoise_result_destination + ' -verbose ' + v]
if not verbose:
denoisePipe = subprocess.Popen(pipePath, stdout=subprocess.PIPE, stderr=None, shell=True)
else:
denoisePipe = subprocess.Popen(pipePath, shell=True)
denoisePipe.communicate()[0]
if platform.system() != 'Windows':
image_output_denoise_result_destination = image_output_denoise_result_destination.replace('\\', '')
with open(image_output_denoise_result_destination, "rb") as f:
denoise_data, scale = self.load_pfm(f)
ndata = np.array(denoise_data)
ndata2 = np.dstack((ndata, np.ones((width,height))))
img_array = ndata2.ravel()
loaded_image.pixels = img_array
loaded_image.filepath_raw = image_output_denoise_result_destination = image_path[:-10] + "_denoised.hdr"
loaded_image.file_format = "HDR"
loaded_image.save()
self.denoised_array.append(image)
print(image_path)
    def clean(self):
        self.denoised_array.clear()
        self.image_array.clear()
        # Collect the baked HDR files from the output directory; the temporary
        # .pfm and *_denoised.hdr intermediates could be cleaned up here.
        baked_image_array = []
        for file in os.listdir(self.image_output_destination):
            if file.endswith("_baked.hdr"):
                baked_image_array.append(file)
        #Clean temporary files here..
        #...pfm
        #...denoised.hdr
def load_pfm(self, file, as_flat_list=False):
#start = time()
header = file.readline().decode("utf-8").rstrip()
if header == "PF":
color = True
elif header == "Pf":
color = False
else:
raise Exception("Not a PFM file.")
dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("utf-8"))
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception("Malformed PFM header.")
scale = float(file.readline().decode("utf-8").rstrip())
if scale < 0: # little-endian
endian = "<"
scale = -scale
else:
endian = ">" # big-endian
data = np.fromfile(file, endian + "f")
shape = (height, width, 3) if color else (height, width)
if as_flat_list:
result = data
else:
result = np.reshape(data, shape)
#print("PFM import took %.3f s" % (time() - start))
return result, scale
def save_pfm(self, file, image, scale=1):
#start = time()
if image.dtype.name != "float32":
raise Exception("Image dtype must be float32 (got %s)" % image.dtype.name)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
file.write(b"PF\n" if color else b"Pf\n")
file.write(b"%d %d\n" % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == "<" or endian == "=" and sys.byteorder == "little":
scale = -scale
file.write(b"%f\n" % scale)
image.tofile(file)
#print("PFM export took %.3f s" % (time() - start))
|
caffe2/experiments/python/funhash_op_test.py | wenhaopeter/read_pytorch_code | 585 | 11198044 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestFunHash(hu.HypothesisTestCase):
@given(n_out=st.integers(min_value=5, max_value=20),
n_in=st.integers(min_value=10, max_value=20),
n_data=st.integers(min_value=2, max_value=8),
n_weight=st.integers(min_value=8, max_value=15),
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
weight = np.random.rand(n_weight).astype(np.float32)
alpha = np.random.rand(n_alpha).astype(np.float32)
val = val.astype(np.float32)
key = key.astype(np.int64)
seg = seg.astype(np.int32)
op = core.CreateOperator(
'FunHash',
['val', 'key', 'seg', 'weight', 'alpha'],
['out'],
num_outputs=n_out)
# Check over multiple devices
self.assertDeviceChecks(
dc, op, [val, key, seg, weight, alpha], [0])
# Gradient check wrt weight
self.assertGradientChecks(
gc, op, [val, key, seg, weight, alpha], 3, [0])
# Gradient check wrt alpha
self.assertGradientChecks(
gc, op, [val, key, seg, weight, alpha], 4, [0])
op2 = core.CreateOperator(
'FunHash',
['val', 'key', 'seg', 'weight'],
['out'],
num_outputs=n_out)
# Check over multiple devices
self.assertDeviceChecks(
dc, op2, [val, key, seg, weight], [0])
# Gradient check wrt weight
self.assertGradientChecks(
gc, op2, [val, key, seg, weight], 3, [0])
|
popong/precinct/mismatch_case_precinct_map.py | Jooyaro/southkorea-maps | 404 | 11198050 | <filename>popong/precinct/mismatch_case_precinct_map.py
# -*- coding: utf-8 -*-
# 지도 파일과 선거구 파일 사이의 불일치
mismatches = {}
# Districts (gu) whose city name is not reflected in the map file (e.g., 성산구 -> 창원시성산구)
mismatches["municipality_rename"] = {
2012: {37012: u"포항시북구", 37011: u"포항시남구", 38111: u"창원시의창구", 38112: u"창원시성산구", 38113: u"창원시마산합포구", 38114: u"창원시마산회원구", 38115: u"창원시진해구"},
2013: {}
}
# Submunicipalities (eup/myeon/dong) renamed, split, or merged, etc. between the creation of the map file and the delimitation of the precincts (e.g., 장지동 -> 위례동)
mismatches["submunicipality_changed"] = {
19: {
(23080, u'청라동'): [u'청라1동', u'청라2동'], # merged
(22070, u'두류1\xb72동'): u'두류1,2동',
(33012, u'봉명2송정동'): u'봉명2·송정동',
(31200, u'진동면'): u'군내면' # does not exist in the map; pick one near
},
20: {
        # name changed or split
(11240, u'위례동'): u'장지동',
(21120, u'가덕도동'): u'천가동',
(21090, u'우3동'): u'우1동',
(23040, u'송도3동'): u'송도1동',
(23070, u'계양3동'): u'계양1동',
(25040, u'노은3동'): u'노은2동',
(31012, u'금곡동'): u'금호동',
(31012, u'호매실동'): u'금호동',
(31014, u'광교1동'): u'광교동',
(31014, u'광교2동'): u'광교동',
(31023, u'정자동'): u'정자1동',
(31150, u'장곡동'): u'연성동',
(31150, u'월곶동'): u'군자동',
(31230, u'운양동'): u'김포2동',
(31240, u'남양읍'): u'남양동',
(31240, u'동탄4동'): u'동탄면',
(31250, u'남한산성면'): u'중부면',
# data quality issue
(22070, u'두류1\xb72동'): u'두류1,2동',
(21050, u'당감1동'): u'당감제1동',
(21050, u'가야1동'): u'가야제1동',
# merged
(21050, u'부전1동'): [u'범전동', u'부전1동'],
(21050, u'전포1동'): [u'전포1동', u'전포3동'],
(21050, u'범천2동'): [u'범천2동', u'범천4동'],
(21090, u'반송1동'): [u'반송1동', u'반송3동'],
(31200, u'군내면'): [u'군내면', u'장단면'] # no population
}
}
# Newly created municipalities (si/gun/gu)
mismatches["new_municipality"] = {
19: {},
20: {
u'청주시상당구': [u'낭성면', u'미원면', u'가덕면', u'남일면', u'문의면', u'중앙동', u'성안동', u'탑·대성동', u'영운동', u'금천동', u'용담·명암·산성동', u'용암제1동', u'용암제2동'],
u'청주시서원구': [u'남이면', u'현도면', u'사직1동', u'사직2동', u'사창동', u'모충동', u'산남동', u'분평동', u'수곡1동', u'수곡2동', u'성화·개신·죽림동'],
u'청주시흥덕구': [u'오송읍', u'강내면', u'옥산면', u'운천·신봉동', u'복대제1동', u'복대제2동', u'가경동', u'봉명제1동', u'봉명제2·송정동', u'강서제1동', u'강서제2동'],
u'청주시청원구': [u'내수읍', u'오창읍', u'북이면', u'우암동', u'내덕제1동', u'내덕제2동', u'율량·사천동', u'오근장동'],
}
} |
src/datasets/inspect.py | huggingface/nlp | 3,395 | 11198085 | # Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
""" List and inspect datasets and metrics."""
from typing import Dict, List, Mapping, Optional, Sequence, Union
import huggingface_hub
from .download.download_config import DownloadConfig
from .download.download_manager import DownloadMode
from .download.streaming_download_manager import StreamingDownloadManager
from .info import DatasetInfo
from .load import (
dataset_module_factory,
extend_dataset_builder_for_streaming,
import_main_class,
load_dataset_builder,
metric_module_factory,
)
from .utils.logging import get_logger
from .utils.version import Version
logger = get_logger(__name__)
class SplitsNotFoundError(ValueError):
pass
def list_datasets(with_community_datasets=True, with_details=False):
"""List all the datasets scripts available on the Hugging Face Hub.
Args:
with_community_datasets (:obj:`bool`, optional, default ``True``): Include the community provided datasets.
with_details (:obj:`bool`, optional, default ``False``): Return the full details on the datasets instead of only the short name.
Example:
```py
>>> from datasets import list_datasets
>>> list_datasets()
['acronym_identification',
'ade_corpus_v2',
'adversarial_qa',
'aeslc',
'afrikaans_ner_corpus',
'ag_news',
...
]
```
"""
datasets = huggingface_hub.list_datasets(full=with_details)
if not with_community_datasets:
datasets = [dataset for dataset in datasets if "/" not in dataset.id]
if not with_details:
datasets = [dataset.id for dataset in datasets]
return datasets
def list_metrics(with_community_metrics=True, with_details=False):
"""List all the metrics script available on the Hugging Face Hub.
Args:
with_community_metrics (:obj:`bool`, optional, default ``True``): Include the community provided metrics.
with_details (:obj:`bool`, optional, default ``False``): Return the full details on the metrics instead of only the short name.
Example:
```py
>>> from datasets import list_metrics
>>> list_metrics()
['accuracy',
'bertscore',
'bleu',
'bleurt',
'cer',
'chrf',
...
]
```
"""
metrics = huggingface_hub.list_metrics()
if not with_community_metrics:
metrics = [metric for metric in metrics if "/" not in metric.id]
if not with_details:
metrics = [metric.id for metric in metrics]
return metrics
def inspect_dataset(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
"""
Allow inspection/modification of a dataset script by copying on local drive at local_path.
Args:
path (`str`): Path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name
as the directory),
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`list_datasets`])
e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``.
local_path (`str`): Path to the local folder to copy the dataset script to.
download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters.
**download_kwargs (additional keyword arguments): Optional arguments for [`DownloadConfig`] which will override
the attributes of `download_config` if supplied.
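    Example (a minimal usage sketch; the dataset name and local folder path
    are placeholders):
    ```py
    >>> from datasets import inspect_dataset
    >>> inspect_dataset("rotten_tomatoes", "path/to/my/local/folder")
    ```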
"""
dataset_module = dataset_module_factory(
path, download_config=download_config, force_local_path=local_path, **download_kwargs
)
print(
f"The processing script for dataset {path} can be inspected at {local_path}. "
f"The main class is in {dataset_module.module_path}. "
f"You can modify this processing script and use it with `datasets.load_dataset({local_path})`."
)
def inspect_metric(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
r"""
Allow inspection/modification of a metric script by copying it on local drive at local_path.
Args:
path (``str``): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
        local_path (``str``): path to the local folder to copy the metric script to.
download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
**download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
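    Example (illustrative only; the metric name and local path are placeholders):
    ```py
    >>> from datasets import inspect_metric
    >>> inspect_metric("accuracy", "path/to/my/local/folder")
    ```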
"""
metric_module = metric_module_factory(
path, download_config=download_config, force_local_path=local_path, **download_kwargs
)
print(
f"The processing scripts for metric {path} can be inspected at {local_path}. "
f"The main class is in {metric_module.module_path}. "
f"You can modify this processing scripts and use it with `datasets.load_metric({local_path})`."
)
def get_dataset_infos(
path: str,
data_files: Optional[Union[Dict, List, str]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[DownloadMode] = None,
revision: Optional[Union[str, Version]] = None,
use_auth_token: Optional[Union[bool, str]] = None,
**config_kwargs,
):
"""Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict.
Args:
path (``str``): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
revision (Optional ``Union[str, datasets.Version]``):
If specified, the dataset module will be loaded from the datasets repository at this version.
By default:
- it is set to the local version of the lib.
- it will also try to load it from the master branch if it's not available at the local version of the lib.
Specifying a version that is different from your local version of the lib might cause compatibility issues.
download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
download_mode (:class:`DownloadMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
data_files (:obj:`Union[Dict, List, str]`, optional): Defining the data_files of the dataset configuration.
use_auth_token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If True, will get token from `"~/.huggingface"`.
**config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied.
Example:
```py
>>> from datasets import get_dataset_infos
>>> get_dataset_infos('rotten_tomatoes')
{'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...}
```
"""
config_names = get_dataset_config_names(
path=path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
data_files=data_files,
)
return {
config_name: get_dataset_config_info(
path=path,
config_name=config_name,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
revision=revision,
use_auth_token=use_auth_token,
**config_kwargs,
)
for config_name in config_names
}
def get_dataset_config_names(
path: str,
revision: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[DownloadMode] = None,
force_local_path: Optional[str] = None,
dynamic_modules_path: Optional[str] = None,
data_files: Optional[Union[Dict, List, str]] = None,
**download_kwargs,
):
"""Get the list of available config names for a particular dataset.
Args:
path (``str``): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
revision (Optional ``Union[str, datasets.Version]``):
If specified, the dataset module will be loaded from the datasets repository at this version.
By default:
- it is set to the local version of the lib.
- it will also try to load it from the master branch if it's not available at the local version of the lib.
Specifying a version that is different from your local version of the lib might cause compatibility issues.
download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
download_mode (:class:`DownloadMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
force_local_path (Optional str): Optional path to a local path to download and prepare the script to.
Used to inspect or modify the script folder.
dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
By default the datasets and metrics are stored inside the `datasets_modules` module.
data_files (:obj:`Union[Dict, List, str]`, optional): Defining the data_files of the dataset configuration.
**download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override the attributes in download_config if supplied,
for example ``use_auth_token``
Example:
```py
>>> from datasets import get_dataset_config_names
>>> get_dataset_config_names("glue")
['cola',
'sst2',
'mrpc',
'qqp',
'stsb',
'mnli',
'mnli_mismatched',
'mnli_matched',
'qnli',
'rte',
'wnli',
'ax']
```
"""
dataset_module = dataset_module_factory(
path,
revision=revision,
download_config=download_config,
download_mode=download_mode,
force_local_path=force_local_path,
dynamic_modules_path=dynamic_modules_path,
data_files=data_files,
**download_kwargs,
)
builder_cls = import_main_class(dataset_module.module_path)
return list(builder_cls.builder_configs.keys()) or [dataset_module.builder_kwargs.get("config_name", "default")]
def get_dataset_config_info(
path: str,
config_name: Optional[str] = None,
data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[DownloadMode] = None,
revision: Optional[Union[str, Version]] = None,
use_auth_token: Optional[Union[bool, str]] = None,
**config_kwargs,
) -> DatasetInfo:
"""Get the meta information (DatasetInfo) about a dataset for a particular config
Args:
path (``str``): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
config_name (:obj:`str`, optional): Defining the name of the dataset configuration.
data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s).
download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters.
download_mode (:class:`DownloadMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load:
- For datasets in the `huggingface/datasets` library on GitHub like "squad", the default version of the module is the local version of the lib.
You can specify a different version from your local version of the lib (e.g. "master" or "1.2.0") but it might cause compatibility issues.
- For community datasets like "lhoestq/squad" that have their own git repository on the Datasets Hub, the default version "main" corresponds to the "main" branch.
              You can specify a different version than the default "main" by using a commit sha or a git tag of the dataset repository.
use_auth_token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If True, will get token from `"~/.huggingface"`.
**config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied.
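    Example (illustrative sketch; assumes the public ``rotten_tomatoes`` dataset is reachable on the Hub, whose splits are shown elsewhere in this module's examples):

    ```py
    >>> from datasets import get_dataset_config_info
    >>> info = get_dataset_config_info("rotten_tomatoes")
    >>> list(info.splits)
    ['train', 'validation', 'test']
    ```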
"""
builder = load_dataset_builder(
path,
name=config_name,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
revision=revision,
use_auth_token=use_auth_token,
**config_kwargs,
)
extend_dataset_builder_for_streaming(builder, use_auth_token=use_auth_token)
info = builder.info
if info.splits is None:
try:
download_config = download_config.copy() if download_config else DownloadConfig()
if use_auth_token is not None:
download_config.use_auth_token = use_auth_token
info.splits = {
split_generator.name: {"name": split_generator.name, "dataset_name": path}
for split_generator in builder._split_generators(
StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
)
}
except Exception as err:
raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err
return info
def get_dataset_split_names(
path: str,
config_name: Optional[str] = None,
data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[DownloadMode] = None,
revision: Optional[Union[str, Version]] = None,
use_auth_token: Optional[Union[bool, str]] = None,
**config_kwargs,
):
"""Get the list of available splits for a particular config and dataset.
Args:
path (``str``): path to the dataset processing script with the dataset builder. Can be either:
- a local path to processing script or the directory containing the script (if the script has the same name as the directory),
e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
- a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
config_name (:obj:`str`, optional): Defining the name of the dataset configuration.
data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s).
download_config (:class:`~download.DownloadConfig`, optional): Specific download configuration parameters.
download_mode (:class:`DownloadMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load:
- For datasets in the `huggingface/datasets` library on GitHub like "squad", the default version of the module is the local version of the lib.
You can specify a different version from your local version of the lib (e.g. "master" or "1.2.0") but it might cause compatibility issues.
- For community datasets like "lhoestq/squad" that have their own git repository on the Datasets Hub, the default version "main" corresponds to the "main" branch.
              You can specify a different version than the default "main" by using a commit sha or a git tag of the dataset repository.
use_auth_token (``str`` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
If True, will get token from `"~/.huggingface"`.
**config_kwargs (additional keyword arguments): optional attributes for builder class which will override the attributes if supplied.
Example:
```py
>>> from datasets import get_dataset_split_names
>>> get_dataset_split_names('rotten_tomatoes')
['train', 'validation', 'test']
```
"""
info = get_dataset_config_info(
path,
config_name=config_name,
data_files=data_files,
download_config=download_config,
download_mode=download_mode,
revision=revision,
use_auth_token=use_auth_token,
**config_kwargs,
)
return list(info.splits.keys())
|
session_encdec.py | sordonia/HierarchicalEncoderDecoder | 116 | 11198099
"""
Query suggestion hierarchical encoder-decoder code.
The code is inspired from nmt encdec code in groundhog
but we do not rely on groundhog infrastructure.
"""
__docformat__ = 'restructuredtext en'
__authors__ = ("<NAME>")
__contact__ = "<NAME> <<EMAIL>>"
import theano
import theano.tensor as T
import numpy
import numpy as np
import cPickle
import logging
logger = logging.getLogger(__name__)
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv3d2d import *
from collections import OrderedDict
from model import *
from utils import *
import operator
# Theano speed-up
theano.config.scan.allow_gc = False
def add_to_params(params, new_param):
params.append(new_param)
return new_param
class EncoderDecoderBase():
def __init__(self, state, rng, parent):
self.rng = rng
self.parent = parent
self.state = state
self.__dict__.update(state)
self.session_rec_activation = eval(self.session_rec_activation)
self.query_rec_activation = eval(self.query_rec_activation)
self.params = []
class Encoder(EncoderDecoderBase):
def init_params(self):
""" sent weights """
self.W_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.idim, self.rankdim), name='W_emb'))
self.W_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in'))
self.W_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh'))
self.b_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_hh'))
if self.query_step_type == "gated":
self.W_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in_r'))
self.W_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='W_in_z'))
self.W_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh_r'))
self.W_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='W_hh_z'))
self.b_z = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_z'))
self.b_r = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='b_r'))
""" Context weights """
self.Ws_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in'))
self.Ws_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh'))
self.bs_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_hh'))
if self.session_step_type == "gated":
self.Ws_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in_r'))
self.Ws_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, self.sdim), name='Ws_in_z'))
self.Ws_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh_r'))
self.Ws_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.sdim, self.sdim)), name='Ws_hh_z'))
self.bs_z = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_z'))
self.bs_r = add_to_params(self.params, theano.shared(value=np.zeros((self.sdim,), dtype='float32'), name='bs_r'))
def plain_query_step(self, x_t, m_t, h_tm1, hr_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
h_t = self.query_rec_activation(T.dot(x_t, self.W_in) + T.dot(hr_tm1, self.W_hh) + self.b_hh)
hr_t = m_t * h_t
return h_t, hr_t,
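
    # GRU-style query step (editor's note): r_t and z_t are the reset and update
    # gates; hr_t is h_t masked by m_t, so the hidden state is zeroed right after an
    # end-of-query (</q>) token and the next query starts from a fresh state.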
def gated_query_step(self, x_t, m_t, h_tm1, hr_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
r_t = T.nnet.sigmoid(T.dot(x_t, self.W_in_r) + T.dot(hr_tm1, self.W_hh_r) + self.b_r)
z_t = T.nnet.sigmoid(T.dot(x_t, self.W_in_z) + T.dot(hr_tm1, self.W_hh_z) + self.b_z)
h_tilde = self.query_rec_activation(T.dot(x_t, self.W_in) + T.dot(r_t * hr_tm1, self.W_hh) + self.b_hh)
h_t = (np.float32(1.0) - z_t) * hr_tm1 + z_t * h_tilde
hr_t = m_t * h_t
# return both reset state and non-reset state
return h_t, hr_t, r_t, z_t, h_tilde
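
    # Session-level recurrence (editor's note): hs is refreshed from the query state
    # h_t only where m_t == 0 (end-of-query positions); everywhere else the previous
    # session state hs_tm1 is carried over unchanged.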
def plain_session_step(self, h_t, m_t, hs_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
hs_update = self.session_rec_activation(T.dot(h_t, self.Ws_in) + T.dot(hs_tm1, self.Ws_hh) + self.bs_hh)
hs_t = (m_t) * hs_tm1 + (1 - m_t) * hs_update
return hs_t,
def gated_session_step(self, h_t, m_t, hs_tm1):
rs_t = T.nnet.sigmoid(T.dot(h_t, self.Ws_in_r) + T.dot(hs_tm1, self.Ws_hh_r) + self.bs_r)
zs_t = T.nnet.sigmoid(T.dot(h_t, self.Ws_in_z) + T.dot(hs_tm1, self.Ws_hh_z) + self.bs_z)
hs_tilde = self.session_rec_activation(T.dot(h_t, self.Ws_in) + T.dot(rs_t * hs_tm1, self.Ws_hh) + self.bs_hh)
hs_update = (np.float32(1.) - zs_t) * hs_tm1 + zs_t * hs_tilde
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
hs_t = (m_t) * hs_tm1 + (1 - m_t) * hs_update
return hs_t, hs_tilde, rs_t, zs_t
def approx_embedder(self, x):
return self.W_emb[x]
def build_encoder(self, x, xmask=None, **kwargs):
one_step = False
if len(kwargs):
one_step = True
# if x.ndim == 2 then
# x = (n_steps, batch_size)
if x.ndim == 2:
batch_size = x.shape[1]
# else x = (word_1, word_2, word_3, ...)
# or x = (last_word_1, last_word_2, last_word_3, ..)
        # in this case batch_size is 1
else:
batch_size = 1
# if it is not one_step then we initialize everything to 0
if not one_step:
h_0 = T.alloc(np.float32(0), batch_size, self.qdim)
hr_0 = T.alloc(np.float32(0), batch_size, self.qdim)
hs_0 = T.alloc(np.float32(0), batch_size, self.sdim)
        # in sampling mode (i.e. one step) we require the previous states to be passed in through kwargs
else:
# in this case x.ndim != 2
assert x.ndim != 2
assert 'prev_h' in kwargs
assert 'prev_hr' in kwargs
assert 'prev_hs' in kwargs
h_0 = kwargs['prev_h']
hr_0 = kwargs['prev_hr']
hs_0 = kwargs['prev_hs']
xe = self.approx_embedder(x)
        if xmask is None:
xmask = T.neq(x, self.eoq_sym)
# Gated Encoder
if self.query_step_type == "gated":
f_enc = self.gated_query_step
o_enc_info = [h_0, hr_0, None, None, None]
else:
f_enc = self.plain_query_step
o_enc_info = [h_0, hr_0]
if self.session_step_type == "gated":
f_hier = self.gated_session_step
o_hier_info = [hs_0, None, None, None]
else:
f_hier = self.plain_session_step
o_hier_info = [hs_0]
# Run through all the sentence (encode everything)
if not one_step:
_res, _ = theano.scan(
f_enc, sequences=[xe, xmask], outputs_info=o_enc_info)
# Make just one step further
else:
_res = f_enc(xe, xmask, h_0, hr_0)
# Get the hidden state sequence
h = _res[0]
hr = _res[1]
# All hierarchical sentence
# The hs sequence is based on the original mask
if not one_step:
_res, _ = theano.scan(
f_hier, sequences=[h, xmask], outputs_info=o_hier_info)
# Just one step further
else:
_res = f_hier(h, xmask, hs_0)
if isinstance(_res, list) or isinstance(_res, tuple):
hs = _res[0]
else:
hs = _res
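        # NOTE (editor): the (reset, update) gate outputs returned below assume the
        # gated session step; with the plain step, _res has a single output and the
        # extra indexing does not apply.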
return (h, hr), hs, (_res[2], _res[3])
def __init__(self, state, rng, parent):
EncoderDecoderBase.__init__(self, state, rng, parent)
self.init_params()
class Decoder(EncoderDecoderBase):
EVALUATION = 0
BEAM_SEARCH = 1
def __init__(self, state, rng, parent, encoder):
EncoderDecoderBase.__init__(self, state, rng, parent)
# Take as input the encoder instance for the embeddings..
# To modify in the future
self.encoder = encoder
self.trng = MRG_RandomStreams(self.seed)
self.init_params()
def init_params(self):
""" Decoder weights """
self.Wd_emb = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.idim, self.rankdim), name='Wd_emb'))
self.Wd_hh = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh'))
self.Wd_in = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in'))
self.bd_hh = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_hh'))
self.Wd_s_0 = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_0'))
self.bd_s_0 = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_s_0'))
if self.decoder_bias_type == 'all':
self.Wd_s_q = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_q'))
if self.query_step_type == "gated":
self.Wd_in_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in_r'))
self.Wd_in_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, self.qdim), name='Wd_in_z'))
self.Wd_hh_r = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh_r'))
self.Wd_hh_z = add_to_params(self.params, theano.shared(value=OrthogonalInit(self.rng, (self.qdim, self.qdim)), name='Wd_hh_z'))
self.bd_r = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_r'))
self.bd_z = add_to_params(self.params, theano.shared(value=np.zeros((self.qdim,), dtype='float32'), name='bd_z'))
if self.decoder_bias_type == 'all':
self.Wd_s_z = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_z'))
self.Wd_s_r = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, self.qdim), name='Wd_s_r'))
out_target_dim = self.qdim
if not self.maxout_out:
out_target_dim = self.rankdim
self.Wd_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.qdim, out_target_dim), name='Wd_out'))
self.bd_out = add_to_params(self.params, theano.shared(value=np.zeros((self.idim,), dtype='float32'), name='bd_out'))
# Set up deep output
if self.deep_out:
self.Wd_e_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.rankdim, out_target_dim), name='Wd_e_out'))
self.bd_e_out = add_to_params(self.params, theano.shared(value=np.zeros((out_target_dim,), dtype='float32'), name='bd_e_out'))
if self.decoder_bias_type != 'first':
self.Wd_s_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, out_target_dim), name='Wd_s_out'))
""" Rank """
if hasattr(self, 'train_rank'):
self.Wr_out = add_to_params(self.params, theano.shared(value=NormalInit(self.rng, self.sdim, 1), name='Wr_out'))
self.br_out = add_to_params(self.params, theano.shared(value=np.zeros((1,), dtype='float32'), name='br_out'))
def build_rank_layer(self, hs):
return T.dot(hs, self.Wr_out) + self.br_out
def build_output_layer(self, hs, xd, hd):
pre_activ = T.dot(hd, self.Wd_out)
if self.deep_out:
pre_activ += T.dot(xd, self.Wd_e_out) + self.bd_e_out
if self.decoder_bias_type != 'first':
pre_activ += T.dot(hs, self.Wd_s_out)
# ^ if bias all, bias the deep output
if self.maxout_out:
pre_activ = Maxout(2)(pre_activ)
return pre_activ
def build_next_probs_predictor(self, hs, x, prev_hd):
"""
Return output probabilities given prev_words x, hierarchical pass hs, and previous hd
hs should always be the same (and should not be updated).
"""
return self.build_decoder(hs, x, mode=Decoder.BEAM_SEARCH, prev_hd=prev_hd)
def approx_embedder(self, x):
# Here we use the same embeddings learnt in the encoder !!!
return self.encoder.approx_embedder(x)
def output_softmax(self, pre_activ):
# returns a (timestep, bs, idim) matrix (huge)
return SoftMax(T.dot(pre_activ, self.Wd_emb.T) + self.bd_out)
def build_decoder(self, hs, x, xmask=None, y=None, y_neg=None, mode=EVALUATION, prev_hd=None, step_num=None):
# Check parameter consistency
if mode == Decoder.EVALUATION:
assert not prev_hd
assert y
else:
assert not y
assert prev_hd
# if mode == EVALUATION
# xd = (timesteps, batch_size, qdim)
#
# if mode != EVALUATION
# xd = (n_samples, dim)
xd = self.approx_embedder(x)
        if xmask is None:
xmask = T.neq(x, self.eoq_sym)
        # we must zero out the </q> embedding
        # i.e. the embedding x_{-1} is the 0 vector
        # as well as hd_{-1}, which will be reset in the scan functions
if xd.ndim != 3:
assert mode != Decoder.EVALUATION
xd = (xd.dimshuffle((1, 0)) * xmask).dimshuffle((1, 0))
else:
assert mode == Decoder.EVALUATION
xd = (xd.dimshuffle((2,0,1)) * xmask).dimshuffle((1,2,0))
# Run the decoder
if mode == Decoder.EVALUATION:
hd_init = T.alloc(np.float32(0), x.shape[1], self.qdim)
else:
hd_init = prev_hd
if self.query_step_type == "gated":
f_dec = self.gated_step
o_dec_info = [hd_init, None, None, None]
else:
f_dec = self.plain_step
o_dec_info = [hd_init]
# If the mode of the decoder is EVALUATION
# then we evaluate by default all the sentence
# xd - i.e. xd.ndim == 3, xd = (timesteps, batch_size, qdim)
if mode == Decoder.EVALUATION:
_res, _ = theano.scan(f_dec,
sequences=[xd, xmask, hs],\
outputs_info=o_dec_info)
# else we evaluate only one step of the recurrence using the
# previous hidden states and the previous computed hierarchical
# states.
else:
_res = f_dec(xd, xmask, hs, prev_hd)
if isinstance(_res, list) or isinstance(_res, tuple):
hd = _res[0]
else:
hd = _res
pre_activ = self.build_output_layer(hs, xd, hd)
# EVALUATION : Return target_probs + all the predicted ranks
# target_probs.ndim == 3
if mode == Decoder.EVALUATION:
target_probs = GrabProbs(self.output_softmax(pre_activ), y)
return target_probs, hd, _res
# BEAM_SEARCH : Return output (the softmax layer) + the new hidden states
elif mode == Decoder.BEAM_SEARCH:
return self.output_softmax(pre_activ), hd
def gated_step(self, xd_t, m_t, hs_t, hd_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
hd_tm1 = (m_t) * hd_tm1 + (1 - m_t) * self.query_rec_activation(T.dot(hs_t, self.Wd_s_0) + self.bd_s_0)
        # if m_t = 0 (the previous token was </q>), reinitialize hd_{t-1} = tanh(Wd_s_0 hs_t + bd_s_0);
        # otherwise (m_t = 1) hd_{t-1} is left unchanged
# In the 'all' decoder bias type each hidden state of the decoder
# RNN receives the hs_t vector as bias without modification
if self.decoder_bias_type == 'all':
rd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_r) + T.dot(hd_tm1, self.Wd_hh_r) + T.dot(hs_t, self.Wd_s_r) + self.bd_r)
zd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_z) + T.dot(hd_tm1, self.Wd_hh_z) + T.dot(hs_t, self.Wd_s_z) + self.bd_z)
hd_tilde = self.query_rec_activation(T.dot(xd_t, self.Wd_in)
+ T.dot(rd_t * hd_tm1, self.Wd_hh)
+ T.dot(hs_t, self.Wd_s_q)
+ self.bd_hh)
hd_t = (np.float32(1.) - zd_t) * hd_tm1 + zd_t * hd_tilde
output = (hd_t, rd_t, zd_t, hd_tilde)
else:
# Do not bias all the decoder (force to store very useful information in the first state)
rd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_r) + T.dot(hd_tm1, self.Wd_hh_r) + self.bd_r)
zd_t = T.nnet.sigmoid(T.dot(xd_t, self.Wd_in_z) + T.dot(hd_tm1, self.Wd_hh_z) + self.bd_z)
hd_tilde = self.query_rec_activation(T.dot(xd_t, self.Wd_in)
+ T.dot(rd_t * hd_tm1, self.Wd_hh)
+ self.bd_hh)
hd_t = (np.float32(1.) - zd_t) * hd_tm1 + zd_t * hd_tilde
output = (hd_t, rd_t, zd_t, hd_tilde)
return output
def plain_step(self, xd_t, m_t, hs_t, hd_tm1):
if m_t.ndim >= 1:
m_t = m_t.dimshuffle(0, 'x')
# We already assume that xd are zeroed out
hd_tm1 = (m_t) * hd_tm1 + (1 - m_t) * self.query_rec_activation(T.dot(hs_t, self.Wd_s_0) + self.bd_s_0)
        # ^ iff x_{t-1} = </q> (m_t = 0), then x_{t-1} is zeroed out
        # and hd_{t-1} is reinitialized from hs_t; otherwise (m_t = 1) hd_{t-1} is left unchanged
if self.decoder_bias_type == 'first':
# Do not bias all the decoder (force to store very useful information in the first state)
hd_t = self.query_rec_activation( T.dot(xd_t, self.Wd_in)
+ T.dot(hd_tm1, self.Wd_hh)
+ self.bd_hh )
output = (hd_t,)
elif self.decoder_bias_type == 'all':
hd_t = self.query_rec_activation( T.dot(xd_t, self.Wd_in)
+ T.dot(hd_tm1, self.Wd_hh)
+ T.dot(hs_t, self.Wd_s_q)
+ self.bd_hh )
output = (hd_t,)
return output
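
    # Summary of decoder conditioning ('decoder_bias_type'), editor's note:
    #   'first' -- the session vector hs_t only initializes hd_{-1} through Wd_s_0;
    #   'all'   -- hs_t additionally biases every recurrent update (Wd_s_q, Wd_s_r, Wd_s_z)
    #             and, through Wd_s_out, the output layer.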
####
class SessionEncoderDecoder(Model):
def indices_to_words(self, seq, exclude_start_end=False):
"""
        Converts a list of word indices into a list
        of words, stopping at the end-of-session symbol.
        Start/end-of-query symbols are skipped when `exclude_start_end` is True.
"""
def convert():
for word_index in seq:
if word_index > len(self.idx_to_str):
raise ValueError('Word index is too large for the model vocabulary!')
if word_index == self.eos_sym:
break
if not exclude_start_end or (word_index != self.eoq_sym and word_index != self.soq_sym):
yield self.idx_to_str[word_index]
return list(convert())
def words_to_indices(self, seq):
"""
Converts a list of words to a list
of word ids. Use unk_sym if a word is not
known.
"""
return [self.str_to_idx.get(word, self.unk_sym) for word in seq]
def compute_updates(self, training_cost, params):
updates = []
grads = T.grad(training_cost, params)
grads = OrderedDict(zip(params, grads))
        # Gradient clipping: rescale by the global norm when it exceeds the cutoff; if the norm is not finite, fall back to 0.1 * param as the gradient
c = numpy.float32(self.cutoff)
clip_grads = []
norm_gs = T.sqrt(sum(T.sum(g ** 2) for p, g in grads.items()))
normalization = T.switch(T.ge(norm_gs, c), c / norm_gs, np.float32(1.))
notfinite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))
for p, g in grads.items():
clip_grads.append((p, T.switch(notfinite, numpy.float32(.1) * p, g * normalization)))
grads = OrderedDict(clip_grads)
if self.updater == 'adagrad':
updates = Adagrad(grads, self.lr)
elif self.updater == 'sgd':
raise Exception("Sgd not implemented!")
elif self.updater == 'adadelta':
updates = Adadelta(grads)
elif self.updater == 'rmsprop':
updates = RMSProp(grads, self.lr)
elif self.updater == 'adam':
updates = Adam(grads)
else:
raise Exception("Updater not understood!")
return updates
def build_train_function(self):
if not hasattr(self, 'train_fn'):
# Compile functions
logger.debug("Building train function")
self.train_fn = theano.function(
inputs=[self.x_data, self.x_ranks, self.x_max_length, self.x_cost_mask],
outputs=self.training_cost, updates=self.updates, name="train_fn")
return self.train_fn
def build_eval_function(self):
if not hasattr(self, 'eval_fn'):
# Compile functions
logger.debug("Building evaluation function")
self.eval_fn = theano.function(inputs=[self.x_data, self.x_ranks, self.x_max_length, self.x_cost_mask],
outputs=self.training_cost, name="eval_fn")
return self.eval_fn
def build_score_function(self):
if not hasattr(self, 'score_fn'):
self.score_fn = theano.function(
inputs=[self.x_data, self.x_max_length],
outputs=[self.per_example_cost],
name="score_fn")
return self.score_fn
def build_rank_prediction_function(self):
if not hasattr(self, 'rank_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
ranks = self.decoder.build_rank_layer(hs)
self.rank_fn = theano.function(
inputs=[self.x_data],
outputs=[ranks],
name="rank_fn")
return self.rank_fn
def build_get_states_function(self):
if not hasattr(self, 'get_states_fn'):
# Compile functions
logger.debug("Get states of the network")
outputs = [self.h, self.hs, self.hd, self.rs, self.us] + [x for x in self.decoder_states]
self.get_states_fn = theano.function(inputs=[self.x_data, self.x_max_length],
outputs=outputs, name="get_states_fn")
return self.get_states_fn
def build_next_probs_function(self):
if not hasattr(self, 'next_probs_fn'):
outputs, hd = self.decoder.build_next_probs_predictor(
self.beam_hs, self.beam_source, prev_hd=self.beam_hd)
self.next_probs_fn = theano.function(
inputs=[self.beam_hs, self.beam_source, self.beam_hd],
outputs=[outputs, hd],
name="next_probs_fn")
return self.next_probs_fn
def build_first_vector(self):
if not hasattr(self, 'first_vec_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
hd0 = self.decoder.query_rec_activation(T.dot(hs, self.decoder.Wd_s_0) + self.decoder.bd_s_0)
self.first_vec_fn = theano.function(inputs=[self.x_data],
outputs=[h, hs, hd0], name="first_vec_fn")
return self.first_vec_fn
def build_encoder_function(self):
if not hasattr(self, 'encoder_fn'):
(h, hr), hs, _ = self.encoder.build_encoder(self.aug_x_data)
self.encoder_fn = theano.function(inputs=[self.x_data],
outputs=[h, hr, hs], name="encoder_fn")
return self.encoder_fn
def __init__(self, state):
Model.__init__(self)
self.state = state
# Compatibility towards older models
self.__dict__.update(state)
self.rng = numpy.random.RandomState(state['seed'])
# Load dictionary
        raw_dict = cPickle.load(open(self.dictionary, 'rb'))
# Probabilities for each term in the corpus
self.noise_probs = [x[2] for x in sorted(raw_dict, key=operator.itemgetter(1))]
self.noise_probs = numpy.array(self.noise_probs, dtype='float64')
self.noise_probs /= numpy.sum(self.noise_probs)
self.noise_probs = self.noise_probs ** 0.75
self.noise_probs /= numpy.sum(self.noise_probs)
self.t_noise_probs = theano.shared(self.noise_probs.astype('float32'), 't_noise_probs')
# Dictionaries to convert str to idx and vice-versa
self.str_to_idx = dict([(tok, tok_id) for tok, tok_id, _ in raw_dict])
self.idx_to_str = dict([(tok_id, tok) for tok, tok_id, freq in raw_dict])
if '</q>' not in self.str_to_idx \
or '</s>' not in self.str_to_idx:
raise Exception("Error, malformed dictionary!")
# Number of words in the dictionary
self.idim = len(self.str_to_idx)
self.state['idim'] = self.idim
logger.debug("Initializing encoder")
self.encoder = Encoder(self.state, self.rng, self)
logger.debug("Initializing decoder")
self.decoder = Decoder(self.state, self.rng, self, self.encoder)
# Init params
self.params = self.encoder.params + self.decoder.params
assert len(set(self.params)) == (len(self.encoder.params) + len(self.decoder.params))
self.y_neg = T.itensor3('y_neg')
self.x_data = T.imatrix('x_data')
self.x_ranks = T.imatrix('x_ranks')
self.x_cost_mask = T.matrix('cost_mask')
self.x_max_length = T.iscalar('x_max_length')
        # The training is done with a trick: we append a special </q> at the beginning of the dialog
        # so that we can also predict the first sentence in the dialog, starting from the dialog-beginning token (</q>).
self.aug_x_data = T.concatenate([T.alloc(np.int32(self.eoq_sym), 1, self.x_data.shape[1]), self.x_data])
training_x = self.aug_x_data[:self.x_max_length]
training_y = self.aug_x_data[1:self.x_max_length+1]
training_ranks = self.x_ranks[:self.x_max_length-1].flatten()
training_ranks_mask = T.neq(training_ranks, 0).flatten()
# Here we find the end-of-sentence tokens in the minibatch.
training_hs_mask = T.neq(training_x, self.eoq_sym)
training_x_cost_mask = self.x_cost_mask[:self.x_max_length].flatten()
# Backward compatibility
if 'decoder_bias_type' in self.state:
logger.debug("Decoder bias type {}".format(self.decoder_bias_type))
logger.info("Build encoder")
(self.h, _), self.hs, (self.rs, self.us) = \
self.encoder.build_encoder(training_x, xmask=training_hs_mask)
logger.info("Build decoder (EVAL)")
target_probs, self.hd, self.decoder_states = \
self.decoder.build_decoder(self.hs, training_x, xmask=training_hs_mask, \
y=training_y, mode=Decoder.EVALUATION)
logger.info("Build rank predictor")
self.predicted_ranks = self.decoder.build_rank_layer(self.hs)
# Prediction cost and rank cost
self.per_example_cost = -T.log2(target_probs).reshape((self.x_max_length, self.x_data.shape[1]))
self.rank_cost = T.sum(((self.predicted_ranks[1:].flatten() - training_ranks) ** 2) * (training_ranks_mask)) / T.sum(training_ranks_mask)
self.training_cost = T.sum(-T.log2(target_probs) * training_x_cost_mask) + np.float32(self.lambda_rank) * self.rank_cost
self.updates = self.compute_updates(self.training_cost / training_x.shape[1], self.params)
# Beam-search variables
self.beam_source = T.lvector("beam_source")
self.beam_hs = T.matrix("beam_hs")
self.beam_step_num = T.lscalar("beam_step_num")
self.beam_hd = T.matrix("beam_hd")
|
VMD 3D Pose Baseline Multi-Objects/packages/lifting/utils/__init__.py | kyapp69/OpenMMD | 717 | 11198100
# -*- coding: utf-8 -*-
"""
Created on Mar 23 13:57 2017
@author: <NAME>'
"""
from .prob_model import *
from .draw import *
from .cpm import *
from .process import *
from . import config
from . import upright_fast
|
rlpytorch/runner/single_process.py | douglasrizzo/ELF | 2,230 | 11198120
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from ..args_provider import ArgsProvider
import threading
import tqdm
class SingleProcessRun:
def __init__(self):
''' Initialization for SingleProcessRun. Accepted arguments:
``num_minibatch``,
``num_episode``,
``tqdm``
'''
self.args = ArgsProvider(
call_from = self,
define_args = [
("num_minibatch", 5000),
("num_episode", 10000),
("tqdm", dict(action="store_true")),
]
)
def setup(self, GC, episode_start=None, episode_summary=None):
''' Setup for SingleProcessRun.
Args:
GC(`GameContext`): Game Context
episode_start(func): operations to perform before each episode
            episode_summary(func): operations to summarize after each episode
'''
self.GC = GC
self.episode_summary = episode_summary
self.episode_start = episode_start
def run(self):
        ''' Main training loop. Initialize the Game Context and loop over the required number of episodes.
        Call episode_start and episode_summary before and after each episode if necessary.
        Visualize with a progress bar if ``tqdm`` is set.
        Print training stats after each episode.
        Finally, print the game context summary and stop it.
'''
self.GC.Start()
args = self.args
for k in range(args.num_episode):
if self.episode_start is not None:
self.episode_start(k)
if args.tqdm: iterator = tqdm.trange(args.num_minibatch, ncols=50)
else: iterator = range(args.num_minibatch)
for i in iterator:
self.GC.Run()
if self.episode_summary is not None:
self.episode_summary(k)
self.GC.PrintSummary()
self.GC.Stop()
def run_multithread(self):
''' Start training in a multithreaded environment '''
def train_thread():
args = self.args
for i in range(args.num_episode):
for k in range(args.num_minibatch):
if self.episode_start is not None:
self.episode_start(k)
if k % 500 == 0:
print("Receive minibatch %d/%d" % (k, args.num_minibatch))
self.GC.RunGroup("train")
# Print something.
self.episode_summary(i)
def actor_thread():
while True:
self.GC.RunGroup("actor")
self.GC.Start()
# Start the two threads.
train_th = threading.Thread(target=train_thread)
actor_th = threading.Thread(target=actor_thread)
train_th.start()
actor_th.start()
train_th.join()
actor_th.join()
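

# Illustrative usage (editor's sketch, not part of the original module): `GC` must be a
# game-context object exposing Start/Run/RunGroup/PrintSummary/Stop as used above.
#
#   runner = SingleProcessRun()
#   runner.setup(GC, episode_summary=lambda k: print("episode", k, "done"))
#   runner.run()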
|
Lib/test/test_compiler/test_static/patch.py | vdavalon01/cinder | 1,886 | 11198143 | import asyncio
import re
from compiler.pycodegen import PythonCodeGenerator
from unittest.mock import Mock, patch
from .common import StaticTestBase
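

# Editor's note: the tests below exercise runtime patching (unittest.mock.patch,
# attribute assignment and deletion) against Static Python's INVOKE_FUNCTION /
# INVOKE_METHOD call sites, checking both that patched callables are picked up
# and that declared return types are still enforced at the call boundary.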
class StaticPatchTests(StaticTestBase):
def test_patch_function(self):
codestr = """
def f():
return 42
def g():
return f()
"""
with self.in_module(codestr) as mod:
g = mod.g
for i in range(100):
g()
with patch(f"{mod.__name__}.f", autospec=True, return_value=100) as p:
self.assertEqual(g(), 100)
def test_patch_async_function(self):
codestr = """
class C:
async def f(self) -> int:
return 42
def g(self):
return self.f()
"""
with self.in_module(codestr) as mod:
C = mod.C
c = C()
for i in range(100):
try:
c.g().send(None)
except StopIteration as e:
self.assertEqual(e.args[0], 42)
with patch(f"{mod.__name__}.C.f", autospec=True, return_value=100) as p:
try:
c.g().send(None)
except StopIteration as e:
self.assertEqual(e.args[0], 100)
def test_patch_async_method_incorrect_type(self):
codestr = """
class C:
async def f(self) -> int:
return 42
def g(self):
return self.f()
"""
with self.in_module(codestr) as mod:
C = mod.C
c = C()
for i in range(100):
try:
c.g().send(None)
except StopIteration as e:
self.assertEqual(e.args[0], 42)
with patch(f"{mod.__name__}.C.f", autospec=True, return_value="not an int"):
with self.assertRaises(TypeError):
c.g().send(None)
def test_patch_async_method_raising(self):
codestr = """
class C:
async def f(self) -> int:
return 42
def g(self):
return self.f()
"""
def raise_error(self):
raise IndexError("failure!")
with self.in_module(codestr) as mod:
C = mod.C
c = C()
for i in range(100):
try:
c.g().send(None)
except StopIteration as e:
self.assertEqual(e.args[0], 42)
with patch(f"{mod.__name__}.C.f", raise_error):
with self.assertRaises(IndexError):
c.g().send(None)
def test_patch_async_method_non_coroutine(self):
codestr = """
class C:
async def f(self) -> int:
return 42
def g(self):
return self.f()
"""
loop = asyncio.new_event_loop()
def future_return(self):
fut = loop.create_future()
fut.set_result(100)
return fut
with self.in_module(codestr) as mod:
C = mod.C
c = C()
for i in range(100):
try:
c.g().send(None)
except StopIteration as e:
self.assertEqual(e.args[0], 42)
with patch(f"{mod.__name__}.C.f", future_return):
asyncio.run(c.g())
loop.close()
def test_patch_parentclass_slot(self):
codestr = """
class A:
def f(self) -> int:
return 3
class B(A):
pass
def a_f_invoker() -> int:
return A().f()
def b_f_invoker() -> int:
return B().f()
"""
with self.in_module(codestr) as mod:
A = mod.A
a_f_invoker = mod.a_f_invoker
b_f_invoker = mod.b_f_invoker
setattr(A, "f", lambda _: 7)
self.assertEqual(a_f_invoker(), 7)
self.assertEqual(b_f_invoker(), 7)
def test_self_patching_function(self):
codestr = """
def x(d, d2=1): pass
def removeit(d):
global f
f = x
def f(d):
if d:
removeit(d)
return 42
def g(d):
return f(d)
"""
with self.in_module(codestr) as mod:
g = mod.g
f = mod.f
import weakref
wr = weakref.ref(f, lambda *args: self.assertEqual(i, -1))
del f
for i in range(100):
g(False)
i = -1
self.assertEqual(g(True), 42)
i = 0
self.assertEqual(g(True), None)
def test_patch_function_unwatchable_dict(self):
codestr = """
def f():
return 42
def g():
return f()
"""
with self.in_module(codestr) as mod:
g = mod.g
for i in range(100):
g()
with patch(
f"{mod.__name__}.f",
autospec=True,
return_value=100,
) as p:
mod.__dict__[42] = 1
self.assertEqual(g(), 100)
def test_patch_function_deleted_func(self):
codestr = """
def f():
return 42
def g():
return f()
"""
with self.in_module(codestr) as mod:
g = mod.g
for i in range(100):
g()
del mod.f
with self.assertRaisesRegex(
TypeError,
re.escape(
"bad name provided for class loader, "
+ f"'f' doesn't exist in ('{mod.__name__}', 'f')"
),
):
g()
def test_patch_static_function(self):
codestr = """
class C:
@staticmethod
def f():
return 42
def g():
return C.f()
"""
with self.in_module(codestr) as mod:
g = mod.g
for i in range(100):
self.assertEqual(g(), 42)
with patch(f"{mod.__name__}.C.f", autospec=True, return_value=100) as p:
self.assertEqual(g(), 100)
def test_patch_static_function_non_autospec(self):
codestr = """
class C:
@staticmethod
def f():
return 42
def g():
return C.f()
"""
with self.in_module(codestr) as mod:
g = mod.g
for i in range(100):
g()
with patch(f"{mod.__name__}.C.f", return_value=100) as p:
self.assertEqual(g(), 100)
def test_patch_primitive_ret_type(self):
for type_name, value, patched in [
("cbool", True, False),
("cbool", False, True),
("int8", 0, 1),
("int16", 0, 1),
("int32", 0, 1),
("int64", 0, 1),
("uint8", 0, 1),
("uint16", 0, 1),
("uint32", 0, 1),
("uint64", 0, 1),
]:
            with self.subTest(type_name=type_name, value=value, patched=patched):
codestr = f"""
from __static__ import {type_name}, box
class C:
def f(self) -> {type_name}:
return {value!r}
def g():
return box(C().f())
"""
with self.in_module(codestr) as mod:
g = mod.g
for i in range(100):
self.assertEqual(g(), value)
with patch(f"{mod.__name__}.C.f", return_value=patched) as p:
self.assertEqual(g(), patched)
def test_patch_primitive_ret_type_overflow(self):
codestr = f"""
from __static__ import int8, box
class C:
def f(self) -> int8:
return 1
def g():
return box(C().f())
"""
with self.in_module(codestr) as mod:
g = mod.g
for i in range(100):
self.assertEqual(g(), 1)
with patch(f"{mod.__name__}.C.f", return_value=256) as p:
with self.assertRaisesRegex(
OverflowError,
"unexpected return type from C.f, expected "
"int8, got out-of-range int \\(256\\)",
):
g()
def test_invoke_strict_module_patching(self):
codestr = """
def f():
return 42
def g():
return f()
"""
with self.in_strict_module(codestr, enable_patching=True) as mod:
g = mod.g
for i in range(100):
self.assertEqual(g(), 42)
self.assertInBytecode(g, "INVOKE_FUNCTION", ((mod.__name__, "f"), 0))
mod.patch("f", lambda: 100)
self.assertEqual(g(), 100)
def test_invoke_patch_non_vectorcall(self):
codestr = """
def f():
return 42
def g():
return f()
"""
with self.in_strict_module(codestr, enable_patching=True) as mod:
g = mod.g
self.assertInBytecode(g, "INVOKE_FUNCTION", ((mod.__name__, "f"), 0))
self.assertEqual(g(), 42)
mod.patch("f", Mock(return_value=100))
self.assertEqual(g(), 100)
def test_patch_method(self):
codestr = """
class C:
def f(self):
pass
def g():
return C().f()
"""
with self.in_module(codestr) as mod:
g = mod.g
C = mod.C
orig = C.f
C.f = lambda *args: args
for i in range(100):
v = g()
self.assertEqual(type(v), tuple)
self.assertEqual(type(v[0]), C)
C.f = orig
self.assertEqual(g(), None)
def test_patch_method_ret_none_error(self):
codestr = """
class C:
def f(self) -> None:
pass
def g():
return C().f()
"""
with self.in_module(codestr) as mod:
g = mod.g
C = mod.C
C.f = lambda *args: args
with self.assertRaisesRegex(
TypeError,
"unexpected return type from C.f, expected NoneType, got tuple",
):
v = g()
def test_patch_method_ret_none(self):
codestr = """
class C:
def f(self) -> None:
pass
def g():
return C().f()
"""
with self.in_module(codestr) as mod:
g = mod.g
C = mod.C
C.f = lambda *args: None
self.assertEqual(g(), None)
def test_patch_method_bad_ret(self):
codestr = """
class C:
def f(self) -> int:
return 42
def g():
return C().f()
"""
with self.in_module(codestr) as mod:
g = mod.g
C = mod.C
C.f = lambda *args: "abc"
with self.assertRaisesRegex(
TypeError, "unexpected return type from C.f, expected int, got str"
):
v = g()
def test_vtable_shadow_builtin_subclass_after_init(self):
"""Shadowing methods on subclass of list after vtables are inited."""
class MyList(list):
pass
def myreverse(self):
return 1
codestr = """
def f(l: list):
l.reverse()
return l
"""
f = self.find_code(self.compile(codestr), "f")
self.assertInBytecode(
f, "INVOKE_METHOD", ((("builtins", "list", "reverse"), 0))
)
with self.in_module(codestr) as mod:
# Now cause vtables to be inited
self.assertEqual(mod.f([1, 2]), [2, 1])
# And now patch
MyList.reverse = myreverse
self.assertEqual(MyList().reverse(), 1)
def test_vtable_shadow_builtin_subclass_before_init(self):
"""Shadowing methods on subclass of list before vtables are inited."""
# Create a subclass of list...
class MyList(list):
pass
def myreverse(self):
return 1
# ... and override a slot from list with a non-static func
MyList.reverse = myreverse
codestr = """
def f(l: list):
l.reverse()
return l
"""
f = self.find_code(self.compile(codestr), "f")
self.assertInBytecode(
f, "INVOKE_METHOD", ((("builtins", "list", "reverse"), 0))
)
with self.in_module(codestr) as mod:
# Now cause vtables to be inited
self.assertEqual(mod.f([1, 2]), [2, 1])
# ... and this should not blow up when we remove the override.
del MyList.reverse
self.assertEqual(MyList().reverse(), None)
def test_vtable_shadow_static_subclass(self):
"""Shadowing methods of a static type before its inited should not bypass typechecks."""
# Define a static type and shadow a subtype method before invoking.
codestr = """
class StaticType:
def foo(self) -> int:
return 1
class SubType(StaticType):
pass
def goodfoo(self):
return 2
SubType.foo = goodfoo
def f(x: StaticType) -> int:
return x.foo()
"""
f = self.find_code(self.compile(codestr), "f")
self.assertInBytecode(
f, "INVOKE_METHOD", ((("<module>", "StaticType", "foo"), 0))
)
with self.in_module(codestr) as mod:
SubType = mod.SubType
# Now invoke:
self.assertEqual(mod.f(SubType()), 2)
# And replace the function again, forcing us to find the right slot type:
def badfoo(self):
return "foo"
SubType.foo = badfoo
with self.assertRaisesRegex(TypeError, "expected int, got str"):
mod.f(SubType())
def test_vtable_shadow_static_subclass_nonstatic_patch(self):
"""Shadowing methods of a static type before its inited should not bypass typechecks."""
code1 = """
def nonstaticfoo(self):
return 2
"""
with self.in_module(
code1, code_gen=PythonCodeGenerator, name="nonstatic"
) as mod1:
# Define a static type and shadow a subtype method with a non-static func before invoking.
codestr = """
from nonstatic import nonstaticfoo
class StaticType:
def foo(self) -> int:
return 1
class SubType(StaticType):
pass
SubType.foo = nonstaticfoo
def f(x: StaticType) -> int:
return x.foo()
def badfoo(self):
return "foo"
"""
code = self.compile(codestr)
f = self.find_code(code, "f")
self.assertInBytecode(
f, "INVOKE_METHOD", ((("<module>", "StaticType", "foo"), 0))
)
with self.in_module(codestr) as mod:
SubType = mod.SubType
badfoo = mod.badfoo
# And replace the function again, forcing us to find the right slot type:
SubType.foo = badfoo
with self.assertRaisesRegex(TypeError, "expected int, got str"):
mod.f(SubType())
def test_vtable_shadow_grandparent(self):
codestr = """
class Base:
def foo(self) -> int:
return 1
class Sub(Base):
pass
class Grand(Sub):
pass
def f(x: Base) -> int:
return x.foo()
def grandfoo(self):
return "foo"
"""
f = self.find_code(self.compile(codestr), "f")
self.assertInBytecode(f, "INVOKE_METHOD", ((("<module>", "Base", "foo"), 0)))
with self.in_module(codestr) as mod:
Grand = mod.Grand
grandfoo = mod.grandfoo
f = mod.f
# init vtables
self.assertEqual(f(Grand()), 1)
# patch in an override of the grandparent method
Grand.foo = grandfoo
with self.assertRaisesRegex(TypeError, "expected int, got str"):
f(Grand())
def test_invoke_type_modified(self):
codestr = """
class C:
def f(self):
return 1
def x(c: C):
x = c.f()
x += c.f()
return x
"""
code = self.compile(codestr, modname="foo")
x = self.find_code(code, "x")
self.assertInBytecode(x, "INVOKE_METHOD", (("foo", "C", "f"), 0))
with self.in_module(codestr) as mod:
x, C = mod.x, mod.C
self.assertEqual(x(C()), 2)
C.f = lambda self: 42
self.assertEqual(x(C()), 84)
def test_invoke_type_modified_pre_invoke(self):
codestr = """
class C:
def f(self):
return 1
def x(c: C):
x = c.f()
x += c.f()
return x
"""
code = self.compile(codestr, modname="foo")
x = self.find_code(code, "x")
self.assertInBytecode(x, "INVOKE_METHOD", (("foo", "C", "f"), 0))
with self.in_module(codestr) as mod:
x, C = mod.x, mod.C
C.f = lambda self: 42
self.assertEqual(x(C()), 84)
def test_override_modified_base_class(self):
codestr = """
class B:
def f(self):
return 1
def f(x: B):
return x.f()
"""
with self.in_module(codestr) as mod:
B = mod.B
f = mod.f
B.f = lambda self: 2
class D(B):
def f(self):
return 3
d = D()
self.assertEqual(f(d), 3)
def test_override_remove_base_method(self):
codestr = """
from typing import Optional
class B:
def f(self) -> "B":
return self
class D(B): pass
def f(x: B):
return x.f()
"""
with self.in_module(codestr) as mod:
B = mod.B
D = mod.D
f = mod.f
b = B()
d = D()
self.assertEqual(f(b), b)
self.assertEqual(f(d), d)
del B.f
with self.assertRaises(AttributeError):
f(b)
with self.assertRaises(AttributeError):
f(d)
def test_override_remove_derived_method(self):
codestr = """
from typing import Optional
class B:
def f(self) -> "Optional[B]":
return self
class D(B):
def f(self) -> Optional["B"]:
return None
def f(x: B):
return x.f()
"""
with self.in_module(codestr) as mod:
B = mod.B
D = mod.D
f = mod.f
b = B()
d = D()
self.assertEqual(f(b), b)
self.assertEqual(f(d), None)
del D.f
self.assertEqual(f(b), b)
self.assertEqual(f(d), d)
def test_override_remove_method(self):
codestr = """
from typing import Optional
class B:
def f(self) -> "Optional[B]":
return self
def f(x: B):
return x.f()
"""
with self.in_module(codestr) as mod:
B = mod.B
f = mod.f
b = B()
self.assertEqual(f(b), b)
del B.f
with self.assertRaises(AttributeError):
f(b)
def test_override_remove_method_add_type_check(self):
codestr = """
from typing import Optional
class B:
def f(self) -> "B":
return self
def f(x: B):
return x.f()
"""
with self.in_module(codestr) as mod:
B = mod.B
f = mod.f
b = B()
self.assertEqual(f(b), b)
del B.f
with self.assertRaises(AttributeError):
f(b)
B.f = lambda self: None
with self.assertRaises(TypeError):
f(b)
def test_override_update_derived(self):
codestr = """
from typing import Optional
class B:
def f(self) -> "Optional[B]":
return self
class D(B):
pass
def f(x: B):
return x.f()
"""
with self.in_module(codestr) as mod:
B = mod.B
D = mod.D
f = mod.f
b = B()
d = D()
self.assertEqual(f(b), b)
self.assertEqual(f(d), d)
B.f = lambda self: None
self.assertEqual(f(b), None)
self.assertEqual(f(d), None)
def test_override_update_derived_2(self):
codestr = """
from typing import Optional
class B:
def f(self) -> "Optional[B]":
return self
class D1(B): pass
class D(D1):
pass
def f(x: B):
return x.f()
"""
with self.in_module(codestr) as mod:
B = mod.B
D = mod.D
f = mod.f
b = B()
d = D()
self.assertEqual(f(b), b)
self.assertEqual(f(d), d)
B.f = lambda self: None
self.assertEqual(f(b), None)
self.assertEqual(f(d), None)
def test_patch_final_bad_ret_heap_type(self):
codestr = """
from typing import final
class A:
def __init__(self):
self.x: int = 42
class B:
def __init__(self):
self.y = 'abc'
@final
class C:
def f(self) -> A:
return A()
def g(self) -> int:
return self.f().x
"""
with self.in_module(codestr) as mod:
C = mod.C
B = mod.B
c = C()
C.f = lambda self: B()
with self.assertRaisesRegex(
TypeError, "unexpected return type from C.f, expected A, got B"
):
c.g()
def test_patch_final_bad_ret(self):
codestr = """
from typing import final
@final
class C:
def f(self) -> int:
return 42
def g(self) -> int:
return self.f()
"""
with self.in_module(codestr) as mod:
C = mod.C
c = C()
C.f = lambda self: "abc"
with self.assertRaisesRegex(
TypeError, "unexpected return type from C.f, expected int, got str"
):
c.g()
C.f = lambda self: 1.0
with self.assertRaisesRegex(
TypeError, "unexpected return type from C.f, expected int, got float"
):
c.g()
def test_patch_final_bad_ret_del(self):
codestr = """
from typing import final
@final
class C:
def f(self) -> int:
return 42
def g(self) -> int:
return self.f()
"""
with self.in_module(codestr) as mod:
C = mod.C
c = C()
C.f = lambda self: "abc"
for i in range(100):
with self.assertRaisesRegex(
TypeError, "unexpected return type from C.f, expected int, got str"
):
c.g()
del C.f
with self.assertRaisesRegex(TypeError, "C.f has been deleted"):
c.g()
def test_patch_final_async_function(self):
codestr = """
from typing import final
@final
class C:
async def f(self) -> int:
return 42
def g(self):
return self.f()
"""
with self.in_module(codestr) as mod:
C = mod.C
c = C()
for i in range(100):
try:
c.g().send(None)
except StopIteration as e:
self.assertEqual(e.args[0], 42)
with patch(f"{mod.__name__}.C.f", autospec=True, return_value=100) as p:
try:
c.g().send(None)
except StopIteration as e:
self.assertEqual(e.args[0], 100)
def test_patch_final_classmethod(self):
codestr = """
from typing import final
@final
class C:
@classmethod
def f(cls) -> int:
return 42
@classmethod
def g(cls) -> int:
return cls.f()
"""
with self.in_module(codestr) as mod:
C = mod.C
with patch.object(C, "f", wraps=C.f) as p:
self.assertEqual(C.f(), 42)
self.assertInBytecode(C.g, "INVOKE_FUNCTION")
# Ensure that the invoke in g() also hits the patched function.
self.assertEqual(C.g(), 42)
def test_patch_final_async_classmethod(self):
codestr = """
from typing import final
@final
class C:
@classmethod
async def f(cls) -> int:
return 44
@classmethod
async def g(cls) -> int:
return await cls.f()
"""
with self.in_module(codestr) as mod:
C = mod.C
with patch.object(C, "f", wraps=C.f) as p:
self.assertEqual(asyncio.run(C.f()), 44)
self.assertInBytecode(C.g, "INVOKE_FUNCTION")
# Ensure that the invoke in g() also hits the patched function.
self.assertEqual(asyncio.run(C.g()), 44)
def test_patch_classmethod(self):
codestr = """
class C:
@classmethod
def f(cls) -> int:
return 42
@classmethod
def g(cls) -> int:
return cls.f()
"""
with self.in_module(codestr) as mod:
C = mod.C
with patch.object(C, "f", wraps=C.f) as p:
self.assertEqual(C.f(), 42)
self.assertInBytecode(C.g, "INVOKE_METHOD")
# Ensure that the invoke in g() also hits the patched function.
self.assertEqual(C.g(), 42)
def test_patch_async_classmethod(self):
codestr = """
class C:
@classmethod
async def f(cls) -> int:
return 44
@classmethod
async def g(cls) -> int:
return await cls.f()
"""
with self.in_module(codestr) as mod:
C = mod.C
with patch.object(C, "f", wraps=C.f) as p:
self.assertEqual(asyncio.run(C.f()), 44)
self.assertInBytecode(C.g, "INVOKE_METHOD")
# Ensure that the invoke in g() also hits the patched function.
self.assertEqual(asyncio.run(C.g()), 44)
def test_patch_final_async_method_incorrect_type(self):
codestr = """
from typing import final
@final
class C:
async def f(self) -> int:
return 42
def g(self):
return self.f()
"""
with self.in_module(codestr) as mod:
C = mod.C
c = C()
for i in range(100):
try:
c.g().send(None)
except StopIteration as e:
self.assertEqual(e.args[0], 42)
with patch(f"{mod.__name__}.C.f", autospec=True, return_value="not an int"):
with self.assertRaises(TypeError):
c.g().send(None)
def test_patch_property_bad_ret(self):
codestr = """
class C:
@property
def f(self) -> int:
return 42
def g(self) -> int:
return self.f
"""
with self.in_module(codestr) as mod:
C = mod.C
c = C()
C.f = property(lambda self: "abc")
with self.assertRaisesRegex(
TypeError, "unexpected return type from C.f, expected int, got str"
):
c.g()
def test_patch_property_bad_ret_final(self):
codestr = """
from typing import final
@final
class C:
@property
def f(self) -> int:
return 42
def g(self) -> int:
return self.f
"""
with self.in_module(codestr) as mod:
C = mod.C
c = C()
C.f = property(lambda self: "abc")
with self.assertRaisesRegex(
TypeError, "unexpected return type from C.f, expected int, got str"
):
c.g()
def test_primitive_boxing_with_patching_leaves_original_values_intact(self):
codestr = """
from __static__ import int64
def takes_int64(x: int64) -> None:
pass
def foo(b: bool) -> int64:
x: int64 = 42
if b:
takes_int64(x)
x = 43 # Ensure that we don't hit the assert of x having type Long|CInt64
return x
"""
with self.in_strict_module(codestr, enable_patching=True) as mod:
f = mod.foo
self.assertEqual(f(True), 43)
def test_no_inline_with_patching(self):
codestr = """
from __static__ import int64, cbool, inline
@inline
def x(i: int64) -> cbool:
return i == 1
def foo(i: int64) -> cbool:
return x(i)
"""
with self.in_module(codestr, optimize=2, enable_patching=True) as mod:
foo = mod.foo
self.assertEqual(foo(0), False)
self.assertEqual(foo(1), True)
self.assertEqual(foo(2), False)
self.assertNotInBytecode(foo, "STORE_LOCAL")
self.assertInBytecode(foo, "INVOKE_FUNCTION")
def test_patch_namespace_local(self):
acode = """
def f() -> int:
return 1
"""
bcode = """
from a import f
def g():
return f() * 10
"""
comp = self.compiler(a=acode, b=bcode)
with comp.in_module("b") as bmod:
self.assertEqual(bmod.g(), 10)
bmod.f = lambda: 2
self.assertEqual(bmod.g(), 20)
def test_patch_namespace_re_export(self):
acode = """
def f() -> int:
return 1
"""
bcode = """
from a import f
"""
ccode = """
import b
def g():
return b.f() * 10
"""
comp = self.compiler(a=acode, b=bcode, c=ccode)
with comp.in_module("c") as cmod:
self.assertEqual(cmod.g(), 10)
cmod.b.f = lambda: 2
self.assertEqual(cmod.g(), 20)
def test_patch_namespace_origin(self):
acode = """
def f() -> int:
return 1
"""
bcode = """
import a
def g():
return a.f() * 10
"""
comp = self.compiler(a=acode, b=bcode)
with comp.in_module("b") as bmod:
self.assertEqual(bmod.g(), 10)
bmod.a.f = lambda: 2
self.assertEqual(bmod.g(), 20)
def test_patch_namespace_locally_reassigned(self):
acode = """
def f() -> int:
return 1
"""
for kind in ["import_as", "assign"]:
with self.subTest(kind=kind):
imp = (
"from a import f as ff"
if kind == "import_as"
else "import a; ff = a.f"
)
bcode = f"""
{imp}
def g():
return ff() * 10
"""
comp = self.compiler(a=acode, b=bcode)
with comp.in_module("b") as bmod:
self.assertEqual(bmod.g(), 10)
bmod.ff = lambda: 2
self.assertEqual(bmod.g(), 20)
def test_double_patch_final_property(self):
codestr = """
from typing import final
@final
class C:
def f(self) -> int:
return self.prop
@property
def prop(self) -> int:
return 1
"""
with self.in_module(codestr) as mod:
c = mod.C()
self.assertEqual(c.f(), 1)
mod.C.prop = property(lambda s: 2)
self.assertEqual(c.f(), 2)
mod.C.prop = property(lambda s: 3)
self.assertEqual(c.f(), 3)
def test_double_patch_inherited_property(self):
codestr = """
class B:
def f(self) -> int:
return self.prop
@property
def prop(self) -> int:
return 1
class C(B):
pass
"""
with self.in_module(codestr) as mod:
c = mod.C()
self.assertEqual(c.f(), 1)
mod.C.prop = property(lambda s: 2)
self.assertEqual(c.f(), 2)
mod.C.prop = property(lambda s: 3)
self.assertEqual(c.f(), 3)
def test_patch_property_custom_patch_before_use(self):
codestr = """
class C:
@property
def prop(self) -> int:
return 1
def f(x: C):
return x.prop
"""
class Desc:
def __get__(self, inst, ctx):
return 42
with self.in_module(codestr) as mod:
mod.C.prop = Desc()
self.assertEqual(mod.f(mod.C()), 42)
def test_patch_property_custom_desc(self):
codestr = """
class C:
@property
def prop(self) -> int:
return 1
def f(x: C):
return x.prop
"""
class Desc:
def __get__(self, inst, ctx):
return 42
with self.in_module(codestr) as mod:
self.assertEqual(mod.f(mod.C()), 1)
mod.C.prop = Desc()
self.assertEqual(mod.f(mod.C()), 42)
def test_patch_property_custom_desc_set(self):
codestr = """
class C:
def __init__(self):
self.value = 0
@property
def prop(self) -> int:
return self.value
@prop.setter
def prop(self, value) -> None:
self.value = value
def f(x: C):
x.prop = 42
"""
called = False
class Desc:
def __get__(self, inst, ctx):
return 42
def __set__(self, inst, value):
nonlocal called
called = True
with self.in_module(codestr) as mod:
c = mod.C()
mod.f(c)
self.assertEqual(c.value, 42)
mod.C.prop = Desc()
mod.f(c)
self.assertEqual(c.value, 42)
self.assertTrue(called)
def test_patch_property_custom_desc_bad_ret(self):
codestr = """
class C:
@property
def prop(self) -> int:
return 1
def f(x: C):
return x.prop
"""
class Desc:
def __get__(self, inst, ctx):
return "abc"
with self.in_module(codestr) as mod:
self.assertEqual(mod.f(mod.C()), 1)
mod.C.prop = Desc()
            self.assertRaisesRegex(
TypeError,
"unexpected return type from C.prop, expected int, got str",
mod.f,
mod.C(),
)
def test_patch_readonly_property_with_settable(self):
codestr = """
class C:
@property
def prop(self) -> int:
return 1
def f(self):
self.prop = 3
"""
with self.in_module(codestr) as mod:
c = mod.C()
self.assertEqual(c.prop, 1)
calls = []
def _set(self, val):
calls.append(val)
mod.C.prop = property(lambda s: 2, _set)
c.f()
self.assertEqual(calls, [3])
def test_patch_settable_property_with_readonly(self):
codestr = """
class C:
def __init__(self, prop: int) -> None:
self._prop = prop
@property
def prop(self) -> int:
return self._prop
@prop.setter
def prop(self, value: int) -> None:
self._prop = value
def f(self, prop: int) -> None:
self.prop = prop
"""
with self.in_module(codestr) as mod:
c = mod.C(2)
self.assertEqual(c.prop, 2)
mod.C.prop = property(lambda s: s._prop * 10)
self.assertEqual(c.prop, 20)
with self.assertRaisesRegex(AttributeError, r"can't set attribute"):
c.f(3)
def test_patch_property_del(self):
codestr = """
class C:
def __init__(self, prop: int) -> None:
self._prop = prop
@property
def prop(self) -> int:
return self._prop
@prop.setter
def prop(self, val: int) -> None:
self._prop = val
def get(self) -> int:
return self.prop
def set(self, val: int) -> None:
self.prop = val
"""
with self.in_module(codestr) as mod:
c = mod.C(1)
self.assertEqual(c.get(), 1)
c.set(2)
self.assertEqual(c.get(), 2)
del mod.C.prop
with self.assertRaisesRegex(
AttributeError, "'C' object has no attribute 'prop'"
):
c.prop
with self.assertRaisesRegex(
AttributeError, "'C' object has no attribute 'prop'"
):
c.prop = 2
with self.assertRaisesRegex(
AttributeError, "'C' object has no attribute 'prop'"
):
c.get()
with self.assertRaisesRegex(
AttributeError, "'C' object has no attribute 'prop'"
):
c.set(3)
def test_patch_method_del(self):
codestr = """
class C:
def f(self) -> int:
return 1
def g(self) -> int:
return self.f()
"""
with self.in_module(codestr) as mod:
c = mod.C()
self.assertEqual(c.g(), 1)
del mod.C.f
with self.assertRaisesRegex(
AttributeError, "'C' object has no attribute 'f'"
):
c.f()
with self.assertRaisesRegex(
AttributeError, "'C' object has no attribute 'f'"
):
c.g()
def test_patch_property_del_on_base(self):
codestr = """
class B:
def __init__(self, prop: int) -> None:
self._prop = prop
@property
def prop(self) -> int:
return self._prop
class C(B):
def get(self) -> int:
return self.prop
"""
with self.in_module(codestr) as mod:
c = mod.C(1)
self.assertEqual(c.get(), 1)
del mod.B.prop
with self.assertRaisesRegex(
AttributeError, "'C' object has no attribute 'prop'"
):
c.prop
with self.assertRaisesRegex(
AttributeError, "'C' object has no attribute 'prop'"
):
c.get()
def test_patch_cached_property_with_descr(self):
codestr = """
from cinder import cached_property
class C:
@cached_property
def x(self) -> int:
return 3
def f(c: C) -> int:
return c.x
"""
with self.in_strict_module(codestr) as mod:
setattr(mod.C, "x", 42)
self.assertEqual(mod.C().x, 42)
self.assertEqual(mod.f(mod.C()), 42)
def test_property_patch_with_bad_type(self):
codestr = """
class C:
@property
def x(self) -> int:
return 3
"""
with self.in_strict_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, "Cannot assign a str, because C.x is expected to be a int"
):
setattr(mod.C, "x", "42")
# ensures that the value was not patched
self.assertEqual(mod.C().x, 3)
def test_property_patch_with_good_type(self):
codestr = """
class C:
@property
def x(self) -> int:
return 3
def f(c: C) -> int:
return c.x
"""
with self.in_strict_module(codestr) as mod:
c = mod.C()
setattr(mod.C, "x", 42)
self.assertEqual(c.x, 42)
self.assertEqual(mod.f(c), 42)
def test_cached_property_patch_with_bad_type(self):
codestr = """
from cinder import cached_property
class C:
@cached_property
def x(self) -> int:
return 3
def f(c: C) -> int:
return c.x
"""
with self.in_strict_module(codestr) as mod:
with self.assertRaisesRegex(
TypeError, "Cannot assign a str, because C.x is expected to be a int"
):
setattr(mod.C, "x", "42")
def test_cached_property_patch_with_good_type(self):
codestr = """
from cinder import cached_property
class C:
@cached_property
def x(self) -> int:
return 3
def f(c: C) -> int:
return c.x
"""
with self.in_strict_module(codestr) as mod:
c = mod.C()
setattr(mod.C, "x", 42)
self.assertEqual(c.x, 42)
self.assertEqual(mod.f(c), 42)
def test_cached_property_patch_with_none(self):
codestr = """
from cinder import cached_property
from typing import Optional
class C:
@cached_property
def x(self) -> Optional[int]:
return 3
def f(c: C) -> Optional[int]:
return c.x
"""
with self.in_strict_module(codestr) as mod:
c = mod.C()
setattr(mod.C, "x", None)
self.assertEqual(c.x, None)
self.assertEqual(mod.f(c), None)
|
lagom/metric/returns.py | zuoxingdong/lagom | 383 | 11198148 | import numpy as np
from lagom.transform import geometric_cumsum
from lagom.utils import numpify
def returns(gamma, rewards):
return geometric_cumsum(gamma, rewards)[0, :].astype(np.float32)
def bootstrapped_returns(gamma, rewards, last_V, reach_terminal):
r"""Return (discounted) accumulated returns with bootstrapping for a
batch of episodic transitions.
Formally, suppose we have all rewards :math:`(r_1, \dots, r_T)`, it computes
.. math::
Q_t = r_t + \gamma r_{t+1} + \dots + \gamma^{T - t} r_T + \gamma^{T - t + 1} V(s_{T+1})
.. note::
The state values for terminal states are masked out as zero !
"""
last_V = numpify(last_V, np.float32).item()
if reach_terminal:
out = geometric_cumsum(gamma, np.append(rewards, 0.0))
else:
out = geometric_cumsum(gamma, np.append(rewards, last_V))
return out[0, :-1].astype(np.float32)
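# --- Editor's sketch (not part of the original lagom module) ---
# Minimal, hedged usage example, assuming `geometric_cumsum` computes the
# right-to-left discounted cumulative sum along the last axis (as its use
# above implies). The expected values are worked out by hand for gamma=0.9.
if __name__ == "__main__":
    rewards = [1.0, 1.0, 1.0]
    # Truncated episode: bootstrap with last_V -> approximately [10., 10., 10.]
    print(bootstrapped_returns(0.9, rewards, last_V=10.0, reach_terminal=False))
    # Episode ended at a terminal state: last_V is masked out -> approximately [2.71, 1.9, 1.]
    print(bootstrapped_returns(0.9, rewards, last_V=10.0, reach_terminal=True))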
|
tensorflow_lite_support/metadata/python/tests/metadata_parser_test.py | khanhlvg/tflite-support | 242 | 11198161 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_lite_support.metadata.metadata_parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from tensorflow_lite_support.metadata.python import metadata_parser
class MetadataParserTest(tf.test.TestCase):
def testVersionWellFormedSemanticVersion(self):
# Validates that the version is well-formed (x.y.z).
self.assertTrue(
re.match('[0-9]+\\.[0-9]+\\.[0-9]+',
metadata_parser.MetadataParser.VERSION))
if __name__ == '__main__':
tf.test.main()
|
reinforcement_learning/rl_stock_trading_coach_customEnv/src/evaluate-coach.py | jerrypeng7773/amazon-sagemaker-examples | 2,610 | 11198170 | import argparse
import os
import rl_coach
from rl_coach.base_parameters import Frameworks, TaskParameters
from rl_coach.core_types import EnvironmentSteps
from sagemaker_rl.coach_launcher import CoachConfigurationList, SageMakerCoachPresetLauncher
def inplace_replace_in_file(filepath, old, new):
with open(filepath, "r") as f:
contents = f.read()
with open(filepath, "w") as f:
contents = contents.replace(old, new)
f.write(contents)
class MyLauncher(SageMakerCoachPresetLauncher):
def default_preset_name(self):
"""This points to a .py file that configures everything about the RL job.
It can be overridden at runtime by specifying the RLCOACH_PRESET hyperparameter.
"""
return "preset-stock-trading-ddqn"
def start_single_threaded(self, task_parameters, graph_manager, args):
"""Override to use custom evaluate_steps, instead of infinite steps. Just evaluate."""
graph_manager.agent_params.visualization.dump_csv = (
False # issues with CSV export in evaluation only
)
graph_manager.create_graph(task_parameters)
graph_manager.evaluate(EnvironmentSteps(args.evaluate_steps))
graph_manager.close()
def get_config_args(self, parser):
"""Overrides the default CLI parsing.
Sets the configuration parameters for what a SageMaker run should do.
Note, this does not support the "play" mode.
"""
### Parse Arguments
# first, convert the parser to a Namespace object with all default values.
empty_arg_list = []
args, _ = parser.parse_known_args(args=empty_arg_list)
parser = self.sagemaker_argparser()
sage_args, unknown = parser.parse_known_args()
### Set Arguments
args.preset = sage_args.RLCOACH_PRESET
backend = os.getenv("COACH_BACKEND", "tensorflow")
        args.framework = Frameworks[backend]
args.checkpoint_save_dir = None
args.checkpoint_restore_dir = "/opt/ml/input/data/checkpoint"
# Correct TensorFlow checkpoint file (https://github.com/tensorflow/tensorflow/issues/9146)
if backend == "tensorflow":
checkpoint_filepath = os.path.join(args.checkpoint_restore_dir, "checkpoint")
inplace_replace_in_file(checkpoint_filepath, "/opt/ml/output/data/checkpoint", ".")
# Override experiment_path used for outputs (note CSV not stored, see `start_single_threaded`).
args.experiment_path = "/opt/ml/output/intermediate"
rl_coach.logger.experiment_path = "/opt/ml/output/intermediate" # for gifs
args.evaluate = True # not actually used, but must be set (see `evaluate_steps`)
args.evaluate_steps = sage_args.evaluate_steps
args.no_summary = True # so process doesn't hang at end
# must be set
self.hyperparameters = CoachConfigurationList()
return args
def sagemaker_argparser(self):
"""
Expose only the CLI arguments that make sense in the SageMaker context.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-p",
"--RLCOACH_PRESET",
help="(string) Name of the file with the RLCoach preset",
default=self.default_preset_name(),
type=str,
)
parser.add_argument(
"--evaluate_steps",
            help="(int) Number of evaluation steps to take",
default=1000,
type=int,
)
return parser
@classmethod
def evaluate_main(cls):
"""Entrypoint for training.
Parses command-line arguments and starts training.
"""
evaluator = cls()
evaluator.launch()
if __name__ == "__main__":
MyLauncher.evaluate_main()
|
pghoard/monitoring/statsd.py | pellcorp/pghoard | 731 | 11198177 | """
StatsD client
Supports telegraf's statsd protocol extension for 'key=value' tags:
https://github.com/influxdata/telegraf/tree/master/plugins/inputs/statsd
"""
import socket
class StatsClient:
def __init__(self, config):
self._dest_addr = (config.get("host", "127.0.0.1"), config.get("port", 8125))
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._tags = config.get("tags", {})
self._message_format = config.get("format", "telegraf")
def gauge(self, metric, value, tags=None):
self._send(metric, b"g", value, tags)
def increase(self, metric, inc_value=1, tags=None):
self._send(metric, b"c", inc_value, tags)
def timing(self, metric, value, tags=None):
self._send(metric, b"ms", value, tags)
def unexpected_exception(self, ex, where, tags=None):
all_tags = {
"exception": ex.__class__.__name__,
"where": where,
}
all_tags.update(tags or {})
self.increase("pghoard.exception", tags=all_tags)
def _send(self, metric, metric_type, value, tags):
if None in self._dest_addr:
# stats sending is disabled
return
# telegraf format: "user.logins,service=payroll,region=us-west:1|c"
# datadog format: metric.name:value|type|@sample_rate|#tag1:value,tag2
# http://docs.datadoghq.com/guides/dogstatsd/#datagram-format
parts = [metric.encode("utf-8"), b":", str(value).encode("utf-8"), b"|", metric_type]
send_tags = self._tags.copy()
send_tags.update(tags or {})
if self._message_format == "datadog":
for index, (tag, val) in enumerate(send_tags.items()):
if index == 0:
separator = "|#"
else:
separator = ","
if val is None:
pattern = "{}{}"
else:
pattern = "{}{}:{}"
parts.append(pattern.format(separator, tag, val).encode("utf-8"))
else:
for tag, val in send_tags.items():
parts.insert(1, ",{}={}".format(tag, val).encode("utf-8"))
self._socket.sendto(b"".join(parts), self._dest_addr)
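# --- Editor's sketch (not part of the original pghoard module) ---
# Hedged illustration of the two wire formats built by _send() above; the
# datagrams go to 127.0.0.1:8125 by default, so this is safe to run locally.
if __name__ == "__main__":
    telegraf_client = StatsClient({"tags": {"service": "payroll"}})
    # sends b"user.logins,service=payroll:1|c"
    telegraf_client.increase("user.logins")
    datadog_client = StatsClient({"format": "datadog", "tags": {"region": "us-west"}})
    # sends b"user.logins:1|c|#region:us-west"
    datadog_client.increase("user.logins")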
|
instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py | open-telemetry/opentelemetry-python-contrib | 208 | 11198247 | <filename>instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used
on any ASGI framework (such as Django-channels / Quart) to track request
timing through OpenTelemetry.
Usage (Quart)
-------------
.. code-block:: python
from quart import Quart
from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
app = Quart(__name__)
app.asgi_app = OpenTelemetryMiddleware(app.asgi_app)
@app.route("/")
async def hello():
return "Hello!"
if __name__ == "__main__":
app.run(debug=True)
Usage (Django 3.0)
------------------
Modify the application's ``asgi.py`` file as shown below.
.. code-block:: python
import os
from django.core.asgi import get_asgi_application
from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'asgi_example.settings')
application = get_asgi_application()
application = OpenTelemetryMiddleware(application)
Usage (Raw ASGI)
----------------
.. code-block:: python
from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
app = ... # An ASGI application.
app = OpenTelemetryMiddleware(app)
Configuration
-------------
Request/Response hooks
**********************
Utilize request/response hooks to execute custom logic before/after performing a request. The server request hook takes in a server span and ASGI
scope object for every incoming request. The client request hook is called with the internal span and an ASGI scope which is sent as a dictionary for when the method receive is called.
The client response hook is called with the internal span and an ASGI event which is sent as a dictionary for when the method send is called.
.. code-block:: python
def server_request_hook(span: Span, scope: dict):
if span and span.is_recording():
span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
def client_request_hook(span: Span, scope: dict):
if span and span.is_recording():
span.set_attribute("custom_user_attribute_from_client_request_hook", "some-value")
def client_response_hook(span: Span, message: dict):
if span and span.is_recording():
span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
    OpenTelemetryMiddleware(application, server_request_hook=server_request_hook, client_request_hook=client_request_hook, client_response_hook=client_response_hook)
API
---
"""
import typing
import urllib
from functools import wraps
from typing import Tuple
from asgiref.compatibility import guarantee_single_callable
from opentelemetry import context, trace
from opentelemetry.instrumentation.asgi.version import __version__ # noqa
from opentelemetry.instrumentation.utils import http_status_to_status_code
from opentelemetry.propagate import extract
from opentelemetry.propagators.textmap import Getter
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.trace import Span
from opentelemetry.trace.status import Status, StatusCode
from opentelemetry.util.http import remove_url_credentials
_ServerRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]
_ClientRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]
_ClientResponseHookT = typing.Optional[typing.Callable[[Span, dict], None]]
class ASGIGetter(Getter):
def get(
self, carrier: dict, key: str
) -> typing.Optional[typing.List[str]]:
"""Getter implementation to retrieve a HTTP header value from the ASGI
scope.
Args:
carrier: ASGI scope object
key: header name in scope
Returns:
A list with a single string with the header value if it exists,
else None.
"""
headers = carrier.get("headers")
if not headers:
return None
# asgi header keys are in lower case
key = key.lower()
decoded = [
_value.decode("utf8")
for (_key, _value) in headers
if _key.decode("utf8") == key
]
if not decoded:
return None
return decoded
def keys(self, carrier: dict) -> typing.List[str]:
return list(carrier.keys())
asgi_getter = ASGIGetter()
def collect_request_attributes(scope):
"""Collects HTTP request attributes from the ASGI scope and returns a
dictionary to be used as span creation attributes."""
server_host, port, http_url = get_host_port_url_tuple(scope)
query_string = scope.get("query_string")
if query_string and http_url:
if isinstance(query_string, bytes):
query_string = query_string.decode("utf8")
http_url = http_url + ("?" + urllib.parse.unquote(query_string))
result = {
SpanAttributes.HTTP_SCHEME: scope.get("scheme"),
SpanAttributes.HTTP_HOST: server_host,
SpanAttributes.NET_HOST_PORT: port,
SpanAttributes.HTTP_FLAVOR: scope.get("http_version"),
SpanAttributes.HTTP_TARGET: scope.get("path"),
SpanAttributes.HTTP_URL: remove_url_credentials(http_url),
}
http_method = scope.get("method")
if http_method:
result[SpanAttributes.HTTP_METHOD] = http_method
http_host_value_list = asgi_getter.get(scope, "host")
if http_host_value_list:
result[SpanAttributes.HTTP_SERVER_NAME] = ",".join(
http_host_value_list
)
http_user_agent = asgi_getter.get(scope, "user-agent")
if http_user_agent:
result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent[0]
if "client" in scope and scope["client"] is not None:
result[SpanAttributes.NET_PEER_IP] = scope.get("client")[0]
result[SpanAttributes.NET_PEER_PORT] = scope.get("client")[1]
# remove None values
result = {k: v for k, v in result.items() if v is not None}
return result
def get_host_port_url_tuple(scope):
"""Returns (host, port, full_url) tuple."""
server = scope.get("server") or ["0.0.0.0", 80]
port = server[1]
server_host = server[0] + (":" + str(port) if str(port) != "80" else "")
full_path = scope.get("root_path", "") + scope.get("path", "")
http_url = scope.get("scheme", "http") + "://" + server_host + full_path
return server_host, port, http_url
def set_status_code(span, status_code):
"""Adds HTTP response attributes to span using the status_code argument."""
if not span.is_recording():
return
try:
status_code = int(status_code)
except ValueError:
span.set_status(
Status(
StatusCode.ERROR,
"Non-integer HTTP status: " + repr(status_code),
)
)
else:
span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)
span.set_status(
Status(http_status_to_status_code(status_code, server_span=True))
)
def get_default_span_details(scope: dict) -> Tuple[str, dict]:
"""Default implementation for get_default_span_details
Args:
scope: the asgi scope dictionary
Returns:
a tuple of the span name, and any attributes to attach to the span.
"""
span_name = (
scope.get("path", "").strip()
or f"HTTP {scope.get('method', '').strip()}"
)
return span_name, {}
class OpenTelemetryMiddleware:
"""The ASGI application middleware.
This class is an ASGI middleware that starts and annotates spans for any
requests it is invoked with.
Args:
app: The ASGI application callable to forward requests to.
        default_span_details: Callback which should return a tuple of a string and a dict, representing the desired default span name and a
dictionary with any additional span attributes to set.
Optional: Defaults to get_default_span_details.
server_request_hook: Optional callback which is called with the server span and ASGI
scope object for every incoming request.
client_request_hook: Optional callback which is called with the internal span and an ASGI
            scope which is sent as a dictionary for when the method receive is called.
client_response_hook: Optional callback which is called with the internal span and an ASGI
event which is sent as a dictionary for when the method send is called.
tracer_provider: The optional tracer provider to use. If omitted
the current globally configured one is used.
"""
def __init__(
self,
app,
excluded_urls=None,
default_span_details=None,
server_request_hook: _ServerRequestHookT = None,
client_request_hook: _ClientRequestHookT = None,
client_response_hook: _ClientResponseHookT = None,
tracer_provider=None,
):
self.app = guarantee_single_callable(app)
self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)
self.excluded_urls = excluded_urls
self.default_span_details = (
default_span_details or get_default_span_details
)
self.server_request_hook = server_request_hook
self.client_request_hook = client_request_hook
self.client_response_hook = client_response_hook
async def __call__(self, scope, receive, send):
"""The ASGI application
Args:
            scope: An ASGI environment.
receive: An awaitable callable yielding dictionaries
send: An awaitable callable taking a single dictionary as argument.
"""
if scope["type"] not in ("http", "websocket"):
return await self.app(scope, receive, send)
_, _, url = get_host_port_url_tuple(scope)
if self.excluded_urls and self.excluded_urls.url_disabled(url):
return await self.app(scope, receive, send)
token = context.attach(extract(scope, getter=asgi_getter))
span_name, additional_attributes = self.default_span_details(scope)
try:
with self.tracer.start_as_current_span(
span_name, kind=trace.SpanKind.SERVER,
) as span:
if span.is_recording():
attributes = collect_request_attributes(scope)
attributes.update(additional_attributes)
for key, value in attributes.items():
span.set_attribute(key, value)
if callable(self.server_request_hook):
self.server_request_hook(span, scope)
@wraps(receive)
async def wrapped_receive():
with self.tracer.start_as_current_span(
" ".join((span_name, scope["type"], "receive"))
) as receive_span:
if callable(self.client_request_hook):
self.client_request_hook(receive_span, scope)
message = await receive()
if receive_span.is_recording():
if message["type"] == "websocket.receive":
set_status_code(receive_span, 200)
receive_span.set_attribute("type", message["type"])
return message
@wraps(send)
async def wrapped_send(message):
with self.tracer.start_as_current_span(
" ".join((span_name, scope["type"], "send"))
) as send_span:
if callable(self.client_response_hook):
self.client_response_hook(send_span, message)
if send_span.is_recording():
if message["type"] == "http.response.start":
status_code = message["status"]
set_status_code(span, status_code)
set_status_code(send_span, status_code)
elif message["type"] == "websocket.send":
set_status_code(span, 200)
set_status_code(send_span, 200)
send_span.set_attribute("type", message["type"])
await send(message)
await self.app(scope, wrapped_receive, wrapped_send)
finally:
context.detach(token)
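# --- Editor's sketch (not part of the original instrumentation module) ---
# Hedged illustration of the helper functions above on a hand-built ASGI scope;
# the expected values follow directly from the code above and may differ
# slightly across versions of this package.
if __name__ == "__main__":
    example_scope = {
        "type": "http",
        "method": "GET",
        "scheme": "http",
        "server": ("127.0.0.1", 8000),
        "path": "/items",
        "query_string": b"q=1",
        "headers": [(b"host", b"127.0.0.1:8000"), (b"user-agent", b"curl/7.79.1")],
        "client": ("10.0.0.5", 54321),
        "http_version": "1.1",
    }
    # -> ('127.0.0.1:8000', 8000, 'http://127.0.0.1:8000/items')
    print(get_host_port_url_tuple(example_scope))
    # -> span attributes such as http.method, http.url (with '?q=1'),
    #    http.user_agent and net.peer.ip
    print(collect_request_attributes(example_scope))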
|
src/genie/libs/parser/iosxe/show_device_tracking.py | balmasea/genieparser | 204 | 11198253 | <filename>src/genie/libs/parser/iosxe/show_device_tracking.py
import re
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Any, Optional, Or
from genie.libs.parser.utils.common import Common
# ==================================
# Schema for:
# * 'show device-tracking database'
# ==================================
class ShowDeviceTrackingDatabaseSchema(MetaParser):
"""Schema for show device-tracking database."""
schema = {
"binding_table_count": int,
"dynamic_entry_count": int,
"binding_table_limit": int,
"device": {
int: {
"dev_code": str,
"network_layer_address": str,
"link_layer_address": str,
"interface": str,
"vlan_id": int,
"pref_level_code": int,
"age": str,
"state": str,
Optional("time_left"): str,
}
}
}
# ==================================
# Parser for:
# * 'show device-tracking database'
# ==================================
class ShowDeviceTrackingDatabase(ShowDeviceTrackingDatabaseSchema):
"""Parser for show device-tracking database"""
cli_command = 'show device-tracking database'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
device_tracking_database_dict = {}
# Binding Table has 10 entries, 0 dynamic (limit 200000)
# Codes: L - Local, S - Static, ND - Neighbor Discovery, ARP - Address Resolution Protocol, DH4 - IPv4 DHCP, DH6 - IPv6 DHCP, PKT - Other Packet, API - API created
# Preflevel flags (prlvl):
# 0001:MAC and LLA match 0002:Orig trunk 0004:Orig access
# 0008:Orig trusted trunk 0010:Orig trusted access 0020:DHCP assigned
# 0040:Cga authenticated 0080:Cert authenticated 0100:Statically assigned
#
#
# Network Layer Address Link Layer Address Interface vlan prlvl age state Time left
# L 10.22.66.10 7081.05ff.eb40 Vl230 230 0100 10194mn REACHABLE
# L 10.22.28.10 7081.05ff.eb40 Vl238 238 0100 10255mn REACHABLE
# L 10.22.24.10 7081.05ff.eb40 Vl236 236 0100 10330mn REACHABLE
# L 10.22.20.10 7081.05ff.eb40 Vl234 234 0100 10329mn REACHABLE
# L 10.22.16.10 7081.05ff.eb40 Vl232 232 0100 10330mn REACHABLE
# S 10.22.12.10 7081.05ff.eb41 E0/0 228 0100 10330mn REACHABLE N/A
# ND 10.22.8.10 7081.05ff.eb42 E0/1 226 0005 235mn STALE try 0 73072 s
# ND 10.22.4.10 7081.05ff.eb43 E0/2 224 0005 60s REACHABLE 250 s
# ND 10.22.0.10 7081.05ff.eb40 E0/3 222 0005 3mn REACHABLE 83 s try 0
# L 10.10.68.10 7081.05ff.eb40 Vl243 243 0100 10330mn REACHABLE
# Binding Table has 10 entries, 0 dynamic (limit 200000)
binding_table_capture = re.compile(
r"^Binding\s+Table\s+has\s+(?P<binding_table_count>\d+)\s+entries,\s+(?P<dynamic_entry_count>\d+)\s+dynamic\s+\(limit\s+(?P<binding_table_limit>\d+)\)$")
# Codes: L - Local, S - Static, ND - Neighbor Discovery, ARP - Address Resolution Protocol, DH4 - IPv4 DHCP, DH6 - IPv6 DHCP, PKT - Other Packet, API - API created
codes_capture = re.compile(
r"^Codes:\s+L\s+-\s+Local,\s+S\s+-\s+Static,\s+ND\s+-\s+Neighbor\s+Discovery,\s+ARP\s+-\s+Address\s+Resolution\s+Protocol,\s+DH4\s+-\s+IPv4\s+DHCP,\s+DH6\s+-\s+IPv6\s+DHCP,\s+PKT\s+-\s+Other\s+Packet,\s+API\s+-\s+API\s+created$")
# Preflevel flags (prlvl):
pref_level_flag_codes_capture = re.compile(r"^Preflevel\s+flags\s+\(prlvl\):$")
# 0001:MAC and LLA match 0002:Orig trunk 0004:Orig access
pref_level_flags_2_capture = re.compile(
r"^0001:MAC\s+and\s+LLA\s+match\s+0002:Orig\s+trunk\s+0004:Orig\s+access$")
# 0008:Orig trusted trunk 0010:Orig trusted access 0020:DHCP assigned
_capture = re.compile(r"^0008:Orig\s+trusted\s+trunk\s+0010:Orig\s+trusted\s+access\s+0020:DHCP\s+assigned$")
# 0040:Cga authenticated 0080:Cert authenticated 0100:Statically assigned
_capture = re.compile(r"^0040:Cga\s+authenticated\s+0080:Cert\s+authenticated\s+0100:Statically\s+assigned$")
# Network Layer Address Link Layer Address Interface vlan prlvl age state Time left
device_info_header_capture = re.compile(
r"^Network\s+Layer\s+Address\s+Link\s+Layer\s+Address\s+Interface\s+vlan\s+prlvl\s+age\s+state\s+Time\s+left$")
# L 10.22.66.10 7081.05ff.eb40 Vl230 230 0100 10194mn REACHABLE
device_info_capture = re.compile(
r"^(?P<dev_code>\S+)\s+(?P<network_layer_address>\S+)\s+(?P<link_layer_address>\S+)\s+(?P<interface>\S+)\s+(?P<vlan_id>\d+)\s+(?P<pref_level_code>\d+)\s+(?P<age>\S+)\s+(?P<state>\S+)$")
# DH4 10.160.43.197 94d4.69ff.e606 Te8/0/37 1023 0025 116s REACHABLE 191 s try 0(557967 s)
device_info_capture_database = re.compile(
r"^(?P<dev_code>\S+)\s+"
r"(?P<network_layer_address>\S+)\s+(?P<link_layer_address>\S+)\s+"
r"(?P<interface>\S+)\s+(?P<vlan_id>\d+)\s+"
r"(?P<pref_level_code>\d+)\s+(?P<age>\S+)\s+(?P<state>\S+)\s+(?P<time_left>(try\s\d\s\d+\ss)|(N/A)|(\d+.*)|(\d+\ss\stry\d))$")
device_index = 0
for line in out.splitlines():
line = line.strip()
# Binding Table has 10 entries, 0 dynamic (limit 200000)
if binding_table_capture.match(line):
binding_table_capture_match = binding_table_capture.match(line)
groups = binding_table_capture_match.groupdict()
binding_table_count = int(groups['binding_table_count'])
dynamic_entry_count = int(groups['dynamic_entry_count'])
binding_table_limit = int(groups['binding_table_limit'])
device_tracking_database_dict['binding_table_count'] = binding_table_count
device_tracking_database_dict['dynamic_entry_count'] = dynamic_entry_count
device_tracking_database_dict['binding_table_limit'] = binding_table_limit
continue
# Codes: L - Local, S - Static, ND - Neighbor Discovery, ARP - Address Resolution Protocol, DH4 - IPv4 DHCP, DH6 - IPv6 DHCP, PKT - Other Packet, API - API created
elif codes_capture.match(line):
codes_capture_match = codes_capture.match(line)
groups = codes_capture_match.groupdict()
continue
# Preflevel flags (prlvl):
elif pref_level_flag_codes_capture.match(line):
pref_level_flag_codes_capture_match = pref_level_flag_codes_capture.match(line)
groups = pref_level_flag_codes_capture_match.groupdict()
continue
# 0001:MAC and LLA match 0002:Orig trunk 0004:Orig access
elif pref_level_flags_2_capture.match(line):
pref_level_flags_2_capture_match = pref_level_flags_2_capture.match(line)
groups = pref_level_flags_2_capture_match.groupdict()
continue
# 0008:Orig trusted trunk 0010:Orig trusted access 0020:DHCP assigned
elif _capture.match(line):
_capture_match = _capture.match(line)
groups = _capture_match.groupdict()
continue
# 0040:Cga authenticated 0080:Cert authenticated 0100:Statically assigned
elif _capture.match(line):
_capture_match = _capture.match(line)
groups = _capture_match.groupdict()
continue
# Network Layer Address Link Layer Address Interface vlan prlvl age state Time left
elif device_info_header_capture.match(line):
device_info_header_capture_match = device_info_header_capture.match(line)
groups = device_info_header_capture_match.groupdict()
continue
# DH4 10.160.43.197 94d4.69ff.e606 Te8/0/37 1023 0025 116s REACHABLE 191 s try 0(557967 s)
elif device_info_capture_database.match(line):
device_index += 1
device_info_capture_database_match = device_info_capture_database.match(line)
groups = device_info_capture_database_match.groupdict()
dev_code = groups['dev_code']
network_layer_address = groups['network_layer_address']
link_layer_address = groups['link_layer_address']
interface = groups['interface']
vlan_id = int(groups['vlan_id'])
pref_level_code = int(groups['pref_level_code'])
age = groups['age']
state = groups['state']
time_left = groups['time_left']
if not device_tracking_database_dict.get('device', {}):
device_tracking_database_dict['device'] = {}
device_tracking_database_dict['device'][device_index] = {}
device_tracking_database_dict['device'][device_index].update({'dev_code': dev_code})
device_tracking_database_dict['device'][device_index]['network_layer_address'] = network_layer_address
device_tracking_database_dict['device'][device_index]['link_layer_address'] = link_layer_address
device_tracking_database_dict['device'][device_index]['interface'] = interface
device_tracking_database_dict['device'][device_index]['vlan_id'] = vlan_id
device_tracking_database_dict['device'][device_index]['pref_level_code'] = pref_level_code
device_tracking_database_dict['device'][device_index]['age'] = age
device_tracking_database_dict['device'][device_index]['state'] = state
device_tracking_database_dict['device'][device_index]['time_left'] = time_left
continue
# L 10.22.66.10 7081.05ff.eb40 Vl230 230 0100 10194mn REACHABLE
elif device_info_capture.match(line):
device_index = device_index + 1
device_info_capture_match = device_info_capture.match(line)
groups = device_info_capture_match.groupdict()
dev_code = groups['dev_code']
network_layer_address = groups['network_layer_address']
link_layer_address = groups['link_layer_address']
interface = groups['interface']
vlan_id = int(groups['vlan_id'])
pref_level_code = int(groups['pref_level_code'])
age = groups['age']
state = groups['state']
if not device_tracking_database_dict.get('device', {}):
device_tracking_database_dict['device'] = {}
device_tracking_database_dict['device'][device_index] = {}
device_tracking_database_dict['device'][device_index].update({'dev_code': dev_code})
device_tracking_database_dict['device'][device_index]['network_layer_address'] = network_layer_address
device_tracking_database_dict['device'][device_index]['link_layer_address'] = link_layer_address
device_tracking_database_dict['device'][device_index]['interface'] = interface
device_tracking_database_dict['device'][device_index]['vlan_id'] = vlan_id
device_tracking_database_dict['device'][device_index]['pref_level_code'] = pref_level_code
device_tracking_database_dict['device'][device_index]['age'] = age
device_tracking_database_dict['device'][device_index]['state'] = state
continue
return device_tracking_database_dict
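# Editor's note: hedged sketch of the structure this parser returns for the
# sample output shown in its comments above (abridged; not part of the
# original genieparser module):
#
# {
#     'binding_table_count': 10,
#     'dynamic_entry_count': 0,
#     'binding_table_limit': 200000,
#     'device': {
#         1: {'dev_code': 'L',
#             'network_layer_address': '10.22.66.10',
#             'link_layer_address': '7081.05ff.eb40',
#             'interface': 'Vl230',
#             'vlan_id': 230,
#             'pref_level_code': 100,
#             'age': '10194mn',
#             'state': 'REACHABLE'},
#         ...
#     },
# }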
# ======================================
# Schema for:
# * 'show device-tracking database interface {interface}'
# ======================================
class ShowDeviceTrackingDatabaseInterfaceSchema(MetaParser):
"""Schema for show device-tracking database interface {interface}."""
schema = {
"binding_table": {
"dynamic": int,
"entries": int,
Optional("limit"): int
},
"network_layer_address": {
Any(): {
"age": str,
"code": str,
"interface": str,
"link_layer_address": str,
"prlvl": str,
"state": str,
Optional("time_left"): str,
"vlan": int,
}
},
}
# ======================================
# Parser for:
# * show device-tracking database interface {interface}'
# ======================================
class ShowDeviceTrackingDatabaseInterface(ShowDeviceTrackingDatabaseInterfaceSchema):
"""Parser for show device-tracking database interface {interface}"""
cli_command = 'show device-tracking database interface {interface}'
def cli(self, interface, output=None):
if output is None:
cmd = self.cli_command.format(interface=interface)
out = self.device.execute(cmd)
else:
out = output
# Binding Table has 87 entries, 75 dynamic (limit 100000)
# Codes: L - Local, S - Static, ND - Neighbor Discovery, ARP - Address Resolution Protocol, DH4 - IPv4 DHCP, DH6 - IPv6 DHCP, PKT - Other Packet, API - API created
# Preflevel flags (prlvl):
# 0001:MAC and LLA match 0002:Orig trunk 0004:Orig access
# 0008:Orig trusted trunk 0010:Orig trusted access 0020:DHCP assigned
# 0040:Cga authenticated 0080:Cert authenticated 0100:Statically assigned
# Network Layer Address Link Layer Address Interface vlan prlvl age state Time left
# L 10.160.48.1 0000.0cff.94fe Vl1024 1024 0100 42473mn REACHABLE
# DH4 10.160.43.197 94d4.69ff.e606 Te8/0/37 1023 0025 116s REACHABLE 191 s try 0(557967 s)
# DH4 10.160.42.157 0896.adff.899b Gi7/0/11 1023 0025 33s REACHABLE 271 s try 0(447985 s)
# DH4 10.160.42.124 00b1.e3ff.c71d Te2/0/39 1023 0025 30s REACHABLE 272 s try 0(450251 s)
#
        # ...OUTPUT OMITTED...
#
# L 2001:db8:350b:919::1 0000.0cff.94fd Vl1023 1023 0100 42475mn REACHABLE
# L 2001:db8:350b:411::1 0000.0cff.94fc Vl1022 1022 0100 42473mn REACHABLE
# Binding Table has 87 entries, 75 dynamic (limit 100000)
# portDB has 2 entries for interface Gi0/1/1, 2 dynamic
p1 = re.compile(
r"^(.+) +has +(?P<entries>\d+) +entries"
r"( +for +interface +\S+)?"
r", +(?P<dynamic>\d+) +dynamic( +\(limit +(?P<limit>\d+)\))?$"
)
# DH4 10.160.43.197 94d4.69ff.e606 Te8/0/37 1023 0025 116s REACHABLE 191 s try 0(557967 s)
# L 10.160.48.1 0000.0cff.94fe Vl1024 1024 0100 42473mn REACHABLE
# ND FE80::E6C7:22FF:FEFF:8239 e4c7.22ff.8239 Gi1/0/24 1023 0005 34s REACHABLE 268 s
p2 = re.compile(
r"^(?P<code>\S+)\s+(?P<network_layer_address>"
r"\d+\.\d+\.\d+\.\d+|\S+\:\:\S+\:\S+\:\S+\:\S+)"
r"\s+(?P<link_layer_address>\S+\.\S+\.\S+)"
r"\s+(?P<interface>\S+)\s+(?P<vlan>\d+)"
r"\s+(?P<prlvl>\d+)\s+(?P<age>\d+\S+)"
r"\s+(?P<state>\S+)(\s+(?P<time_left>\d+.*))?$"
)
device_info_obj = {}
for line in out.splitlines():
line = line.strip()
# Binding Table has 87 entries, 75 dynamic (limit 100000)
# portDB has 2 entries for interface Gi0/1/1, 2 dynamic
m = p1.match(line)
if m:
group = m.groupdict()
# convert str to int
binding_table_dict = {
k: int(v) for k, v in group.items() if v is not None
}
device_info_obj["binding_table"] = binding_table_dict
continue
# DH4 10.160.43.197 94d4.69ff.e606 Te8/0/37 1023 0025 116s REACHABLE 191 s try 0(557967 s)
m = p2.match(line)
if m:
group = m.groupdict()
network_layer_address = group["network_layer_address"]
# pull a key from dict to use as new_key
network_layer_addresses_dict = device_info_obj.setdefault(
"network_layer_address", {})
network_layer_address_dict = network_layer_addresses_dict.\
setdefault(network_layer_address, {})
network_layer_address_dict.update({
"age": group["age"],
"code": group["code"],
"interface": group["interface"],
"link_layer_address": group["link_layer_address"],
"prlvl": group["prlvl"],
"state": group["state"],
"vlan": int(group["vlan"]),
})
if group["time_left"]:
network_layer_address_dict["time_left"] = \
group["time_left"]
continue
return device_info_obj
# ========================
# Schema for:
# * 'show device-tracking database details'
# ========================
class ShowDeviceTrackingDatabaseDetailsSchema(MetaParser):
'''Schema for:
* 'show device-tracking database details'
'''
schema = {
"binding_table_configuration": {
"max/box": str,
"max/port": str,
"max/vlan": str,
"max/mac": str,
},
"binding_table_count": {
"dynamic": int,
"local": int,
"total": int,
},
"binding_table_state_count": {
Optional("verify"): int,
Optional("reachable"): int,
Optional("stale"): int,
Optional("down"): int,
"total": int,
},
"device": {
int: {
"dev_code": str,
"network_layer_address": str,
"link_layer_address": str,
"interface": str,
"mode": str,
"vlan_id": int,
"pref_level_code": int,
"age": str,
"state": str,
Optional("time_left"): str,
"filter": str,
"in_crimson": str,
"client_id": str,
Optional("policy"): str,
},
},
}
# ========================
# Parser for:
# * 'show device-tracking database details'
# ========================
class ShowDeviceTrackingDatabaseDetails(ShowDeviceTrackingDatabaseDetailsSchema):
'''Parser for:
* 'show device-tracking database details'
'''
cli_command = 'show device-tracking database details'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
device_tracking_database_details_dict = {}
device_index = 0
binding_key = ''
# Binding table configuration:
binding_table_configuration_capture = re.compile(r'^Binding\s+table\s+configuration:$')
# Binding table current counters:
binding_table_counter_capture = re.compile(r'^Binding\s+table\s+current\s+counters:$')
# Binding table counters by state:
binding_table_state_capture = re.compile(r'^Binding\s+table\s+counters\s+by\s+state:$')
# max/box : no limit
# max/vlan : no limit
# max/port : no limit
# max/mac : no limit
# dynamic : 1
# local : 1
# total : 4
# REACHABLE : 1
# STALE : 2
# DOWN : 1
# total : 4
binding_table_info = re.compile(r'^(?P<parameter>(\S+))\s+:\s+(?P<info>(.*))$')
# Network Layer Address Link Layer Address Interface mode vlan(prim) prlvl age state Time left Filter In Crimson Client ID Policy (feature)
device_header_capture = re.compile(r'^Network\s+Layer\s+Address\s+Link\s+Layer\s+Address\s+Interface\s+mode\s+vlan\(prim\)\s+prlvl\s+age\s+state\s+Time\s+left\s+Filter\s+In\s+Crimson\s+Client\s+ID\s+Policy\s+\(feature\)$')
# ND 172.16.58.3 dead.beef.0001(S) Twe1/0/42 access 39 ( 39) 0024 92mn STALE 83192 s no no 0000.0000.0000 test (Device-tracking)
# L 39.39.39.1 5c5a.c791.d69f(R) Vl39 svi 39 ( 39) 0100 11591mn REACHABLE no yes 0000.0000.0000
# S 10.10.10.10 dead.beef.0001(S) Twe1/0/42 access 39 ( 39) 0100 59mn STALE N/A no yes 0000.0000.0000
# S fc00:db20:35b:7399::5 000a.000b.000c(D) Twe1/0/1 trunk 100 ( 100) 0100 30565mn DOWN N/A no yes 0000.0000.0000
device_info_capture = re.compile(r'^(?P<dev_code>(\S+))\s+(?P<network_layer_address>(\S+))'
r'\s+(?P<link_layer_address>(\S+))\s+(?P<interface>(\S+))'
r'\s+(?P<mode>(\S+))\s+(?P<vlan_id>(\d+))\s+\(\s+\d+\)'
r'\s+(?P<pref_level_code>(\d+))\s+(?P<age>(\S+))'
r'\s+(?P<state>(\S+))\s+(?P<time_left>(try\s\d\s\d+\ss)|(N\/A)|(\d+\ss\stry\s\d)|(\d+\ss))?'
r'\s+(?P<filter>(yes|no))\s+(?P<in_crimson>(\S+))'
r'\s+(?P<client_id>(\S+))(\s+(?P<policy>(.*)))?$')
optional_parameters = [
'time_left',
'policy',
]
for line in out.splitlines():
line = line.strip()
if not line:
continue
match = binding_table_configuration_capture.match(line)
if match:
binding_key = "binding_table_configuration"
device_tracking_database_details_dict.setdefault(binding_key, {})
continue
match = binding_table_counter_capture.match(line)
if match:
binding_key = "binding_table_count"
device_tracking_database_details_dict.setdefault(binding_key, {})
continue
match = binding_table_state_capture.match(line)
if match:
binding_key = "binding_table_state_count"
device_tracking_database_details_dict.setdefault(binding_key, {})
continue
match = device_header_capture.match(line)
if match:
device_tracking_database_details_dict.setdefault('device', {})
continue
match = binding_table_info.match(line)
if match:
groups = match.groupdict()
key = groups['parameter'].lower()
value = groups['info']
binding_table_dict = device_tracking_database_details_dict.setdefault(binding_key, {})
if value.isdigit():
binding_table_dict[key] = int(value)
else:
binding_table_dict[key] = value
continue
match = device_info_capture.match(line)
if match:
device_index += 1
groups = match.groupdict()
for parameter in optional_parameters:
if groups[parameter] is None:
groups[parameter] = ''
if not device_tracking_database_details_dict.get('device', {}):
device_tracking_database_details_dict.setdefault('device', {})
device_dict = device_tracking_database_details_dict.setdefault('device', {}) \
.setdefault(device_index, {})
for key, value in groups.items():
if value.isdigit():
device_dict[key] = int(value)
else:
device_dict[key] = value
return device_tracking_database_details_dict
# =========================================
# Schema for:
# * 'show device-tracking policies'
# ==========================================
class ShowDeviceTrackingPoliciesSchema(MetaParser):
"""Schema for show device-tracking policies"""
schema = {
"policies": {
int: {
"target": str,
"policy_type": str,
"policy_name": str,
"feature": str,
"tgt_range": str,
}
}
}
# ======================================
# Parser for:
# * 'show device-tracking policies'
# ======================================
class ShowDeviceTrackingPolicies(ShowDeviceTrackingPoliciesSchema):
""" Parser for show device-tracking policies """
cli_command = ['show device-tracking policies',
'show device-tracking policies interface {interface}',
'show device-tracking policies vlan {vlan}',
]
def cli(self, interface='', vlan='', output=None):
if output is None:
if interface:
cmd = self.cli_command[1].format(interface=interface)
elif vlan:
cmd = self.cli_command[2].format(vlan=vlan)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
device_tracking_policies_dict = {}
policy_index = 0
policy_info_header_capture = re.compile(r'^Target\s+Type\s+Policy\s+Feature\s+Target\s+range$')
policy_info_capture = re.compile(
r"^(?P<target>(\S+)|(vlan\s+\S+))\s+(?P<policy_type>[a-zA-Z]+)\s+"
r"(?P<policy_name>\S+)\s+(?P<feature>(\S+\s?)+)\s+(?P<tgt_range>vlan\s+\S+)$")
lines = out.splitlines()
if len(lines) == 0:
return device_tracking_policies_dict
#Target Type Policy Feature Target range
policy_info_header_capture_match = policy_info_header_capture.match(lines[0].strip())
if policy_info_header_capture_match:
group = policy_info_header_capture_match.groupdict()
else:
return device_tracking_policies_dict
for line in lines[1:]:
line = line.strip()
# vlan 39 VLAN test1 Device-tracking vlan all
policy_info_capture_match = policy_info_capture.match(line)
if policy_info_capture_match:
policy_index += 1
group = policy_info_capture_match.groupdict()
target = group['target']
policy_type = group['policy_type']
policy_name = group['policy_name']
feature = group['feature'].strip()
tgt_range = group['tgt_range']
if not device_tracking_policies_dict.get('policies', {}):
device_tracking_policies_dict['policies'] = {}
device_tracking_policies_dict['policies'][policy_index] = {}
device_tracking_policies_dict['policies'][policy_index]['target'] = target
device_tracking_policies_dict['policies'][policy_index]['policy_type'] = policy_type
device_tracking_policies_dict['policies'][policy_index]['policy_name'] = policy_name
device_tracking_policies_dict['policies'][policy_index]['feature'] = feature
device_tracking_policies_dict['policies'][policy_index]['tgt_range'] = tgt_range
return device_tracking_policies_dict
# ========================
# Schema for:
# * 'show device-tracking policy {policy_name}'
# ========================
class ShowDeviceTrackingPolicySchema(MetaParser):
'''Schema for:
* 'show device-tracking policy {policy_name}'
'''
schema = {
"configuration": {
Optional("trusted_port"): str,
"security_level": str,
"device_role": str,
Optional("destination_glean"): str,
Optional("data_glean"): str,
Optional("prefix_glean"): str,
Any(): {
"is_gleaning": str,
Optional("protecting_prefix_list"): str,
},
Optional("limit_address_count"): {
Optional('ipv4'): int,
Optional('ipv6'): int,
},
Optional("cache_guard"): str,
Optional("origin"): str,
Optional("tracking"): str,
},
"device": {
Optional(int): {
"target": str,
"policy_type": str,
"policy_name": str,
"feature": str,
"tgt_range": str,
},
},
}
# ========================
# Parser for:
# * 'show device-tracking policy {policy_name}'
# ========================
class ShowDeviceTrackingPolicy(ShowDeviceTrackingPolicySchema):
'''Parser for:
* 'show device-tracking policy {policy_name}'
'''
cli_command = 'show device-tracking policy {policy_name}'
def cli(self, policy_name, output=None):
if output is None:
cmd = self.cli_command.format(policy_name=policy_name)
out = self.device.execute(cmd)
else:
out = output
device_tracking_policy_dict = {}
device_index = 0
# Device-tracking policy test configuration:
device_tracking_policy_configuration_header_capture = re.compile(r'^Device-tracking\s+policy\s+\S+\s+configuration:$')
# trusted-port
device_tracking_policy_trusted_port_capture = re.compile(r'^(?P<trusted_port>(trusted-port))$')
# security-level guard
device_tracking_policy_security_level_capture = re.compile(r'^security-level\s+(?P<security_level>(\S+))$')
# device-role node
device_tracking_policy_device_role_capture = re.compile(r'^device-role\s+(?P<device_role>(\S+))$')
# data-glean log-only
device_tracking_policy_data_glean_capture = re.compile(r'^data-glean\s+(?P<data_glean>(\S+))$')
# destination-glean log-only
device_tracking_policy_destination_glean_capture = re.compile(r'^destination-glean\s+(?P<destination_glean>(\S+))$')
# prefix-glean only
device_tracking_policy_prefix_glean_capture = re.compile(r'^prefix-glean\s+(?P<prefix_glean>(\S+))$')
# gleaning from Neighbor Discovery protecting prefix-list qux
# gleaning from DHCP6 protecting prefix-list baz
# gleaning from ARP protecting prefix-list foo
# gleaning from DHCP4 protecting prefix-list bar
# gleaning from protocol unkn protecting prefix-list quux
device_tracking_policy_gleaning_capture = re.compile(
r'^(?P<is_gleaning>((NOT\s+)?gleaning))\s+from\s+(?P<protocol>(\S+\s+\S+|\S+))'
r'(\s+protecting\s+prefix-list\s+(?P<protecting_prefix_list>(\S+)))?$'
)
# limit address-count for IPv4 per mac 5
# limit address-count for IPv6 per mac 1
device_tracking_policy_limit_address_count_capture = re.compile(
r'^limit\s+address-count\s+for\s+(?P<version>(IPv\d))\s+per\s+mac\s+(?P<limit_address_count>(\d+))$')
# cache poisoning guard enabled all
device_tracking_policy_cache_guard_capture = re.compile(
r'^cache\s+poisoning\s+guard\s+enabled\s+(?P<cache_guard>(\S+))$')
# origin fabric
device_tracking_policy_origin_capture = re.compile(r'^origin\s+(?P<origin>(\S+))$')
# tracking disable
device_tracking_policy_tracking_capture = re.compile(r'^tracking\s(\(.*\)\s)?(?P<tracking>(\S+))$')
# Target Type Policy Feature Target range
device_tracking_policy_targets_header_capture = re.compile(r'^Target\s+Type\s+Policy\s+Feature\s+Target\s+range$')
# Twe1/0/42 PORT test Device-tracking vlan all
device_tracking_policy_capture = re.compile(r'^(?P<target>(\S+|vlan\s+\d+))\s+(?P<policy_type>(\S+))'
r'\s+(?P<policy_name>(\S+))\s+(?P<feature>(\S+))'
r'\s+(?P<tgt_range>vlan\s+\S+)$')
capture_list = [
device_tracking_policy_trusted_port_capture,
device_tracking_policy_security_level_capture,
device_tracking_policy_device_role_capture,
device_tracking_policy_data_glean_capture,
device_tracking_policy_destination_glean_capture,
device_tracking_policy_prefix_glean_capture,
device_tracking_policy_gleaning_capture,
device_tracking_policy_limit_address_count_capture,
device_tracking_policy_cache_guard_capture,
device_tracking_policy_origin_capture,
device_tracking_policy_tracking_capture,
device_tracking_policy_capture,
]
for line in out.splitlines():
line = line.strip()
if not line:
continue
match = device_tracking_policy_configuration_header_capture.match(line)
if match:
configuration_dict = device_tracking_policy_dict.setdefault('configuration', {})
continue
match = device_tracking_policy_targets_header_capture.match(line)
if match:
device_dict = device_tracking_policy_dict.setdefault('device', {})
continue
for capture in capture_list:
match = capture.match(line)
if match:
groups = match.groupdict()
if capture == device_tracking_policy_trusted_port_capture:
for key, _ in groups.items():
configuration_dict[key] = 'yes'
elif capture == device_tracking_policy_limit_address_count_capture:
limit_key = 'limit_address_count'
limit_value = groups[limit_key]
version = groups['version'].lower()
limit_dict = configuration_dict.setdefault(limit_key, {})
limit_dict[version] = int(limit_value)
elif capture == device_tracking_policy_gleaning_capture:
protocol = groups['protocol']
if protocol == 'Neighbor Discovery':
protocol = 'nd'
elif protocol == 'protocol unkn':
protocol = 'protocol_unkn'
protocol = protocol.lower()
del groups['protocol']
if groups['protecting_prefix_list'] is None:
del groups['protecting_prefix_list']
gleaning_dict = configuration_dict.setdefault(protocol, {})
for key, value in groups.items():
gleaning_dict[key] = value
elif capture == device_tracking_policy_capture:
device_index += 1
policy_dict = device_dict.setdefault(device_index, {})
for key, value in groups.items():
policy_dict[key] = value
else:
for key, value in groups.items():
configuration_dict[key] = value
return device_tracking_policy_dict
# ========================
# Schema for:
# * 'show ipv6 nd raguard policy {policy_name}'
# ========================
class ShowIpv6RaGuardPolicySchema(MetaParser):
'''Schema for:
* 'show ipv6 nd raguard policy {policy_name}'
'''
schema = {
"configuration": {
"device_role": str,
Optional("max_hop_limit"): int,
Optional("min_hop_limit"): int,
Optional("managed_config_flag"): str,
Optional("other_config_flag"): str,
Optional("max_router_preference"): str,
Optional("match_ra_prefix"): str,
Optional("match_ipv6_access_list"): str,
Optional("trusted_port"): str
},
"device": {
Optional(int): {
"target": str,
"policy_type": str,
"policy_name": str,
"feature": str,
"tgt_range": str,
},
},
}
# ========================
# Parser for:
# * 'show ipv6 nd raguard policy {policy_name}'
# ========================
class ShowIpv6RaGuardPolicy(ShowIpv6RaGuardPolicySchema):
'''Parser for:
* 'show ipv6 nd raguard policy {policy_name}'
'''
cli_command = 'show ipv6 nd raguard policy {policy_name}'
def cli(self, policy_name, output=None):
if output is None:
cmd = self.cli_command.format(policy_name=policy_name)
out = self.device.execute(cmd)
else:
out = output
ipv6_nd_raguard_dict = {}
device_index = 0
# RA guard policy asdf configuration:
ipv6_nd_raguard_configuration_header_capture = re.compile(r'^RA\s+guard\s+policy\s+\S+\s+configuration:$')
# trusted-port
ipv6_nd_ragaurd_trusted_port_capture = re.compile(r'^(?P<trusted_port>(trusted-port))$')
# device-role router
ipv6_nd_ragaurd_device_role_capture = re.compile(r'^device-role\s+(?P<device_role>(\S+))$')
# hop-limit minimum 1
ipv6_nd_ragaurd_min_hop_limit_capture = re.compile(r'^hop-limit\s+minimum\s+(?P<min_hop_limit>(\d+))$')
# hop-limit maximum 3
ipv6_nd_ragaurd_max_hop_limit_capture = re.compile(r'^hop-limit\s+maximum\s+(?P<max_hop_limit>(\d+))$')
# managed-config-flag on
ipv6_nd_ragaurd_managed_config_flag_capture = re.compile(r'^managed-config-flag\s+(?P<managed_config_flag>(\S+))$')
# other-config-flag on
ipv6_nd_ragaurd_other_config_flag_capture = re.compile(r'^other-config-flag\s+(?P<other_config_flag>(\S+))$')
# router-preference maximum high
ipv6_nd_ragaurd_max_router_preference_capture = re.compile(r'^router-preference\s+maximum\s+(?P<max_router_preference>(\S+))$')
# match ra prefix-list bar
ipv6_nd_ragaurd_match_ra_prefix_list_capture = re.compile(r'^match\s+ra\s+prefix-list\s+(?P<match_ra_prefix>(\S+))$')
# match ipv6 access-list foo
ipv6_nd_ragaurd_match_ipv6_access_list_capture = re.compile(r'^match\s+ipv6\s+access-list\s+(?P<match_ipv6_access_list>(\S+))$')
# Target Type Policy Feature Target range
ipv6_nd_raguard_targets_header_capture = re.compile(r'^Target\s+Type\s+Policy\s+Feature\s+Target\s+range$')
# Twe1/0/42 PORT asdf RA guard vlan all
ipv6_nd_raguard_target_capture = re.compile(r'^(?P<target>(\S+|vlan\s+\d+))\s+(?P<policy_type>(\S+))'
r'\s+(?P<policy_name>(\S+))\s+(?P<feature>(\S+|\S+\s\S+))'
r'\s+(?P<tgt_range>vlan\s+\S+)$')
capture_list = [
ipv6_nd_ragaurd_device_role_capture,
ipv6_nd_ragaurd_trusted_port_capture,
ipv6_nd_ragaurd_max_hop_limit_capture,
ipv6_nd_ragaurd_min_hop_limit_capture,
ipv6_nd_ragaurd_managed_config_flag_capture,
ipv6_nd_ragaurd_other_config_flag_capture,
ipv6_nd_ragaurd_max_router_preference_capture,
ipv6_nd_ragaurd_match_ra_prefix_list_capture,
ipv6_nd_ragaurd_match_ipv6_access_list_capture,
ipv6_nd_raguard_target_capture,
]
for line in out.splitlines():
line = line.strip()
if not line:
continue
match = ipv6_nd_raguard_configuration_header_capture.match(line)
if match:
configuration_dict = ipv6_nd_raguard_dict.setdefault('configuration', {})
continue
match = ipv6_nd_raguard_targets_header_capture.match(line)
if match:
ipv6_nd_raguard_dict.setdefault('device', {})
continue
for capture in capture_list:
match = capture.match(line)
if match:
if capture == ipv6_nd_raguard_target_capture:
groups = match.groupdict()
device_index += 1
device_dict = ipv6_nd_raguard_dict.setdefault('device', {}) \
.setdefault(device_index, {})
for key, value in groups.items():
device_dict[key] = value
else:
groups = match.groupdict()
for key, value in groups.items():
if key == 'trusted_port':
configuration_dict[key] = 'yes'
continue
if value.isdigit():
configuration_dict[key] = int(value)
else:
configuration_dict[key] = value
return ipv6_nd_raguard_dict
# ========================
# Schema for:
# * 'show ipv6 source-guard policy {policy_name}'
# ========================
class ShowIpv6SourceGuardPolicySchema(MetaParser):
'''Schema for:
* 'show ipv6 source-guard policy {policy_name}'
'''
schema = {
"configuration": {
"validate_address": str,
Optional("validate_prefix"): str,
Optional("permit"): str,
Optional("trusted"): str,
Optional("deny"): str,
},
"device": {
Optional(int): {
"target": str,
"policy_type": str,
"policy_name": str,
"feature": str,
"tgt_range": str,
},
},
}
# ========================
# Parser for:
# * 'show ipv6 source-guard policy {policy_name}'
# ========================
class ShowIpv6SourceGuardPolicy(ShowIpv6SourceGuardPolicySchema):
'''Parser for:
* 'show ipv6 source-guard policy {policy_name}'
'''
cli_command = 'show ipv6 source-guard policy {policy_name}'
def cli(self, policy_name, output=None):
if output is None:
cmd = self.cli_command.format(policy_name=policy_name)
out = self.device.execute(cmd)
else:
out = output
ipv6_source_guard_dict = {}
device_index = 0
# Source guard policy test1 configuration:
ipv6_source_guard_configuration_header_capture = re.compile(r'^Source\s+guard\s+policy\s+\S+\s+configuration:$')
# trusted
ipv6_source_guard_trusted_capture = re.compile(r'^(?P<trusted>(trusted))$')
# validate prefix
ipv6_source_guard_prefix_capture = re.compile(r'^(?P<validate_prefix>(validate\s+prefix))$')
# validate address
ipv6_source_guard_address_capture = re.compile(r'^(?P<validate_address>((NOT\s)?validate\s+address))$')
# permit link-local
ipv6_source_guard_permit_capture = re.compile(r'^permit\s+(?P<permit>(\S+))$')
# deny global-autoconf
ipv6_source_guard_deny_capture = re.compile(r'^deny\s+(?P<deny>(\S+))$')
# Target Type Policy Feature Target range
ipv6_source_guard_targets_header_capture = re.compile(r'^Target\s+Type\s+Policy\s+Feature\s+Target\s+range$')
# Twe1/0/42 PORT test1 Source guard vlan all
ipv6_source_guard_target_capture = re.compile(r'^(?P<target>(\S+|vlan\s+\d+))\s+(?P<policy_type>(\S+))'
r'\s+(?P<policy_name>(\S+))\s+(?P<feature>(\S+|\S+\s\S+))'
r'\s+(?P<tgt_range>vlan\s+\S+)$')
capture_list = [
ipv6_source_guard_trusted_capture,
ipv6_source_guard_prefix_capture,
ipv6_source_guard_address_capture,
ipv6_source_guard_permit_capture,
ipv6_source_guard_deny_capture,
ipv6_source_guard_target_capture
]
for line in out.splitlines():
line = line.strip()
if not line:
continue
match = ipv6_source_guard_configuration_header_capture.match(line)
if match:
configuration_dict = ipv6_source_guard_dict.setdefault('configuration', {})
continue
match = ipv6_source_guard_targets_header_capture.match(line)
if match:
ipv6_source_guard_dict.setdefault('device', {})
continue
for capture in capture_list:
match = capture.match(line)
if match:
if capture == ipv6_source_guard_target_capture:
groups = match.groupdict()
device_index += 1
device_dict = ipv6_source_guard_dict.setdefault('device', {}) \
.setdefault(device_index, {})
for key, value in groups.items():
device_dict[key] = value
else:
groups = match.groupdict()
for key, value in groups.items():
if capture == ipv6_source_guard_trusted_capture or \
capture == ipv6_source_guard_prefix_capture:
configuration_dict[key] = 'yes'
elif capture == ipv6_source_guard_address_capture:
description = 'yes'
if "NOT" in value:
description = 'no'
configuration_dict[key] = description
else:
configuration_dict[key] = value
return ipv6_source_guard_dict
# ========================
# Schema for:
# * 'show device-tracking counters vlan {vlanid}'
# ========================
class ShowDeviceTrackingCountersVlanSchema(MetaParser):
'''Schema for:
* 'show device-tracking counters vlan {vlanid}'
'''
schema = {
"vlanid": {
int: {
Any(): {
Optional("acd&dad"): int,
Optional(Or("ndp","dhcpv6","arp","dhcpv4","probe_send","probe_reply")): {
Any(): int,
},
Any(): {
"protocol": str,
"message": str,
"dropped": int,
},
},
"faults": list,
},
},
}
# ========================
# Parser for:
# * 'show device-tracking counters vlan {vlanid}'
# ========================
class ShowDeviceTrackingCountersVlan(ShowDeviceTrackingCountersVlanSchema):
'''Parser for:
* 'show device-tracking counters vlan {vlanid}'
'''
cli_command = 'show device-tracking counters vlan {vlanid}'
def cli(self, vlanid, output=None):
if output is None:
cmd = self.cli_command.format(vlanid=vlanid)
out = self.device.execute(cmd)
else:
out = output
device_tracking_counters_vlanid_dict = {}
message_key = ''
# Received messages on vlan 39 :
received_messages_capture = re.compile(r'^Received\s+messages\s+on\s+vlan\s+\d+\s+:$')
# Received Broadcast/Multicast messages on vlan 39 :
received_broadcast_multicast_messages_capture = re.compile(r'^Received\s+Broadcast/Multicast\s+messages\s+on\s+vlan\s+\d+\s+:$')
# Bridged messages from vlan 39 :
bridged_messages_capture = re.compile(r'^Bridged\s+messages\s+from\s+vlan\s+\d+\s+:$')
# Broadcast/Multicast converted to unicast messages from vlan 39 :
broadcast_multicast_to_unicast_messages_capture = re.compile(r'^Broadcast/Multicast\s+converted\s+to\s+unicast\s+messages\s+from\s+vlan\s+\d+\s+:$')
# Probe message on vlan 39 :
probe_message_capture = re.compile(r'^Probe\s+message\s+on\s+vlan\s+\d+\s+:$')
# Limited Broadcast to Local message on vlan 39 :
limited_broadcast_to_local_messages_capture = re.compile(r'^Limited\s+Broadcast\s+to\s+Local\s+message\s+on\s+vlan\s+\d+\s+:$')
# Dropped messages on vlan 39 :
dropped_messages_capture = re.compile(r'^Dropped\s+messages\s+on\s+vlan\s+\d+\s+:$')
# Faults on vlan 39 :
faults_capture = re.compile(r'^Faults\s+on\s+vlan\s+\d+\s+:$')
# Protocol Protocol message
# NDP RS[15543] NS[5181] NA[10]
# DHCPv6
# ARP
# DHCPv4
# ACD&DAD --[5181]
protocol_info = re.compile(r'^(?P<protocol>(Protocol))\s+(?P<message>(.*))$')
ndp_info = re.compile(r'^(?P<protocol>(NDP))\s+(?P<message>(.*))?')
dhcp6_info = re.compile(r'^(?P<protocol>(DHCPv6))\s+(?P<message>(.*))?$')
arp_info = re.compile(r'^(?P<protocol>(ARP))\s+(?P<message>(.*))?$')
dhcp4_info = re.compile(r'^(?P<protocol>(DHCPv4))\s+(?P<message>(.*))?$')
acd_dad_info = re.compile(r'^(?P<protocol>(ACD&DAD))\s+\S+\[(?P<message>(\d+))\]?$')
# PROBE_SEND NS[3128]
# PROBE_REPLY NA[10]
probe_info = re.compile(r'^(?P<protocol>(PROBE_\S+))\s+(?P<message>(.*))?')
# Device-tracking: NDP NS [10]
# Flooding Suppress: NDP NS [36]
dropped_message_info = re.compile(r'^(?P<feature>((?!reason).*)):\s+(?P<protocol>(\S+))'
r'\s+(?P<message>(\S+))\s+\[(?P<dropped>(\d+))\]$')
# DHCPv6_REQUEST_NAK[1]
fault_info = re.compile(r'^(?P<fault>(FAULT_CODE_INVALID|DHCPv\d_\S+_(TIMEOUT|NAK|ERROR))).*$')
capture_list = [
ndp_info,
dhcp6_info,
arp_info,
dhcp4_info,
acd_dad_info,
probe_info,
dropped_message_info,
fault_info,
]
for line in out.splitlines():
line = line.strip()
if not line:
continue
if not device_tracking_counters_vlanid_dict:
message_dict = device_tracking_counters_vlanid_dict.setdefault('vlanid', {}) \
.setdefault(int(vlanid), {})
match = received_messages_capture.match(line)
if match:
message_key = "received"
message_dict.setdefault(message_key, {})
continue
match = received_broadcast_multicast_messages_capture.match(line)
if match:
message_key = "received_broadcast_multicast"
message_dict.setdefault(message_key, {})
continue
match = bridged_messages_capture.match(line)
if match:
message_key = "bridged"
message_dict.setdefault(message_key, {})
continue
match = broadcast_multicast_to_unicast_messages_capture.match(line)
if match:
message_key = "broadcast_multicast_to_unicast"
message_dict.setdefault(message_key, {})
continue
match = probe_message_capture.match(line)
if match:
message_key = "probe"
message_dict.setdefault(message_key, {})
continue
match = limited_broadcast_to_local_messages_capture.match(line)
if match:
message_key = "limited_broadcast_to_local"
message_dict.setdefault(message_key, {})
continue
match = dropped_messages_capture.match(line)
if match:
dropped_dict = message_dict.setdefault('dropped', {})
continue
match = faults_capture.match(line)
if match:
faults_list = message_dict.setdefault('faults', [])
continue
for capture in capture_list:
match = capture.match(line)
if match:
groups = match.groupdict()
if capture == dropped_message_info:
feature = groups['feature']
dropped_dict.setdefault(feature, {})
del groups['feature']
for key, value in groups.items():
if value.isdigit():
dropped_dict[feature][key] = int(value)
else:
dropped_dict[feature][key] = value.lower()
elif capture == fault_info:
message = groups['fault']
faults_list.append(message)
elif capture == acd_dad_info:
protocol = groups['protocol'].lower()
message = groups['message']
packet_dict = message_dict.setdefault(message_key, {})
packet_dict[protocol] = int(message)
else:
protocol = groups['protocol'].lower()
messages = groups['message'].split()
packet_dict = message_dict.setdefault(message_key, {}).setdefault(protocol, {})
packet_capture = re.compile(r'^(?P<packet>(\S+))\[(?P<num>(\d+))\]$')
for message in messages:
packet_match = packet_capture.match(message)
if packet_match:
packet_groups = packet_match.groupdict()
packet = packet_groups['packet']
num = packet_groups['num']
packet_dict[packet] = int(num)
return device_tracking_counters_vlanid_dict
# ==================================
# Schema for:
# * 'show device-tracking database mac'
# ==================================
class ShowDeviceTrackingDatabaseMacSchema(MetaParser):
"""Schema for show device-tracking database mac."""
schema = {
"device": {
int: {
"link_layer_address": str,
"interface": str,
"vlan_id": int,
"pref_level_code": str,
"state": str,
"policy": str,
Optional("time_left"): str,
Optional("input_index"): int,
}
}
}
# ==================================
# Parser for:
# * 'show device-tracking database mac'
# ==================================
class ShowDeviceTrackingDatabaseMac(ShowDeviceTrackingDatabaseMacSchema):
"""Parser for show device-tracking database mac."""
cli_command = 'show device-tracking database mac'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
device_tracking_database_mac_dict = {}
# MAC Interface vlan prlvl state Time left Policy Input_index
# dead.beef.0001 Twe1/0/42 39 NO TRUST MAC-STALE N/A 49
# 5c5a.c791.d69f Vl39 39 TRUSTED MAC-REACHABLE N/A dna_policy 108
# 0050.56b0.babc Twe1/0/42 39 NO TRUST MAC-REACHABLE 41 s test1 49
# 0050.56b0.afed Twe1/0/42 39 NO TRUST MAC-REACHABLE 21 s test1 49
# 000a.000b.000c Twe1/0/1 100 NO TRUST MAC-DOWN N/A 8
# dead.beef.0001 Twe1/0/42 39 NO TRUST MAC-STALE N/A 49
entry_capture = re.compile(
r"^(?P<link_layer_address>\S+)"
r"\s+(?P<interface>\S+)"
r"\s+(?P<vlan_id>\d+)"
r"\s+(?P<pre_level>(\S+\s\S+)|(\S+))"
r"\s+(?P<state>\S+)"
r"\s+((N/A)|(?P<time_left>(\d+\ss)))"
r"\s+(?P<policy>\S+)"
r"(\s+(?P<input_index>\d+))?"
)
device_index = 0
for line in out.splitlines():
line = line.strip()
entry_capture_match = entry_capture.match(line)
if entry_capture_match:
device_index += 1
groups = entry_capture_match.groupdict()
lla = groups['link_layer_address']
interface = groups['interface']
vlan_id = int(groups['vlan_id'])
pre_level = groups['pre_level']
state = groups['state']
policy = groups['policy']
index_dict = device_tracking_database_mac_dict.setdefault('device', {}).setdefault(device_index, {})
index_dict['link_layer_address'] = lla
index_dict['interface'] = interface
index_dict['vlan_id'] = vlan_id
index_dict['pref_level_code'] = pre_level
index_dict['state'] = state
index_dict['policy'] = policy
if groups['time_left']:
time_left = groups['time_left']
index_dict['time_left'] = time_left
if groups['input_index']:
input_index = int(groups['input_index'])
index_dict['input_index'] = input_index
continue
return device_tracking_database_mac_dict
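# Usage sketch (assumption: `device` is an already-connected pyATS/Genie device
# object, matching the self.device.execute() calls above; returned keys follow
# ShowDeviceTrackingDatabaseMacSchema):
#   parsed = ShowDeviceTrackingDatabaseMac(device=device).parse()
#   parsed['device'][1]['link_layer_address']  # e.g. 'dead.beef.0001'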
# ==================================
# Schema for:
# * 'show device-tracking database mac {mac}'
# ==================================
class ShowDeviceTrackingDatabaseMacMacSchema(MetaParser):
"""Schema for show device-tracking database mac {mac}."""
schema = {
"macDB_count": int,
"vlan": int,
"dynamic_count": int,
"entries": {
int: {
"dev_code": str,
"network_layer_address": str,
"link_layer_address": str,
"interface": str,
"vlan_id": int,
"pref_level_code": int,
"age": str,
"state": str,
Optional("time_left"): str,
}
}
}
# ==================================
# Parser for:
# * 'show device-tracking database mac {mac}'
# ==================================
class ShowDeviceTrackingDatabaseMacMac(ShowDeviceTrackingDatabaseMacMacSchema):
"""Parser for show device-tracking database mac {mac}."""
cli_command = 'show device-tracking database mac {mac}'
def cli(self, mac, output=None):
if output is None:
cmd = self.cli_command.format(mac=mac)
out = self.device.execute(cmd)
else:
out = output
device_tracking_database_mac_dict = {}
# Codes: L - Local, S - Static, ND - Neighbor Discovery, ARP - Address Resolution Protocol, DH4 - IPv4 DHCP, DH6 - IPv6 DHCP, PKT - Other Packet, API - API created
# Preflevel flags (prlvl):
# 0001:MAC and LLA match 0002:Orig trunk 0004:Orig access
# 0008:Orig trusted trunk 0010:Orig trusted access 0020:DHCP assigned
# 0040:Cga authenticated 0080:Cert authenticated 0100:Statically assigned
# Network Layer Address Link Layer Address Interface vlan prlvl age state Time left
# macDB has 2 entries for mac dead.beef.0001,vlan 38, 0 dynamic
# S 10.10.10.11 dead.beef.0001 Twe1/0/41 38 0100 4s REACHABLE 308 s
# S 10.10.10.10 dead.beef.0001 Twe1/0/41 38 0100 77s REACHABLE 226 s
# macDB has 2 entries for mac dead.beef.0001,vlan 38, 0 dynamic
table_info_capture = re.compile(
r"^macDB has (?P<entries>\d+) entries for mac \S+,vlan (?P<vlan_id>\d+), (?P<dynamic_count>\d+) dynamic$"
)
# S 10.10.10.11 dead.beef.0001 Twe1/0/41 38 0100 4s REACHABLE 308 s
entry_capture = re.compile(
r"^(?P<code>\S+)\s+"
r"(?P<network_layer_address>\S+)\s+"
r"(?P<link_layer_address>\S+)\s+"
r"(?P<interface>\S+)\s+"
r"(?P<vlan_id>\d+)\s+"
r"(?P<prlvl>\d+)\s+"
r"(?P<age>\S+)\s+"
r"(?P<state>\S+)\s+"
r"((try\s\d\sN/A)|(?P<time_left>\S+\s\S+))$"
)
entry_num = 0
for line in out.splitlines():
line = line.strip()
# macDB has 2 entries for mac dead.beef.0001,vlan 38, 0 dynamic
match = table_info_capture.match(line)
if match:
groups = match.groupdict()
entries = int(groups['entries'])
vlan_id = int(groups['vlan_id'])
dynamic_count = int(groups['dynamic_count'])
device_tracking_database_mac_dict['macDB_count'] = entries
device_tracking_database_mac_dict['vlan'] = vlan_id
device_tracking_database_mac_dict['dynamic_count'] = dynamic_count
continue
# S 10.10.10.11 dead.beef.0001 Twe1/0/41 38 0100 4s REACHABLE 308 s
match = entry_capture.match(line)
if match:
entry_num += 1
groups = match.groupdict()
code = groups['code']
ip = groups['network_layer_address']
lla = groups['link_layer_address']
interface = groups['interface']
vlan = int(groups['vlan_id'])
prlvl = int(groups['prlvl'])
age = groups['age']
state = groups['state']
index_dict = device_tracking_database_mac_dict.setdefault('entries', {}).setdefault(entry_num, {})
index_dict['dev_code'] = code
index_dict['network_layer_address'] = ip
index_dict['link_layer_address'] = lla
index_dict['interface'] = interface
index_dict['vlan_id'] = vlan
index_dict['pref_level_code'] = prlvl
index_dict['age'] = age
index_dict['state'] = state
if groups['time_left']:
time_left = groups['time_left']
index_dict['time_left'] = time_left
continue
return device_tracking_database_mac_dict
# ==================================
# Schema for:
# * 'show device-tracking database mac {mac} details'
# ==================================
class ShowDeviceTrackingDatabaseMacMacDetailsSchema(MetaParser):
"""Schema for show device-tracking database mac {mac} details."""
schema = {
"entry_count": int,
"vlan_id": int,
"dynamic_count": int,
"binding_table_configuration": {
"max/box": str,
"max/port": str,
"max/vlan": str,
"max/mac": str,
},
"binding_table_count": {
"dynamic": int,
"local": int,
"total": int,
},
"binding_table_state_count": {
Optional("verify"): int,
Optional("reachable"): int,
Optional("stale"): int,
Optional("down"): int,
"total": int,
},
"entries": {
int: {
"dev_code": str,
"network_layer_address": str,
"link_layer_address": str,
"interface": str,
"mode": str,
"vlan_id": int,
"pref_level_code": int,
"age": str,
"state": str,
Optional("time_left"): str,
"filter": str,
"in_crimson": str,
"client_id": str,
Optional("policy"): str,
}
}
}
# ==================================
# Parser for:
# * 'show device-tracking database mac {mac} details'
# ==================================
class ShowDeviceTrackingDatabaseMacMacDetails(ShowDeviceTrackingDatabaseMacMacDetailsSchema):
"""Parser for show device-tracking database mac {mac} details."""
cli_command = 'show device-tracking database mac {mac} details'
def cli(self, mac, output=None):
if output is None:
cmd = self.cli_command.format(mac=mac)
out = self.device.execute(cmd)
else:
out = output
device_tracking_database_mac_details_dict = {}
# Binding table configuration:
# ----------------------------
# max/box : no limit
# max/vlan : no limit
# max/port : no limit
# max/mac : no limit
# Binding table current counters:
# ------------------------------
# dynamic : 0
# local : 0
# total : 2
# Binding table counters by state:
# ----------------------------------
# REACHABLE : 2
# total : 2
# Codes: L - Local, S - Static, ND - Neighbor Discovery, ARP - Address Resolution Protocol, DH4 - IPv4 DHCP, DH6 - IPv6 DHCP, PKT - Other Packet, API - API created
# Preflevel flags (prlvl):
# 0001:MAC and LLA match 0002:Orig trunk 0004:Orig access
# 0008:Orig trusted trunk 0010:Orig trusted access 0020:DHCP assigned
# 0040:Cga authenticated 0080:Cert authenticated 0100:Statically assigned
#
#
# Network Layer Address Link Layer Address Interface mode vlan(prim) prlvl age state Time left Filter In Crimson Client ID Policy (feature)
# macDB has 2 entries for mac dead.beef.0001,vlan 38, 0 dynamic
# S 10.10.10.11 dead.beef.0001(R) Twe1/0/41 trunk 38 ( 38) 0100 63s REACHABLE 249 s no yes 0000.0000.0000
# S 10.10.10.10 dead.beef.0001(R) Twe1/0/41 trunk 38 ( 38) 0100 136s REACHABLE 167 s no yes 0000.0000.0000
# Binding table configuration:
binding_table_config_capture = re.compile(r"^Binding table configuration:")
# max/box : no limit
# REACHABLE : 2
# total : 2
table_entry_capture = re.compile(r"^(?P<parameter>\S+)\s+:\s+(?P<info>((\d+)|(no limit)))")
# Binding table current counters:
binding_table_current_counters_capture = re.compile(r"^Binding table current counters:")
# Binding table counters by state:
binding_table_counters_by_state_capture = re.compile(r"^Binding table counters by state:")
# macDB has 2 entries for mac dead.beef.0001,vlan 38, 0 dynamic
table_info_capture = re.compile(
r"^macDB has (?P<entries>\d+) entries for mac \S+,vlan (?P<vlan_id>\d+), (?P<dynamic_count>\d+) dynamic$"
)
# S 10.10.10.11 dead.beef.0001(R) Twe1/0/41 trunk 38 ( 38) 0100 63s REACHABLE 249 s no yes 0000.0000.0000
entry_capture = re.compile(
r"^(?P<dev_code>\S+)"
r"\s+(?P<network_layer_address>(\S\S\.\S\S\.\S\S\.\S\S))"
r"\s+(?P<link_layer_address>\S+)"
r"\s+(?P<interface>\S+)"
r"\s+(?P<mode>\S+)"
r"\s+(?P<vlan>\d+)\s+\(\s+\d+\)"
r"\s+(?P<prlvl>\d+)"
r"\s+(?P<age>\S+)"
r"\s+(?P<state>\S+)"
r"(\s+(?P<time_left>(\S+\ss)))?"
r"\s+(?P<filter>\S+)"
r"\s+(?P<in_crimson>\S+)"
r"\s+(?P<client_id>\S+)"
r"(\s+(?P<policy>(.*)))?$"
)
key = ""
entry_counter = 0
for line in out.splitlines():
line = line.strip()
if not line:
continue
# Binding table configuration:
match = binding_table_config_capture.match(line)
if match:
key = 'binding_table_configuration'
device_tracking_database_mac_details_dict[key] = {}
continue
# Binding table current counters:
match = binding_table_current_counters_capture.match(line)
if match:
key = 'binding_table_count'
device_tracking_database_mac_details_dict[key] = {}
continue
# Binding table counters by state:
match = binding_table_counters_by_state_capture.match(line)
if match:
key = 'binding_table_state_count'
device_tracking_database_mac_details_dict[key] = {}
continue
# REACHABLE : 2
match = table_entry_capture.match(line)
if match:
groups = match.groupdict()
name = groups['parameter'].lower()
value = groups['info']
if key == 'binding_table_state_count' or key == 'binding_table_count':
value = int(value)
device_tracking_database_mac_details_dict[key][name] = value
continue
# macDB has 2 entries for mac dead.beef.0001,vlan 38, 0 dynamic
match = table_info_capture.match(line)
if match:
groups = match.groupdict()
entry_count = int(groups['entries'])
vlan_id = int(groups['vlan_id'])
dynamic_count = int(groups['dynamic_count'])
device_tracking_database_mac_details_dict['entry_count'] = entry_count
device_tracking_database_mac_details_dict['vlan_id'] = vlan_id
device_tracking_database_mac_details_dict['dynamic_count'] = dynamic_count
continue
# S 10.10.10.11 dead.beef.0001(R) Twe1/0/41 trunk 38 ( 38) 0100 63s REACHABLE 249 s no yes 0000.0000.0000
match = entry_capture.match(line)
if match:
groups = match.groupdict()
entry_counter += 1
dev_code = groups['dev_code']
network_layer_address = groups['network_layer_address']
lla = groups['link_layer_address']
interface = groups['interface']
mode = groups['mode']
vlan = int(groups['vlan'])
prlvl = int(groups['prlvl'])
age = groups['age']
state = groups['state']
filter = groups['filter']
in_crimson = groups['in_crimson']
client_id = groups['client_id']
index_dict = device_tracking_database_mac_details_dict.setdefault('entries', {}).setdefault(entry_counter, {})
index_dict['dev_code'] = dev_code
index_dict['network_layer_address'] = network_layer_address
index_dict['link_layer_address'] = lla
index_dict['interface'] = interface
index_dict['mode'] = mode
index_dict['vlan_id'] = vlan
index_dict['pref_level_code'] = prlvl
index_dict['age'] = age
index_dict['state'] = state
index_dict['filter'] = filter
index_dict['in_crimson'] = in_crimson
index_dict['client_id'] = client_id
if groups['time_left']:
time_left = groups['time_left']
index_dict['time_left'] = time_left
if groups['policy']:
policy = groups['policy']
index_dict['policy'] = policy
continue
return device_tracking_database_mac_details_dict
# ========================
# Schema for:
# * 'show device-tracking counters interface {interface}'
# ========================
class ShowDeviceTrackingCountersInterfaceSchema(MetaParser):
'''Schema for:
* 'show device-tracking counters interface {interface}'
'''
schema = {
"interface": {
str: {
"message_type": {
str: {
Optional("protocols"): {
Optional("acd_dad"): int,
Optional(Or("ndp","dhcpv6","arp","dhcpv4","probe_send","probe_reply")): {
Any(): int,
},
},
},
"dropped": {
Optional("feature"): {
Any(): {
"protocol": str,
"message": str,
"dropped": int,
},
},
},
"faults": list,
},
},
},
}
# ========================
# Parser for:
# * 'show device-tracking counters interface {interface}'
# ========================
class ShowDeviceTrackingCountersInterface(ShowDeviceTrackingCountersInterfaceSchema):
'''Parser for:
* 'show device-tracking counters interface {interface}'
'''
cli_command = 'show device-tracking counters interface {interface}'
def cli(self, interface, output=None):
if output is None:
cmd = self.cli_command.format(interface=interface)
out = self.device.execute(cmd)
else:
out = output
device_tracking_counters_interface_dict = {}
message_key = ''
# Received messages on Twe1/0/42:
p1 = re.compile(r'^Received\s+messages\s+on\s+\S+:$')
# Received Broadcast/Multicast messages on Twe1/0/42:
p2 = re.compile(r'^Received\s+Broadcast/Multicast\s+messages\s+on\s+\S+:$')
# Bridged messages from Twe1/0/42:
p3 = re.compile(r'^Bridged\s+messages\s+from\s+\S+:$')
# Broadcast/Multicast converted to unicast messages from Twe1/0/42:
p4 = re.compile(r'^Broadcast/Multicast\s+converted\s+to\s+unicast\s+messages\s+from\s+\S+:$')
# Probe message on Twe1/0/42:
p5 = re.compile(r'^Probe\s+message\s+on\s+\S+:$')
# Limited Broadcast to Local message on Twe1/0/42:
p6 = re.compile(r'^Limited\s+Broadcast\s+to\s+Local\s+message\s+on\s+\S+:$')
# Dropped messages on Twe1/0/42:
p7 = re.compile(r'^Dropped\s+messages\s+on\s+\S+:$')
# Faults on Twe1/0/42:
p8 = re.compile(r'^Faults\s+on\s+\S+:$')
# NDP RS[70160] NS[20760] NA[14]
# DHCPv6
# ARP
# DHCPv4
# PROBE_SEND NS[19935] REQ[3]
# PROBE_REPLY NA[14]
p9 = re.compile(r'^(?P<protocol>(NDP|DHCPv6|ARP|DHCPv4|PROBE_\S+))\s+(?P<message>(.*))?')
# ACD&DAD --[20760]
p10 = re.compile(r'^(?P<protocol>(ACD&DAD))\s+\S+\[(?P<message>(\d+))\]?$')
# Flooding Suppress: NDP NS [35]
p11 = re.compile(r'^(?P<feature>((?!reason).*)):\s+(?P<protocol>(\S+))'
r'\s+(?P<message>(\S+))\s+\[(?P<dropped>(\d+))\]$')
# DHCPv6_REBIND_NAK[3]
p12 = re.compile(r'^(?P<fault>(FAULT_CODE_INVALID|DHCPv\d_\S+_(TIMEOUT|NAK|ERROR))).*$')
for line in out.splitlines():
line = line.strip()
if not line:
continue
if not device_tracking_counters_interface_dict:
intf = Common.convert_intf_name(interface)
message_dict = device_tracking_counters_interface_dict.setdefault('interface', {}) \
.setdefault(intf, {}) \
.setdefault('message_type', {})
m = p1.match(line)
if m:
message_key = "received"
message_dict.setdefault(message_key, {})
continue
m = p2.match(line)
if m:
message_key = "received_broadcast_multicast"
message_dict.setdefault(message_key, {})
continue
m = p3.match(line)
if m:
message_key = "bridged"
message_dict.setdefault(message_key, {})
continue
m = p4.match(line)
if m:
message_key = "broadcast_multicast_to_unicast"
message_dict.setdefault(message_key, {})
continue
m = p5.match(line)
if m:
message_key = "probe"
message_dict.setdefault(message_key, {})
continue
m = p6.match(line)
if m:
message_key = "limited_broadcast_to_local"
message_dict.setdefault(message_key, {})
continue
m = p7.match(line)
if m:
dropped_dict = message_dict.setdefault('dropped', {})
continue
m = p8.match(line)
if m:
faults_list = message_dict.setdefault('faults', [])
continue
m = p9.match(line)
if m:
groups = m.groupdict()
protocol = groups['protocol'].lower()
messages = groups['message'].split()
packet_dict = message_dict.setdefault(message_key, {}).setdefault('protocols', {}) \
.setdefault(protocol, {})
packet_capture = re.compile(r'^(?P<packet>(\S+))\[(?P<num>(\d+))\]$')
for message in messages:
m1 = packet_capture.match(message)
if m1:
packet_groups = m1.groupdict()
packet = packet_groups['packet'].lower()
num = packet_groups['num']
packet_dict[packet] = int(num)
continue
m = p10.match(line)
if m:
groups = m.groupdict()
protocol = groups['protocol'].lower().replace('&', '_')
message = groups['message']
packet_dict = message_dict.setdefault(message_key, {}).setdefault('protocols', {})
packet_dict[protocol] = int(message)
continue
m = p11.match(line)
if m:
groups = m.groupdict()
feature = groups['feature'].replace('&', '_')
feature_dict = dropped_dict.setdefault('feature', {}).setdefault(feature, {})
del groups['feature']
for key, value in groups.items():
if value.isdigit():
feature_dict[key] = int(value)
else:
key = key.lower()
feature_dict[key] = value.lower()
continue
m = p12.match(line)
if m:
groups = m.groupdict()
message = groups['fault']
faults_list.append(message)
continue
return device_tracking_counters_interface_dict
# ====================================================
# Schema for 'show device-tracking events'
# ====================================================
class ShowDeviceTrackingEventsSchema(MetaParser):
""" Schema for show device-tracking events """
schema = {
'ssid': {
int:{
"events": {
int: {
"event_type": str,
Optional('event_name'): str,
Optional('prev_state'): str,
Optional('state'): str,
Optional('fsm_name'): str,
Optional('ipv4'): str,
Optional('static_mac'): str,
Optional('ipv6'): str,
Optional('dynamic_mac'): str,
"ssid": int,
"timestamp": str
}
}
}
}
}
# =============================================
# Parser for 'show device-tracking events'
# =============================================
class ShowDeviceTrackingEvents(ShowDeviceTrackingEventsSchema):
""" show device-tracking events """
cli_command = 'show device-tracking events'
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command)
#fsm_run event
#[Fri Jun 18 22:14:40.000] SSID 0 FSM Feature Table running for event ACTIVE_REGISTER in state CREATING
p1 = re.compile(r'^\[(?P<timestamp>.+)\]\s+SSID\s+(?P<ssid>\d+)\s+FSM\s+(?P<fsm_name>.*)\s+running\s+for\s+event\s+(?P<event_name>(?<=event\s{1})\S+)\s+in\s+state\s+(?P<event_state>.+)$')
#fsm_transition event
#[Fri Jun 18 22:14:40.000] SSID 0 Transition from CREATING to READY upon event ACTIVE_REGISTER
p2 = re.compile(r'^\[(?P<timestamp>.+)\]\s+SSID\s+(?P<ssid>\d+)\s+Transition\s+from\s+(?P<prev_state>.+)\s+to\s+(?P<state>.+)\s+upon\s+event\s+(?P<event_name>.+)$')
#bt_entry event
#[Wed Jun 30 17:03:14.000] SSID 1 Created Entry origin Static MAC 000a.000a.000a IPV4 1.1.1.1
#[Wed Jun 30 17:03:14.000] SSID 1 Entry State changed origin Static MAC 000a.000a.000a IPV4 1.1.1.1
p3 = re.compile(r'^\[(?P<timestamp>.+)\]\s+SSID\s+(?P<ssid>\d+)\s(?P<entry_state>.+origin)\s+(?P<mac_addr_type>.+)\sMAC\s+(?P<mac_addr>([\w\d]{4}\.*){3})\s+(?P<ip_addr_type>\S+)\s+(?P<ip_addr>[\w\d\.:]+)$')
parser_dict = {}
ssid_event_no_dict = {}
for line in output.splitlines():
line = line.strip()
#[Fri Jun 18 22:14:40.000] SSID 0 FSM Feature Table running for event ACTIVE_REGISTER in state CREATING
m1 = p1.match(line)
if m1:
ssids = parser_dict.setdefault('ssid', {})
ssid = int(m1.groupdict()['ssid'])
timestamp = m1.groupdict()['timestamp']
ssid_obj = ssids.setdefault(ssid, {})
events = ssid_obj.setdefault("events", {})
ssid_event_no_dict.setdefault(ssid, 1)
event_no = ssid_event_no_dict[ssid]
event = {
'ssid': ssid,
'event_type': 'fsm_run',
'event_name': m1.groupdict()['event_name'],
'fsm_name': m1.groupdict()['fsm_name'],
'timestamp': timestamp
}
events[event_no] = event
ssid_event_no_dict[ssid]+=1
continue
#[Fri Jun 18 22:14:40.000] SSID 0 Transition from CREATING to READY upon event ACTIVE_REGISTER
m2 = p2.match(line)
if m2:
ssids = parser_dict.setdefault('ssid', {})
ssid = int(m2.groupdict()['ssid'])
timestamp = m2.groupdict()['timestamp']
ssid_obj = ssids.setdefault(ssid, {})
events = ssid_obj.setdefault("events", {})
ssid_event_no_dict.setdefault(ssid, 1)
event_no = ssid_event_no_dict[ssid]
event = {
'ssid': ssid,
'event_type': 'fsm_transition',
'event_name': m2.groupdict()['event_name'],
'state': m2.groupdict()['state'],
'prev_state': m2.groupdict()['prev_state'],
'timestamp': timestamp
}
events[event_no] = event
ssid_event_no_dict[ssid]+=1
continue
#[Wed Jun 30 17:03:14.000] SSID 1 Created Entry origin Static MAC 000a.000a.000a IPV4 1.1.1.1
#[Wed Jun 30 17:03:14.000] SSID 1 Entry State changed origin Static MAC 000a.000a.000a IPV4 1.1.1.1
m3 = p3.match(line)
if m3:
ssids = parser_dict.setdefault('ssid', {})
ssid = int(m3.groupdict()['ssid'])
timestamp = m3.groupdict()['timestamp']
ssid_obj = ssids.setdefault(ssid, {})
events = ssid_obj.setdefault("events", {})
ssid_event_no_dict.setdefault(ssid, 1)
event_no = ssid_event_no_dict[ssid]
mac_addr_type = (m3.groupdict()['mac_addr_type']).lower()
mac_addr_type+="_mac"
ip_addr_type = (m3.groupdict()['ip_addr_type']).lower()
event = {
'ssid': ssid,
'event_type': 'bt_entry',
'state': m3.groupdict()['entry_state'],
mac_addr_type: m3.groupdict()['mac_addr'],
ip_addr_type: m3.groupdict()['ip_addr'],
'timestamp': timestamp
}
events[event_no] = event
ssid_event_no_dict[ssid]+=1
continue
return parser_dict
# ====================================================
# Schema for 'show device-tracking features
# ====================================================
class ShowDeviceTrackingFeaturesSchema(MetaParser):
""" Schema for show device-tracking features """
schema = {
'features': {
str: {
'feature': str,
'priority': int,
'state': str
}
}
}
# =============================================
# Parser for 'show device-tracking features'
# =============================================
class ShowDeviceTrackingFeatures(ShowDeviceTrackingFeaturesSchema):
""" show device-tracking features """
cli_command = 'show device-tracking features'
def cli(self, output=None):
if output is None:
output = self.device.execute(self.cli_command)
# Feature name priority state
# RA guard 192 READY
# Device-tracking 128 READY
# Source guard 32 READY
p1 = re.compile(r'(?P<feature>.+[^ ])\s+(?P<priority>\d+)\s+(?P<state>\w+)')
parser_dict = {}
for line in output.splitlines():
line = line.strip()
# Feature name priority state
# RA guard 192 READY
# Device-tracking 128 READY
# Source guard 32 READY
m = p1.match(line)
if m:
features = parser_dict.setdefault('features', {})
feature = features.setdefault(m.groupdict()['feature'], {})
feature.update({'feature': m.groupdict()['feature']})
feature.update({'priority': int(m.groupdict()['priority'])})
feature.update({'state': m.groupdict()['state']})
return parser_dict
# ==================================
# Schema for:
# * 'show device-tracking database mac details'
# ==================================
class ShowDeviceTrackingDatabaseMacDetailsSchema(MetaParser):
"""Schema for show device-tracking database mac details"""
schema = {
"device": {
int: {
"dev_code": str,
"link_layer_address": str,
"interface": str,
"vlan_id": int,
"pref_level": str,
"state": str,
Optional("time_left"): str,
"policy": str,
Optional("input_index"): int,
Optional("attached"): {
int: {
"ip": str,
}
}
}
}
}
# ==================================
# Parser for:
# * 'show device-tracking database mac details'
# ==================================
class ShowDeviceTrackingDatabaseMacDetails(ShowDeviceTrackingDatabaseMacDetailsSchema):
"""Parser for show device-tracking database mac details"""
cli_command = 'show device-tracking database mac details'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
device_tracking_database_mac_details_dict = {}
# MAC Interface vlan prlvl state Time left Policy Input_index
# S dead.beef.0001 Twe1/0/41 38 TRUSTED MAC-STALE 93013 s 47
# Attached IP: 10.10.10.11
# Attached IP: 10.10.10.10
# L c4b2.39ae.51df Vl1 1 TRUSTED MAC-DOWN default 60
# S dead.beef.0001 Twe1/0/41 38 TRUSTED MAC-STALE 93013 s 47 60
device_capture = re.compile(
r"^(?P<dev_code>\S+)"
r"\s+(?P<link_layer_address>(\S+\.\S+\.\S+))"
r"\s+(?P<interface>\S+)"
r"\s+(?P<vlan_id>\d+)"
r"\s+(?P<prlvl>\S+)"
r"\s+(?P<state>\S+)"
r"(\s+(?P<time_left>\S+\ss))?"
r"\s+(?P<policy>\S+)"
r"(\s+(?P<input_index>\d+))?$"
)
attached_capture = re.compile(
r"^Attached IP: (?P<ip>\S+)$"
)
device_index = 0
attached_counter = 0
for line in out.splitlines():
line = line.strip()
if not line:
continue
# S dead.beef.0001 Twe1/0/41 38 TRUSTED MAC-STALE 93013 s 47 60
match = device_capture.match(line)
if match:
device_index += 1
attached_counter = 0
groups = match.groupdict()
dev_code = groups['dev_code']
lla = groups['link_layer_address']
interface = groups['interface']
vlan = int(groups['vlan_id'])
pref_level = groups['prlvl']
state = groups['state']
policy = groups['policy']
index_dict = device_tracking_database_mac_details_dict.setdefault('device', {}).setdefault(device_index, {})
index_dict['dev_code'] = dev_code
index_dict['link_layer_address'] = lla
index_dict['interface'] = interface
index_dict['vlan_id'] = vlan
index_dict['pref_level'] = pref_level
index_dict['state'] = state
index_dict["policy"] = policy
if groups['time_left']:
time_left = groups['time_left']
index_dict['time_left'] = time_left
if groups['input_index']:
input_index = int(groups['input_index'])
index_dict['input_index'] = input_index
continue
# Attached IP: 10.10.10.11
match = attached_capture.match(line)
if match:
attached_counter += 1
groups = match.groupdict()
ip = groups['ip']
attached_dict = device_tracking_database_mac_details_dict['device'][device_index].setdefault('attached', {}).setdefault(attached_counter, {})
attached_dict['ip'] = ip
continue
return device_tracking_database_mac_details_dict
# ==================================
# Schema for:
# * 'show device-tracking messages'
# ==================================
class ShowDeviceTrackingMessagesSchema(MetaParser):
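    """Schema for show device-tracking messages."""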
schema = {
'entries': {
int: {
"timestamp": str,
"vlan": int,
"interface": str,
Optional("mac"): str,
"protocol": str,
"ip": str,
"ignored": bool,
Optional("drop_reason"): str,
}
}
}
# ==================================
# Parser for:
# * 'show device-tracking messages'
# ==================================
class ShowDeviceTrackingMessages(ShowDeviceTrackingMessagesSchema):
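    """Parser for show device-tracking messages."""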
cli_command = "show device-tracking messages"
def cli(self, output=None):
        if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
device_tracking_messages_dict = {}
# [Wed Jul 21 20:31:23.000] VLAN 1, From Et0/1 MAC aabb.cc00.0300: ARP::REP, 192.168.23.3,
# [Wed Jul 21 20:31:25.000] VLAN 1006, From Et0/1 MAC aabb.cc00.0300: ARP::REP, 192.168.23.3, Packet ignored.
# [Wed Jul 21 20:31:27.000] VLAN 10, From Et0/0 MAC aabb.cc00.0100: NDP::NA, FE80::A8BB:CCFF:FE00:100, Drop reason=Packet accepted but not forwarded
message_capture = re.compile(
r"^\[(?P<timestamp>(\S+\s\S+\s\d+\s\S+))\]"
r"\s+VLAN (?P<vlan>\d+),"
r"\s+From (?P<interface>\S+)"
r"(\s+MAC (?P<mac>([a-f0-9]+\.[a-f0-9]+\.[a-f0-9]+)):)?"
r"\s+(?P<protocol>([a-zA-Z]+::[a-zA-Z]+)),"
r"\s+(?P<ip>(\d+\.\d+\.\d+\.\d+)|(([A-F0-9]+:+)+[A-F0-9]+)),"
r"(\s+(?P<ignored>(Packet ignored))\.)?"
r"(\s+Drop reason=(?P<drop_reason>.*))?$"
)
entry_counter = 0
for line in out.splitlines():
line = line.strip()
if not line:
continue
# [Wed Jul 21 20:31:27.000] VLAN 10, From Et0/0 MAC aabb.cc00.0100: NDP::RA, FE80::A8BB:CCFF:FE00:100, Drop reason=Packet not authorized on port
match = message_capture.match(line)
if match:
entry_counter += 1
groups = match.groupdict()
timestamp = groups['timestamp']
vlan = int(groups['vlan'])
interface = groups['interface']
protocol = groups['protocol']
ip = groups['ip']
entry_dict = device_tracking_messages_dict.setdefault('entries', {}).setdefault(entry_counter, {})
entry_dict['timestamp'] = timestamp
entry_dict['vlan'] = vlan
entry_dict['interface'] = interface
entry_dict['protocol'] = protocol
entry_dict['ip'] = ip
if groups['mac']:
entry_dict['mac'] = groups['mac']
if groups['ignored']:
entry_dict['ignored'] = True
else:
entry_dict['ignored'] = False
if groups['drop_reason']:
entry_dict['drop_reason'] = groups['drop_reason']
continue
return device_tracking_messages_dict
|
details_soup.py | Abhijeet-AR/Competitive_Programming_Score_API | 140 | 11198269 | <filename>details_soup.py
import json
import re
# DO NOT import this after requests
import grequests
import requests
import os
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from util import get_safe_nested_key
class UsernameError(Exception):
pass
class PlatformError(Exception):
pass
class BrokenChangesError(Exception):
pass
class UserData:
def __init__(self, username=None):
self.__username = username
def update_username(self, username):
self.__username = username
def __codechef(self):
url = 'https://www.codechef.com/users/{}'.format(self.__username)
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
try:
rating = soup.find('div', class_='rating-number').text
except AttributeError:
raise UsernameError('User not Found')
stars = soup.find('span', class_='rating')
if stars:
stars = stars.text
highest_rating_container = soup.find('div', class_='rating-header')
highest_rating = highest_rating_container.find_next('small').text.split()[-1].rstrip(')')
rating_ranks_container = soup.find('div', class_='rating-ranks')
rating_ranks = rating_ranks_container.find_all('a')
global_rank = rating_ranks[0].strong.text
country_rank = rating_ranks[1].strong.text
if global_rank != 'NA':
global_rank = int(global_rank)
country_rank = int(country_rank)
def contests_details_get():
rating_table = soup.find('table', class_='rating-table')
rating_table_rows = rating_table.find_all('td')
'''Can add ranking url to contests'''
try:
long_challenge = {'name': 'Long Challenge', 'rating': int(rating_table_rows[1].text),
'global_rank': int(rating_table_rows[2].a.hx.text),
'country_rank': int(rating_table_rows[3].a.hx.text)}
except ValueError:
long_challenge = {'name': 'Long Challenge', 'rating': int(rating_table_rows[1].text),
'global_rank': rating_table_rows[2].a.hx.text,
'country_rank': rating_table_rows[3].a.hx.text}
try:
cook_off = {'name': 'Cook-off',
'rating': int(rating_table_rows[5].text),
'global_rank': int(rating_table_rows[6].a.hx.text),
'country_rank': int(rating_table_rows[7].a.hx.text)}
except ValueError:
cook_off = {'name': 'Cook-off',
'rating': int(rating_table_rows[5].text),
'global_rank': rating_table_rows[6].a.hx.text,
'country_rank': rating_table_rows[7].a.hx.text}
try:
lunch_time = {'name': 'Lunch Time', 'rating': int(rating_table_rows[9].text),
'global_rank': int(rating_table_rows[10].a.hx.text),
'country_rank': int(rating_table_rows[11].a.hx.text)}
except ValueError:
lunch_time = {'name': 'Lunch Time', 'rating': int(rating_table_rows[9].text),
'global_rank': rating_table_rows[10].a.hx.text,
'country_rank': rating_table_rows[11].a.hx.text}
return [long_challenge, cook_off, lunch_time]
def contest_rating_details_get():
start_ind = page.text.find('[', page.text.find('all_rating'))
end_ind = page.text.find(']', start_ind) + 1
next_opening_brack = page.text.find('[', start_ind + 1)
while next_opening_brack < end_ind:
end_ind = page.text.find(']', end_ind + 1) + 1
next_opening_brack = page.text.find('[', next_opening_brack + 1)
all_rating = json.loads(page.text[start_ind: end_ind])
for rating_contest in all_rating:
rating_contest.pop('color')
return all_rating
def problems_solved_get():
problem_solved_section = soup.find('section', class_='rating-data-section problems-solved')
no_solved = problem_solved_section.find_all('h5')
categories = problem_solved_section.find_all('article')
fully_solved = {'count': int(re.findall(r'\d+', no_solved[0].text)[0])}
if fully_solved['count'] != 0:
for category in categories[0].find_all('p'):
category_name = category.find('strong').text[:-1]
fully_solved[category_name] = []
for prob in category.find_all('a'):
fully_solved[category_name].append({'name': prob.text,
'link': 'https://www.codechef.com' + prob['href']})
partially_solved = {'count': int(re.findall(r'\d+', no_solved[1].text)[0])}
if partially_solved['count'] != 0:
for category in categories[1].find_all('p'):
category_name = category.find('strong').text[:-1]
partially_solved[category_name] = []
for prob in category.find_all('a'):
partially_solved[category_name].append({'name': prob.text,
'link': 'https://www.codechef.com' + prob['href']})
return fully_solved, partially_solved
def user_details_get():
user_details_attribute_exclusion_list = {'username', 'link', 'teams list', 'discuss profile'}
header_containers = soup.find_all('header')
name = header_containers[1].find('h1', class_="h2-style").text
user_details_section = soup.find('section', class_='user-details')
user_details_list = user_details_section.find_all('li')
user_details_response = {'name': name, 'username': user_details_list[0].text.split('★')[-1].rstrip('\n')}
for user_details in user_details_list:
attribute, value = user_details.text.split(':')[:2]
attribute = attribute.strip().lower()
value = value.strip()
if attribute not in user_details_attribute_exclusion_list:
user_details_response[attribute] = value
return user_details_response
full, partial = problems_solved_get()
details = {'status': 'Success', 'rating': int(rating), 'stars': stars, 'highest_rating': int(highest_rating),
'global_rank': global_rank, 'country_rank': country_rank,
'user_details': user_details_get(), 'contests': contests_details_get(),
'contest_ratings': contest_rating_details_get(), 'fully_solved': full, 'partially_solved': partial}
return details
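    # Note: the Codeforces lookup below combines the official user.info API (for
    # the rating fields) with HTML scraping of the profile's 'user-contests-table'
    # (for contest history), so the latter part is sensitive to page-layout changes.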
def __codeforces(self):
urls = {
"user_info": {"url": f'https://codeforces.com/api/user.info?handles={self.__username}'},
"user_contests": {"url": f'https://codeforces.com/contests/with/{self.__username}'}
}
reqs = [grequests.get(item["url"]) for item in urls.values() if item.get("url")]
responses = grequests.map(reqs)
details_api = {}
contests = []
for page in responses:
if page.status_code != 200:
raise UsernameError('User not Found')
if page.request.url == urls["user_info"]["url"]:
details_api = page.json()
elif page.request.url == urls["user_contests"]["url"]:
soup = BeautifulSoup(page.text, 'html.parser')
table = soup.find('table', attrs={'class': 'user-contests-table'})
table_body = table.find('tbody')
rows = table_body.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
contests.append({
"Contest": cols[1],
"Rank": cols[3],
"Solved": cols[4],
"Rating Change": cols[5],
"New Rating": cols[6]
})
if details_api.get('status') != 'OK':
raise UsernameError('User not Found')
details_api = details_api['result'][0]
try:
rating = details_api['rating']
max_rating = details_api['maxRating']
rank = details_api['rank']
max_rank = details_api['maxRank']
except KeyError:
rating = 'Unrated'
max_rating = 'Unrated'
rank = 'Unrated'
max_rank = 'Unrated'
return {
'status': 'Success',
'username': self.__username,
'platform': 'Codeforces',
'rating': rating,
'max rating': max_rating,
'rank': rank,
'max rank': max_rank,
'contests': contests
}
def __spoj(self):
url = 'https://www.spoj.com/users/{}/'.format(self.__username)
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
details_container = soup.find_all('p')
points = details_container[2].text.split()[3][1:]
rank = details_container[2].text.split()[2][1:]
join_date = details_container[1].text.split()[1] + ' ' + details_container[1].text.split()[2]
institute = ' '.join(details_container[3].text.split()[1:])
try:
points = float(points)
except ValueError:
raise UsernameError('User not Found')
def get_solved_problems():
table = soup.find('table', class_='table table-condensed')
rows = table.findChildren('td')
solved_problems = []
for row in rows:
if row.a.text:
solved_problems.append(row.a.text)
return solved_problems
def get_todo():
try:
table = soup.find_all('table', class_='table')[1]
except:
return None
rows = table.findChildren('td')
todo_problems = []
for row in rows:
if row.a.text:
todo_problems.append(row.a.text)
return todo_problems
details = {'status': 'Success', 'username': self.__username, 'platform': 'SPOJ',
'points': float(points), 'rank': int(rank), 'solved': get_solved_problems(),
'todo': get_todo(), 'join data': join_date, 'institute': institute}
return details
def __interviewbit(self):
url = 'https://www.interviewbit.com/profile/{}'.format(self.__username)
page = requests.get(url)
if page.status_code != 200:
raise UsernameError('User not Found')
soup = BeautifulSoup(page.text, 'html.parser')
details_main = soup.find('div', class_='user-stats')
details_container = details_main.findChildren('div', recursive=False)
details = {'status': 'Success', 'username': self.__username, 'platform': 'Interviewbit',
'rank': int(details_container[0].find('div', class_='txt').text),
'score': int(details_container[1].find('div', class_='txt').text),
'streak': details_container[2].find('div', class_='txt').text}
return details
def __atcoder(self):
url = "https://atcoder.jp/users/{}".format(self.__username)
page = requests.get(url)
if page.status_code != 200:
raise UsernameError("User not Found")
soup = BeautifulSoup(page.text, "html.parser")
tables = soup.find_all("table", class_="dl-table")
if len(tables) < 2:
details = {
"status": "Success",
"username": self.__username,
"platform": "Atcoder",
"rating": "NA",
"highest": "NA",
"rank": "NA",
"level": "NA",
}
return details
rows = tables[1].find_all("td")
try:
rank = int(rows[0].text[:-2])
current_rating = int(rows[1].text)
spans = rows[2].find_all("span")
highest_rating = int(spans[0].text)
level = spans[2].text
except Exception as E:
raise BrokenChangesError(E)
details = {
"status": "Success",
"username": self.__username,
"platform": "Atcoder",
"rating": current_rating,
"highest": highest_rating,
"rank": rank,
"level": level,
}
return details
# DEPRECATED
def __leetcode(self):
url = 'https://leetcode.com/{}'.format(self.__username)
if requests.get(url).status_code != 200:
raise UsernameError('User not Found')
options = webdriver.ChromeOptions()
options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
options.add_argument("--headless")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--no-sandbox")
# driver = webdriver.PhantomJS(executable_path='./phantomjs')
driver = webdriver.Chrome(options=options, executable_path=os.environ.get("CHROMEDRIVER_PATH"))
try:
driver.get(url)
driver.implicitly_wait(10)
hover_ranking = driver.find_element_by_xpath(
'/html/body/div[1]/div[2]/div/div[1]/div[1]/div[2]/div/div[1]/div[3]/div')
ActionChains(driver).move_to_element(to_element=hover_ranking).perform()
ranking = driver.find_element_by_xpath('/html/body/div[4]/div/div/div/div[2]').text
print('rank: ', ranking)
total_problems_solved = driver.find_element_by_xpath(
'/html/body/div[1]/div[2]/div/div[1]/div[2]/div/div[1]/div[1]/div[2]').text
acceptance_rate_span_1 = driver.find_element_by_xpath(
'/html/body/div[1]/div[2]/div/div[1]/div[2]/div/div[1]/div[2]/div[2]/div/div[1]/span[1]').text
acceptance_rate_span_2 = driver.find_element_by_xpath(
'/html/body/div[1]/div[2]/div/div[1]/div[2]/div/div[1]/div[2]/div[2]/div/div[1]/span[2]').text
acceptance_rate = str(acceptance_rate_span_1) + str(acceptance_rate_span_2)
easy_questions_solved = driver.find_element_by_xpath(
'//*[@id="profile-root"]/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[2]/span[1]').text
total_easy_questions = driver.find_element_by_xpath(
'//*[@id="profile-root"]/div[2]/div/div[1]/div[2]/div/div[2]/div/div[1]/div[2]/span[2]').text
medium_questions_solved = driver.find_element_by_xpath(
'//*[@id="profile-root"]/div[2]/div/div[1]/div[2]/div/div[2]/div/div[2]/div[2]/span[1]').text
total_medium_questions = driver.find_element_by_xpath(
'//*[@id="profile-root"]/div[2]/div/div[1]/div[2]/div/div[2]/div/div[2]/div[2]/span[2]').text
hard_questions_solved = driver.find_element_by_xpath(
'//*[@id="profile-root"]/div[2]/div/div[1]/div[2]/div/div[2]/div/div[3]/div[2]/span[1]').text
total_hard_questions = driver.find_element_by_xpath(
'//*[@id="profile-root"]/div[2]/div/div[1]/div[2]/div/div[2]/div/div[3]/div[2]/span[2]').text
contribution_points = driver.find_element_by_xpath(
'/html/body/div[1]/div[2]/div/div[1]/div[3]/div[2]/div/div/div/li[1]/span').text
contribution_problems = driver.find_element_by_xpath(
'/html/body/div[1]/div[2]/div/div[1]/div[3]/div[2]/div/div/div/li[2]/span').text
contribution_testcases = driver.find_element_by_xpath(
'/html/body/div[1]/div[2]/div/div[1]/div[3]/div[2]/div/div/div/li[3]/span').text
reputation = driver.find_element_by_xpath(
'/html/body/div[1]/div[2]/div/div[1]/div[4]/div[2]/div/div/div/li/span').text
finally:
driver.close()
driver.quit()
details = {'status': 'Success', 'ranking': ranking[9:],
'total_problems_solved': total_problems_solved,
'acceptance_rate': acceptance_rate,
'easy_questions_solved': easy_questions_solved,
'total_easy_questions': total_easy_questions,
'medium_questions_solved': medium_questions_solved,
'total_medium_questions': total_medium_questions,
'hard_questions_solved': hard_questions_solved,
'total_hard_questions': total_hard_questions,
'contribution_points': contribution_points,
'contribution_problems': contribution_problems,
'contribution_testcases': contribution_testcases,
'reputation': reputation}
return details
def __leetcode_v2(self):
def __parse_response(response):
total_submissions_count = 0
total_easy_submissions_count = 0
total_medium_submissions_count = 0
total_hard_submissions_count = 0
ac_submissions_count = 0
ac_easy_submissions_count = 0
ac_medium_submissions_count = 0
ac_hard_submissions_count = 0
total_easy_questions = 0
total_medium_questions = 0
total_hard_questions = 0
total_problems_solved = 0
easy_questions_solved = 0
medium_questions_solved = 0
hard_questions_solved = 0
acceptance_rate = 0
easy_acceptance_rate = 0
medium_acceptance_rate = 0
hard_acceptance_rate = 0
total_problems_submitted = 0
easy_problems_submitted = 0
medium_problems_submitted = 0
hard_problems_submitted = 0
ranking = get_safe_nested_key(['data', 'matchedUser', 'profile', 'ranking'], response)
if ranking > 100000:
ranking = '~100000'
reputation = get_safe_nested_key(['data', 'matchedUser', 'profile', 'reputation'], response)
total_questions_stats = get_safe_nested_key(['data', 'allQuestionsCount'], response)
for item in total_questions_stats:
if item['difficulty'] == "Easy":
total_easy_questions = item['count']
if item['difficulty'] == "Medium":
total_medium_questions = item['count']
if item['difficulty'] == "Hard":
total_hard_questions = item['count']
ac_submissions = get_safe_nested_key(['data', 'matchedUser', 'submitStats', 'acSubmissionNum'], response)
for submission in ac_submissions:
if submission['difficulty'] == "All":
total_problems_solved = submission['count']
ac_submissions_count = submission['submissions']
if submission['difficulty'] == "Easy":
easy_questions_solved = submission['count']
ac_easy_submissions_count = submission['submissions']
if submission['difficulty'] == "Medium":
medium_questions_solved = submission['count']
ac_medium_submissions_count = submission['submissions']
if submission['difficulty'] == "Hard":
hard_questions_solved = submission['count']
ac_hard_submissions_count = submission['submissions']
total_submissions = get_safe_nested_key(['data', 'matchedUser', 'submitStats', 'totalSubmissionNum'],
response)
for submission in total_submissions:
if submission['difficulty'] == "All":
total_problems_submitted = submission['count']
total_submissions_count = submission['submissions']
if submission['difficulty'] == "Easy":
easy_problems_submitted = submission['count']
total_easy_submissions_count = submission['submissions']
if submission['difficulty'] == "Medium":
medium_problems_submitted = submission['count']
total_medium_submissions_count = submission['submissions']
if submission['difficulty'] == "Hard":
hard_problems_submitted = submission['count']
total_hard_submissions_count = submission['submissions']
if total_submissions_count > 0:
acceptance_rate = round(ac_submissions_count * 100 / total_submissions_count, 2)
if total_easy_submissions_count > 0:
easy_acceptance_rate = round(ac_easy_submissions_count * 100 / total_easy_submissions_count, 2)
if total_medium_submissions_count > 0:
medium_acceptance_rate = round(ac_medium_submissions_count * 100 / total_medium_submissions_count, 2)
if total_hard_submissions_count > 0:
hard_acceptance_rate = round(ac_hard_submissions_count * 100 / total_hard_submissions_count, 2)
contribution_points = get_safe_nested_key(['data', 'matchedUser', 'contributions', 'points'],
response)
contribution_problems = get_safe_nested_key(['data', 'matchedUser', 'contributions', 'questionCount'],
response)
contribution_testcases = get_safe_nested_key(['data', 'matchedUser', 'contributions', 'testcaseCount'],
response)
return {
'status': 'Success',
'ranking': str(ranking),
'total_problems_submitted': str(total_problems_submitted),
'total_problems_solved': str(total_problems_solved),
'acceptance_rate': f"{acceptance_rate}%",
'easy_problems_submitted': str(easy_problems_submitted),
'easy_questions_solved': str(easy_questions_solved),
'easy_acceptance_rate': f"{easy_acceptance_rate}%",
'total_easy_questions': str(total_easy_questions),
'medium_problems_submitted': str(medium_problems_submitted),
'medium_questions_solved': str(medium_questions_solved),
'medium_acceptance_rate': f"{medium_acceptance_rate}%",
'total_medium_questions': str(total_medium_questions),
'hard_problems_submitted': str(hard_problems_submitted),
'hard_questions_solved': str(hard_questions_solved),
'hard_acceptance_rate': f"{hard_acceptance_rate}%",
'total_hard_questions': str(total_hard_questions),
'contribution_points': str(contribution_points),
'contribution_problems': str(contribution_problems),
'contribution_testcases': str(contribution_testcases),
'reputation': str(reputation)
}
url = f'https://leetcode.com/{self.__username}'
if requests.get(url).status_code != 200:
raise UsernameError('User not Found')
payload = {
"operationName": "getUserProfile",
"variables": {
"username": self.__username
},
"query": "query getUserProfile($username: String!) { allQuestionsCount { difficulty count } matchedUser(username: $username) { contributions { points questionCount testcaseCount } profile { reputation ranking } submitStats { acSubmissionNum { difficulty count submissions } totalSubmissionNum { difficulty count submissions } } }}"
}
res = requests.post(url='https://leetcode.com/graphql',
json=payload,
headers={'referer': f'https://leetcode.com/{self.__username}/'})
res.raise_for_status()
res = res.json()
return __parse_response(res)
def get_details(self, platform):
if platform == 'codechef':
return self.__codechef()
if platform == 'codeforces':
return self.__codeforces()
if platform == 'spoj':
try:
return self.__spoj()
except AttributeError:
raise UsernameError('User not Found')
if platform == 'interviewbit':
return self.__interviewbit()
if platform == 'leetcode':
return self.__leetcode_v2()
if platform == 'atcoder':
return self.__atcoder()
raise PlatformError('Platform not Found')
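# Hypothetical convenience wrapper (not part of the original API), included as a
# sketch of how callers typically handle the custom exceptions defined above.
def example_get_details_safely(username, platform):
    """Return an error payload instead of raising, using the classes above."""
    try:
        return UserData(username).get_details(platform)
    except UsernameError:
        return {'status': 'Failed', 'details': 'Invalid username'}
    except PlatformError:
        return {'status': 'Failed', 'details': 'Invalid platform'}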
if __name__ == '__main__':
ud = UserData('uwi')
ans = ud.get_details('leetcode')
print(ans)
# leetcode backward compatibility test. Commenting it out as it will fail in future
# leetcode_ud = UserData('saurabhprakash')
# leetcode_ans = leetcode_ud.get_details('leetcode')
# assert leetcode_ans == dict(status='Success', ranking='~100000', total_problems_solved='10',
# acceptance_rate='56.0%', easy_questions_solved='3', total_easy_questions='457',
# medium_questions_solved='5', total_medium_questions='901', hard_questions_solved='2',
# total_hard_questions='365', contribution_points='58', contribution_problems='0',
# contribution_testcases='0', reputation='0')
|
service/learner/brains/observation_preprocessor_test.py | lcrh/falken | 213 | 11198280 | <filename>service/learner/brains/observation_preprocessor_test.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for observation_preprocessor."""
import math
from absl.testing import absltest
from absl.testing import parameterized
from google.protobuf import text_format
from learner import test_data
from learner.brains import observation_preprocessor
from learner.brains import tfa_specs
import numpy as np
import tensorflow as tf
# pylint: disable=g-bad-import-order
import common.generate_protos # pylint: disable=unused-import
import brain_pb2
import observation_pb2
def get_hparams(feelers_version):
return dict(
always_compute_egocentric=True, feelers_version=feelers_version,
feelers_v2_output_channels=3, feelers_v2_kernel_size=5)
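# get_hparams() builds the hyperparameter dict handed to ObservationPreprocessor
# in the tests below; only feelers_version varies across the parameterized cases.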
class ObservationPreprocessorTest(parameterized.TestCase):
@parameterized.parameters([
('v1', 'default'), ('v2', 'default'), ('v2', 'wrap')])
def test_preproc(self, feelers_version, yaw_range):
spec_proto = test_data.brain_spec()
if yaw_range == 'wrap':
# Force yaw_angles to loop around 360 degrees.
for field in spec_proto.observation_spec.player.entity_fields:
if field.HasField('feeler'):
for i, yaw in enumerate(
np.linspace(0, math.pi, len(field.feeler.yaw_angles))):
field.feeler.yaw_angles[i] = yaw
spec = tfa_specs.BrainSpec(spec_proto)
obs_preproc = observation_preprocessor.ObservationPreprocessor(
spec, get_hparams(feelers_version))
data = test_data.observation_data(50, [0.0, 0.0, 0.0])
# Set camera position / rotation.
data.camera.position.x = data.player.position.x
data.camera.position.y = data.player.position.y
data.camera.position.z = data.player.position.z
data.camera.rotation.x = 0
data.camera.rotation.y = 0
data.camera.rotation.z = 1 # 180 degrees rotated around z
data.camera.rotation.w = 0
tfa_val = spec.observation_spec.tfa_value(data)
# Apply preprocessing layers to tf_val
preprocessed, _ = obs_preproc(tfa_val) # Ignore state component.
preprocessed = preprocessed.numpy().tolist()
def _dist(d):
"""Preprocess distances to match observation preprocessor."""
return np.log(d + 1)
want = [
0.0, # global_entities/0/drink - one-hot, category 0
0.0, # global_entities/0/drink - one-hot, category 1
1.0, # global_entities/0/drink - one-hot, category 2
2 * (66 / 100) - 1, # global_entities/0/evilness
1, # player/feeler
1.1, # player/feeler
2, # player/feeler
2.1, # player/feeler
3, # player/feeler
3.1, # player/feeler
2 * (50 / 100) - 1, # player/health
0.0, # XZ-angle camera to entity1
0.0, # YZ-angle camera to entity1
_dist(1.0), # distance camera to entity1
0.0, # XZ-angle camera to entity2
-math.pi/2, # YZ-angle camera to entity2
_dist(1.0), # distance camera to entity2
math.pi/2, # XZ-angle camera to entity3
0.0, # YZ-angle camera to entity3
_dist(2.0), # distance camera to entity3
0.0, # XZ-angle player to entity1
0.0, # YZ-angle player to entity1
_dist(1.0), # distance player to entity1
0.0, # XZ-angle player to entity2
math.pi/2, # YZ-angle player to entity2
_dist(1.0), # distance player to entity2
-math.pi/2, # XZ-angle player to entity3
0.0, # YZ-angle player to entity3
_dist(2.0) # distance player to entity3
]
# We're rounding aggressively because batch norm adds noise.
self.assertSequenceAlmostEqual(preprocessed, want, delta=0.05)
@parameterized.parameters(['v1', 'v2'])
def test_preproc_batch(self, feelers_version):
spec = tfa_specs.BrainSpec(test_data.brain_spec())
obs_preproc = observation_preprocessor.ObservationPreprocessor(
spec, get_hparams(feelers_version))
tfa_val = spec.observation_spec.tfa_value(
test_data.observation_data(50, [0.0, 0.0, 0.0]))
# Create batch of nested observations of size 5
tfa_val = tf.nest.map_structure(
lambda x: tf.stack([x, x, x, x, x]),
tfa_val)
# Apply preprocessing layers to tf_val
preprocessed, _ = obs_preproc(tfa_val) # Ignore state component.
self.assertEqual(preprocessed.shape, (5, 29))
@parameterized.parameters(['v1', 'v2'])
def test_preproc_missing_player(self, feelers_version):
proto_obs_spec = test_data.observation_spec()
proto_obs_spec.ClearField('player') # Delete player pos from spec.
proto_act_spec = test_data.action_spec() # Delete player references.
# Remove joystick actions since they reference the player and camera.
# The last 3 actions in the example model are joystick actions, so we
# remove them from the list.
del proto_act_spec.actions[-3:]
brain_spec = test_data.brain_spec()
brain_spec.observation_spec.CopyFrom(proto_obs_spec)
brain_spec.action_spec.CopyFrom(proto_act_spec)
spec = tfa_specs.BrainSpec(brain_spec)
obs_preproc = observation_preprocessor.ObservationPreprocessor(
spec, get_hparams(feelers_version))
proto_data = test_data.observation_data(50, [0.0, 0.0, 0.0])
proto_data.ClearField('player') # Delete player from data.
tfa_val = spec.observation_spec.tfa_value(proto_data)
# Apply preprocessing layers to tf_val
preprocessed, _ = obs_preproc(tfa_val) # Ignore state component.
preprocessed = preprocessed.numpy().tolist()
want = [
        0.0,  # global_entities/0/drink - one-hot, category 0
0.0, # global_entities/0/drink - one-hot, category 1
1.0, # global_entities/0/drink - one-hot, category 2
2 * (66.0 / 100.0) - 1, # global_entities/0/evilness
]
self.assertSequenceAlmostEqual(preprocessed, want, delta=0.05)
@parameterized.product(dir_mode=['angle', 'unit_circle'],
dist_mode=['linear', 'log_plus_one'],
num_batch_dims=[0, 1, 2])
def test_egocentric_modes(self, dir_mode, dist_mode, num_batch_dims):
brain_spec = brain_pb2.BrainSpec()
text_format.Parse(
"""
observation_spec {
player {
position {}
rotation {}
}
global_entities {
position {}
rotation {}
}
}
action_spec {
actions {
name: "joy_pitch_yaw"
joystick {
axes_mode: DELTA_PITCH_YAW
controlled_entity: "player"
}
}
}
""", brain_spec)
hparams = {
'egocentric_direction_mode': dir_mode,
'egocentric_distance_mode': dist_mode,
}
spec = tfa_specs.BrainSpec(brain_spec)
obs_preproc = observation_preprocessor.ObservationPreprocessor(
spec, hparams)
observation_data = observation_pb2.ObservationData()
text_format.Parse(
"""
player {
position {}
rotation {}
}
global_entities {
position {
x: 1
y: -1
z: 1
}
rotation {}
}
""", observation_data)
tfa_val = spec.observation_spec.tfa_value(observation_data)
# Stack into a batch of the requested size.
for _ in range(num_batch_dims):
tfa_val = tf.nest.map_structure(
lambda x: tf.stack([x, x]), tfa_val)
preprocessed, _ = obs_preproc(tfa_val) # Ignore state component.
preprocessed = preprocessed.numpy()
# Unpack result of first batch.
for _ in range(num_batch_dims):
preprocessed = preprocessed[0]
if dir_mode == 'angle':
want = [-math.pi/4, math.pi/4] # -45, 45 degree in radians.
self.assertSequenceAlmostEqual(preprocessed[:len(want)], want, delta=0.05)
preprocessed = preprocessed[len(want):]
else:
assert dir_mode == 'unit_circle'
v = 1 / math.sqrt(2) # X and Y component of 45 degree 2D unit vec.
want = [v, v, -v, v]
self.assertSequenceAlmostEqual(preprocessed[:len(want)], want, delta=0.05)
preprocessed = preprocessed[len(want):]
if dist_mode == 'linear':
want = [math.sqrt(3)] # diagonal of the unit cube.
self.assertSequenceAlmostEqual(want, preprocessed, delta=0.05)
else:
assert dist_mode == 'log_plus_one'
want = [math.log(math.sqrt(3) + 1)]
self.assertSequenceAlmostEqual(want, preprocessed, delta=0.05)
if __name__ == '__main__':
absltest.main()
|
ibis/backends/pandas/tests/test_dispatcher.py | GrapeBaBa/ibis | 986 | 11198313 | <gh_stars>100-1000
import pytest
from multipledispatch import Dispatcher
from multipledispatch.conflict import AmbiguityWarning
from ibis.backends.pandas.dispatcher import TwoLevelDispatcher
class A1:
pass
class A2(A1):
pass
class A3(A2):
pass
class B1:
pass
class B2(B1):
pass
class B3(B2):
pass
@pytest.fixture
def foo_dispatchers():
foo = TwoLevelDispatcher('foo', doc='Test dispatcher foo')
foo_m = Dispatcher('foo_m', doc='Control dispatcher foo_m')
@foo.register(A1, B1)
@foo_m.register(A1, B1)
def foo0(x, y):
return 0
@foo.register(A1, B2)
@foo_m.register(A1, B2)
def foo1(x, y):
return 1
@foo.register(A2, B1)
@foo_m.register(A2, B1)
def foo2(x, y):
return 2
@foo.register(A2, B2)
@foo_m.register(A2, B2)
def foo3(x, y):
return 3
@foo.register(
(A1, A2),
)
@foo_m.register(
(A1, A2),
)
def foo4(x):
return 4
return foo, foo_m
@pytest.fixture
def foo(foo_dispatchers):
return foo_dispatchers[0]
@pytest.fixture
def foo_m(foo_dispatchers):
return foo_dispatchers[1]
def test_cache(foo, mocker):
"""Test that cache is properly set after calling with args."""
spy = mocker.spy(foo, 'dispatch')
a1, b1 = A1(), B1()
assert (A1, B1) not in foo._cache
foo(a1, b1)
assert (A1, B1) in foo._cache
foo(a1, b1)
spy.assert_called_once_with(A1, B1)
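# A minimal, hedged sketch of the caching behaviour exercised above (reusing
# the A1/B1 fixture classes from this module; the handler body is illustrative):
# >>> bar = TwoLevelDispatcher('bar')
# >>> _ = bar.register(A1, B1)(lambda a, b: 'a1b1')
# >>> bar(A1(), B1())            # first call resolves and caches (A1, B1)
# 'a1b1'
# >>> (A1, B1) in bar._cache     # later calls with the same types skip dispatch
# True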
def test_dispatch(foo, mocker):
"""Test that calling dispatcher with a signature that is registered
does not trigger a linear search through dispatch_iter."""
spy = mocker.spy(foo, 'dispatch_iter')
# This should not trigger a linear search
foo(A1(), B1())
assert not spy.called, (
"Calling dispatcher with registered signature should "
"not trigger linear search"
)
foo(A3(), B3())
spy.assert_called_once_with(A3, B3)
@pytest.mark.parametrize(
'args',
[
(A1(), B1()),
(A1(), B2()),
(A1(), B3()),
(A2(), B1()),
(A2(), B2()),
(A2(), B3()),
(A3(), B1()),
(A3(), B2()),
(A3(), B3()),
(A1(),),
(A2(),),
(A3(),),
],
)
def test_registered(foo_dispatchers, args):
foo, foo_m = foo_dispatchers
assert foo(*args) == foo_m(*args)
def test_ordering(foo, foo_m):
assert foo.ordering == foo_m.ordering
def test_funcs(foo, foo_m):
assert foo.funcs == foo_m.funcs
@pytest.mark.parametrize(
'args', [(B1(),), (B2(),), (A1(), A1()), (A1(), A2(), A3())]
)
def test_unregistered(foo, args):
with pytest.raises(
NotImplementedError, match="Could not find signature for foo.*"
):
foo(*args)
def test_ambiguities_warning():
bar = TwoLevelDispatcher('bar')
bar.register(A1, B1)(lambda a, b: 0)
bar.register(A1, B2)(lambda a, b: 1)
bar.register(A2, B1)(lambda a, b: 2)
with pytest.warns(AmbiguityWarning, match=".*Consider.*\n\n.*(A2, B2).*"):
bar.reorder()
def test_ambiguities_no_warning():
bar = TwoLevelDispatcher('bar')
bar.register(A1, B1)(lambda a, b: 0)
bar.register(A1, B2)(lambda a, b: 1)
bar.register(A2, B1)(lambda a, b: 2)
bar.register(A2, B2)(lambda a, b: 3)
with pytest.warns(None) as warnings:
bar.reorder()
assert len(warnings) == 0
|
benchmarks/benchmarks/go_benchmark_functions/go_benchmark.py | Ennosigaeon/scipy | 9,095 | 11198314 | # -*- coding: utf-8 -*-
import numpy as np
from numpy import abs, asarray
from ..common import safe_import
with safe_import():
from scipy.special import factorial
class Benchmark:
"""
Defines a global optimization benchmark problem.
This abstract class defines the basic structure of a global
optimization problem. Subclasses should implement the ``fun`` method
for a particular optimization problem.
Attributes
----------
N : int
The dimensionality of the problem.
bounds : sequence
The lower/upper bounds to be used for minimizing the problem.
        This is a list of (lower, upper) tuples that contain the lower and upper
bounds for the problem. The problem should not be asked for evaluation
outside these bounds. ``len(bounds) == N``.
xmin : sequence
The lower bounds for the problem
xmax : sequence
The upper bounds for the problem
fglob : float
The global minimum of the evaluated function.
global_optimum : sequence
A list of vectors that provide the locations of the global minimum.
Note that some problems have multiple global minima, not all of which
may be listed.
nfev : int
the number of function evaluations that the object has been asked to
calculate.
change_dimensionality : bool
        Whether the length of the benchmark function's `x` variable (i.e., the
        dimensionality of the problem) can be changed
custom_bounds : sequence
a list of tuples that contain lower/upper bounds for use in plotting.
"""
def __init__(self, dimensions):
"""
Initialises the problem
Parameters
----------
dimensions : int
The dimensionality of the problem
"""
self._dimensions = dimensions
self.nfev = 0
self.fglob = np.nan
self.global_optimum = None
self.change_dimensionality = False
self.custom_bounds = None
def __str__(self):
return '{0} ({1} dimensions)'.format(self.__class__.__name__, self.N)
def __repr__(self):
return self.__class__.__name__
def initial_vector(self):
"""
Random initialisation for the benchmark problem.
Returns
-------
x : sequence
a vector of length ``N`` that contains random floating point
numbers that lie between the lower and upper bounds for a given
parameter.
"""
return asarray([np.random.uniform(l, u) for l, u in self.bounds])
def success(self, x, tol=1.e-5):
"""
        Tests if a candidate solution is at the global minimum.
        The default test is that the evaluated function differs from the known
        global minimum by less than ``tol``.
Parameters
----------
x : sequence
The candidate vector for testing if the global minimum has been
reached. Must have ``len(x) == self.N``
tol : float
The evaluated function and known global minimum must differ by less
than this amount to be at a global minimum.
Returns
-------
bool : is the candidate vector at the global minimum?
"""
val = self.fun(asarray(x))
if abs(val - self.fglob) < tol:
return True
# the solution should still be in bounds, otherwise immediate fail.
if np.any(x > np.asfarray(self.bounds)[:, 1]):
return False
if np.any(x < np.asfarray(self.bounds)[:, 0]):
return False
# you found a lower global minimum. This shouldn't happen.
if val < self.fglob:
raise ValueError("Found a lower global minimum",
x,
val,
self.fglob)
return False
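    # Worked example of the check above (numbers are illustrative): with
    # self.fglob == 0.0 and tol == 1e-5, a candidate with self.fun(x) == 4e-6
    # satisfies abs(4e-6 - 0.0) < 1e-5 and returns True immediately; a
    # candidate evaluating to 1e-3 falls through to the bounds check and, if
    # it lies inside the bounds, returns False.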
def fun(self, x):
"""
Evaluation of the benchmark function.
Parameters
----------
x : sequence
The candidate vector for evaluating the benchmark problem. Must
have ``len(x) == self.N``.
Returns
-------
val : float
the evaluated benchmark function
"""
raise NotImplementedError
def change_dimensions(self, ndim):
"""
Changes the dimensionality of the benchmark problem
The dimensionality will only be changed if the problem is suitable
Parameters
----------
ndim : int
The new dimensionality for the problem.
"""
if self.change_dimensionality:
self._dimensions = ndim
else:
            raise ValueError('dimensionality cannot be changed for this '
                             'problem')
@property
def bounds(self):
"""
The lower/upper bounds to be used for minimizing the problem.
        This is a list of (lower, upper) tuples that contain the lower and upper
bounds for the problem. The problem should not be asked for evaluation
outside these bounds. ``len(bounds) == N``.
"""
if self.change_dimensionality:
return [self._bounds[0]] * self.N
else:
return self._bounds
@property
def N(self):
"""
The dimensionality of the problem.
Returns
-------
N : int
The dimensionality of the problem
"""
return self._dimensions
@property
def xmin(self):
"""
The lower bounds for the problem
Returns
-------
xmin : sequence
The lower bounds for the problem
"""
return asarray([b[0] for b in self.bounds])
@property
def xmax(self):
"""
The upper bounds for the problem
Returns
-------
xmax : sequence
The upper bounds for the problem
"""
return asarray([b[1] for b in self.bounds])
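# A hedged sketch of how a concrete problem would subclass Benchmark (the
# class name `Sphere`, its bounds and its objective are illustrative and not
# part of this module): a subclass sets `_bounds`, `global_optimum` and
# `fglob`, implements `fun`, and bumps `nfev` on every evaluation.
#
# class Sphere(Benchmark):
#     def __init__(self, dimensions=2):
#         Benchmark.__init__(self, dimensions)
#         self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))
#         self.global_optimum = [[0.0] * self.N]
#         self.fglob = 0.0
#
#     def fun(self, x):
#         self.nfev += 1
#         return float(np.sum(x ** 2))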
|
bots/stocks/dark_pool_shorts/pos.py | tehcoderer/GamestonkTerminal | 255 | 11198316 | <filename>bots/stocks/dark_pool_shorts/pos.py
import logging
import disnake
from bots import imps
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import lambda_long_number_format
from openbb_terminal.stocks.dark_pool_shorts import stockgrid_model
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def pos_command(sort="dpp_dollar", ascending: bool = False, num: int = 10):
"""Dark pool short position [Stockgrid]"""
# Debug user input
if imps.DEBUG:
logger.debug("dps-pos %s %s", sort, num)
# Check for argument
possible_sorts = ("sv", "sv_pct", "nsv", "nsv_dollar", "dpp", "dpp_dollar")
if sort not in possible_sorts:
raise Exception(f"The possible sorts are: {', '.join(possible_sorts)}")
if num < 0:
raise Exception("Number has to be above 0")
# Retrieve data
df = stockgrid_model.get_dark_pool_short_positions(sort, ascending)
if df.empty:
raise Exception("No available data found")
df = df.iloc[:num]
# Debug user output
if imps.DEBUG:
logger.debug(df.to_string())
# Output data
title = "Stocks: [Stockgrid] Dark Pool Short Position"
df = df.applymap(lambda x: lambda_long_number_format(x, 2))
df = df.drop(columns=["Date"])
formats = {
"Short Volume %": "{}%",
"Net Short Volume $": "${}",
"Dark Pools Position $": "${}",
}
for col, f in formats.items():
df[col] = df[col].map(lambda x: f.format(x)) # pylint: disable=W0640
df["Short Volume"] = df.apply(
lambda x: f"{x['Short Volume']} (<b>{x['Short Volume %']}</b>)", axis=1
)
df["Net Short Volume"] = df.apply(
lambda x: f"{x['Net Short Volume']:>9} (<b>{x['Net Short Volume $']:>9}</b>)",
axis=1,
)
df["Dark Pools Position"] = df.apply(
lambda x: f"{x['Dark Pools Position']:>9} (<b>{x['Dark Pools Position $']:>9}</b>)",
axis=1,
)
df = df.drop(
columns=["Short Volume %", "Net Short Volume $", "Dark Pools Position $"]
)
df.columns = [
"Ticker",
"Short Vol.",
"Net Short Vol.",
"DP Position",
]
df.set_index("Ticker", inplace=True)
dindex = len(df.index)
if dindex > 15:
embeds: list = []
# Output
i, i2, end = 0, 0, 15
df_pg, embeds_img, images_list = [], [], []
while i < dindex:
df_pg = df.iloc[i:end]
df_pg.append(df_pg)
fig = imps.plot_df(
df_pg,
fig_size=(720, (40 * dindex)),
col_width=[2, 4, 4, 4],
tbl_header=imps.PLT_TBL_HEADER,
tbl_cells=imps.PLT_TBL_CELLS,
font=imps.PLT_TBL_FONT,
row_fill_color=imps.PLT_TBL_ROW_COLORS,
paper_bgcolor="rgba(0, 0, 0, 0)",
)
fig.update_traces(cells=(dict(align=["center", "right"])))
imagefile = "dps-pos.png"
imagefile = imps.save_image(imagefile, fig)
if imps.IMAGES_URL or not imps.IMG_HOST_ACTIVE:
image_link = imps.multi_image(imagefile)
images_list.append(imagefile)
else:
image_link = imps.multi_image(imagefile)
embeds_img.append(
f"{image_link}",
)
embeds.append(
disnake.Embed(
title=title,
colour=imps.COLOR,
),
)
i2 += 1
i += 15
end += 15
# Author/Footer
for i in range(0, i2):
embeds[i].set_author(
name=imps.AUTHOR_NAME,
url=imps.AUTHOR_URL,
icon_url=imps.AUTHOR_ICON_URL,
)
embeds[i].set_footer(
text=imps.AUTHOR_NAME,
icon_url=imps.AUTHOR_ICON_URL,
)
i = 0
for i in range(0, i2):
embeds[i].set_image(url=embeds_img[i])
i += 1
embeds[0].set_footer(text=f"Page 1 of {len(embeds)}")
choices = [
disnake.SelectOption(label="Home", value="0", emoji="🟢"),
]
output = {
"view": imps.Menu,
"title": title,
"embed": embeds,
"choices": choices,
"embeds_img": embeds_img,
"images_list": images_list,
}
else:
fig = imps.plot_df(
df,
fig_size=(720, (40 * dindex)),
col_width=[2, 4, 4, 4],
tbl_header=imps.PLT_TBL_HEADER,
tbl_cells=imps.PLT_TBL_CELLS,
font=imps.PLT_TBL_FONT,
row_fill_color=imps.PLT_TBL_ROW_COLORS,
paper_bgcolor="rgba(0, 0, 0, 0)",
)
fig.update_traces(cells=(dict(align=["center", "right"])))
imagefile = imps.save_image("dps-pos.png", fig)
output = {
"title": title,
"imagefile": imagefile,
}
return output
|
lenstronomy/SimulationAPI/data_api.py | heather999/lenstronomy | 107 | 11198319 | from lenstronomy.SimulationAPI.observation_api import SingleBand
from lenstronomy.Data.imaging_data import ImageData
import lenstronomy.Util.util as util
import numpy as np
__all__ = ['DataAPI']
class DataAPI(SingleBand):
"""
This class is a wrapper of the general description of data in SingleBand() to translate those quantities into
configurations in the core lenstronomy Data modules to simulate images according to those quantities.
This class is meant to be an example of a wrapper. More possibilities in terms of PSF and data type
    options are available. Have a look at the specific modules if you are interested.
"""
def __init__(self, numpix, **kwargs_single_band):
"""
:param numpix: number of pixels per axis in the simulation to be modelled
:param kwargs_single_band: keyword arguments used to create instance of SingleBand class
"""
self.numpix = numpix
SingleBand.__init__(self, **kwargs_single_band)
@property
def data_class(self):
"""
creates a Data() instance of lenstronomy based on knowledge of the observation
:return: instance of Data() class
"""
data_class = ImageData(**self.kwargs_data)
return data_class
@property
def kwargs_data(self):
"""
:return: keyword arguments for ImageData class instance
"""
x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix = util.make_grid_with_coordtransform(
numPix=self.numpix, deltapix=self.pixel_scale, subgrid_res=1, left_lower=False, inverse=False)
        # CCD gain corrected exposure time to allow a direct Poisson estimate based on IID counts
scaled_exposure_time = self.flux_iid(1)
kwargs_data = {'image_data': np.zeros((self.numpix, self.numpix)), 'ra_at_xy_0': ra_at_xy_0,
'dec_at_xy_0': dec_at_xy_0,
'transform_pix2angle': Mpix2coord,
'background_rms': self.background_noise,
'exposure_time': scaled_exposure_time}
return kwargs_data
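    # Hedged usage sketch (the contents of `kwargs_band` are placeholders and
    # must match whatever SingleBand() accepts in this lenstronomy version;
    # only `numpix` is defined in this module):
    # >>> kwargs_band = {...}                  # single-band observation settings
    # >>> data_api = DataAPI(numpix=64, **kwargs_band)
    # >>> kwargs_data = data_api.kwargs_data   # blank image plus coordinate grid
    # >>> data_class = data_api.data_class     # lenstronomy ImageData instance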
|
torch/ao/sparsity/experimental/pruner/parametrization.py | Hacky-DH/pytorch | 60,067 | 11198320 | <filename>torch/ao/sparsity/experimental/pruner/parametrization.py
import torch
from torch import nn
from typing import Any, List
class PruningParametrization(nn.Module):
def __init__(self, original_outputs):
super().__init__()
self.original_outputs = set(range(original_outputs.item()))
        self.pruned_outputs = set()  # Will contain indices of outputs to prune
def forward(self, x):
valid_outputs = self.original_outputs - self.pruned_outputs
return x[list(valid_outputs)]
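# Worked example of the masking above (shapes are illustrative): with
# original_outputs == {0, 1, 2, 3} and pruned_outputs == {1}, forward()
# indexes a (4, in_features) weight with the remaining rows {0, 2, 3} and
# returns a (3, in_features) tensor, removing the pruned output channel.
# These modules are attached to a layer's weight via
# torch.nn.utils.parametrize.register_parametrization, which is how the
# surrounding pruner is expected to use them (not shown in this file).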
class ZeroesParametrization(nn.Module):
r"""Zero out pruned channels instead of removing.
E.g. used for Batch Norm pruning, which should match previous Conv2d layer."""
def __init__(self, original_outputs):
super().__init__()
self.original_outputs = set(range(original_outputs.item()))
        self.pruned_outputs = set()  # Will contain indices of outputs to prune
def forward(self, x):
x.data[list(self.pruned_outputs)] = 0
return x
class ActivationReconstruction:
def __init__(self, parametrization):
self.param = parametrization
def __call__(self, module, input, output):
max_outputs = self.param.original_outputs
pruned_outputs = self.param.pruned_outputs
valid_columns = list(max_outputs - pruned_outputs)
# get size of reconstructed output
sizes = list(output.shape)
sizes[1] = len(max_outputs)
# get valid indices of reconstructed output
indices: List[Any] = []
for size in output.shape:
indices.append(slice(0, size, 1))
indices[1] = valid_columns
reconstructed_tensor = torch.zeros(sizes)
reconstructed_tensor[indices] = output
return reconstructed_tensor
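# Illustrative behaviour of the hook above: if a layer originally had 4
# output channels and channel 1 is pruned, the wrapped module emits an
# (N, 3, ...) activation; the hook scatters it into a zero-filled
# (N, 4, ...) tensor so downstream shapes are preserved, with the pruned
# channel reading as all zeros.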
class BiasHook:
def __init__(self, parametrization, prune_bias):
self.param = parametrization
self.prune_bias = prune_bias
def __call__(self, module, input, output):
pruned_outputs = self.param.pruned_outputs
if getattr(module, '_bias', None) is not None:
bias = module._bias.data
if self.prune_bias:
bias[list(pruned_outputs)] = 0
# reshape bias to broadcast over output dimensions
idx = [1] * len(output.shape)
idx[1] = -1
bias = bias.reshape(idx)
output += bias
return output
|
colour/models/tests/test_igpgtg.py | rift-labs-developer/colour | 1,380 | 11198327 | # -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.models.igpgtg` module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.models import XYZ_to_IgPgTg, IgPgTg_to_XYZ
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['TestXYZ_to_IgPgTg', 'TestIgPgTg_to_XYZ']
class TestXYZ_to_IgPgTg(unittest.TestCase):
"""
Defines :func:`colour.models.igpgtg.XYZ_to_IgPgTg` definition unit tests
methods.
"""
def test_XYZ_to_IgPgTg(self):
"""
Tests :func:`colour.models.igpgtg.XYZ_to_IgPgTg` definition.
"""
np.testing.assert_almost_equal(
XYZ_to_IgPgTg(np.array([0.20654008, 0.12197225, 0.05136952])),
np.array([0.42421258, 0.18632491, 0.10689223]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_IgPgTg(np.array([0.14222010, 0.23042768, 0.10495772])),
np.array([0.50912820, -0.14804331, 0.11921472]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_IgPgTg(np.array([0.07818780, 0.06157201, 0.28099326])),
np.array([0.29095152, -0.04057508, -0.18220795]),
decimal=7)
def test_n_dimensional_XYZ_to_IgPgTg(self):
"""
Tests :func:`colour.models.igpgtg.XYZ_to_IgPgTg` definition
n-dimensional support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
IgPgTg = XYZ_to_IgPgTg(XYZ)
XYZ = np.tile(XYZ, (6, 1))
IgPgTg = np.tile(IgPgTg, (6, 1))
np.testing.assert_almost_equal(XYZ_to_IgPgTg(XYZ), IgPgTg, decimal=7)
XYZ = np.reshape(XYZ, (2, 3, 3))
IgPgTg = np.reshape(IgPgTg, (2, 3, 3))
np.testing.assert_almost_equal(XYZ_to_IgPgTg(XYZ), IgPgTg, decimal=7)
def test_domain_range_scale_XYZ_to_IgPgTg(self):
"""
Tests :func:`colour.models.igpgtg.XYZ_to_IgPgTg` definition domain and
range scale support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
IgPgTg = XYZ_to_IgPgTg(XYZ)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_IgPgTg(XYZ * factor), IgPgTg * factor, decimal=7)
@ignore_numpy_errors
def test_nan_XYZ_to_IgPgTg(self):
"""
Tests :func:`colour.models.igpgtg.XYZ_to_IgPgTg` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
XYZ_to_IgPgTg(XYZ)
class TestIgPgTg_to_XYZ(unittest.TestCase):
"""
Defines :func:`colour.models.igpgtg.IgPgTg_to_XYZ` definition unit tests
methods.
"""
def test_IgPgTg_to_XYZ(self):
"""
Tests :func:`colour.models.igpgtg.IgPgTg_to_XYZ` definition.
"""
np.testing.assert_almost_equal(
IgPgTg_to_XYZ(np.array([0.42421258, 0.18632491, 0.10689223])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
IgPgTg_to_XYZ(np.array([0.50912820, -0.14804331, 0.11921472])),
np.array([0.14222010, 0.23042768, 0.10495772]),
decimal=7)
np.testing.assert_almost_equal(
IgPgTg_to_XYZ(np.array([0.29095152, -0.04057508, -0.18220795])),
np.array([0.07818780, 0.06157201, 0.28099326]),
decimal=7)
def test_n_dimensional_IgPgTg_to_XYZ(self):
"""
Tests :func:`colour.models.igpgtg.IgPgTg_to_XYZ` definition
n-dimensional support.
"""
IgPgTg = np.array([0.42421258, 0.18632491, 0.10689223])
XYZ = IgPgTg_to_XYZ(IgPgTg)
IgPgTg = np.tile(IgPgTg, (6, 1))
XYZ = np.tile(XYZ, (6, 1))
np.testing.assert_almost_equal(IgPgTg_to_XYZ(IgPgTg), XYZ, decimal=7)
IgPgTg = np.reshape(IgPgTg, (2, 3, 3))
XYZ = np.reshape(XYZ, (2, 3, 3))
np.testing.assert_almost_equal(IgPgTg_to_XYZ(IgPgTg), XYZ, decimal=7)
def test_domain_range_scale_IgPgTg_to_XYZ(self):
"""
Tests :func:`colour.models.igpgtg.IgPgTg_to_XYZ` definition domain and
range scale support.
"""
IgPgTg = np.array([0.42421258, 0.18632491, 0.10689223])
XYZ = IgPgTg_to_XYZ(IgPgTg)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
IgPgTg_to_XYZ(IgPgTg * factor), XYZ * factor, decimal=7)
@ignore_numpy_errors
def test_nan_IgPgTg_to_XYZ(self):
"""
Tests :func:`colour.models.igpgtg.IgPgTg_to_XYZ` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
IgPgTg = np.array(case)
IgPgTg_to_XYZ(IgPgTg)
if __name__ == '__main__':
unittest.main()
|
WebMirror/management/rss_parser_funcs/feed_parse_extractAvaritiakun.py | fake-name/ReadableWebProxy | 193 | 11198329 | def extractAvaritiakun(item):
"""
Avaritia-kun
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
tagmap = [
('Divine protection', 'The divine protection that I got was a power that increase a girl level with semen', 'translated'),
('Rose', 'Rose in a Yuri Field', 'translated'),
('Rose and lily field', 'Rose in a Yuri Field', 'translated'),
('Perfect Sex', 'Perfect Sex', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
pinball/scheduler/schedule.py | robinmaben/pinball | 1,143 | 11198339 | # Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of schedule metadata included in schedule tokens.
A Schedule defines when something should run. (The thing is abstract; a
WorkflowSchedule runs workflows.) It has a starting run time, and is
(optionally) repeated periodically. A schedule also has an OverrunPolicy that
defines how it should behave if a previous run didn't finish by the time the
thing is run again through this schedule.
"""
import abc
import datetime
import math
import time
from pinball.config.pinball_config import PinballConfig
from pinball.config.utils import get_log
from pinball.config.utils import timestamp_to_str
from pinball.master.thrift_lib.ttypes import ModifyRequest
from pinball.parser.config_parser import ParserCaller
from pinball.parser.utils import load_parser_with_caller
from pinball.persistence.token_data import TokenData
from pinball.scheduler.overrun_policy import OverrunPolicy
from pinball.ui.data import Status
from pinball.ui.data_builder import DataBuilder
from pinball.workflow.name import Name
from pinball.workflow.signaller import Signal
from pinball.workflow.signaller import Signaller
from pinball.workflow.utils import load_path
__author__ = '<NAME>, <NAME>'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = ['<NAME>', '<NAME>']
__license__ = 'Apache'
__version__ = '2.0'
LOG = get_log('pinball.scheduler.schedule')
class Schedule(TokenData):
"""Parent class for specialized schedule types."""
__metaclass__ = abc.ABCMeta
def __init__(self, next_run_time=None, recurrence_seconds=None,
overrun_policy=OverrunPolicy.SKIP):
self.next_run_time = next_run_time
self.recurrence_seconds = recurrence_seconds
self.overrun_policy = overrun_policy
def advance_next_run_time(self):
"""Advance the scheduled run time beyond the current time."""
now = time.time()
if self.next_run_time <= now:
# Set next run time to the lowest timestamp based off
# recurrence that is greater than the current time.
delta_runs = math.ceil((now - self.next_run_time) /
self.recurrence_seconds)
LOG.info('advancing the next run time now %f '
'next_run_time %d recurrence_seconds %d delta_runs %f',
now, self.next_run_time,
self.recurrence_seconds, delta_runs)
self.next_run_time += int(delta_runs * self.recurrence_seconds)
if now == self.next_run_time:
self.next_run_time += self.recurrence_seconds
assert self.next_run_time > now
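    # Worked example of the advancement above (timestamps are illustrative):
    # with next_run_time=100, recurrence_seconds=60 and now=250, delta_runs =
    # ceil((250 - 100) / 60) = 3, so next_run_time becomes 100 + 3 * 60 = 280,
    # the earliest scheduled slot strictly after `now`.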
def corresponds_to(self, schedule):
"""Assess correspondence to another schedule.
Schedules correspond to each other if their next run times are shifted
        by a whole multiple of the recurrence seconds, and all other fields are
the same.
Args:
schedule: The schedule to compare with.
Returns:
True iff the schedules correspond to each other.
"""
if (self.overrun_policy != schedule.overrun_policy or
self.recurrence_seconds != schedule.recurrence_seconds):
return False
delta = self.next_run_time - schedule.next_run_time
delta_multiplicator = 1. * delta / self.recurrence_seconds
return delta_multiplicator == int(delta_multiplicator)
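    # Illustrative check: two schedules with matching policy and
    # recurrence_seconds=60 whose next run times differ by 120 seconds give
    # delta_multiplicator = 2.0 == int(2.0), so they correspond; a difference
    # of 90 seconds gives 1.5 != 1 and they do not.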
@abc.abstractmethod
def run(self, emailer, store):
"""Run the routine pointed to by this schedule."""
return None
@abc.abstractmethod
def is_running(self, store):
"""Checks if the previous run is still active.
Args:
store: The store to query for status.
Returns:
True iff the run is running.
"""
return False
@abc.abstractmethod
def is_failed(self, store):
"""Checks if the most recent run has failed.
Args:
store: The store to query for status.
Returns:
True iff the run has failed.
"""
return False
@abc.abstractmethod
def abort_running(self, client, store):
"""Abort all active runs.
Args:
client: The client to communicate with the master.
store: The store to retrieve runs status.
Returns:
True iff the workflow has been aborted.
"""
return False
class WorkflowSchedule(Schedule):
"""Schedule for a workflow."""
def __init__(
self,
next_run_time=None,
recurrence_seconds=None,
overrun_policy=OverrunPolicy.SKIP,
parser_params=PinballConfig.PARSER_PARAMS,
workflow=None,
emails=None,
max_running_instances=None):
Schedule.__init__(self, next_run_time, recurrence_seconds,
overrun_policy)
self.parser_params = parser_params
self.workflow = workflow
self.emails = emails if emails is not None else []
self.max_running_instances = max_running_instances if max_running_instances \
else PinballConfig.DEFAULT_MAX_WORKFLOW_RUNNING_INSTANCES
def __str__(self):
if self.next_run_time:
next_run_time = timestamp_to_str(self.next_run_time)
else:
next_run_time = str(self.next_run_time)
if self.recurrence_seconds:
delta = datetime.timedelta(seconds=self.recurrence_seconds)
recurrence = str(delta)
else:
recurrence = str(self.recurrence_seconds)
if self.overrun_policy is not None:
overrun_policy = OverrunPolicy.to_string(self.overrun_policy)
else:
overrun_policy = str(self.overrun_policy)
return ('WorkflowSchedule(next_run_time=%s, recurrence=%s, '
'overrun_policy=%s, parser_params=%s, workflow=%s, '
'email=%s, max_running_instances=%s)' % (next_run_time,
recurrence,
overrun_policy,
self.parser_params,
self.workflow,
self.emails,
str(self.max_running_instances)))
def __repr__(self):
return self.__str__()
def advance_next_run_time(self):
# TODO(pawel): remove after debugging.
LOG.info('advancing the next run time for workflow %s', self.workflow)
super(WorkflowSchedule, self).advance_next_run_time()
def corresponds_to(self, schedule):
if (self.parser_params != schedule.parser_params or
self.workflow != schedule.workflow or
self.emails != schedule.emails or
self.max_running_instances != schedule.max_running_instances):
return False
return super(WorkflowSchedule, self).corresponds_to(schedule)
def run(self, emailer, store):
if not self._check_workflow_instances(emailer, self.workflow, store):
LOG.warn('too many instances running for workflow %s', self.workflow)
return None
config_parser = load_parser_with_caller(
PinballConfig.PARSER,
self.parser_params,
ParserCaller.SCHEDULE
)
workflow_tokens = config_parser.get_workflow_tokens(self.workflow)
if not workflow_tokens:
LOG.error('workflow %s not found', self.workflow)
return None
result = ModifyRequest()
result.updates = workflow_tokens
assert result.updates
token = result.updates[0]
name = Name.from_job_token_name(token.name)
if not name.instance:
name = Name.from_event_token_name(token.name)
LOG.info('exporting workflow %s instance %s. Its tokens are under %s',
name.workflow, name.instance, name.get_instance_prefix())
return result
def is_running(self, store):
data_builder = DataBuilder(store, use_cache=True)
workflow_data = data_builder.get_workflow(self.workflow)
if not workflow_data:
return False
return workflow_data.status == Status.RUNNING
def is_failed(self, store):
data_builder = DataBuilder(store, use_cache=True)
workflow_data = data_builder.get_workflow(self.workflow)
if not workflow_data:
return False
return (workflow_data.status != Status.RUNNING and
workflow_data.status != Status.SUCCESS)
def _get_running_instances(self, store):
"""Find running instances of the workflow.
Args:
            store: The store to query for workflow instance status.
Returns:
List of running workflow instance names.
"""
data_builder = DataBuilder(store, use_cache=True)
instances = data_builder.get_instances(self.workflow)
result = []
for instance in instances:
if instance.status == Status.RUNNING:
result.append(instance.instance)
return result
def abort_running(self, client, store):
running_instances = self._get_running_instances(store)
for instance in running_instances:
signaller = Signaller(client,
workflow=self.workflow,
instance=instance)
signaller.set_action(Signal.ABORT)
if not signaller.is_action_set(Signal.ABORT):
return False
return True
def _check_workflow_instances(self, emailer, workflow_name, store):
"""Check the number of running instances of the workflow.
        In addition to the return value, this also sends a warning email if too
        many instances are running for the given workflow.
Args:
emailer: The email sender.
workflow_name: Name of the workflow.
store: The store to retrieve runs status.
Returns:
            False if the number of running instances exceeds the max_running_instances setting;
Otherwise, True.
"""
running_instances = self._get_running_instances(store)
if self.max_running_instances and len(running_instances) > self.max_running_instances:
LOG.warn('Too many (%s) instances running for workflow %s !',
len(running_instances), workflow_name)
if emailer:
emailer.send_too_many_running_instances_warning_message(self.emails,
workflow_name,
len(running_instances),
self.max_running_instances)
else:
LOG.warn('Emailer is not set! Failed to send too many instances running warning '
'email for workflow %s', workflow_name)
return False
return True
|
mseg/utils/mask_utils.py | mintar/mseg-api | 213 | 11198344 | <reponame>mintar/mseg-api
#!/usr/bin/python3
import cv2
import math
import numpy as np
import matplotlib.pyplot as plt
import pdb
from PIL import Image, ImageDraw
import torch
from typing import List, Mapping, Optional, Tuple
from mseg.utils.conn_comp import scipy_conn_comp
from mseg.utils.colormap import colormap
from mseg.utils.cv2_utils import form_hstacked_imgs, add_text_cv2, form_vstacked_imgs, cv2_write_rgb
from mseg.utils.resize_util import resize_img_by_short_side
from mseg.utils.dir_utils import create_leading_fpath_dirs
NUM_PX_PER_ROW = 50
NUM_PX_PER_COL = 400
COLORMAP_OFFSET = 40
MIN_DISCERNABLE_RES_FOR_TEXT = 200
LIGHT_BLUE = np.array([153, 221, 255])
LIME_GREEN = np.array([57, 255, 20])
def get_mean_mask_location(mask):
"""Given a binary mask, find the mean location for entries equal to 1.
Args:
mask
Returns:
        coordinate of mean pixel location as (row, column), i.e. (y, x)
"""
coords = np.vstack(np.where(mask == 1)).T
return np.mean(coords, axis=0).astype(np.int32)
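# Illustrative behaviour of the helper above: for a mask whose ones occupy
# rows 2-3 and columns 4-5, the stacked coordinates average to (2.5, 4.5) and
# the int32 cast truncates this to (2, 4), i.e. (row, column) order.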
def find_max_cardinality_mask(mask_list: List[np.ndarray]):
"""
    Return the index of the mask with the largest cardinality (most nonzero entries) in the list
"""
mask_cardinalities = [np.sum(mask) for mask in mask_list]
return np.argmax(np.array(mask_cardinalities))
def search_jittered_location_in_mask(mean_x: float, mean_y: float, conncomp: np.ndarray) -> Tuple[int, int]:
"""
For visualizing classnames in an image.
    When placing text over a mask, the mean pixel location of a nonconvex
    region may not fall inside the mask, so we jitter the location until we
    find a valid location within the mask.
"""
H, W = conncomp.shape
num_attempts = 100
for i in range(num_attempts):
# grow the jitter up to half width of image at end
SCALE = ((i + 1) / num_attempts) * (W / 2)
dx, dy = np.random.randn(2) * SCALE
# print(f'On iter {i}, mul noise w/ {SCALE} to get dx,dy={dx},{dy}')
x = int(mean_x + dx)
y = int(mean_y + dy)
# Enforce validity
x = max(0, x)
x = min(W - 1, x)
y = max(0, y)
y = min(H - 1, y)
if conncomp[y, x] != 1:
continue
else:
return x, y
return mean_x, mean_y
def save_classnames_in_image_sufficientpx(
rgb_img: np.ndarray,
label_img: np.ndarray,
id_to_class_name_map: Mapping[int, str],
font_color=(0, 0, 0),
save_to_disk: bool = False,
save_fpath: str = "",
min_conncomp_px: int = 4000,
font_scale: int = 1,
):
"""
Write a classname over each connected component of a label
map as long as the connected component has a sufficiently
large number of pixels (specified as argument).
Args:
rgb_img: Numpy array (H,W,3) representing RGB image
label_img: Numpy array (H,W) representing label map
id_to_class_name_map: mapping from class ID to classname
font_color: 3-tuple representing RGB font color
save_to_disk: whether to save image to disk
save_fpath: absolute file path
min_conncomp_px: minimum number of pixels to justify
placing a text label over connected component
font_scale: scale of font text
Returns:
rgb_img: Numpy array (H,W,3) with embedded classanmes
"""
H, W, C = rgb_img.shape
class_to_conncomps_dict = scipy_conn_comp(label_img)
for class_idx, conncomps_list in class_to_conncomps_dict.items():
for conncomp in conncomps_list:
if conncomp.sum() < min_conncomp_px:
continue
text = id_to_class_name_map[class_idx]
y, x = get_mean_mask_location(conncomp)
x -= 55 # move the text so approx. centered over mask.
x = max(0, x)
x = min(W - 1, x)
# jitter location if nonconvex object mean not within its mask
if conncomp[y, x] != 1:
x, y = search_jittered_location_in_mask(x, y, conncomp)
# print(f'Class idx: {class_idx}: (x,y)=({x},{y})')
rgb_img = add_text_cv2(
rgb_img, text, coords_to_plot_at=(x, y), font_color=font_color, font_scale=font_scale, thickness=2
)
if save_to_disk:
cv2_write_rgb(save_fpath, rgb_img)
return rgb_img
def save_classnames_in_image_maxcardinality(
rgb_img, label_img, id_to_class_name_map, font_color=(0, 0, 0), save_to_disk: bool = False, save_fpath: str = ""
) -> np.ndarray:
"""
Args:
rgb_img
label_img
id_to_class_name_map: Mapping[int,str]
Returns:
rgb_img
"""
H, W, C = rgb_img.shape
class_to_conncomps_dict = scipy_conn_comp(label_img)
for class_idx, conncomps_list in class_to_conncomps_dict.items():
mask_idx = find_max_cardinality_mask(conncomps_list)
maxsz_conncomp = conncomps_list[mask_idx]
text = id_to_class_name_map[class_idx]
y, x = get_mean_mask_location(maxsz_conncomp)
x -= 55
x = max(0, x)
x = min(W - 1, x)
# print(f'Class idx: {class_idx}: (x,y)=({x},{y})')
rgb_img = add_text_cv2(
rgb_img, text, coords_to_plot_at=(x, y), font_color=font_color, font_scale=1, thickness=2
)
if save_to_disk:
cv2_write_rgb(save_fpath, rgb_img)
return rgb_img
def form_mask_triple_embedded_classnames(
rgb_img: np.ndarray,
label_img: np.ndarray,
id_to_class_name_map: Mapping[int, str],
save_fpath: str,
save_to_disk: bool = False,
) -> np.ndarray:
"""
Args:
rgb_img:
label_img:
id_to_class_name_map
save_fpath
save_to_disk
Returns:
Array, representing 3 horizontally concatenated images: from left-to-right, they are
RGB, RGB+Semantic Masks, Semantic Masks
"""
rgb_with_mask = convert_instance_img_to_mask_img(label_img, rgb_img.copy())
# or can do max cardinality conn comp of each class
rgb2 = save_classnames_in_image_sufficientpx(rgb_with_mask, label_img, id_to_class_name_map)
mask_img = convert_instance_img_to_mask_img(label_img, img_rgb=None)
rgb3 = save_classnames_in_image_sufficientpx(mask_img, label_img, id_to_class_name_map)
return form_hstacked_imgs([rgb_img, rgb2, rgb3], save_fpath, save_to_disk)
def write_six_img_grid_w_embedded_names(
rgb_img: np.ndarray,
pred: np.ndarray,
label_img: np.ndarray,
id_to_class_name_map: Mapping[int, str],
save_fpath: str,
) -> None:
"""
Create a 6-image tile grid with the following structure:
------------------------------------------------------------
RGB Image | Blended RGB+GT Label Map | GT Label Map
------------------------------------------------------------
RGB Image | Blended RGB+Pred Label Map | Predicted Label Map
------------------------------------------------------------
We embed classnames directly into the predicted and ground
truth label maps, instead of using a colorbar.
Args:
rgb_img:
pred: predicted label map
label_img
id_to_class_name_map
save_fpath
"""
assert label_img.ndim == 2
assert pred.ndim == 2
assert rgb_img.ndim == 3
label_hgrid = form_mask_triple_embedded_classnames(
rgb_img, label_img, id_to_class_name_map, save_fpath="dummy.jpg", save_to_disk=False
)
pred_hgrid = form_mask_triple_embedded_classnames(
rgb_img, pred, id_to_class_name_map, save_fpath="dummy.jpg", save_to_disk=False
)
vstack_img = form_vstacked_imgs(img_list=[label_hgrid, pred_hgrid], vstack_save_fpath=save_fpath, save_to_disk=True)
def map_semantic_img_fast_pytorch(semantic_img: np.ndarray, label_mapping_arr: np.ndarray) -> np.ndarray:
"""Quickly remap a semantic labelmap (integers) to a new taxonomy.
TODO: may need to make a copy here, if it won't make one for us.
Args:
semantic_img: Pytorch CPU long tensor representing (M,N) matrix,
with Torch Tensor type Long (int64)
label_mapping_arr: Pytorch CPU long tensor representing (K+1,1) array, where
K represents the number of classes. With Torch Tensor type Long (int64)
Returns:
img: with Torch Tensor type Long (int64)
"""
return label_mapping_arr[semantic_img.squeeze()].squeeze()
def form_label_mapping_array_pytorch(label_mapping_dict: Mapping[int, int]) -> np.ndarray:
"""
Args:
label_mapping_dict: dictionary from int to int, from original class ID to a new class ID.
This is NOT the id_to_class_name dictionary.
Returns:
label_mapping_arr, with Torch Tensor type Long (int64)
"""
keys_max = max(list(label_mapping_dict.keys()))
arr_len = keys_max + 1
label_mapping_arr = torch.zeros(arr_len).type(torch.LongTensor)
for k, v in label_mapping_dict.items():
label_mapping_arr[k] = v
return label_mapping_arr
def map_semantic_img_fast(semantic_img: np.ndarray, label_mapping_arr: np.ndarray) -> np.ndarray:
"""
Args:
semantic_img:
label_mapping_arr:
Returns:
img:
"""
return label_mapping_arr[semantic_img.squeeze()].squeeze()
def form_label_mapping_array(label_mapping_dict: Mapping[int, int]) -> np.ndarray:
"""
Args:
label_mapping_dict: dictionary from int to int, from original class ID to a new class ID.
This is NOT the id_to_class_name dictionary.
    Returns:
        label_mapping_arr, with dtype (np.uint8 or np.uint16) chosen
            automatically based on the largest mapped value
"""
v_max = max(list(label_mapping_dict.values()))
keys_max = max(list(label_mapping_dict.keys()))
arr_len = keys_max + 1
UINT8_MAX = np.iinfo("uint8").max # 255 is uint8 max
UINT16_MAX = np.iinfo("uint16").max # 65535 is uint16 max
if v_max > UINT8_MAX and v_max <= UINT16_MAX:
dtype = np.uint16
elif v_max <= UINT8_MAX:
dtype = np.uint8
else:
print("Type wont fit, quitting...")
quit()
label_mapping_arr = np.zeros(arr_len, dtype=dtype)
for k, v in label_mapping_dict.items():
label_mapping_arr[k] = v
return label_mapping_arr
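# Worked example of the lookup-table remapping implemented by
# form_label_mapping_array and map_semantic_img_fast (values are illustrative):
# >>> arr = form_label_mapping_array({0: 0, 1: 5, 2: 7})
# >>> arr
# array([0, 5, 7], dtype=uint8)
# >>> map_semantic_img_fast(np.array([[2, 1], [0, 2]], dtype=np.uint8), arr)
# array([[7, 5],
#        [0, 7]], dtype=uint8)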
def rgb_img_to_obj_cls_img(label_img_rgb: np.ndarray, dataset_ordered_colors: np.ndarray) -> np.ndarray:
"""Any unmapped pixels (given no corresponding RGB values) will default to zero'th-class.
Args:
label_img_rgb: Numpy array of shape (M,N,3)
dataset_ordered_colors: Numpy array of shape (K,3) with RGB values for K classes
Returns:
object_cls_img: grayscale image
"""
object_cls_img = np.zeros((label_img_rgb.shape[0], label_img_rgb.shape[1]), dtype=np.uint8)
for i, color in enumerate(dataset_ordered_colors):
indices = np.where(np.all(label_img_rgb == color, axis=-1))
object_cls_img[indices[0], indices[1]] = i
return object_cls_img
def save_mask_triple_isolated_mask(
rgb_img: np.ndarray, label_img: np.ndarray, id_to_class_name_map, class_name: str, save_fpath: str
) -> None:
"""Save a triplet of images to disk (RGB image, label map, and a blended version of the two).
Args:
rgb_img:
label_img:
id_to_class_name_map:
class_name:
save_fpath:
"""
for id, proposed_class_name in id_to_class_name_map.items():
if class_name == proposed_class_name:
break
isolated_rgb_mask = np.ones_like(rgb_img) * 255
y, x = np.where(label_img == id)
isolated_rgb_mask[y, x, :] = rgb_img.copy()[y, x, :]
rgb_with_mask = convert_instance_img_to_mask_img(label_img, rgb_img.copy())
mask_img = convert_instance_img_to_mask_img(label_img)
concat_img = form_hstacked_imgs(
[rgb_img, isolated_rgb_mask, rgb_with_mask, mask_img], save_fpath, save_to_disk=False
)
cv2.imwrite(save_fpath, concat_img[:, :, ::-1])
def save_img_with_blendedmaskimg(
rgb_img: np.ndarray, label_img: np.ndarray, save_fpath: str, save_to_disk: bool = False
) -> None:
"""
Args:
rgb_img:
label_img:
save_fpath
save_to_disk
Returns:
Array, representing 3 horizontally concatenated images: from left-to-right, they are
RGB, RGB+Semantic Masks, Semantic Masks
"""
rgb_with_mask = highlight_binary_mask(label_img, rgb_img.copy())
return form_hstacked_imgs([rgb_img, rgb_with_mask], save_fpath, save_to_disk)
def save_binary_mask_triple(
rgb_img: np.ndarray, label_img: np.ndarray, save_fpath: str, save_to_disk: bool = False
) -> np.ndarray:
"""Currently mask img background is light-blue. Instead, could set it to white. np.array([255,255,255])
Args:
rgb_img:
label_img:
save_fpath
save_to_disk
Returns:
Array, representing 3 horizontally concatenated images: from left-to-right, they are
RGB, RGB+Semantic Masks, Semantic Masks
"""
img_h, img_w, _ = rgb_img.shape
rgb_with_mask = highlight_binary_mask(label_img, rgb_img.copy())
blank_img = np.ones((img_h, img_w, 3), dtype=np.uint8) * 255
y, x = np.where(label_img == 0)
blank_img[y, x, :] = LIME_GREEN # LIGHT_BLUE
mask_img = highlight_binary_mask(label_img, blank_img)
return form_hstacked_imgs([rgb_img, rgb_with_mask, mask_img], save_fpath, save_to_disk)
def save_binary_mask_double(
rgb_img: np.ndarray, label_img: np.ndarray, save_fpath: str, save_to_disk: bool = False
) -> np.ndarray:
"""Currently blended mask img background is lime green.
Args:
rgb_img:
label_img:
save_fpath
save_to_disk
Returns:
Array, representing 2 horizontally concatenated images: from left-to-right, they are
RGB, RGB+Semantic Masks
"""
img_h, img_w, _ = rgb_img.shape
lime_green_rgb = vis_mask(rgb_img.copy(), 1 - label_img, LIME_GREEN, alpha=0.2)
rgb_with_mask = highlight_binary_mask(label_img, lime_green_rgb)
return form_hstacked_imgs([rgb_img, rgb_with_mask], save_fpath, save_to_disk)
def highlight_binary_mask(label_mask: np.ndarray, img_rgb: Optional[np.ndarray] = None) -> np.ndarray:
"""
Given a grayscale image where intensities denote instance IDs (same intensity denotes
belonging to same instance), convert this to an RGB image where all pixels corresponding
to the same instance get the same color. Note that two instances may not have unique colors,
do to a finite-length colormap.
Args:
instance_img: Numpy array of shape (M,N), representing grayscale image, in [0,255]
img_rgb: Numpy array representing RGB image, possibly blank, in [0,255]
Returns:
img_rgb:
"""
img_h, img_w = label_mask.shape
if img_rgb is None:
img_rgb = np.ones((img_h, img_w, 3), dtype=np.uint8) * 255
assert label_mask.dtype in [
np.uint8,
np.uint16,
np.int32,
np.uint32,
np.int64,
], "Label map is not composed of integers."
assert img_rgb.dtype in [np.uint8, np.uint16]
our_colormap = colormap(rgb=True)
# np.unique will always sort the values
both01 = np.allclose(np.unique(label_mask), np.array([0, 1]))
all1 = np.allclose(np.unique(label_mask), np.array([1]))
assert both01 or all1
# 40 is blue, 50 is pink
col = np.array([255, 0, 255], dtype=np.uint8)
img_rgb = vis_mask(img_rgb, label_mask, col, alpha=0.4)
return img_rgb
def save_pred_vs_label_7tuple(
img_rgb: np.ndarray,
pred_img: np.ndarray,
label_img: np.ndarray,
id_to_class_name_map: Mapping[int, str],
save_fpath: str,
) -> None:
"""7-tuple consists of
(1-3) rgb mask 3-sequence for label,
(4-6) rgb mask 3-sequence for predictions,
(7) color palette
Args:
img_rgb
pred_img
label_img
id_to_class_name_map
save_fpath
"""
img_h, img_w, _ = img_rgb.shape
assert pred_img.shape == (img_h, img_w)
assert label_img.shape == (img_h, img_w)
if min(img_h, img_w) < MIN_DISCERNABLE_RES_FOR_TEXT:
save_pred_vs_label_7tuple(
img_rgb=resize_img_by_short_side(
img_rgb.copy(), short_side_len=MIN_DISCERNABLE_RES_FOR_TEXT, img_type="rgb"
),
pred_img=resize_img_by_short_side(
pred_img.copy(), short_side_len=MIN_DISCERNABLE_RES_FOR_TEXT, img_type="label"
),
label_img=resize_img_by_short_side(
label_img.copy(), short_side_len=MIN_DISCERNABLE_RES_FOR_TEXT, img_type="label"
),
id_to_class_name_map=id_to_class_name_map,
save_fpath=save_fpath.replace(".png", "_upsample.png"),
)
NUM_HSTACKED_IMGS = 3
hstack_img1 = form_mask_triple(img_rgb, label_img, save_fpath, save_to_disk=False)
hstack_img2 = form_mask_triple(img_rgb, pred_img, save_fpath, save_to_disk=False)
vstack_img1 = np.vstack([hstack_img1, hstack_img2])
save_dir = "/".join(save_fpath.split("/")[:-1])
present_color_ids = np.union1d(np.unique(label_img), np.unique(pred_img))
num_present_colors = len(present_color_ids)
max_colors_per_col = int(math.ceil(num_present_colors / NUM_HSTACKED_IMGS))
palette_img = form_contained_classes_color_guide(
present_color_ids, id_to_class_name_map, "", "", save_to_disk=False, max_colors_per_col=max_colors_per_col
)
vstack_img2 = vstack_img_with_palette(vstack_img1, palette_img)
save_fpath = save_fpath.replace(".png", "_pred_labels_palette.png")
cv2.imwrite(save_fpath, vstack_img2[:, :, ::-1])
def save_pred_vs_label_4tuple(
img_rgb: np.ndarray, label_img: np.ndarray, id_to_class_name_map: Mapping[int, str], save_fpath: str
) -> None:
"""7-tuple consists of
(1-3) rgb mask 3-sequence for label or predictions
(4) color palette
Args:
img_rgb
label_img
id_to_class_name_map
save_fpath
"""
img_h, img_w, _ = img_rgb.shape
assert label_img.shape == (img_h, img_w)
if min(img_h, img_w) < MIN_DISCERNABLE_RES_FOR_TEXT:
save_pred_vs_label_4tuple(
img_rgb=resize_img_by_short_side(
img_rgb.copy(), short_side_len=MIN_DISCERNABLE_RES_FOR_TEXT, img_type="rgb"
),
label_img=resize_img_by_short_side(
label_img.copy(), short_side_len=MIN_DISCERNABLE_RES_FOR_TEXT, img_type="label"
),
id_to_class_name_map=id_to_class_name_map,
save_fpath=save_fpath.replace(".png", "_upsample.png"),
)
NUM_HSTACKED_IMGS = 3
hstack_img = form_mask_triple(img_rgb, label_img, save_fpath, save_to_disk=False)
save_dir = "/".join(save_fpath.split("/")[:-1])
present_color_ids = np.unique(label_img)
num_present_colors = len(present_color_ids)
max_colors_per_col = int(math.ceil(num_present_colors / NUM_HSTACKED_IMGS))
palette_img = form_contained_classes_color_guide(
present_color_ids, id_to_class_name_map, "", "", save_to_disk=False, max_colors_per_col=max_colors_per_col
)
vstack_img2 = vstack_img_with_palette(hstack_img, palette_img)
save_fpath = save_fpath.replace(".png", "_pred_labels_palette.png")
cv2.imwrite(save_fpath, vstack_img2[:, :, ::-1])
def vstack_img_with_palette(top_img: np.ndarray, palette_img: np.ndarray) -> np.ndarray:
"""Vertically stack an image and a palette image, placing the palette image below it.
Args:
top_img
palette_img
Returns:
vstack_img
"""
img_n_rows = top_img.shape[0]
palette_n_rows = palette_img.shape[0]
img_n_cols = top_img.shape[1]
palette_n_cols = palette_img.shape[1]
fx = img_n_cols / palette_n_cols
fy = fx
rsz_cols = int(np.round(fx * palette_n_cols))
rsz_rows = int(np.round(fy * palette_n_rows))
rsz_palette_img = cv2.resize(palette_img, dsize=(rsz_cols, rsz_rows), interpolation=cv2.INTER_NEAREST)
concat_rows = img_n_rows + rsz_rows
vstack_img = np.zeros((concat_rows, img_n_cols, 3), dtype=np.uint8)
vstack_img[:img_n_rows, :, :] = top_img
vstack_img[img_n_rows:, :, :] = rsz_palette_img
return vstack_img
def save_mask_triple_with_color_guide(
img_rgb: np.ndarray,
label_img: np.ndarray,
id_to_class_name_map: Mapping[int, str],
fname_stem: str,
save_dir: str,
save_fpath: str,
) -> None:
"""
Args:
img_rgb: Array representing 3-channel image in RGB order
label_img: Array representing grayscale image, where intensities correspond to semantic classses
id_to_class_name_map: dictionary that maps a grayscale intensity to a class name
fname_stem: string, representing unique name for image, e.g. `coco_40083bx` for `coco_40083bx.png`
save_dir: string, dir where to save output image, e.g. /my/save/directory
save_fpath: string, representing full absolute path to where image will be saved, e.g.
/my/save/directory/coco_40083bx.png
"""
# for every 10 classes, save a new image
palette_img = form_contained_classes_color_guide(
label_img, id_to_class_name_map, fname_stem, save_dir, save_to_disk=False
)
hstack_img = form_mask_triple(img_rgb, label_img, save_fpath, save_to_disk=False)
rgb_plus_palette_img = hstack_img_with_palette(hstack_img, palette_img)
save_fpath = save_fpath.replace(".png", "_concat.png")
cv2.imwrite(save_fpath, rgb_plus_palette_img[:, :, ::-1])
def hstack_img_with_palette(left_img: np.ndarray, palette_img: np.ndarray) -> np.ndarray:
"""Horizontally stack a left image with a palette image on the right."""
img_n_rows = left_img.shape[0]
palette_n_rows = palette_img.shape[0]
img_n_cols = left_img.shape[1]
palette_n_cols = palette_img.shape[1]
fy = img_n_rows / palette_n_rows
fx = fy
rsz_cols = int(np.round(fx * palette_n_cols))
rsz_rows = int(np.round(fy * palette_n_rows))
rsz_palette_img = cv2.resize(palette_img, dsize=(rsz_cols, rsz_rows), interpolation=cv2.INTER_NEAREST)
concat_cols = img_n_cols + rsz_cols
hstack_img = np.zeros((img_n_rows, concat_cols, 3), dtype=np.uint8)
hstack_img[:, :img_n_cols, :] = left_img
hstack_img[:, img_n_cols:, :] = rsz_palette_img
return hstack_img
def form_contained_classes_color_guide(
label_img: np.ndarray,
id_to_class_name_map: Mapping[int, str],
fname_stem: str,
save_dir: str,
save_to_disk: bool = True,
max_colors_per_col: int = 10,
) -> np.ndarray:
"""
Write out an image explaining the classes inside an image.
Args:
label_img
id_to_class_name_map
fname_stem
save_dir
Returns:
palette_img: Array with cells colored with class color from palette
"""
ids_present = np.unique(label_img)
num_cols = math.ceil(len(ids_present) / max_colors_per_col)
num_rows = max_colors_per_col
palette_img = np.zeros((NUM_PX_PER_ROW * num_rows, NUM_PX_PER_COL * num_cols, 3), dtype=np.uint8)
for i, labelid in enumerate(ids_present):
col_idx = i // max_colors_per_col
row_idx = i % max_colors_per_col
class_name = id_to_class_name_map[labelid]
id_img = np.ones((NUM_PX_PER_ROW, NUM_PX_PER_COL), dtype=np.uint16) * labelid
blank_img = 255 * np.ones((NUM_PX_PER_ROW, NUM_PX_PER_COL, 3), dtype=np.uint8)
blank_img = add_text_cv2(blank_img, str(class_name))
color_img = convert_instance_img_to_mask_img(id_img, blank_img)
# vertical pixel start
v_start = row_idx * NUM_PX_PER_ROW
v_end = (row_idx + 1) * NUM_PX_PER_ROW
# horizontal pixel start
h_start = NUM_PX_PER_COL * col_idx
h_end = h_start + NUM_PX_PER_COL
palette_img[v_start:v_end, h_start:h_end, :] = color_img
palette_save_fpath = f"{save_dir}/{fname_stem}_colors.png"
if save_to_disk:
cv2.imwrite(palette_save_fpath, palette_img[:, :, ::-1])
return palette_img
def form_mask_triple(
rgb_img: np.ndarray, label_img: np.ndarray, save_fpath: str, save_to_disk: bool = False
) -> np.ndarray:
"""
Args:
rgb_img:
label_img:
save_fpath
save_to_disk
Returns:
Array, representing 3 horizontally concatenated images: from left-to-right, they are
RGB, RGB+Semantic Masks, Semantic Masks
"""
rgb_with_mask = convert_instance_img_to_mask_img(label_img, rgb_img.copy())
mask_img = convert_instance_img_to_mask_img(label_img, img_rgb=None)
return form_hstacked_imgs([rgb_img, rgb_with_mask, mask_img], save_fpath, save_to_disk)
def form_mask_triple_vertical(
rgb_img: np.ndarray, label_img: np.ndarray, save_fpath: str, save_to_disk: bool = False
) -> np.ndarray:
"""
Args:
rgb_img:
label_img:
save_fpath
save_to_disk
Returns:
        - Array, representing 3 vertically concatenated images: from top-to-bottom, they are
RGB, RGB+Semantic Masks, Semantic Masks
"""
rgb_with_mask = convert_instance_img_to_mask_img(label_img, rgb_img.copy())
mask_img = convert_instance_img_to_mask_img(label_img, img_rgb=None)
return form_vstacked_imgs([rgb_img, rgb_with_mask, mask_img], save_fpath, save_to_disk)
def convert_instance_img_to_mask_img(instance_img: np.ndarray, img_rgb: Optional[np.ndarray] = None) -> np.ndarray:
"""
Given a grayscale image where intensities denote instance IDs (same intensity denotes
belonging to same instance), convert this to an RGB image where all pixels corresponding
to the same instance get the same color. Note that two instances may not have unique colors,
    due to a finite-length colormap.
Args:
instance_img: Numpy array of shape (M,N), representing grayscale image, in [0,255]
img_rgb: Numpy array representing RGB image, possibly blank, in [0,255]
Returns:
img_rgb:
"""
img_h, img_w = instance_img.shape
if img_rgb is None:
img_rgb = np.ones((img_h, img_w, 3), dtype=np.uint8) * 255
assert instance_img.dtype in [
np.uint8,
np.uint16,
np.int32,
np.uint32,
np.int64,
], "Label map is not composed of integers."
assert img_rgb.dtype in [np.uint8, np.uint16]
our_colormap = colormap(rgb=True)
num_unique_colors = our_colormap.shape[0]
# np.unique will always sort the values
if np.unique(instance_img).size > 0:
for i, instance_id in enumerate(np.unique(instance_img)):
col = our_colormap[(COLORMAP_OFFSET + instance_id) % num_unique_colors]
mask = instance_img == instance_id
img_rgb = vis_mask(img_rgb, mask, col, alpha=0.4)
return img_rgb
def vis_mask(img: np.ndarray, mask: np.ndarray, col: Tuple[int, int, int], alpha: float = 0.4):
"""
Visualizes a single binary mask by coloring the region inside a binary mask
as a specific color, and then blending it with an RGB image.
Args:
img: Numpy array, representing RGB image with values in the [0,255] range
mask: Numpy integer array, with values in [0,1] representing mask region
col: color, tuple of integers in [0,255] representing RGB values
alpha: blending coefficient (higher alpha shows more of mask,
lower alpha preserves original image)
Returns:
image: Numpy array, representing an RGB image, representing a blended image
of original RGB image and specified colors in mask region.
"""
img = img.astype(np.float32)
idx = np.nonzero(mask)
img[idx[0], idx[1], :] *= 1.0 - alpha
img[idx[0], idx[1], :] += alpha * col
return img.astype(np.uint8)
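# Illustrative sketch (not part of the original module): colorize a tiny two-instance
# map with convert_instance_img_to_mask_img(), which blends a distinct color per
# instance via vis_mask(). The 4x4 size here is an arbitrary example value.
def _example_colorize_instances() -> np.ndarray:
    instance_img = np.zeros((4, 4), dtype=np.uint8)
    instance_img[:, 2:] = 1  # two instances: left half is ID 0, right half is ID 1
    # returns a (4, 4, 3) uint8 image with one blended color per instance
    return convert_instance_img_to_mask_img(instance_img)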
def visualize_colormap():
""" """
label_img = np.random.randint(0, 80, size=(100, 100))
id_to_class_name_map = {i: str(i) for i in range(80)}
form_contained_classes_color_guide(
label_img=label_img,
id_to_class_name_map=id_to_class_name_map,
fname_stem="fb_colormap",
save_dir=".",
save_to_disk=True,
max_colors_per_col=100,
)
def swap_px_inside_mask(
label_img: np.ndarray, segment_mask: np.ndarray, old_val: int, new_val: int, require_strict_boundaries: bool
):
"""
Args:
label_img: label map before any update has taken place.
segment_mask: 0/1 binary image showing segment pixels
old_val: old pixel value/category
new_val: new pixel value/category
require_strict_boundaries:
Returns:
- label_img: updated label map
"""
unique_vals = np.unique(segment_mask)
zeros_and_ones = np.array([0, 1], dtype=np.uint8)
all_ones = np.array([1], dtype=np.uint8)
# ok if the mask is all 1's (fill entire image)
assert np.allclose(zeros_and_ones, unique_vals) or np.allclose(all_ones, unique_vals)
assert label_img.ndim == 2
y, x = np.where(segment_mask == 1)
# if perfect agreement between label image and instance image, then we will enforce
# strict boundary agreement between the two.
if require_strict_boundaries:
assert np.allclose(np.unique(label_img[y, x]), np.array([old_val], dtype=np.uint8))
label_img[y, x] = new_val
return label_img
def get_instance_mask_class_votes(
instance_mask: np.ndarray, label_img: np.ndarray, verbose: bool = False
) -> Tuple[np.ndarray, int]:
"""
    Since the class masks and instance masks don't match up exactly, and are provided
in images with very different resolutions, we have to take the majority vote
for which semantic class the instance mask belongs to.
Args:
        instance_mask: binary mask for a single instance.
        label_img: semantic label map to vote over.
        verbose: if True, print the per-class vote percentages.
Returns:
label_votes: 1d array, containing all category votes from instance mask
majority_vote: most likely category for this instance.
"""
coords = np.vstack(np.where(instance_mask == 1)).T
label_votes = label_img[coords[:, 0], coords[:, 1]]
majority_vote = get_np_mode(label_votes)
if verbose:
unique_class_idxs = np.unique(label_votes)
if unique_class_idxs.size > 1:
for class_idx in unique_class_idxs:
percent = (label_votes == class_idx).sum() / label_votes.size * 100
print(f"\t {percent:.2f}%")
print()
return label_votes, majority_vote
def get_np_mode(x: np.ndarray) -> int:
"""Get mode value of a 1d or 2d integer array.
Args:
x: Numpy array of integers:
Returns:
integer representing mode of array values
"""
assert x.dtype in [np.uint8, np.uint16, np.int16, np.int32, np.int64]
counts = np.bincount(x)
return np.argmax(counts)
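# For example (illustrative): get_np_mode(np.array([2, 2, 7], dtype=np.int32)) returns 2,
# since np.bincount counts occurrences per value and np.argmax picks the most frequent one.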
def get_mask_from_polygon(polygon, img_h: int, img_w: int):
"""Rasterize a 2d polygon to a binary mask.
Note: 60x faster than the Matplotlib rasterizer... well done pillow!
Args:
polygon: iterable e.g. [(x1,y1),(x2,y2),...]
img_h: integer representing image height
img_w: integer representing image width
Returns:
mask
PIL.Image.new(mode, size, color=0)
Creates a new image with the given mode and size.
Parameters:
mode – The mode to use for the new image. See: Modes.
size – A 2-tuple, containing (width, height) in pixels.
color – What color to use for the image. Default is black.
If given, this should be a single integer or floating point
value for single-band modes, and a tuple for multi-band modes
(one value per band). When creating RGB images, you can also
use color strings as supported by the ImageColor module. If
the color is None, the image is not initialised.
https://pillow.readthedocs.io/en/3.1.x/reference/ImageDraw.html
The polygon outline consists of straight lines between the given coordinates,
plus a straight line between the last and the first coordinate.
Parameters:
xy – Sequence of either 2-tuples like [(x, y), (x, y), ...] or numeric values like [x, y, x, y, ...].
outline – Color to use for the outline.
fill – Color to use for the fill.
"""
polygon = [tuple([x, y]) for (x, y) in polygon]
# this is the image that we want to create
mask_img = Image.new("L", size=(img_w, img_h), color=0)
# include outline
# a drawer to draw into the image
if len(set([y for (x, y) in polygon])) == 1:
# draw only a line. because of known bug in .polygon() method
# https://github.com/python-pillow/Pillow/issues/4674
ImageDraw.Draw(mask_img).line(polygon, fill=1)
else:
ImageDraw.Draw(mask_img).polygon(polygon, outline=1, fill=1)
mask = np.array(mask_img)
return mask
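# Illustrative usage sketch (coordinates are arbitrary): rasterize a triangle into a 60x60 mask.
#
#     triangle = [(10, 10), (50, 10), (30, 40)]
#     mask = get_mask_from_polygon(triangle, img_h=60, img_w=60)
#     assert mask.shape == (60, 60) and mask.max() == 1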
def get_present_classes_in_img(label_img: np.ndarray, id_to_classname_map) -> List[str]:
"""
Args:
        label_img: label map containing class IDs.
        id_to_classname_map: mapping from class ID to class name.
Returns:
list of strings, representing classnames
"""
present_class_idxs = np.unique(label_img)
present_classnames = [id_to_classname_map[idx] for idx in present_class_idxs]
return present_classnames
def get_most_populous_class(segment_mask: np.ndarray, label_map: np.ndarray) -> int:
"""
Args:
- segment_mask
- label_map
Returns:
- class_mode_idx: integer representing most populous class index
"""
class_indices = label_map[segment_mask.nonzero()]
class_mode_idx = get_np_mode(class_indices)
return class_mode_idx
def get_polygons_from_binary_img(binary_img: np.ndarray) -> Tuple[List[np.ndarray], Optional[bool]]:
"""
cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
Internal contours (holes) are placed in hierarchy-2.
cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
Args:
binary_img: Numpy array with all 0s or 1s
Returns:
res: list of polygons, each (N,2) np.ndarray
"""
assert all([val in [0, 1] for val in np.unique(binary_img)])
binary_img = binary_img.astype("uint8")
res, hierarchy = cv2.findContours(binary_img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
if hierarchy is None: # empty mask
return [], False
has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
res = [x.squeeze() for x in res]
res = [x for x in res if x.size >= 6] # should have at least 3 vertices to be valid
return res, has_holes
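# Illustrative sketch: extract contours from a simple filled square (no holes expected).
#
#     binary_img = np.zeros((20, 20), dtype=np.uint8)
#     binary_img[5:15, 5:15] = 1
#     polygons, has_holes = get_polygons_from_binary_img(binary_img)
#     # polygons is a list of (N, 2) vertex arrays; has_holes is False here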
|
observations/r/attenu.py | hajime9652/observations | 199 | 11198371 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def attenu(path):
"""The Joyner–Boore Attenuation Data
This data gives peak accelerations measured at various observation
stations for 23 earthquakes in California. The data have been used by
    various workers to estimate the attenuating effect of distance on ground
acceleration.
A data frame with 182 observations on 5 variables.
+--------+-----------+-----------+------------------------------------+
| [,1] | event | numeric | Event Number |
+--------+-----------+-----------+------------------------------------+
| [,2] | mag | numeric | Moment Magnitude |
+--------+-----------+-----------+------------------------------------+
| [,3] | station | factor | Station Number |
+--------+-----------+-----------+------------------------------------+
| [,4] | dist | numeric | Station-hypocenter distance (km) |
+--------+-----------+-----------+------------------------------------+
| [,5] | accel | numeric | Peak acceleration (g) |
+--------+-----------+-----------+------------------------------------+
<NAME>., <NAME> and <NAME> (1981). Peak horizontal
acceleration and velocity from strong-motion records including records
from the 1979 Imperial Valley, California earthquake. USGS Open File
report 81-365. Menlo Park, Ca.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `attenu.csv`.
Returns:
Tuple of np.ndarray `x_train` with 182 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'attenu.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/datasets/attenu.csv'
maybe_download_and_extract(path, url,
save_file_name='attenu.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
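# Illustrative usage sketch (the path below is just an example location):
#
#     x_train, metadata = attenu('~/data')
#     # x_train.shape == (182, 5); metadata['columns'] lists the five column names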
|
src/account/signals.py | RogerTangos/datahub-stub | 192 | 11198375 | from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import pre_save
from psycopg2 import OperationalError
from django.db.utils import IntegrityError
from core.db.manager import DataHubManager
from core.db.rlsmanager import RowLevelSecurityManager
from django.conf import settings
# Note that these may fire multiple times and for users that already exist.
@receiver(pre_save, sender=User,
dispatch_uid="dh_user_pre_save_unique_email")
def enforce_email_uniqueness(sender, instance, **kwargs):
"""
Validates new user attributes at database save time.
Check is made both here and in the pipeline form to ensure the provided
email address is not being used by another user.
"""
if instance is not None:
email = instance.email
username = instance.username
if not email:
raise IntegrityError("Email required.")
if (sender.objects
.filter(email=email)
.exclude(username=username)
.count()):
raise IntegrityError(
"The email address {0} is associated with another account."
.format(email)
)
@receiver(pre_save, sender=User,
dispatch_uid="dh_user_pre_save_not_blacklisted")
def enforce_blacklist(sender, instance, **kwargs):
"""
    Prevents certain usernames from being created.
    Checks usernames against settings.BLACKLISTED_USERNAMES.
"""
if instance is not None:
username = instance.username.lower()
if not username:
raise IntegrityError("Username required.")
if username in [x.lower() for x in settings.BLACKLISTED_USERNAMES]:
            raise IntegrityError(
                "Failed to create user. The name {0} is reserved"
                " for DataHub use.".format(username))
@receiver(pre_save, sender=User,
dispatch_uid="dh_user_pre_save_create_user_db_and_data_folder")
def create_user_db_and_data_folder_if_needed(sender, instance, **kwargs):
"""
Creates a Postgres role and db and data folder to go with new Django users.
Raises an exception if the role, database, or user data folder exists
before this user.
"""
username = instance.username
hashed_password = <PASSWORD>
# The Django user doesn't exist yet, so we can't just try to create a
# DataHubManager connection as the user. We need to act as the db
# superuser and check for any existing db role or database.
db_exists = DataHubManager.database_exists(username)
user_exists = DataHubManager.user_exists(username)
user_data_folder_exists = DataHubManager.user_data_folder_exists(username)
if db_exists and user_exists and user_data_folder_exists:
# Make sure new users don't inherit orphaned roles or databases that
# are missing a matching Django user.
try:
User.objects.get(username=username)
except User.DoesNotExist:
raise IntegrityError("Failed to create user. That name is already"
" in use by an orphaned user.")
elif not db_exists and not user_exists and not user_data_folder_exists:
try:
DataHubManager.create_user(
username=username,
password=<PASSWORD>)
except OperationalError:
raise
else:
raise Exception("Failed to create user. That name is already"
" in use by either a role, database, or data folder.")
@receiver(pre_save, sender=User,
dispatch_uid="dh_user_pre_save_add_user_to_policy_table")
def add_user_to_policy_table(sender, instance, **kwargs):
"""
Adds default policies for user to row level security policy table.
Does nothing if the user already has an entry in the policy table.
"""
username = instance.username
# Create row level security policies in the dh_public policy table
    # granting the user select, insert, and update access to the policies they create
try:
RowLevelSecurityManager.add_user_to_policy_table(username)
except Exception:
# Ignore failures when the user already has a policy.
pass
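# Note (illustrative, not part of the original module): since every receiver above hooks
# pre_save on User, a plain `User.objects.create_user(...)` call will run the email
# uniqueness check, the username blacklist check, the role/database/data-folder creation,
# and the policy-table bootstrap before the Django row is written.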
|
src/python/pants/backend/docker/target_types.py | pantsbuild/pants | 1,806 | 11198391 | <gh_stars>1000+
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
import re
from abc import ABC, abstractmethod
from textwrap import dedent
from typing import Callable, ClassVar, Iterator, cast
from typing_extensions import final
from pants.backend.docker.registries import ALL_DEFAULT_REGISTRIES
from pants.base.build_environment import get_buildroot
from pants.core.goals.run import RestartableField
from pants.engine.fs import GlobMatchErrorBehavior
from pants.engine.target import (
COMMON_TARGET_FIELDS,
AsyncFieldMixin,
BoolField,
Dependencies,
DictStringToStringField,
OptionalSingleSourceField,
StringField,
StringSequenceField,
Target,
)
from pants.util.docutil import doc_url
# Common help text to be applied to each field that supports value interpolation.
_interpolation_help = (
"{kind} may use placeholders in curly braces to be interpolated. The placeholders are derived "
"from various sources, such as the Dockerfile instructions and build args.\n\n"
)
class DockerBuildArgsField(StringSequenceField):
alias = "extra_build_args"
default = ()
help = (
"Build arguments (`--build-arg`) to use when building this image. "
"Entries are either strings in the form `ARG_NAME=value` to set an explicit value; "
"or just `ARG_NAME` to copy the value from Pants's own environment.\n\n"
"Use `[docker].build_args` to set default build args for all images."
)
class DockerImageSourceField(OptionalSingleSourceField):
default = "Dockerfile"
# When the default glob value is in effect, we don't want the normal glob match error behavior
# to kick in for a missing Dockerfile, in case there are `instructions` provided, in which case
# we generate the Dockerfile instead. If there are no `instructions`, or there are both
# `instructions` and a Dockerfile hydrated from the `source` glob, we error out with a message
# to the user.
default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore
help = (
"The Dockerfile to use when building the Docker image.\n\n"
"Use the `instructions` field instead if you prefer not having the Dockerfile in your "
"project source tree."
)
class DockerImageInstructionsField(StringSequenceField):
alias = "instructions"
required = False
help = (
"The `Dockerfile` content, typically one instruction per list item.\n\n"
"Use the `source` field instead if you prefer having the Dockerfile in your project "
"source tree.\n\n"
+ dedent(
"""\
Example:
# example/BUILD
docker_image(
instructions=[
"FROM base/image:1.0",
"RUN echo example",
],
)
"""
)
)
class DockerImageTagsField(StringSequenceField):
alias = "image_tags"
default = ("latest",)
help = (
"Any tags to apply to the Docker image name (the version is usually applied as a tag).\n\n"
+ _interpolation_help.format(kind="tag")
+ f"See {doc_url('tagging-docker-images')}."
)
class DockerImageTargetStageField(StringField):
alias = "target_stage"
help = (
"Specify target build stage, rather than building the entire `Dockerfile`.\n\n"
"When using multi-stage build, you may name your stages, and can target them when building "
"to only selectively build a certain stage. See also the `--docker-build-target-stage` "
"option.\n\n"
"Read more about [multi-stage Docker builds]"
"(https://docs.docker.com/develop/develop-images/multistage-build/#stop-at-a-specific-build-stage)"
)
class DockerDependenciesField(Dependencies):
supports_transitive_excludes = True
class DockerRegistriesField(StringSequenceField):
alias = "registries"
default = (ALL_DEFAULT_REGISTRIES,)
help = (
"List of addresses or configured aliases to any Docker registries to use for the "
"built image.\n\n"
"The address is a domain name with optional port for your registry, and any registry "
"aliases are prefixed with `@` for addresses in the [docker].registries configuration "
"section.\n\n"
"By default, all configured registries with `default = true` are used.\n\n"
+ dedent(
"""\
Example:
# pants.toml
[docker.registries.my-registry-alias]
address = "myregistrydomain:port"
default = false # optional
# example/BUILD
docker_image(
registries = [
"@my-registry-alias",
"myregistrydomain:port",
],
)
"""
)
+ (
"The above example shows two valid `registry` options: using an alias to a configured "
"registry and the address to a registry verbatim in the BUILD file."
)
)
class DockerRepositoryField(StringField):
alias = "repository"
help = (
'The repository name for the Docker image. e.g. "<repository>/<name>".\n\n'
"It uses the `[docker].default_repository` by default.\n\n"
+ _interpolation_help.format(kind="repository")
+ "Additional placeholders for the repository field are: `name`, `directory` and "
"`parent_directory`.\n\nSee the documentation for `[docker].default_repository` for more "
"information."
)
class DockerSkipPushField(BoolField):
alias = "skip_push"
default = False
help = "If set to true, do not push this image to registries when running `./pants publish`."
OptionValueFormatter = Callable[[str], str]
class DockerBuildOptionFieldMixin(ABC):
"""Inherit this mixin class to provide options to `docker build`."""
docker_build_option: ClassVar[str]
@abstractmethod
def option_values(self, *, value_formatter: OptionValueFormatter) -> Iterator[str]:
"""Subclasses must implement this, to turn their `self.value` into none, one or more option
values."""
@final
def options(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for value in self.option_values(value_formatter=value_formatter):
yield from (self.docker_build_option, value)
class DockerBuildImageLabelsOptionField(DockerBuildOptionFieldMixin, DictStringToStringField):
alias = "image_labels"
help = (
"Provide image metadata.\n\n"
+ _interpolation_help.format(kind="label value")
+ "See [Docker labels](https://docs.docker.com/config/labels-custom-metadata/"
"#manage-labels-on-objects) for more information."
)
docker_build_option = "--label"
def option_values(self, value_formatter: OptionValueFormatter) -> Iterator[str]:
for label, value in (self.value or {}).items():
yield f"{label}={value_formatter(value)}"
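    # For example (illustrative): `image_labels={"maintainer": "dev@example.com"}` is turned
    # by `options()` above into the docker build arguments `--label maintainer=dev@example.com`,
    # after the value has passed through the supplied `value_formatter`.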
class DockerBuildSecretsOptionField(
AsyncFieldMixin, DockerBuildOptionFieldMixin, DictStringToStringField
):
alias = "secrets"
help = (
"Secret files to expose to the build (only if BuildKit enabled).\n\n"
"Secrets may use absolute paths, or paths relative to your project build root, or the "
"BUILD file if prefixed with `./`. The id should be valid as used by the Docker build "
"`--secret` option. See [Docker secrets](https://docs.docker.com/engine/swarm/secrets/) "
"for more information.\n\n"
+ dedent(
"""\
Example:
docker_image(
secrets={
"mysecret": "/var/secrets/some-secret",
"repo-secret": "src/proj/secrets/some-secret",
"target-secret": "./secrets/some-secret",
}
)
"""
)
)
docker_build_option = "--secret"
def option_values(self, **kwargs) -> Iterator[str]:
        # os.path.join() discards preceding parts if encountering an abs path, e.g. if the secret
# `path` is an absolute path, the `buildroot` and `spec_path` will not be considered. Also,
# an empty path part is ignored.
for secret, path in (self.value or {}).items():
full_path = os.path.join(
get_buildroot(),
self.address.spec_path if re.match(r"\.{1,2}/", path) else "",
path,
)
yield f"id={secret},src={os.path.normpath(full_path)}"
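    # For example (illustrative): `secrets={"mysecret": "./keys/token"}` declared in
    # `src/proj/BUILD` yields `--secret id=mysecret,src=<buildroot>/src/proj/keys/token`,
    # since the leading `./` makes the path relative to the BUILD file's directory.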
class DockerBuildSSHOptionField(DockerBuildOptionFieldMixin, StringSequenceField):
alias = "ssh"
default = ()
help = (
"SSH agent socket or keys to expose to the build (only if BuildKit enabled) "
"(format: default|<id>[=<socket>|<key>[,<key>]])\n\n"
"The exposed agent and/or keys can then be used in your `Dockerfile` by mounting them in "
"your `RUN` instructions:\n\n"
" RUN --mount=type=ssh ...\n\n"
"See [Docker documentation](https://docs.docker.com/develop/develop-images"
"/build_enhancements/#using-ssh-to-access-private-data-in-builds) for more information."
)
docker_build_option = "--ssh"
def option_values(self, **kwargs) -> Iterator[str]:
yield from cast("tuple[str]", self.value)
class DockerImageTarget(Target):
alias = "docker_image"
core_fields = (
*COMMON_TARGET_FIELDS,
DockerBuildArgsField,
DockerDependenciesField,
DockerImageSourceField,
DockerImageInstructionsField,
DockerImageTagsField,
DockerRegistriesField,
DockerRepositoryField,
DockerBuildImageLabelsOptionField,
DockerBuildSecretsOptionField,
DockerBuildSSHOptionField,
DockerSkipPushField,
DockerImageTargetStageField,
RestartableField,
)
    help = (
        "The `docker_image` target describes how to build and tag a Docker image.\n\n"
        "Any dependencies, as inferred or explicitly specified, will be included in the Docker "
        "build context, after being packaged if applicable.\n\n"
        "By default, will use a Dockerfile from the same directory as the BUILD file this target "
        "is defined in. Point at another file with the `source` field, or use the `instructions` "
        "field to have the Dockerfile contents verbatim directly in the BUILD file.\n\n"
        "Dependencies on upstream/base images defined by another `docker_image` are inferred if "
"referenced by a build argument with a default value of the target address.\n\n"
+ dedent(
"""\
Example:
# src/docker/downstream/Dockerfile
ARG BASE=src/docker/upstream:image
FROM $BASE
...
"""
)
)
|
backend/api/migrations/0031_auto_20220127_0032.py | alairice/doccano | 2,082 | 11198392 | # Generated by Django 3.2.11 on 2022-01-27 00:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("api", "0030_delete_autolabelingconfig"),
]
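    # Note: the state_operations below only rewrite Django's migration state (dropping the
    # old api.* models), while the database_operations merely rename the underlying tables
    # to their labels_* names, so the existing rows are kept.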
operations = [
migrations.SeparateDatabaseAndState(
state_operations=[
migrations.AlterUniqueTogether(
name="category",
unique_together=None,
),
migrations.RemoveField(
model_name="category",
name="example",
),
migrations.RemoveField(
model_name="category",
name="label",
),
migrations.RemoveField(
model_name="category",
name="user",
),
migrations.RemoveField(
model_name="span",
name="example",
),
migrations.RemoveField(
model_name="span",
name="label",
),
migrations.RemoveField(
model_name="span",
name="user",
),
migrations.AlterUniqueTogether(
name="textlabel",
unique_together=None,
),
migrations.RemoveField(
model_name="textlabel",
name="example",
),
migrations.RemoveField(
model_name="textlabel",
name="user",
),
migrations.DeleteModel(
name="AnnotationRelations",
),
migrations.DeleteModel(
name="Category",
),
migrations.DeleteModel(
name="Span",
),
migrations.DeleteModel(
name="TextLabel",
),
],
database_operations=[
migrations.AlterModelTable(name="Span", table="labels_span"),
migrations.AlterModelTable(name="Category", table="labels_category"),
migrations.AlterModelTable(name="TextLabel", table="labels_textlabel"),
migrations.AlterModelTable(name="AnnotationRelations", table="labels_annotationrelations"),
],
)
]
|
release/stubs.min/System/Runtime/InteropServices/__init___parts/DllImportSearchPath.py | htlcnn/ironpython-stubs | 182 | 11198413 | class DllImportSearchPath(Enum,IComparable,IFormattable,IConvertible):
""" enum (flags) DllImportSearchPath,values: ApplicationDirectory (512),AssemblyDirectory (2),LegacyBehavior (0),SafeDirectories (4096),System32 (2048),UseDllDirectoryForDependencies (256),UserDirectories (1024) """
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
ApplicationDirectory=None
AssemblyDirectory=None
LegacyBehavior=None
SafeDirectories=None
System32=None
UseDllDirectoryForDependencies=None
UserDirectories=None
value__=None
|
logbook/queues.py | YoavCohen/logbook | 771 | 11198422 | # -*- coding: utf-8 -*-
"""
logbook.queues
~~~~~~~~~~~~~~
This module implements queue backends.
:copyright: (c) 2010 by <NAME>, <NAME>.
:license: BSD, see LICENSE for more details.
"""
import json
import threading
from threading import Thread, Lock
import platform
from logbook.base import NOTSET, LogRecord, dispatch_record
from logbook.handlers import Handler, WrapperHandler
from logbook.helpers import PY2, u
if PY2:
from Queue import Empty, Full, Queue as ThreadQueue
else:
from queue import Empty, Full, Queue as ThreadQueue
class RedisHandler(Handler):
"""A handler that sends log messages to a Redis instance.
It publishes each record as json dump. Requires redis module.
To receive such records you need to have a running instance of Redis.
Example setup::
        handler = RedisHandler('127.0.0.1', port=6379, key='redis')
If your Redis instance is password protected, you can securely connect
passing your password when creating a RedisHandler object.
Example::
handler = RedisHandler(password='<PASSWORD>')
More info about the default buffer size: wp.me/p3tYJu-3b
"""
def __init__(self, host='127.0.0.1', port=6379, key='redis',
extra_fields=None, flush_threshold=128, flush_time=1,
level=NOTSET, filter=None, password=False, bubble=True,
context=None, push_method='rpush'):
Handler.__init__(self, level, filter, bubble)
try:
import redis
from redis import ResponseError
except ImportError:
raise RuntimeError('The redis library is required for '
'the RedisHandler')
self.redis = redis.Redis(host=host, port=port, password=password,
decode_responses=True)
try:
self.redis.ping()
except ResponseError:
raise ResponseError(
'The password provided is apparently incorrect')
self.key = key
self.extra_fields = extra_fields or {}
self.flush_threshold = flush_threshold
self.queue = []
self.lock = Lock()
self.push_method = push_method
# Set up a thread that flushes the queue every specified seconds
self._stop_event = threading.Event()
self._flushing_t = threading.Thread(target=self._flush_task,
args=(flush_time,
self._stop_event))
self._flushing_t.daemon = True
self._flushing_t.start()
def _flush_task(self, time, stop_event):
"""Calls the method _flush_buffer every certain time.
"""
while not self._stop_event.isSet():
with self.lock:
self._flush_buffer()
self._stop_event.wait(time)
def _flush_buffer(self):
"""Flushes the messaging queue into Redis.
All values are pushed at once for the same key.
The method rpush/lpush is defined by push_method argument
"""
if self.queue:
getattr(self.redis, self.push_method)(self.key, *self.queue)
self.queue = []
def disable_buffering(self):
"""Disables buffering.
If called, every single message will be directly pushed to Redis.
"""
self._stop_event.set()
self.flush_threshold = 1
def emit(self, record):
"""Emits a pair (key, value) to redis.
The key is the one provided when creating the handler, or redis if none
was provided. The value contains both the message and the hostname.
Extra values are also appended to the message.
"""
with self.lock:
r = {"message": record.msg,
"host": platform.node(),
"level": record.level_name,
"time": record.time.isoformat()}
r.update(self.extra_fields)
r.update(record.kwargs)
self.queue.append(json.dumps(r))
if len(self.queue) == self.flush_threshold:
self._flush_buffer()
def close(self):
self._flush_buffer()
class MessageQueueHandler(Handler):
"""A handler that acts as a message queue publisher, which publishes each
record as json dump. Requires the kombu module.
The queue will be filled with JSON exported log records. To receive such
log records from a queue you can use the :class:`MessageQueueSubscriber`.
For an AMQP backend such as RabbitMQ::
handler = MessageQueueHandler('amqp://guest:guest@localhost//')
This requires the py-amqp or the librabbitmq client library.
For Redis (requires redis client library)::
handler = MessageQueueHandler('redis://localhost:8889/0')
For MongoDB (requires pymongo)::
handler = MessageQueueHandler('mongodb://localhost:27017/logging')
Several other backends are also supported.
Refer to the `kombu`_ documentation
.. _kombu: http://kombu.readthedocs.org/en/latest/introduction.html
"""
def __init__(self, uri=None, queue='logging', level=NOTSET,
filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
try:
import kombu
except ImportError:
            raise RuntimeError('The kombu library is required for '
                               'the MessageQueueHandler.')
if uri:
connection = kombu.Connection(uri)
self.queue = connection.SimpleQueue(queue)
def export_record(self, record):
"""Exports the record into a dictionary ready for JSON dumping.
"""
return record.to_dict(json_safe=True)
def emit(self, record):
self.queue.put(self.export_record(record))
def close(self):
self.queue.close()
RabbitMQHandler = MessageQueueHandler
class ZeroMQHandler(Handler):
"""A handler that acts as a ZeroMQ publisher, which publishes each record
as json dump. Requires the pyzmq library.
The queue will be filled with JSON exported log records. To receive such
log records from a queue you can use the :class:`ZeroMQSubscriber`.
If `multi` is set to `True`, the handler will use a `PUSH` socket to
publish the records. This allows multiple handlers to use the same `uri`.
The records can be received by using the :class:`ZeroMQSubscriber` with
`multi` set to `True`.
Example setup::
handler = ZeroMQHandler('tcp://127.0.0.1:5000')
"""
def __init__(self, uri=None, level=NOTSET, filter=None, bubble=False,
context=None, multi=False):
Handler.__init__(self, level, filter, bubble)
try:
import zmq
except ImportError:
raise RuntimeError('The pyzmq library is required for '
'the ZeroMQHandler.')
#: the zero mq context
self.context = context or zmq.Context()
if multi:
#: the zero mq socket.
self.socket = self.context.socket(zmq.PUSH)
if uri is not None:
self.socket.connect(uri)
else:
#: the zero mq socket.
self.socket = self.context.socket(zmq.PUB)
if uri is not None:
self.socket.bind(uri)
def export_record(self, record):
"""Exports the record into a dictionary ready for JSON dumping."""
return record.to_dict(json_safe=True)
def emit(self, record):
self.socket.send(json.dumps(
self.export_record(record)).encode("utf-8"))
def close(self, linger=-1):
self.socket.close(linger)
def __del__(self):
# When the Handler is deleted we must close our socket in a
# non-blocking fashion (using linger).
# Otherwise it can block indefinitely, for example if the Subscriber is
# not reachable.
# If messages are pending on the socket, we wait 100ms for them to be
# sent then we discard them.
self.close(linger=100)
class ThreadController(object):
"""A helper class used by queue subscribers to control the background
thread. This is usually created and started in one go by
:meth:`~logbook.queues.ZeroMQSubscriber.dispatch_in_background` or
a comparable function.
"""
def __init__(self, subscriber, setup=None):
self.setup = setup
self.subscriber = subscriber
self.running = False
self._thread = None
def start(self):
"""Starts the task thread."""
self.running = True
self._thread = Thread(target=self._target)
self._thread.setDaemon(True)
self._thread.start()
def stop(self):
"""Stops the task thread."""
if self.running:
self.running = False
self._thread.join()
self._thread = None
def _target(self):
if self.setup is not None:
self.setup.push_thread()
try:
while self.running:
self.subscriber.dispatch_once(timeout=0.05)
finally:
if self.setup is not None:
self.setup.pop_thread()
class SubscriberBase(object):
"""Baseclass for all subscribers."""
def recv(self, timeout=None):
"""Receives a single record from the socket. Timeout of 0 means
nonblocking, `None` means blocking and otherwise it's a timeout in
seconds after which the function just returns with `None`.
Subclasses have to override this.
"""
raise NotImplementedError()
def dispatch_once(self, timeout=None):
"""Receives one record from the socket, loads it and dispatches it. Returns
`True` if something was dispatched or `False` if it timed out.
"""
rv = self.recv(timeout)
if rv is not None:
dispatch_record(rv)
return True
return False
def dispatch_forever(self):
"""Starts a loop that dispatches log records forever."""
while 1:
self.dispatch_once()
def dispatch_in_background(self, setup=None):
"""Starts a new daemonized thread that dispatches in the background.
An optional handler setup can be provided that pushed to the new
thread (can be any :class:`logbook.base.StackedObject`).
Returns a :class:`ThreadController` object for shutting down
the background thread. The background thread will already be
running when this function returns.
"""
controller = ThreadController(self, setup)
controller.start()
return controller
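    # For example (illustrative): dispatch queued records to `my_handler` in a background
    # thread, then shut the worker thread down when finished:
    #
    #     controller = subscriber.dispatch_in_background(my_handler)
    #     ...
    #     controller.stop()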
class MessageQueueSubscriber(SubscriberBase):
"""A helper that acts as a message queue subscriber and will dispatch
received log records to the active handler setup. There are multiple ways
to use this class.
It can be used to receive log records from a queue::
subscriber = MessageQueueSubscriber('mongodb://localhost:27017/logging')
record = subscriber.recv()
But it can also be used to receive and dispatch these in one go::
with target_handler:
subscriber = MessageQueueSubscriber('mongodb://localhost:27017/logging')
subscriber.dispatch_forever()
This will take all the log records from that queue and dispatch them
over to `target_handler`. If you want you can also do that in the
background::
subscriber = MessageQueueSubscriber('mongodb://localhost:27017/logging')
controller = subscriber.dispatch_in_background(target_handler)
The controller returned can be used to shut down the background
thread::
controller.stop()
"""
def __init__(self, uri=None, queue='logging'):
try:
import kombu
except ImportError:
raise RuntimeError('The kombu library is required.')
if uri:
connection = kombu.Connection(uri)
self.queue = connection.SimpleQueue(queue)
def __del__(self):
try:
self.close()
except AttributeError:
# subscriber partially created
pass
def close(self):
self.queue.close()
def recv(self, timeout=None):
"""Receives a single record from the socket. Timeout of 0 means
nonblocking, `None` means blocking and otherwise it's a timeout in
seconds after which the function just returns with `None`.
"""
if timeout == 0:
try:
rv = self.queue.get(block=False)
except Exception:
return
else:
rv = self.queue.get(timeout=timeout)
log_record = rv.payload
rv.ack()
return LogRecord.from_dict(log_record)
RabbitMQSubscriber = MessageQueueSubscriber
class ZeroMQSubscriber(SubscriberBase):
"""A helper that acts as ZeroMQ subscriber and will dispatch received
log records to the active handler setup. There are multiple ways to
use this class.
It can be used to receive log records from a queue::
subscriber = ZeroMQSubscriber('tcp://127.0.0.1:5000')
record = subscriber.recv()
But it can also be used to receive and dispatch these in one go::
with target_handler:
subscriber = ZeroMQSubscriber('tcp://127.0.0.1:5000')
subscriber.dispatch_forever()
This will take all the log records from that queue and dispatch them
over to `target_handler`. If you want you can also do that in the
background::
subscriber = ZeroMQSubscriber('tcp://127.0.0.1:5000')
controller = subscriber.dispatch_in_background(target_handler)
The controller returned can be used to shut down the background
thread::
controller.stop()
If `multi` is set to `True`, the subscriber will use a `PULL` socket
and listen to records published by a `PUSH` socket (usually via a
:class:`ZeroMQHandler` with `multi` set to `True`). This allows a
single subscriber to dispatch multiple handlers.
"""
def __init__(self, uri=None, context=None, multi=False):
try:
import zmq
except ImportError:
raise RuntimeError('The pyzmq library is required for '
'the ZeroMQSubscriber.')
self._zmq = zmq
#: the zero mq context
self.context = context or zmq.Context()
if multi:
#: the zero mq socket.
self.socket = self.context.socket(zmq.PULL)
if uri is not None:
self.socket.bind(uri)
else:
#: the zero mq socket.
self.socket = self.context.socket(zmq.SUB)
if uri is not None:
self.socket.connect(uri)
self.socket.setsockopt_unicode(zmq.SUBSCRIBE, u(''))
def __del__(self):
try:
self.close()
except AttributeError:
# subscriber partially created
pass
def close(self):
"""Closes the zero mq socket."""
self.socket.close()
def recv(self, timeout=None):
"""Receives a single record from the socket. Timeout of 0 means
nonblocking, `None` means blocking and otherwise it's a timeout in
seconds after which the function just returns with `None`.
"""
if timeout is None:
rv = self.socket.recv()
elif not timeout:
rv = self.socket.recv(self._zmq.NOBLOCK)
if rv is None:
return
else:
if not self._zmq.select([self.socket], [], [], timeout)[0]:
return
rv = self.socket.recv(self._zmq.NOBLOCK)
if not PY2:
rv = rv.decode("utf-8")
return LogRecord.from_dict(json.loads(rv))
def _fix_261_mplog():
"""necessary for older python's to disable a broken monkeypatch
in the logging module. See multiprocessing/util.py for the
hasattr() check. At least in Python 2.6.1 the multiprocessing
module is not imported by logging and as such the test in
the util fails.
"""
import logging
import multiprocessing
logging.multiprocessing = multiprocessing
class MultiProcessingHandler(Handler):
"""Implements a handler that dispatches over a queue to a different
process. It is connected to a subscriber with a
:class:`multiprocessing.Queue`::
from multiprocessing import Queue
from logbook.queues import MultiProcessingHandler
queue = Queue(-1)
handler = MultiProcessingHandler(queue)
"""
def __init__(self, queue, level=NOTSET, filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
self.queue = queue
_fix_261_mplog()
def emit(self, record):
self.queue.put_nowait(record.to_dict(json_safe=True))
class MultiProcessingSubscriber(SubscriberBase):
"""Receives log records from the given multiprocessing queue and
dispatches them to the active handler setup. Make sure to use the same
    queue for both handler and subscriber. Ideally the queue is set
up with maximum size (``-1``)::
from multiprocessing import Queue
queue = Queue(-1)
It can be used to receive log records from a queue::
subscriber = MultiProcessingSubscriber(queue)
record = subscriber.recv()
But it can also be used to receive and dispatch these in one go::
with target_handler:
subscriber = MultiProcessingSubscriber(queue)
subscriber.dispatch_forever()
This will take all the log records from that queue and dispatch them
over to `target_handler`. If you want you can also do that in the
background::
subscriber = MultiProcessingSubscriber(queue)
controller = subscriber.dispatch_in_background(target_handler)
The controller returned can be used to shut down the background
thread::
controller.stop()
    If no queue is provided the subscriber will create one. This one can then
    be used by handlers::
subscriber = MultiProcessingSubscriber()
handler = MultiProcessingHandler(subscriber.queue)
"""
def __init__(self, queue=None):
if queue is None:
from multiprocessing import Queue
queue = Queue(-1)
self.queue = queue
_fix_261_mplog()
def recv(self, timeout=None):
if timeout is None:
rv = self.queue.get()
else:
try:
rv = self.queue.get(block=True, timeout=timeout)
except Empty:
return None
return LogRecord.from_dict(rv)
class ExecnetChannelHandler(Handler):
"""Implements a handler that dispatches over a execnet channel
to a different process.
"""
def __init__(self, channel, level=NOTSET, filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
self.channel = channel
def emit(self, record):
self.channel.send(record.to_dict(json_safe=True))
class ExecnetChannelSubscriber(SubscriberBase):
"""subscribes to a execnet channel"""
def __init__(self, channel):
self.channel = channel
def recv(self, timeout=None):
try:
rv = self.channel.receive(timeout=timeout)
except self.channel.RemoteError:
# XXX: handle
return None
except (self.channel.TimeoutError, EOFError):
return None
else:
return LogRecord.from_dict(rv)
class TWHThreadController(object):
"""A very basic thread controller that pulls things in from a
queue and sends it to a handler. Both queue and handler are
taken from the passed :class:`ThreadedWrapperHandler`.
"""
class Command(object):
stop = object()
emit = object()
emit_batch = object()
def __init__(self, wrapper_handler):
self.wrapper_handler = wrapper_handler
self.running = False
self._thread = None
def start(self):
"""Starts the task thread."""
self.running = True
self._thread = Thread(target=self._target)
self._thread.setDaemon(True)
self._thread.start()
def stop(self):
"""Stops the task thread."""
if self.running:
self.wrapper_handler.queue.put_nowait((self.Command.stop, ))
self._thread.join()
self._thread = None
def _target(self):
while 1:
item = self.wrapper_handler.queue.get()
command, data = item[0], item[1:]
if command is self.Command.stop:
self.running = False
break
elif command is self.Command.emit:
(record, ) = data
self.wrapper_handler.handler.emit(record)
elif command is self.Command.emit_batch:
record, reason = data
self.wrapper_handler.handler.emit_batch(record, reason)
class ThreadedWrapperHandler(WrapperHandler):
"""This handled uses a single background thread to dispatch log records
to a specific other handler using an internal queue. The idea is that if
you are using a handler that requires some time to hand off the log records
(such as the mail handler) and would block your request, you can let
Logbook do that in a background thread.
The threaded wrapper handler will automatically adopt the methods and
properties of the wrapped handler. All the values will be reflected:
>>> twh = ThreadedWrapperHandler(TestHandler())
>>> from logbook import WARNING
>>> twh.level_name = 'WARNING'
>>> twh.handler.level_name
'WARNING'
"""
_direct_attrs = frozenset(['handler', 'queue', 'controller'])
def __init__(self, handler, maxsize=0):
WrapperHandler.__init__(self, handler)
self.queue = ThreadQueue(maxsize)
self.controller = TWHThreadController(self)
self.controller.start()
def close(self):
self.controller.stop()
self.handler.close()
def emit(self, record):
item = (TWHThreadController.Command.emit, record)
try:
self.queue.put_nowait(item)
except Full:
# silently drop
pass
def emit_batch(self, records, reason):
item = (TWHThreadController.Command.emit_batch, records, reason)
try:
self.queue.put_nowait(item)
except Full:
# silently drop
pass
class GroupMember(ThreadController):
def __init__(self, subscriber, queue):
ThreadController.__init__(self, subscriber, None)
self.queue = queue
def _target(self):
if self.setup is not None:
self.setup.push_thread()
try:
while self.running:
record = self.subscriber.recv()
if record:
try:
self.queue.put(record, timeout=0.05)
except Full:
pass
finally:
if self.setup is not None:
self.setup.pop_thread()
class SubscriberGroup(SubscriberBase):
"""This is a subscriber which represents a group of subscribers.
This is helpful if you are writing a server-like application which has
"slaves". This way a user is easily able to view every log record which
happened somewhere in the entire system without having to check every
single slave::
subscribers = SubscriberGroup([
MultiProcessingSubscriber(queue),
ZeroMQSubscriber('tcp://127.0.0.1:5000')
])
with target_handler:
subscribers.dispatch_forever()
"""
def __init__(self, subscribers=None, queue_limit=10):
self.members = []
self.queue = ThreadQueue(queue_limit)
for subscriber in subscribers or []:
self.add(subscriber)
def add(self, subscriber):
"""Adds the given `subscriber` to the group."""
member = GroupMember(subscriber, self.queue)
member.start()
self.members.append(member)
def recv(self, timeout=None):
try:
return self.queue.get(timeout=timeout)
except Empty:
return
    def stop(self):
        """Stops the group from internally receiving any more messages; once the
        internal queue is exhausted :meth:`recv` will always return `None`.
"""
for member in self.members:
            member.stop()
|
exps/sudoku.py | locuslab/SATNet | 383 | 11198431 | #!/usr/bin/env python3
#
# Partly derived from:
# https://github.com/locuslab/optnet/blob/master/sudoku/train.py
import argparse
import os
import shutil
import csv
import numpy as np
import numpy.random as npr
#import setproctitle
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
from tqdm.auto import tqdm
import satnet
class SudokuSolver(nn.Module):
def __init__(self, boardSz, aux, m):
super(SudokuSolver, self).__init__()
n = boardSz**6
self.sat = satnet.SATNet(n, m, aux)
def forward(self, y_in, mask):
out = self.sat(y_in, mask)
return out
class DigitConv(nn.Module):
'''
Convolutional neural network for MNIST digit recognition. From:
https://github.com/pytorch/examples/blob/master/mnist/main.py
'''
def __init__(self):
super(DigitConv, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4*4*50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4*4*50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)[:,:9].contiguous()
class MNISTSudokuSolver(nn.Module):
def __init__(self, boardSz, aux, m):
super(MNISTSudokuSolver, self).__init__()
self.digit_convnet = DigitConv()
self.sudoku_solver = SudokuSolver(boardSz, aux, m)
self.boardSz = boardSz
self.nSq = boardSz**2
def forward(self, x, is_inputs):
nBatch = x.shape[0]
x = x.flatten(start_dim = 0, end_dim = 1)
digit_guess = self.digit_convnet(x)
puzzles = digit_guess.view(nBatch, self.nSq * self.nSq * self.nSq)
solution = self.sudoku_solver(puzzles, is_inputs)
return solution
class CSVLogger(object):
def __init__(self, fname):
self.f = open(fname, 'w')
self.logger = csv.writer(self.f)
def log(self, fields):
self.logger.writerow(fields)
self.f.flush()
class FigLogger(object):
def __init__(self, fig, base_ax, title):
self.colors = ['tab:red', 'tab:blue']
self.labels = ['Loss (entropy)', 'Error']
self.markers = ['d', '.']
self.axes = [base_ax, base_ax.twinx()]
base_ax.set_xlabel('Epochs')
base_ax.set_title(title)
for i, ax in enumerate(self.axes):
ax.set_ylabel(self.labels[i], color=self.colors[i])
ax.tick_params(axis='y', labelcolor=self.colors[i])
self.reset()
self.fig = fig
def log(self, args):
for i, arg in enumerate(args[-2:]):
self.curves[i].append(arg)
x = list(range(len(self.curves[i])))
self.axes[i].plot(x, self.curves[i], self.colors[i], marker=self.markers[i])
self.axes[i].set_ylim(0, 1.05)
self.fig.canvas.draw()
def reset(self):
for ax in self.axes:
for line in ax.lines:
line.remove()
self.curves = [[], []]
def print_header(msg):
print('===>', msg)
def find_unperm(perm):
unperm = torch.zeros_like(perm)
for i in range(perm.size(0)):
unperm[perm[i]] = i
return unperm
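# For example (illustrative): with perm = torch.tensor([2, 0, 1]), find_unperm(perm) returns
# tensor([1, 2, 0]); indexing the permuted columns with it restores the original order.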
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='sudoku')
parser.add_argument('--boardSz', type=int, default=3)
parser.add_argument('--batchSz', type=int, default=40)
parser.add_argument('--testBatchSz', type=int, default=40)
parser.add_argument('--aux', type=int, default=300)
parser.add_argument('--m', type=int, default=600)
parser.add_argument('--nEpoch', type=int, default=100)
parser.add_argument('--testPct', type=float, default=0.1)
parser.add_argument('--lr', type=float, default=2e-3)
parser.add_argument('--save', type=str)
parser.add_argument('--model', type=str)
parser.add_argument('--no_cuda', action='store_true')
parser.add_argument('--mnist', action='store_true')
parser.add_argument('--perm', action='store_true')
args = parser.parse_args()
# For debugging: fix the random seed
npr.seed(1)
torch.manual_seed(7)
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
print('Using', torch.cuda.get_device_name(0))
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.cuda.init()
save = 'sudoku{}{}.boardSz{}-aux{}-m{}-lr{}-bsz{}'.format(
'.perm' if args.perm else '', '.mnist' if args.mnist else '',
args.boardSz, args.aux, args.m, args.lr, args.batchSz)
if args.save: save = '{}-{}'.format(args.save, save)
save = os.path.join('logs', save)
if os.path.isdir(save): shutil.rmtree(save)
os.makedirs(save)
#setproctitle.setproctitle('sudoku.{}'.format(save))
print_header('Loading data')
with open(os.path.join(args.data_dir, 'features.pt'), 'rb') as f:
X_in = torch.load(f)
with open(os.path.join(args.data_dir, 'features_img.pt'), 'rb') as f:
Ximg_in = torch.load(f)
with open(os.path.join(args.data_dir, 'labels.pt'), 'rb') as f:
Y_in = torch.load(f)
with open(os.path.join(args.data_dir, 'perm.pt'), 'rb') as f:
perm = torch.load(f)
N = X_in.size(0)
nTrain = int(N*(1.-args.testPct))
nTest = N-nTrain
assert(nTrain % args.batchSz == 0)
assert(nTest % args.testBatchSz == 0)
print_header('Forming inputs')
X, Ximg, Y, is_input = process_inputs(X_in, Ximg_in, Y_in, args.boardSz)
data = Ximg if args.mnist else X
if args.cuda: data, is_input, Y = data.cuda(), is_input.cuda(), Y.cuda()
unperm = None
if args.perm and not args.mnist:
print('Applying permutation')
data[:,:], Y[:,:], is_input[:,:] = data[:,perm], Y[:,perm], is_input[:,perm]
unperm = find_unperm(perm)
train_set = TensorDataset(data[:nTrain], is_input[:nTrain], Y[:nTrain])
test_set = TensorDataset(data[nTrain:], is_input[nTrain:], Y[nTrain:])
print_header('Building model')
if args.mnist:
model = MNISTSudokuSolver(args.boardSz, args.aux, args.m)
else:
model = SudokuSolver(args.boardSz, args.aux, args.m)
if args.cuda: model = model.cuda()
if args.mnist:
optimizer = optim.Adam([
{'params': model.sudoku_solver.parameters(), 'lr': args.lr},
{'params': model.digit_convnet.parameters(), 'lr': 1e-5},
])
else:
optimizer = optim.Adam(model.parameters(), lr=args.lr)
if args.model:
model.load_state_dict(torch.load(args.model))
train_logger = CSVLogger(os.path.join(save, 'train.csv'))
test_logger = CSVLogger(os.path.join(save, 'test.csv'))
fields = ['epoch', 'loss', 'err']
train_logger.log(fields)
test_logger.log(fields)
test(args.boardSz, 0, model, optimizer, test_logger, test_set, args.testBatchSz, unperm)
for epoch in range(1, args.nEpoch+1):
train(args.boardSz, epoch, model, optimizer, train_logger, train_set, args.batchSz, unperm)
test(args.boardSz, epoch, model, optimizer, test_logger, test_set, args.testBatchSz, unperm)
#torch.save(model.state_dict(), os.path.join(save, 'it'+str(epoch)+'.pth'))
def process_inputs(X, Ximg, Y, boardSz):
is_input = X.sum(dim=3, keepdim=True).expand_as(X).int().sign()
Ximg = Ximg.flatten(start_dim=1, end_dim=2)
Ximg = Ximg.unsqueeze(2).float()
X = X.view(X.size(0), -1)
Y = Y.view(Y.size(0), -1)
is_input = is_input.view(is_input.size(0), -1)
return X, Ximg, Y, is_input
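# Shape note (derived from the code above, for a 9x9 board): X and Y are flattened to
# (N, 9*9*9) one-hot bit vectors, Ximg holds the per-cell MNIST digit images (used with
# --mnist), and is_input is 1 on every bit of a given (clue) cell and 0 elsewhere.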
def run(boardSz, epoch, model, optimizer, logger, dataset, batchSz, to_train=False, unperm=None):
loss_final, err_final = 0, 0
loader = DataLoader(dataset, batch_size=batchSz)
tloader = tqdm(enumerate(loader), total=len(loader))
for i,(data,is_input,label) in tloader:
if to_train: optimizer.zero_grad()
preds = model(data.contiguous(), is_input.contiguous())
loss = nn.functional.binary_cross_entropy(preds, label)
if to_train:
loss.backward()
optimizer.step()
err = computeErr(preds.data, boardSz, unperm)/batchSz
tloader.set_description('Epoch {} {} Loss {:.4f} Err: {:.4f}'.format(epoch, ('Train' if to_train else 'Test '), loss.item(), err))
loss_final += loss.item()
err_final += err
loss_final, err_final = loss_final/len(loader), err_final/len(loader)
logger.log((epoch, loss_final, err_final))
if not to_train:
print('TESTING SET RESULTS: Average loss: {:.4f} Err: {:.4f}'.format(loss_final, err_final))
#print('memory: {:.2f} MB, cached: {:.2f} MB'.format(torch.cuda.memory_allocated()/2.**20, torch.cuda.memory_cached()/2.**20))
torch.cuda.empty_cache()
def train(boardSz, epoch, model, optimizer, logger, dataset, batchSz, unperm=None):
    run(boardSz, epoch, model, optimizer, logger, dataset, batchSz, True, unperm)
@torch.no_grad()
def test(boardSz, epoch, model, optimizer, logger, dataset, batchSz, unperm=None):
    run(boardSz, epoch, model, optimizer, logger, dataset, batchSz, False, unperm)
@torch.no_grad()
def computeErr(pred_flat, n, unperm):
if unperm is not None: pred_flat[:,:] = pred_flat[:,unperm]
nsq = n ** 2
pred = pred_flat.view(-1, nsq, nsq, nsq)
batchSz = pred.size(0)
s = (nsq-1)*nsq//2 # 0 + 1 + ... + n^2-1
I = torch.max(pred, 3)[1].squeeze().view(batchSz, nsq, nsq)
def invalidGroups(x):
valid = (x.min(1)[0] == 0)
valid *= (x.max(1)[0] == nsq-1)
valid *= (x.sum(1) == s)
return valid.bitwise_not()
boardCorrect = torch.ones(batchSz).type_as(pred)
for j in range(nsq):
# Check the jth row and column.
boardCorrect[invalidGroups(I[:,j,:])] = 0
boardCorrect[invalidGroups(I[:,:,j])] = 0
# Check the jth block.
row, col = n*(j // n), n*(j % n)
M = invalidGroups(I[:,row:row+n,col:col+n].contiguous().view(batchSz,-1))
boardCorrect[M] = 0
if boardCorrect.sum() == 0:
return batchSz
return float(batchSz-boardCorrect.sum())
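# Note: computeErr returns the number of boards in the batch that violate at least one
# row, column, or block constraint, so err in run() is the fraction of unsolved puzzles.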
if __name__=='__main__':
main()
|
eve/tests/test_settings_env.py | pmwheatley/eve | 2,288 | 11198489 | # -*- coding: utf-8 -*-
# this is just a helper file which we are going
# to try to load via an environment variable in
# the test_existing_env_config() test case
DOMAIN = {'env_domain': {}}
|
mobileFacenet_64_PReLU.py | GzuPark/EXTD_Pytorch | 190 | 11198494 | import torch.nn as nn
import math
import torch
import torch.nn.functional as F
def conv_bn(inp, oup, stride, k_size=3):
return nn.Sequential(
nn.Conv2d(inp, oup, k_size, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.PReLU()
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.PReLU()
)
class DWC(nn.Module):
def __init__(self, in_channels, out_channels):
super(DWC, self).__init__()
#self.depthwise = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=(7,6),
#stride=1, padding=0, groups=in_channels, bias=False)
self.batch_norm_in = nn.BatchNorm2d(in_channels)
self.depthwise = nn.AvgPool2d((7, 6), stride=1, padding=0)
self.pointwise = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1,
stride=1, padding=0, bias=False)
def forward(self, x):
x = self.depthwise(x)
#x = self.batch_norm_in(x)
x = self.pointwise(x)
return x
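# Note: with the commented-out 7x6 depthwise convolution replaced by a 7x6 average pool,
# DWC acts as a global depthwise operation over the final feature map followed by a 1x1
# pointwise projection, similar in spirit to MobileFaceNet's global depthwise conv (GDConv).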
class Max_AvgPool(nn.Module):
def __init__(self, kernel_size=(3,3), stride=2, padding=1, dim=128):
super(Max_AvgPool, self).__init__()
self.Maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)
self.Avgpool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding)
def forward(self, x):
x = self.Maxpool(x) + self.Avgpool(x) # add some channelwise gating?
return x
class gated_conv1x1(nn.Module):
def __init__(self, inc=128, outc=128):
super(gated_conv1x1, self).__init__()
self.inp = int(inc/2)
self.oup = int(outc/2)
self.conv1x1_1 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=False)
self.gate_1 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=True)
self.conv1x1_2 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=False)
self.gate_2 = nn.Conv2d(self.inp, self.oup, 1, 1, 0, bias=True)
def forward(self, x):
x_1 = x[:, :self.inp, :, :]
x_2 = x[:, self.inp:, :, :]
a_1 = self.conv1x1_1(x_1)
        g_1 = torch.sigmoid(self.gate_1(x_1))
a_2 = self.conv1x1_2(x_2)
        g_2 = torch.sigmoid(self.gate_2(x_2))
ret = torch.cat((a_1*g_1, a_2*g_2), 1)
return ret
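# The gated 1x1 conv above splits the channels into two halves and, for each
# half, computes conv1x1(x_half) * sigmoid(gate(x_half)) before concatenating
# the halves back together. Illustrative check (assumed sizes):
#   g = gated_conv1x1(inc=128, outc=128)
#   y = g(torch.randn(1, 128, 7, 6))   # -> (1, 128, 7, 6)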
class InvertedResidual_dwc(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual_dwc, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
self.conv = []
if expand_ratio == 1:
self.conv.append(nn.Conv2d(inp, hidden_dim, kernel_size=(3, 3), stride=stride, padding=1, groups=hidden_dim))
self.conv.append(nn.BatchNorm2d(hidden_dim))
self.conv.append(nn.PReLU())
#self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1))
#self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup))
self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(oup))
else:
#self.conv.append(gated_conv1x1(inc=inp,outc=hidden_dim))
self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(hidden_dim))
self.conv.append(nn.PReLU())
self.conv.append(nn.Conv2d(hidden_dim, hidden_dim, kernel_size=(3, 3), stride=stride, padding=1, groups=hidden_dim))
self.conv.append(nn.BatchNorm2d(hidden_dim))
self.conv.append(nn.PReLU())
#self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup))
self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(oup))
self.conv = nn.Sequential(*self.conv)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
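# Both inverted-residual variants (the depthwise one above and the pooling one
# below) follow the MobileNetV2-style rule: the skip connection is only added
# when stride == 1 and inp == oup. E.g. (assumed numbers)
# InvertedResidual_dwc(64, 64, 1, expand_ratio=2) keeps the skip, while a
# stride of 2 or a channel change drops it.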
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
self.conv = []
if expand_ratio == 1:
self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1))
#self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup))
self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(oup))
else:
#self.conv.append(gated_conv1x1(inc=inp,outc=hidden_dim))
self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(hidden_dim))
self.conv.append(nn.PReLU())
self.conv.append(nn.MaxPool2d(kernel_size=(3, 3), stride=stride, padding=1))
#self.conv.append(gated_conv1x1(inc=hidden_dim,outc=oup))
self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
self.conv.append(nn.BatchNorm2d(oup))
self.conv = nn.Sequential(*self.conv)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class Net(nn.Module): #mobileNet v2
def __init__(self, embedding_size=128, input_size=224, width_mult=1.):
super(Net, self).__init__()
block = InvertedResidual
block_dwc = InvertedResidual_dwc
input_channel = 64
last_channel = 256
interverted_residual_setting = [
            # t, c, n, s
            # (t = expand ratio, c = output channels, n = repeats, s = stride of the group's last block)
[1, 64, 1, 1], # depthwise conv for first row
[2, 64, 2, 1],
[4, 64, 2, 2],
[2, 64, 2, 1],
[4, 64, 5, 1],
[2, 64, 2, 2],
[2, 64, 6, 2],
]
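        # Together with the stride-2 stem built below, this setting downsamples
        # the input by 2 * 2 * 2 * 2 = 16 overall (each group's stride s is
        # applied to its last block in the loop below).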
# building first layer
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
self.features = [conv_bn(3, input_channel, 2)]
# building inverted residual
cnt = 0
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if cnt>1:
if i == n - 1: # reduce the featuremap in the last.
self.features.append(block_dwc(input_channel, output_channel, s, expand_ratio=t))
else:
self.features.append(block_dwc(input_channel, output_channel, 1, expand_ratio=t))
input_channel = output_channel
else:
if i == n - 1: # reduce the featuremap in the last.
self.features.append(block_dwc(input_channel, output_channel, s, expand_ratio=t))
else:
self.features.append(block_dwc(input_channel, output_channel, 1, expand_ratio=t))
input_channel = output_channel
cnt+=1
# building last several layers
self.features.append(gated_conv1x1(input_channel, self.last_channel))
# make it nn.Sequential
self.features_sequential = nn.Sequential(*self.features)
# Global depthwise conv
#self.GDCconv = DWC(self.last_channel, embedding_size)
self._initialize_weights()
def forward(self, x):
x = self.features_sequential(x).view(-1, 256*4)
return x
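    # Rough usage sketch (the input resolution is an assumption, not taken from
    # the original repo): view(-1, 256*4) implies a final 256-channel feature
    # map on a 2x2 grid, which with the overall stride of 16 corresponds to a
    # 32x32 input.
    #   net = Net()
    #   feats = net(torch.randn(1, 3, 32, 32))   # -> (1, 1024)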
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_() |
tests/strategies/staggered_orders/test_staggered_orders_complex.py | fakegit/DEXBot | 249 | 11198513 | <gh_stars>100-1000
import logging
import math
import time
from datetime import datetime
import pytest
from bitshares.amount import Amount
# Turn on debug for dexbot logger
log = logging.getLogger("dexbot")
log.setLevel(logging.DEBUG)
MODES = ['mountain', 'valley', 'neutral', 'buy_slope', 'sell_slope']
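# As exercised by the tests below, each mode defines how order sizes are
# distributed across the price range (e.g. 'valley' keeps orders equal-sized in
# their own asset, 'mountain' keeps them equal-sized in the opposite asset).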
###################
# The most complex methods, which depend on the high-level methods
###################
def test_maintain_strategy_manual_cp_empty_market(worker):
"""On empty market, center price should be set to manual CP."""
worker.cancel_all_orders()
# Undefine market_center_price
worker.market_center_price = None
# Workaround for https://github.com/Codaone/DEXBot/issues/566
worker.last_check = datetime(2000, 1, 1)
worker.maintain_strategy()
assert worker.market_center_price == worker.center_price
def test_maintain_strategy_no_manual_cp_empty_market(worker):
"""Strategy should not work on empty market if no manual CP was set."""
worker.cancel_all_orders()
# Undefine market_center_price
worker.market_center_price = None
worker.center_price = None
# Workaround for https://github.com/Codaone/DEXBot/issues/566
worker.last_check = datetime(2000, 1, 1)
worker.maintain_strategy()
assert worker.market_center_price is None
def test_maintain_strategy_no_operation_results(worker, monkeypatch):
"""https://github.com/Codaone/DEXBot/issues/764."""
def mock(*args, **kwargs):
return {'operation_results': None}
monkeypatch.setattr(worker, 'retry_action', mock)
worker.maintain_strategy()
# Run twice!
worker.maintain_strategy()
@pytest.mark.parametrize('mode', MODES)
def test_maintain_strategy_basic(mode, worker, do_initial_allocation):
"""Check if intial orders placement is correct."""
worker = do_initial_allocation(worker, mode)
# Check target spread is reached
assert worker.get_actual_spread() == pytest.approx(worker.target_spread, abs=(worker.increment / 2))
# Check number of orders
price = worker.center_price * math.sqrt(1 + worker.target_spread)
sell_orders_count = worker.calc_sell_orders_count(price, worker.upper_bound)
assert len(worker.sell_orders) == sell_orders_count
price = worker.center_price / math.sqrt(1 + worker.target_spread)
buy_orders_count = worker.calc_buy_orders_count(price, worker.lower_bound)
assert len(worker.buy_orders) == buy_orders_count
# Make sure balances are allocated after full maintenance
# Unallocated balances are less than closest order amount
assert worker.base_balance['amount'] < worker.buy_orders[0]['base']['amount']
assert worker.quote_balance['amount'] < worker.sell_orders[0]['base']['amount']
# Test how ranges are covered
# Expect furthest order price to be less than increment x2
assert worker.buy_orders[-1]['price'] < worker.lower_bound * (1 + worker.increment * 2)
assert worker.sell_orders[-1]['price'] ** -1 > worker.upper_bound / (1 + worker.increment * 2)
@pytest.mark.parametrize('mode', MODES)
def test_maintain_strategy_one_sided(mode, base_worker, config_only_base, do_initial_allocation):
"""Test for one-sided start (buy only)"""
worker = base_worker(config_only_base)
do_initial_allocation(worker, mode)
# Check number of orders
price = worker.center_price / math.sqrt(1 + worker.target_spread)
buy_orders_count = worker.calc_buy_orders_count(price, worker.lower_bound)
assert len(worker.buy_orders) == buy_orders_count
# Make sure balances are allocated after full maintenance
# Unallocated balances are less than closest order amount
assert worker.base_balance['amount'] < worker.buy_orders[0]['base']['amount']
# Test how ranges are covered
# Expect furthest order price to be less than increment x2
assert worker.buy_orders[-1]['price'] < worker.lower_bound * (1 + worker.increment * 2)
def test_maintain_strategy_1sat(base_worker, config_1_sat, do_initial_allocation):
worker = base_worker(config_1_sat)
do_initial_allocation(worker, worker.mode)
# Check target spread is reached
assert worker.get_actual_spread() == pytest.approx(worker.target_spread, abs=(worker.increment / 2))
# Check number of orders
price = worker.center_price * math.sqrt(1 + worker.target_spread)
sell_orders_count = worker.calc_sell_orders_count(price, worker.upper_bound)
assert len(worker.sell_orders) == sell_orders_count
price = worker.center_price / math.sqrt(1 + worker.target_spread)
buy_orders_count = worker.calc_buy_orders_count(price, worker.lower_bound)
assert len(worker.buy_orders) == buy_orders_count
# Make sure balances are allocated after full maintenance
# Unallocated balances are less than closest order amount
assert worker.base_balance['amount'] < worker.buy_orders[0]['base']['amount']
assert worker.quote_balance['amount'] < worker.sell_orders[0]['base']['amount']
# Test how ranges are covered
# Expect furthest order price to be less than increment x2
assert worker.buy_orders[-1]['price'] < worker.lower_bound * (1 + worker.increment * 2)
assert worker.sell_orders[-1]['price'] ** -1 > worker.upper_bound / (1 + worker.increment * 2)
# Combine each mode with base and quote
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_maintain_strategy_fallback_logic(asset, worker, do_initial_allocation):
"""Check fallback logic: when spread is not reached, furthest order should be cancelled to make free funds to close
spread."""
do_initial_allocation(worker, worker.mode)
# TODO: strategy must turn off bootstrapping once target spread is reached
worker['bootstrapping'] = False
if asset == 'base':
worker.cancel_orders_wrapper(worker.buy_orders[0])
amount = worker.balance(worker.market['base']['symbol'])
worker.bitshares.reserve(amount, account=worker.account)
elif asset == 'quote':
worker.cancel_orders_wrapper(worker.sell_orders[0])
amount = worker.balance(worker.market['quote']['symbol'])
worker.bitshares.reserve(amount, account=worker.account)
worker.refresh_orders()
spread_before = worker.get_actual_spread()
assert spread_before > worker.target_spread + worker.increment
for _ in range(0, 6):
worker.maintain_strategy()
worker.refresh_orders()
spread_after = worker.get_actual_spread()
assert spread_after <= worker.target_spread + worker.increment
@pytest.mark.parametrize('asset', ['base', 'quote'])
def test_maintain_strategy_fallback_logic_disabled(asset, worker, do_initial_allocation):
"""Check fallback logic: when spread is not reached, furthest order should be cancelled to make free funds to close
spread."""
worker.enable_fallback_logic = False
worker.operational_depth = 2
do_initial_allocation(worker, 'valley')
# TODO: strategy must turn off bootstrapping once target spread is reached
worker['bootstrapping'] = False
if asset == 'base':
worker.cancel_orders_wrapper(worker.buy_orders[:3])
amount = worker.buy_orders[0]['base'] * 3
worker.bitshares.reserve(amount, account=worker.account)
elif asset == 'quote':
worker.cancel_orders_wrapper(worker.sell_orders[:3])
amount = worker.sell_orders[0]['base'] * 3
worker.bitshares.reserve(amount, account=worker.account)
worker.refresh_orders()
spread_before = worker.get_actual_spread()
assert spread_before > worker.target_spread + worker.increment
for _ in range(0, 6):
worker.maintain_strategy()
worker.refresh_orders()
spread_after = worker.get_actual_spread()
assert spread_after == spread_before
# Also check that operational depth is proper
assert len(worker.real_buy_orders) == pytest.approx(worker.operational_depth, abs=1)
assert len(worker.real_sell_orders) == pytest.approx(worker.operational_depth, abs=1)
def test_check_operational_depth(worker, do_initial_allocation):
"""Test for correct operational depth following."""
worker.operational_depth = 10
do_initial_allocation(worker, worker.mode)
worker['bootstrapping'] = False
# abs=1 means we're accepting slight error
assert len(worker.buy_orders) == pytest.approx(worker.operational_depth, abs=1)
assert len(worker.sell_orders) == pytest.approx(worker.operational_depth, abs=1)
worker.operational_depth = 2
worker.refresh_orders()
worker.check_operational_depth(worker.real_buy_orders, worker.virtual_buy_orders)
worker.check_operational_depth(worker.real_sell_orders, worker.virtual_sell_orders)
assert len(worker.real_buy_orders) == pytest.approx(worker.operational_depth, abs=1)
assert len(worker.real_sell_orders) == pytest.approx(worker.operational_depth, abs=1)
worker.operational_depth = 8
worker.refresh_orders()
worker.check_operational_depth(worker.real_buy_orders, worker.virtual_buy_orders)
worker.check_operational_depth(worker.real_sell_orders, worker.virtual_sell_orders)
assert len(worker.real_buy_orders) == pytest.approx(worker.operational_depth, abs=1)
assert len(worker.real_sell_orders) == pytest.approx(worker.operational_depth, abs=1)
def test_increase_order_sizes_valley_basic(worker, do_initial_allocation, issue_asset, increase_until_allocated):
"""Test increases in valley mode when all orders are equal (new allocation round)."""
do_initial_allocation(worker, 'valley')
# Double worker's balance
issue_asset(worker.market['base']['symbol'], worker.base_total_balance, worker.account.name)
issue_asset(worker.market['quote']['symbol'], worker.quote_total_balance, worker.account.name)
increase_until_allocated(worker)
# All orders must be equal-sized
for order in worker.buy_orders:
assert order['base']['amount'] == worker.buy_orders[0]['base']['amount']
for order in worker.sell_orders:
assert order['base']['amount'] == worker.sell_orders[0]['base']['amount']
def test_increase_order_sizes_valley_direction(worker, do_initial_allocation, issue_asset, increase_until_allocated):
"""
Test increase direction in valley mode: new allocation round must be started from closest order.
Buy side, amounts in BASE:
100 100 100 100 100
100 100 100 100 115
100 100 100 115 115
100 100 115 115 115
"""
do_initial_allocation(worker, 'valley')
# Add balance to increase several orders; 1.01 to mitigate rounding issues
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
to_issue = worker.buy_orders[0]['base']['amount'] * (increase_factor - 1) * 3 * 1.01
issue_asset(worker.market['base']['symbol'], to_issue, worker.account.name)
to_issue = worker.sell_orders[0]['base']['amount'] * (increase_factor - 1) * 3 * 1.01
issue_asset(worker.market['quote']['symbol'], to_issue, worker.account.name)
increase_until_allocated(worker)
for order in worker.buy_orders:
assert order['base']['amount'] <= worker.buy_orders[0]['base']['amount']
for order in worker.sell_orders:
assert order['base']['amount'] <= worker.sell_orders[0]['base']['amount']
def test_increase_order_sizes_valley_transit_from_mountain(worker, do_initial_allocation, issue_asset):
"""
Transition from mountain to valley.
Buy side, amounts in BASE, increase should be like this:
70 80 90 100 <c>
80 80 90 100 <c>
80 90 90 100 <c>
90 90 90 100 <c>
"""
# Set up mountain
do_initial_allocation(worker, 'mountain')
# Switch to valley
worker.mode = 'valley'
for _ in range(0, 6):
# Add balance to increase ~1 order
to_issue = worker.buy_orders[0]['base']['amount']
issue_asset(worker.market['base']['symbol'], to_issue, worker.account.name)
previous_buy_orders = worker.buy_orders
worker.refresh_balances()
worker.increase_order_sizes('base', worker.base_balance, previous_buy_orders)
worker.refresh_orders()
for i in range(-1, -6, -1):
if (
previous_buy_orders[i]['base']['amount'] < previous_buy_orders[i - 1]['base']['amount']
and previous_buy_orders[i - 1]['base']['amount'] - previous_buy_orders[i]['base']['amount']
> previous_buy_orders[i]['base']['amount'] * worker.increment / 2
):
# Expect increased order if closer order is bigger than further
assert worker.buy_orders[i]['base']['amount'] > previous_buy_orders[i]['base']['amount']
# Only one check at a time
break
def test_increase_order_sizes_valley_smaller_closest_orders(worker, do_initial_allocation, increase_until_allocated):
"""
    Test increase when the closest-to-center orders are smaller than the further orders. This is a normal situation
    when the initial sides are imbalanced and several orders were filled.
Buy side, amounts in BASE:
100 100 100 10 10 10 <center>
"""
worker = do_initial_allocation(worker, 'valley')
increase_until_allocated(worker)
# Cancel several closest orders
num_orders_to_cancel = 3
num_orders_before = len(worker.own_orders)
worker.cancel_orders_wrapper(worker.buy_orders[:num_orders_to_cancel])
worker.cancel_orders_wrapper(worker.sell_orders[:num_orders_to_cancel])
worker.refresh_orders()
worker.refresh_balances()
# Place limited orders
initial_base = worker.buy_orders[0]['base']['amount']
initial_quote = worker.sell_orders[0]['base']['amount']
base_limit = initial_base / 2
quote_limit = initial_quote / 2
for _ in range(0, num_orders_to_cancel):
worker.place_closer_order('base', worker.buy_orders[0], own_asset_limit=base_limit)
worker.place_closer_order('quote', worker.sell_orders[0], own_asset_limit=quote_limit)
worker.refresh_orders()
increase_until_allocated(worker)
# Number of orders should be the same
num_orders_after = len(worker.own_orders)
assert num_orders_before == num_orders_after
# New orders amounts should be equal to initial ones
    # TODO: this relaxed test checks the next-closest orders because, due to floating-point calculations, the closest
    # orders may not get increased
assert worker.buy_orders[1]['base']['amount'] == initial_base
assert worker.sell_orders[1]['base']['amount'] == initial_quote
def test_increase_order_sizes_valley_imbalaced_small_further(worker, do_initial_allocation, increase_until_allocated):
"""
If furthest orders are smaller than closest, they should be increased first. See
https://github.com/Codaone/DEXBot/issues/444 for details.
Buy side, amounts in BASE:
5 5 5 100 100 10 10 10 <center>
Should be:
10 10 10 100 100 10 10 10 <center>
"""
worker = do_initial_allocation(worker, 'valley')
# Cancel several closest orders
num_orders_to_cancel = 3
worker.cancel_orders_wrapper(worker.buy_orders[:num_orders_to_cancel])
# Cancel furthest orders
worker.cancel_orders_wrapper(worker.buy_orders[-num_orders_to_cancel:])
worker.refresh_orders()
worker.refresh_balances()
# Place limited orders
initial_base = worker.buy_orders[0]['base']['amount']
base_limit = initial_base / 2
for i in range(0, num_orders_to_cancel):
# Place smaller closer order
worker.place_closer_order('base', worker.buy_orders[0], own_asset_limit=base_limit)
# place_further_order() doesn't have own_asset_limit, so do own calculation
further_order = worker.place_further_order('base', worker.buy_orders[-1], place_order=False)
# Place smaller further order
to_buy = base_limit / further_order['price']
worker.place_market_buy_order(to_buy, further_order['price'])
worker.refresh_orders()
# Drop excess balance to only allow one increase round
worker.refresh_balances()
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
to_keep = base_limit * (increase_factor - 1) * num_orders_to_cancel * 2 * 1.01
to_drop = worker.base_balance['amount'] - to_keep
amount = Amount(to_drop, worker.market['base']['symbol'], bitshares_instance=worker.bitshares)
worker.bitshares.reserve(amount, account=worker.account)
increase_until_allocated(worker)
for i in range(1, num_orders_to_cancel):
further_order_amount = worker.buy_orders[-i]['base']['amount']
closer_order_amount = worker.buy_orders[i - 1]['base']['amount']
assert further_order_amount == closer_order_amount
def test_increase_order_sizes_valley_closest_order(worker, do_initial_allocation, issue_asset):
"""Should test proper calculation of closest order: order should not be less that min_increase_factor."""
worker = do_initial_allocation(worker, 'valley')
# Add balance to increase 2 orders
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
to_issue = worker.buy_orders[0]['base']['amount'] * (increase_factor - 1) * 2
issue_asset(worker.market['base']['symbol'], to_issue, worker.account.name)
previous_buy_orders = worker.buy_orders
worker.refresh_balances()
worker.increase_order_sizes('base', worker.base_balance, previous_buy_orders)
worker.refresh_orders()
assert worker.buy_orders[0]['base']['amount'] - previous_buy_orders[0]['base']['amount'] == pytest.approx(
previous_buy_orders[0]['base']['amount'] * (increase_factor - 1)
)
def test_increase_order_sizes_mountain_basic(worker, do_initial_allocation, issue_asset, increase_until_allocated):
"""
Test increases in mountain mode when all orders are equal (new allocation round).
New orders should be equal in their "quote"
"""
do_initial_allocation(worker, 'mountain')
increase_until_allocated(worker)
# Double worker's balance
issue_asset(worker.market['base']['symbol'], worker.base_total_balance, worker.account.name)
issue_asset(worker.market['quote']['symbol'], worker.quote_total_balance, worker.account.name)
increase_until_allocated(worker)
    # All orders must be equal-sized in their quote, accepting a difference of no more than increase_factor.
    # This means all orders were increased, though an unfinished increase round may remain.
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
for order in worker.buy_orders:
assert order['quote']['amount'] == pytest.approx(worker.buy_orders[0]['quote']['amount'], rel=(increase_factor))
for order in worker.sell_orders:
assert order['quote']['amount'] == pytest.approx(
worker.sell_orders[0]['quote']['amount'], rel=(increase_factor)
)
def test_increase_order_sizes_mountain_direction(worker, do_initial_allocation, issue_asset, increase_until_allocated):
"""
Test increase direction in mountain mode.
Buy side, amounts in QUOTE:
10 10 10 10 10
15 10 10 10 10
15 15 10 10 10
15 15 15 10 10
"""
do_initial_allocation(worker, 'mountain')
increase_until_allocated(worker)
worker.mode = 'mountain'
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
for i in range(-1, -6, -1):
# Add balance to increase ~1 order
to_issue = worker.buy_orders[i]['base']['amount'] * (increase_factor - 1)
issue_asset(worker.market['base']['symbol'], to_issue, worker.account.name)
previous_buy_orders = worker.buy_orders
worker.refresh_balances()
worker.increase_order_sizes('base', worker.base_balance, previous_buy_orders)
worker.refresh_orders()
for i in range(-1, -6, -1):
if (
previous_buy_orders[i]['quote']['amount'] > previous_buy_orders[i - 1]['quote']['amount']
and previous_buy_orders[i]['quote']['amount'] - previous_buy_orders[i - 1]['quote']['amount']
> previous_buy_orders[i - 1]['quote']['amount'] * worker.increment / 2
):
# Expect increased order if further order is bigger than closer
assert worker.buy_orders[i - 1]['quote']['amount'] > previous_buy_orders[i - 1]['quote']['amount']
# Only one check at a time
break
def test_increase_order_sizes_mountain_furthest_order(
worker, do_initial_allocation, increase_until_allocated, issue_asset
):
"""Should test proper calculation of furthest order: try to maximize, don't allow too small increase."""
do_initial_allocation(worker, 'mountain')
previous_buy_orders = worker.buy_orders
# Add balance to increase ~1 order
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
to_issue = worker.buy_orders[-1]['base']['amount'] * (increase_factor - 1) * 1.1
issue_asset(worker.market['base']['symbol'], to_issue, worker.account.name)
worker.refresh_balances()
increase_until_allocated(worker)
worker.refresh_orders()
assert worker.buy_orders[-1]['base']['amount'] - previous_buy_orders[-1]['base']['amount'] == pytest.approx(
previous_buy_orders[-1]['base']['amount'] * (increase_factor - 1),
rel=(10 ** -worker.market['base']['precision']),
)
def test_increase_order_sizes_mountain_imbalanced(worker, do_initial_allocation):
"""
    Test a situation where the sides were imbalanced and several orders were filled on the opposite side. This also
    tests the transition from valley to mountain.
Buy side, amounts in QUOTE:
100 100 100 10 10 10 <c>
100 100 100 20 10 10 <c>
100 100 100 20 20 10 <c>
"""
do_initial_allocation(worker, 'mountain')
worker.mode = 'mountain'
# Cancel several closest orders
num_orders_to_cancel = 3
worker.cancel_orders_wrapper(worker.buy_orders[:num_orders_to_cancel])
worker.refresh_orders()
worker.refresh_balances()
# Place limited orders
initial_base = worker.buy_orders[0]['base']['amount']
base_limit = initial_base / 2
# Add own_asset_limit only for first new order
worker.place_closer_order('base', worker.buy_orders[0], own_asset_limit=base_limit)
worker.refresh_orders()
for _ in range(1, num_orders_to_cancel):
worker.place_closer_order('base', worker.buy_orders[0])
worker.refresh_orders()
previous_buy_orders = worker.buy_orders
for _ in range(0, num_orders_to_cancel):
worker.refresh_balances()
worker.increase_order_sizes('base', worker.base_balance, worker.buy_orders)
worker.refresh_orders()
for order_index in range(0, num_orders_to_cancel):
order = worker.buy_orders[order_index]
if (
previous_buy_orders[order_index]['quote']['amount']
< previous_buy_orders[order_index + 1]['quote']['amount']
and previous_buy_orders[order_index + 1]['base']['amount']
- previous_buy_orders[order_index]['base']['amount']
> previous_buy_orders[order_index]['base']['amount'] * worker.increment / 2
):
# If order before increase was smaller than further order, expect to see it increased
assert order['quote']['amount'] > previous_buy_orders[order_index]['quote']['amount']
break
def test_increase_order_sizes_neutral_basic(worker, do_initial_allocation, issue_asset, increase_until_allocated):
"""Test increases in neutral mode when all orders are equal (new allocation round)"""
do_initial_allocation(worker, 'neutral')
increase_until_allocated(worker)
# Double worker's balance
issue_asset(worker.market['base']['symbol'], worker.base_total_balance, worker.account.name)
issue_asset(worker.market['quote']['symbol'], worker.quote_total_balance, worker.account.name)
increase_until_allocated(worker)
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
for index, order in enumerate(worker.buy_orders):
if index == 0:
continue
        # Assume amounts are equal within some tolerance, or accept a difference of increase_factor size to detect a
        # new unfinished increase round
assert (
order['base']['amount']
== pytest.approx(
worker.buy_orders[index - 1]['base']['amount'] / math.sqrt(1 + worker.increment),
rel=(10 ** -worker.market['base']['precision']),
)
) or (
order['base']['amount']
== pytest.approx(
worker.buy_orders[index - 1]['base']['amount'] / math.sqrt(1 + worker.increment) / increase_factor,
rel=(10 ** -worker.market['base']['precision']),
)
)
for index, order in enumerate(worker.sell_orders):
if index == 0:
continue
assert (
order['base']['amount']
== pytest.approx(
worker.sell_orders[index - 1]['base']['amount'] / math.sqrt(1 + worker.increment),
rel=(10 ** -worker.market['quote']['precision']),
)
) or (
order['base']['amount']
== pytest.approx(
worker.sell_orders[index - 1]['base']['amount'] / math.sqrt(1 + worker.increment) / increase_factor,
rel=(10 ** -worker.market['quote']['precision']),
)
)
def test_increase_order_sizes_neutral_direction(worker, do_initial_allocation, issue_asset, increase_until_allocated):
"""
Test increase direction in neutral mode: new allocation round must be started from closest order.
Buy side, amounts in BASE:
100 100 100 100 100
100 100 100 100 115
100 100 100 114 115
100 100 113 114 115
"""
do_initial_allocation(worker, 'neutral')
# Add balance to increase several orders
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
to_issue = worker.buy_orders[0]['base']['amount'] * (increase_factor - 1) * 3
issue_asset(worker.market['base']['symbol'], to_issue, worker.account.name)
to_issue = worker.sell_orders[0]['base']['amount'] * (increase_factor - 1) * 3
issue_asset(worker.market['quote']['symbol'], to_issue, worker.account.name)
increase_until_allocated(worker)
for order in worker.buy_orders:
assert order['base']['amount'] <= worker.buy_orders[0]['base']['amount']
for order in worker.sell_orders:
assert order['base']['amount'] <= worker.sell_orders[0]['base']['amount']
def test_increase_order_sizes_neutral_transit_from_mountain(worker, do_initial_allocation, issue_asset):
"""
Transition from mountain to neutral.
Buy side, amounts in BASE, increase should be like this:
70 80 90 100 <c>
80 80 90 100 <c>
80 90 90 100 <c>
90 90 90 100 <c>
"""
# Set up mountain
do_initial_allocation(worker, 'mountain')
# Switch to neutral
worker.mode = 'neutral'
# Add balance to increase several orders
to_issue = worker.buy_orders[0]['base']['amount'] * 10
issue_asset(worker.market['base']['symbol'], to_issue, worker.account.name)
for _ in range(0, 6):
previous_buy_orders = worker.buy_orders
worker.refresh_balances()
worker.increase_order_sizes('base', worker.base_balance, previous_buy_orders)
worker.refresh_orders()
for i in range(-1, -6, -1):
if (
previous_buy_orders[i]['base']['amount'] < previous_buy_orders[i - 1]['base']['amount']
and previous_buy_orders[i - 1]['base']['amount'] - previous_buy_orders[i]['base']['amount']
> previous_buy_orders[i]['base']['amount'] * worker.increment / 2
):
# Expect increased order if closer order is bigger than further
assert worker.buy_orders[i]['base']['amount'] > previous_buy_orders[i]['base']['amount']
# Only one check at a time
break
def test_increase_order_sizes_neutral_smaller_closest_orders(worker, do_initial_allocation, increase_until_allocated):
"""
    Test increase when the closest-to-center orders are smaller than the further orders. This is a normal situation
    when the initial sides are imbalanced and several orders were filled.
Buy side, amounts in BASE:
100 100 100 10 10 10 <center>
"""
worker = do_initial_allocation(worker, 'neutral')
increase_until_allocated(worker)
initial_base = worker.buy_orders[0]['base']['amount']
initial_quote = worker.sell_orders[0]['base']['amount']
# Cancel several closest orders
num_orders_to_cancel = 3
worker.cancel_orders_wrapper(worker.buy_orders[:num_orders_to_cancel])
worker.cancel_orders_wrapper(worker.sell_orders[:num_orders_to_cancel])
worker.refresh_orders()
worker.refresh_balances()
# Place limited orders
base_limit = initial_base / 2
quote_limit = initial_quote / 2
worker.place_closer_order('base', worker.buy_orders[0], own_asset_limit=base_limit)
worker.place_closer_order('quote', worker.sell_orders[0], own_asset_limit=quote_limit)
worker.refresh_orders()
for _ in range(1, num_orders_to_cancel):
worker.place_closer_order('base', worker.buy_orders[0])
worker.place_closer_order('quote', worker.sell_orders[0])
worker.refresh_orders()
increase_until_allocated(worker)
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
# New closest orders amount should be equal to initial ones
assert worker.buy_orders[0]['base']['amount'] == pytest.approx(
initial_base, rel=(0.1 * increase_factor * initial_base)
)
assert worker.sell_orders[0]['base']['amount'] == pytest.approx(
initial_quote, rel=(0.1 * increase_factor * initial_quote)
)
def test_increase_order_sizes_neutral_imbalaced_small_further(worker, do_initial_allocation, increase_until_allocated):
"""
If furthest orders are smaller than closest, they should be increased first. See
https://github.com/Codaone/DEXBot/issues/444 for details.
Buy side, amounts in BASE:
5 5 5 100 100 10 10 10 <center>
Should be:
10 10 10 100 100 10 10 10 <center>
"""
worker = do_initial_allocation(worker, 'neutral')
# Cancel several closest orders
num_orders_to_cancel = 3
worker.cancel_orders_wrapper(worker.buy_orders[:num_orders_to_cancel])
# Cancel furthest orders
worker.cancel_orders_wrapper(worker.buy_orders[-num_orders_to_cancel:])
worker.refresh_orders()
worker.refresh_balances()
# Place limited orders
initial_base = worker.buy_orders[0]['base']['amount']
base_limit = initial_base / 2
# Apply limit only for first order
worker.place_closer_order('base', worker.buy_orders[0], own_asset_limit=base_limit)
# place_further_order() doesn't have own_asset_limit, so do own calculation
further_order = worker.place_further_order('base', worker.buy_orders[-1], place_order=False)
worker.place_market_buy_order(base_limit / further_order['price'], further_order['price'])
worker.refresh_orders()
# Place remaining limited orders
for i in range(1, num_orders_to_cancel):
worker.place_closer_order('base', worker.buy_orders[0])
worker.place_further_order('base', worker.buy_orders[-1])
worker.refresh_orders()
# Drop excess balance to only allow one increase round
worker.refresh_balances()
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
to_keep = base_limit * (increase_factor - 1) * num_orders_to_cancel * 2
to_drop = worker.base_balance['amount'] - to_keep
amount = Amount(to_drop, worker.market['base']['symbol'], bitshares_instance=worker.bitshares)
worker.bitshares.reserve(amount, account=worker.account)
increase_until_allocated(worker)
for i in range(1, num_orders_to_cancel):
# This is a simple check without precise calculation
        # We're roughly checking that the new furthest orders do not exceed the new closest orders
further_order_amount = worker.buy_orders[-i]['base']['amount']
closer_order_amount = worker.buy_orders[i - 1]['base']['amount']
assert further_order_amount < closer_order_amount
def test_increase_order_sizes_neutral_closest_order(
worker, do_initial_allocation, increase_until_allocated, issue_asset
):
"""Should test proper calculation of closest order: order should not be less that min_increase_factor."""
worker = do_initial_allocation(worker, 'neutral')
increase_until_allocated(worker)
# Add balance to increase 2 orders
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
to_issue = worker.buy_orders[0]['base']['amount'] * (increase_factor - 1) * 2
issue_asset(worker.market['base']['symbol'], to_issue, worker.account.name)
previous_buy_orders = worker.buy_orders
worker.refresh_balances()
worker.increase_order_sizes('base', worker.base_balance, previous_buy_orders)
worker.refresh_orders()
assert worker.buy_orders[0]['base']['amount'] - previous_buy_orders[0]['base']['amount'] == pytest.approx(
previous_buy_orders[0]['base']['amount'] * (increase_factor - 1),
rel=(10 ** -worker.market['base']['precision']),
)
def test_increase_order_sizes_buy_slope(worker, do_initial_allocation, issue_asset, increase_until_allocated):
"""Check correct orders sizes on both sides."""
do_initial_allocation(worker, 'buy_slope')
# Double worker's balance
issue_asset(worker.market['base']['symbol'], worker.base_total_balance, worker.account.name)
issue_asset(worker.market['quote']['symbol'], worker.quote_total_balance, worker.account.name)
increase_until_allocated(worker)
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
for order in worker.buy_orders:
# All buy orders must be equal-sized in BASE
assert order['base']['amount'] == worker.buy_orders[0]['base']['amount']
for index, order in enumerate(worker.sell_orders):
# Sell orders are equal-sized in BASE asset or diff is equal to increase_factor
if index == 0:
continue
assert (
order['quote']['amount']
== pytest.approx(
worker.sell_orders[index - 1]['quote']['amount'], rel=(10 ** -worker.market['base']['precision'])
)
) or (
order['quote']['amount']
== pytest.approx(
worker.sell_orders[index - 1]['quote']['amount'] * increase_factor,
rel=(0.1 * increase_factor * order['quote']['amount']),
)
)
def test_increase_order_sizes_sell_slope(worker, do_initial_allocation, issue_asset, increase_until_allocated):
"""Check correct orders sizes on both sides."""
do_initial_allocation(worker, 'sell_slope')
# Double worker's balance
issue_asset(worker.market['base']['symbol'], worker.base_total_balance, worker.account.name)
issue_asset(worker.market['quote']['symbol'], worker.quote_total_balance, worker.account.name)
increase_until_allocated(worker)
increase_factor = max(1 + worker.increment, worker.min_increase_factor)
for index, order in enumerate(worker.buy_orders):
# All buy orders must be equal-sized in market QUOTE or diff is equal to increase_factor
if index == 0:
continue
assert (
order['quote']['amount']
== pytest.approx(
worker.buy_orders[index - 1]['quote']['amount'], rel=(10 ** -worker.market['quote']['precision'])
)
) or (
order['quote']['amount']
== pytest.approx(
worker.buy_orders[index - 1]['quote']['amount'] * increase_factor,
rel=(0.1 * increase_factor * order['quote']['amount']),
)
)
for order in worker.sell_orders:
# All sell orders must be equal-sized in market QUOTE
assert order['base']['amount'] == worker.sell_orders[0]['base']['amount']
# Note: no other tests for slope modes because they are combined modes. If valley and mountain are ok, so are the slopes
def test_allocate_asset_basic(worker):
"""Check that free balance is shrinking after each allocation and spread is decreasing."""
worker.refresh_balances()
spread_after = worker.get_actual_spread()
# Allocate asset until target spread will be reached
while spread_after >= worker.target_spread + worker.increment:
free_base = worker.base_balance
free_quote = worker.quote_balance
spread_before = worker.get_actual_spread()
worker.allocate_asset('base', free_base)
worker.allocate_asset('quote', free_quote)
worker.refresh_orders()
worker.refresh_balances(use_cached_orders=True)
spread_after = worker.get_actual_spread()
        # Update history of balance changes
worker.base_balance_history.append(worker.base_balance['amount'])
worker.quote_balance_history.append(worker.quote_balance['amount'])
if len(worker.base_balance_history) > 3:
del worker.base_balance_history[0]
del worker.quote_balance_history[0]
# Free balance is shrinking after each allocation
assert worker.base_balance < free_base or worker.quote_balance < free_quote
# Actual spread is decreasing
assert spread_after < spread_before
def test_allocate_asset_replace_closest_partial_order(worker, other_worker, do_initial_allocation, issue_asset):
"""Test that partially filled order is replaced when target spread is not reached, before placing closer order."""
do_initial_allocation(worker, worker.mode)
# Sell some quote from another account to make PF order on buy side
price = worker.buy_orders[0]['price'] / 1.01
amount = worker.buy_orders[0]['quote']['amount'] * (1 - worker.partial_fill_threshold * 1.1)
other_worker.place_market_sell_order(amount, price)
# Fill sell order
price = worker.sell_orders[0]['price'] ** -1 * 1.01
amount = worker.sell_orders[0]['base']['amount']
other_worker.place_market_buy_order(amount, price)
# Expect replaced closest buy order
worker.refresh_orders()
worker.refresh_balances(use_cached_orders=True)
worker.allocate_asset('base', worker.base_balance)
worker.refresh_orders()
assert worker.buy_orders[0]['base']['amount'] == worker.buy_orders[0]['for_sale']['amount']
def test_allocate_asset_replace_partially_filled_orders(
worker, other_worker, do_initial_allocation, issue_asset, maintain_until_allocated
):
"""
Check replacement of partially filled orders on both sides.
Simple check.
"""
do_initial_allocation(worker, worker.mode)
# TODO: automatically turn off bootstrapping after target spread is closed?
worker['bootstrapping'] = False
# Partially fill closest orders
price = worker.buy_orders[0]['price']
amount = worker.buy_orders[0]['quote']['amount'] / 2
log.debug('Filling {} @ {}'.format(amount, price))
other_worker.place_market_sell_order(amount, price)
price = worker.sell_orders[0]['price'] ** -1
amount = worker.sell_orders[0]['base']['amount'] / 2
log.debug('Filling {} @ {}'.format(amount, price))
other_worker.place_market_buy_order(amount, price)
# Add some balance to worker
to_issue = worker.buy_orders[0]['base']['amount']
issue_asset(worker.market['base']['symbol'], to_issue, worker.account.name)
to_issue = worker.sell_orders[0]['base']['amount']
issue_asset(worker.market['quote']['symbol'], to_issue, worker.account.name)
maintain_until_allocated(worker)
worker.refresh_orders()
assert worker.buy_orders[0]['base']['amount'] == worker.buy_orders[0]['for_sale']['amount']
assert worker.sell_orders[0]['base']['amount'] == worker.sell_orders[0]['for_sale']['amount']
def test_allocate_asset_increase_orders(worker, do_initial_allocation, maintain_until_allocated, issue_asset):
"""Add balance, expect increased orders."""
do_initial_allocation(worker, worker.mode)
order_ids = [order['id'] for order in worker.own_orders]
balance_in_orders_before = worker.get_allocated_assets(order_ids)
to_issue = worker.buy_orders[0]['base']['amount'] * 3
issue_asset(worker.market['base']['symbol'], to_issue, worker.account.name)
to_issue = worker.sell_orders[0]['base']['amount'] * 3
issue_asset(worker.market['quote']['symbol'], to_issue, worker.account.name)
# Use maintain_strategy() here for simplicity
maintain_until_allocated(worker)
order_ids = [order['id'] for order in worker.own_orders]
balance_in_orders_after = worker.get_allocated_assets(order_ids)
assert balance_in_orders_after['base'] > balance_in_orders_before['base']
assert balance_in_orders_after['quote'] > balance_in_orders_before['quote']
def test_allocate_asset_dust_order_simple(
worker, other_worker, do_initial_allocation, maintain_until_allocated, base_account
):
"""Make dust order, check if it canceled and closer opposite order placed."""
do_initial_allocation(worker, worker.mode)
num_sell_orders_before = len(worker.sell_orders)
num_buy_orders_before = len(worker.buy_orders)
# Partially fill order from another account
sell_price = worker.buy_orders[0]['price'] / 1.01
sell_amount = worker.buy_orders[0]['quote']['amount'] * (1 - worker.partial_fill_threshold) * 1.1
other_worker.place_market_sell_order(sell_amount, sell_price)
worker.refresh_balances()
worker.refresh_orders()
worker.allocate_asset('quote', worker.quote_balance)
worker.refresh_orders()
num_sell_orders_after = len(worker.sell_orders)
num_buy_orders_after = len(worker.buy_orders)
assert num_buy_orders_before - num_buy_orders_after == 1
assert num_sell_orders_after - num_sell_orders_before == 1
def test_allocate_asset_dust_order_excess_funds(
worker, other_worker, do_initial_allocation, maintain_until_allocated, issue_asset
):
"""Make dust order, add additional funds, these funds should be allocated and then dust order should be canceled and
closer opposite order placed."""
do_initial_allocation(worker, worker.mode)
num_sell_orders_before = len(worker.sell_orders)
num_buy_orders_before = len(worker.buy_orders)
# Partially fill order from another account
sell_price = worker.buy_orders[0]['price'] / 1.01
sell_amount = worker.buy_orders[0]['quote']['amount'] * (1 - worker.partial_fill_threshold) * 1.1
other_worker.place_market_sell_order(sell_amount, sell_price)
# Add some balance to the worker
issue_asset(worker.market['quote']['symbol'], worker.sell_orders[0]['base']['amount'], worker.account.name)
worker.refresh_balances()
worker.refresh_orders()
worker.allocate_asset('quote', worker.quote_balance)
worker.refresh_orders()
num_sell_orders_after = len(worker.sell_orders)
num_buy_orders_after = len(worker.buy_orders)
assert num_buy_orders_before - num_buy_orders_after == 1
assert num_sell_orders_after - num_sell_orders_before == 1
def test_allocate_asset_dust_order_increase_race(worker, other_worker, do_initial_allocation, issue_asset):
"""
Test for https://github.com/Codaone/DEXBot/issues/587.
    Check that cancelling dust orders on the opposite side does not cause a race for allocate_asset() on the opposite side
"""
do_initial_allocation(worker, worker.mode)
num_buy_orders_before = len(worker.buy_orders)
# Make closest sell order small enough to be a most likely candidate for increase
worker.cancel_orders_wrapper(worker.sell_orders[0])
worker.refresh_orders()
worker.refresh_balances()
worker.place_closer_order(
'quote', worker.sell_orders[0], own_asset_limit=(worker.sell_orders[0]['base']['amount'] / 2)
)
worker.refresh_orders()
# Partially fill order from another account
buy_price = worker.sell_orders[0]['price'] ** -1 * 1.01
buy_amount = worker.sell_orders[0]['base']['amount'] * (1 - worker.partial_fill_threshold) * 1.1
log.debug('{}, {}'.format(buy_price, buy_amount))
other_worker.place_market_buy_order(buy_amount, buy_price)
# PF fill sell order should be cancelled and closer buy placed
worker.maintain_strategy()
worker.refresh_orders()
num_buy_orders_after = len(worker.buy_orders)
assert num_buy_orders_after - num_buy_orders_before == 1
def test_allocate_asset_filled_orders(worker, other_worker, do_initial_allocation, base_account):
"""Fill an order and check if opposite order placed."""
do_initial_allocation(worker, worker.mode)
# TODO: automatically turn off bootstrapping after target spread is closed?
worker['bootstrapping'] = False
num_sell_orders_before = len(worker.sell_orders)
# Fill sell order
price = worker.buy_orders[0]['price']
amount = worker.buy_orders[0]['quote']['amount']
other_worker.place_market_sell_order(amount, price)
worker.refresh_balances()
worker.refresh_orders()
worker.allocate_asset('quote', worker.quote_balance)
worker.refresh_orders()
num_sell_orders_after = len(worker.sell_orders)
assert num_sell_orders_after - num_sell_orders_before == 1
def test_allocate_asset_filled_order_on_massively_imbalanced_sides(
worker, other_worker, do_initial_allocation, base_account
):
"""
    When sides are massively imbalanced, make sure that the spread will be closed after filling one order on the
    smaller side. The goal is to test a situation where one side has big-sized orders and the other side has much
    smaller orders.
    Correct behavior: when an order on the smaller side is filled, the big side should place a closer order.
Test for https://github.com/Codaone/DEXBot/issues/588
"""
do_initial_allocation(worker, worker.mode)
spread_before = worker.get_actual_spread()
log.info('Worker spread after bootstrap: {}'.format(spread_before))
# TODO: automatically turn off bootstrapping after target spread is closed?
worker['bootstrapping'] = False
# Cancel several closest orders
num_orders_to_cancel = 3
worker.cancel_orders_wrapper(worker.sell_orders[:num_orders_to_cancel])
worker.refresh_orders()
worker.refresh_balances()
# Place limited orders; the goal is to limit order amount to be much smaller than opposite
quote_limit = worker.buy_orders[0]['quote']['amount'] * worker.partial_fill_threshold / 2
spread_after = worker.get_actual_spread()
while spread_after >= worker.target_spread + worker.increment:
        # We're using a spread check because we cannot just place the same number of orders as num_orders_to_cancel;
        # that may result in a too-close spread due to price shifts
worker.place_closer_order('quote', worker.sell_orders[0], own_asset_limit=quote_limit)
worker.refresh_orders()
spread_after = worker.get_actual_spread()
log.info('Worker spread: {}'.format(worker.get_actual_spread()))
# Fill only one newly placed order from another account
num_orders_to_fill = 1
for i in range(0, num_orders_to_fill):
price = worker.sell_orders[i]['price'] ** -1 * 1.01
amount = worker.sell_orders[i]['base']['amount'] * 1.01
log.debug('Filling {} @ {}'.format(amount, price))
other_worker.place_market_buy_order(amount, price)
# Cancel unmatched dust
other_worker.cancel_all_orders()
worker.refresh_orders()
worker.refresh_balances(use_cached_orders=True)
    # Filling one order should result in spread > target spread, otherwise allocate_asset will not place a closer order
spread_after = worker.get_actual_spread()
assert spread_after >= worker.target_spread + worker.increment
# Allocate obtained BASE
counter = 0
while spread_after >= worker.target_spread + worker.increment:
worker.allocate_asset('base', worker.base_balance)
worker.refresh_orders()
worker.refresh_balances(use_cached_orders=True)
spread_after = worker.get_actual_spread()
counter += 1
        # Counter is for preventing an infinite loop
assert counter < 20
def test_allocate_asset_partially_filled_order_on_massively_imbalanced_sides(
worker, other_worker, do_initial_allocation, base_account
):
"""
    When sides are massively imbalanced, make sure that the spread will be closed after filling one order on the
    smaller side. The goal is to test a situation where one side has big-sized orders and the other side has much
    smaller orders.
    Correct behavior: when an order on the smaller side is filled, the big side should place a closer order.
    This test is similar to test_allocate_asset_filled_order_on_massively_imbalanced_sides, but tests a partially
    filled order where the "cancel dust order" logic is in action.
Test for https://github.com/Codaone/DEXBot/issues/588
"""
do_initial_allocation(worker, worker.mode)
spread_before = worker.get_actual_spread()
log.info('Worker spread after bootstrap: {}'.format(spread_before))
# TODO: automatically turn off bootstrapping after target spread is closed?
worker['bootstrapping'] = False
# Cancel several closest orders
num_orders_to_cancel = 3
worker.cancel_orders_wrapper(worker.sell_orders[:num_orders_to_cancel])
worker.refresh_orders()
worker.refresh_balances()
# Place limited orders; the goal is to limit order amount to be much smaller than opposite
quote_limit = worker.buy_orders[0]['quote']['amount'] * worker.partial_fill_threshold / 2
spread_after = worker.get_actual_spread()
while spread_after >= worker.target_spread + worker.increment:
        # We're using a spread check because we cannot just place the same number of orders as num_orders_to_cancel;
        # that may result in a too-close spread due to price shifts
worker.place_closer_order('quote', worker.sell_orders[0], own_asset_limit=quote_limit)
worker.refresh_orders()
spread_after = worker.get_actual_spread()
log.info('Worker spread: {}'.format(worker.get_actual_spread()))
# Fill only one newly placed order from another account
num_orders_to_fill = 1
for i in range(0, num_orders_to_fill):
price = worker.sell_orders[i]['price'] ** -1 * 1.01
# Make partially filled order (dust order)
amount = worker.sell_orders[i]['base']['amount'] * (1 - worker.partial_fill_threshold) * 1.01
log.debug('Filling {} @ {}'.format(amount, price))
other_worker.place_market_buy_order(amount, price)
# Cancel unmatched dust
other_worker.cancel_all_orders()
worker.refresh_orders()
worker.refresh_balances(use_cached_orders=True)
# Check that we filled enough
assert not worker.check_partial_fill(worker.sell_orders[0], fill_threshold=(1 - worker.partial_fill_threshold))
# Expect dust order cancel + closer order
log.info('spread before allocate_asset(): {}'.format(worker.get_actual_spread()))
worker.allocate_asset('base', worker.base_balance)
worker.refresh_orders()
spread_after = worker.get_actual_spread()
assert spread_after < worker.target_spread + worker.increment
@pytest.mark.parametrize('mode', MODES)
def test_allocate_asset_several_filled_orders_on_massively_imbalanced_sides(
mode, worker, other_worker, do_initial_allocation, base_account
):
"""
    When sides are massively imbalanced, make sure that the spread will be closed after filling several orders on the
    smaller side. The goal is to test a situation where one side has big-sized orders and the other side has much
    smaller orders.
    Correct behavior: when multiple orders on the smaller side are filled at once, the big side should place an
    appropriate number of closer orders to close the spread.
Test for https://github.com/Codaone/DEXBot/issues/601
"""
worker.mode = mode
do_initial_allocation(worker, worker.mode)
spread_before = worker.get_actual_spread()
log.info('Worker spread after bootstrap: {}'.format(spread_before))
# TODO: automatically turn off bootstrapping after target spread is closed?
worker['bootstrapping'] = False
# Cancel several closest orders
num_orders_to_cancel = 3
worker.cancel_orders_wrapper(worker.sell_orders[:num_orders_to_cancel])
worker.refresh_orders()
worker.refresh_balances()
# Place limited orders; the goal is to limit order amount to be much smaller than opposite
quote_limit = worker.buy_orders[0]['quote']['amount'] * worker.partial_fill_threshold / 2
place_limited_order = True
spread_after = worker.get_actual_spread()
while spread_after >= worker.target_spread + worker.increment:
        # We're using a spread check because we cannot just place the same number of orders as num_orders_to_cancel;
        # that may result in a too-close spread due to price shifts
limit = quote_limit if place_limited_order else None
worker.place_closer_order('quote', worker.sell_orders[0], own_asset_limit=limit)
worker.refresh_orders()
worker.sync_current_orders()
spread_after = worker.get_actual_spread()
place_limited_order = False # only first order should be limited
log.info('Worker spread: {}'.format(worker.get_actual_spread()))
    # Fill the newly placed orders from another account
num_orders_to_fill = num_orders_to_cancel
for i in range(0, num_orders_to_fill):
price = worker.sell_orders[i]['price'] ** -1 * 1.01
amount = worker.sell_orders[i]['base']['amount'] * 1.01
log.debug('Filling {} @ {}'.format(amount, price))
other_worker.place_market_buy_order(amount, price)
# Cancel unmatched dust
other_worker.cancel_all_orders()
worker.refresh_orders()
worker.refresh_balances(use_cached_orders=True)
# Allocate obtained BASE
counter = 0
spread_after = worker.get_actual_spread()
while spread_after >= worker.target_spread + worker.increment:
worker.allocate_asset('base', worker.base_balance)
worker.refresh_orders()
worker.refresh_balances(use_cached_orders=True)
spread_after = worker.get_actual_spread()
counter += 1
    # Counter is for preventing an infinite loop
    # Successful execution means the target spread is reached
assert counter < 20
@pytest.mark.parametrize('mode', MODES)
def test_allocate_asset_limiting_on_sell_side(mode, worker, other_worker, do_initial_allocation, base_account):
"""Check order size limiting when placing closer order on side which is bigger (using funds obtained from filled
orders on side which is smaller)"""
do_initial_allocation(worker, mode)
# TODO: automatically turn off bootstrapping after target spread is closed?
worker['bootstrapping'] = False
# Fill several orders
num_orders_to_fill = 4
for i in range(0, num_orders_to_fill):
price = worker.buy_orders[i]['price'] / 1.01
amount = worker.buy_orders[i]['quote']['amount']
log.debug('Filling buy order buys {} QUOTE @ {}'.format(amount, price))
other_worker.place_market_sell_order(amount, price)
# Cancel unmatched dust
other_worker.cancel_all_orders()
# Allocate asset until target spread will be reached
worker.refresh_orders()
worker.refresh_balances(use_cached_orders=True)
spread_after = worker.get_actual_spread()
counter = 0
while spread_after >= worker.target_spread + worker.increment:
worker.allocate_asset('quote', worker.quote_balance)
worker.refresh_orders()
worker.refresh_balances(use_cached_orders=True)
spread_after = worker.get_actual_spread()
counter += 1
        # Counter is for preventing an infinite loop
assert counter < 20
# Check 2 closest orders to match mode
if worker.mode == 'valley' or worker.mode == 'sell_slope':
assert worker.sell_orders[0]['base']['amount'] == pytest.approx(worker.sell_orders[1]['base']['amount'])
elif worker.mode == 'mountain':
assert (
worker.sell_orders[0]['base']['amount']
== pytest.approx(worker.sell_orders[1]['base']['amount'], abs=(10 ** -worker.market['quote']['precision']))
or worker.sell_orders[0]['base']['amount'] >= worker.sell_orders[1]['base']['amount']
)
elif worker.mode == 'buy_slope':
assert worker.sell_orders[0]['quote']['amount'] == pytest.approx(
worker.sell_orders[1]['quote']['amount'], rel=(10 ** -worker.market['base']['precision'])
)
elif worker.mode == 'neutral':
assert worker.sell_orders[0]['base']['amount'] == pytest.approx(
worker.sell_orders[1]['base']['amount'] * math.sqrt(1 + worker.increment),
rel=(10 ** -worker.market['quote']['precision']),
)
@pytest.mark.parametrize('mode', MODES)
def test_allocate_asset_limiting_on_buy_side(mode, worker, other_worker, do_initial_allocation, issue_asset):
"""Check order size limiting when placing closer order on side which is bigger (using funds obtained from filled
orders on side which is smaller)"""
worker.center_price = 1
worker.lower_bound = 0.4
worker.upper_bound = 1.4
do_initial_allocation(worker, mode)
# TODO: automatically turn off bootstrapping after target spread is closed?
worker['bootstrapping'] = False
# Fill several orders
num_orders_to_fill = 5
for i in range(0, num_orders_to_fill):
price = worker.sell_orders[i]['price'] ** -1 * 1.01
amount = worker.sell_orders[i]['base']['amount']
log.debug('Filling {} QUOTE @ {}'.format(amount, price))
other_worker.place_market_buy_order(amount, price)
# Cancel unmatched dust
other_worker.cancel_all_orders()
    # Allocate asset until the target spread is reached
worker.refresh_orders()
worker.refresh_balances(use_cached_orders=True)
spread_after = worker.get_actual_spread()
counter = 0
while spread_after >= worker.target_spread + worker.increment:
worker.allocate_asset('base', worker.base_balance)
worker.refresh_orders()
worker.refresh_balances(use_cached_orders=True)
spread_after = worker.get_actual_spread()
counter += 1
    # The counter guards against an infinite loop
assert counter < 20
# Check 2 closest orders to match mode
if worker.mode == 'valley' or worker.mode == 'buy_slope':
assert worker.buy_orders[0]['base']['amount'] == worker.buy_orders[1]['base']['amount']
elif worker.mode == 'mountain':
        # In mountain mode allow both equal orders and an increased closest order - it may be placed without limiting
assert (
worker.buy_orders[0]['base']['amount']
== pytest.approx(worker.buy_orders[1]['base']['amount'], abs=(10 ** -worker.market['base']['precision']))
or worker.buy_orders[0]['base']['amount'] >= worker.buy_orders[1]['base']['amount']
)
elif worker.mode == 'sell_slope':
assert worker.buy_orders[0]['quote']['amount'] == pytest.approx(
worker.buy_orders[1]['quote']['amount'], rel=(10 ** -worker.market['base']['precision'])
)
elif worker.mode == 'neutral':
assert worker.buy_orders[0]['base']['amount'] == pytest.approx(
worker.buy_orders[1]['base']['amount'] * math.sqrt(1 + worker.increment),
rel=(10 ** -worker.market['base']['precision']),
)
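# Worked example (hypothetical increment) of the neutral-mode relation asserted above:
# the closest order is expected to be bigger than its neighbour by a factor of
# sqrt(1 + increment), so with increment = 0.04 a 100 BASE neighbour implies a closest
# order of about 100 * sqrt(1.04) ~= 101.98 BASE.
def test_neutral_mode_relation_illustration():
    increment = 0.04  # assumed value; the real one comes from the worker config
    assert 100 * math.sqrt(1 + increment) == pytest.approx(101.98, rel=1e-3)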
def test_get_actual_spread(worker):
worker.maintain_strategy()
    # Two runs of maintain_strategy() are needed
worker.maintain_strategy()
worker.refresh_orders()
spread = worker.get_actual_spread()
assert float('Inf') > spread > 0
def test_stop_loss_check(worker, other_worker, do_initial_allocation, issue_asset):
worker.operational_depth = 100
worker.target_spread = 0.1 # speed up allocation
do_initial_allocation(worker, worker.mode)
# Issue additional QUOTE to 2nd account
issue_asset(worker.market['quote']['symbol'], 500, other_worker.account.name)
    # Sleep is needed to allow the node to update the ticker
time.sleep(2)
# Normal conditions - stop loss should not be executed
worker.stop_loss_check()
assert worker.disabled is False
# Place bid below lower bound
other_worker.place_market_buy_order(1, worker.lower_bound / 1.01)
    # Fill all orders, pushing the price below the lower bound
other_worker.place_market_sell_order(500, worker.lower_bound)
time.sleep(2)
worker.refresh_orders()
worker.stop_loss_check()
worker.refresh_orders()
assert len(worker.sell_orders) == 1
order = worker.sell_orders[0]
assert order['price'] ** -1 < worker.lower_bound
assert worker.disabled is True
def test_tick(worker):
"""Check tick counter increment."""
counter_before = worker.counter
worker.tick('foo')
counter_after = worker.counter
assert counter_after - counter_before == 1
|
insights/parsers/tests/test_vsftpd.py | mglantz/insights-core | 121 | 11198530 | from insights.parsers.vsftpd import VsftpdConf, VsftpdPamConf
from insights.tests import context_wrap
VSFTPD_PAM_CONF = """
#%PAM-1.0
session optional pam_keyinit.so force revoke
auth required pam_listfile.so item=user sense=deny file=/etc/vsftpd/ftpusers onerr=succeed
auth required pam_shells.so
auth include password-auth
account include password-auth
session required pam_loginuid.so
session include password-auth
""".strip()
VSFTPD_CONF = """
# No anonymous login
anonymous_enable=NO
# Let local users login
local_enable=YES
# Write permissions
write_enable=YES
# Commented_option=not_present
""".strip()
def test_vsftpd_pam_conf():
pam_conf = VsftpdPamConf(context_wrap(VSFTPD_PAM_CONF, path='etc/pamd.d/vsftpd'))
assert len(pam_conf) == 7
assert pam_conf[0].service == 'vsftpd'
assert pam_conf[0].interface == 'session'
assert len(pam_conf[0].control_flags) == 1
assert pam_conf[0].control_flags[0].flag == 'optional'
assert pam_conf[0].module_name == 'pam_keyinit.so'
assert pam_conf[0].module_args == 'force revoke'
assert pam_conf[1].module_args == 'item=user sense=deny file=/etc/vsftpd/ftpusers onerr=succeed'
assert pam_conf[6].interface == 'session'
assert len(pam_conf[6].control_flags) == 1
assert pam_conf[6].control_flags[0].flag == 'include'
assert pam_conf[6].module_name == 'password-auth'
assert pam_conf[6].module_args is None
def test_vsftpd_conf():
vsftpd_conf = VsftpdConf(context_wrap(VSFTPD_CONF))
assert vsftpd_conf.get('anonymous_enable') == 'NO'
assert vsftpd_conf.get('local_enable') == 'YES'
assert vsftpd_conf.get('write_enable') == 'YES'
assert 'Commented_option' not in vsftpd_conf
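# Minimal sketch (not the insights-core implementation) of the key=value parsing the
# assertions above rely on: blank lines and '#' comments are skipped, everything else is
# split on the first '='.
def test_key_value_parsing_sketch():
    def parse(content):
        options = {}
        for line in content.splitlines():
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            key, _, value = line.partition('=')
            options[key] = value
        return options
    parsed = parse(VSFTPD_CONF)
    assert parsed['anonymous_enable'] == 'NO'
    assert parsed['write_enable'] == 'YES'
    assert 'Commented_option' not in parsed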
|
tests/test_timeline_event_control.py | rvega/isobar | 241 | 11198536 | """ Unit tests for events """
import isobar as iso
import pytest
import math
from . import dummy_timeline
def test_event_control_no_interpolation(dummy_timeline):
"""
Simple case: schedule a series of regularly-spaced control points.
Output device should receive discrete control events.
"""
control_series = iso.PSeries(start=1, step=2, length=3)
dummy_timeline.schedule({
iso.EVENT_CONTROL: 0,
iso.EVENT_VALUE: control_series,
iso.EVENT_DURATION: 1,
iso.EVENT_CHANNEL: 9
})
dummy_timeline.run()
assert dummy_timeline.output_device.events == [
[0, "control", 0, 1, 9],
[1, "control", 0, 3, 9],
[2, "control", 0, 5, 9]
]
def test_event_control_linear_interpolation(dummy_timeline):
"""
Linear interpolation between control points.
"""
control_series = iso.PSequence([1, 3, 2], 1)
dummy_timeline.ticks_per_beat = 10
dummy_timeline.schedule({
iso.EVENT_CONTROL: 0,
iso.EVENT_VALUE: control_series,
iso.EVENT_DURATION: iso.PSequence([1, 0.5]),
iso.EVENT_CHANNEL: 9
}, interpolate=iso.INTERPOLATION_LINEAR)
dummy_timeline.run()
expected_series = [1 + 2 * n / dummy_timeline.ticks_per_beat for n in range(dummy_timeline.ticks_per_beat)] + \
[3 - 1 * n / (dummy_timeline.ticks_per_beat // 2) for n in range(dummy_timeline.ticks_per_beat // 2)] + \
[2]
values = [event[3] for event in dummy_timeline.output_device.events]
assert len(dummy_timeline.output_device.events) == (dummy_timeline.ticks_per_beat * 1.5) + 1
assert expected_series == pytest.approx(values, rel=0.01)
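# Worked check of the linear ramp above (same assumed setup: 10 ticks per beat, ramping
# from 1 to 3 over one beat): each tick adds (3 - 1) / 10 = 0.2, so the emitted values run
# 1.0, 1.2, ..., 2.8 before the next control point.
def test_linear_ramp_arithmetic():
    ticks_per_beat = 10
    start, end = 1, 3
    ramp = [start + (end - start) * n / ticks_per_beat for n in range(ticks_per_beat)]
    assert ramp[0] == 1.0
    assert ramp[1] == pytest.approx(1.2)
    assert ramp[-1] == pytest.approx(2.8)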
def test_event_control_linear_interpolation_zero_duration(dummy_timeline):
control_series = iso.PSequence([0, 1])
duration_series = iso.PSequence([1, 0])
dummy_timeline.ticks_per_beat = 10
dummy_timeline.schedule({
iso.EVENT_CONTROL: 0,
iso.EVENT_VALUE: control_series,
iso.EVENT_DURATION: duration_series,
iso.EVENT_CHANNEL: 9
}, interpolate=iso.INTERPOLATION_LINEAR, count=4)
dummy_timeline.run()
expected_series = [0.1 * n for n in range(0, 11)] + [0.1 * n for n in range(1, 11)]
values = [event[3] for event in dummy_timeline.output_device.events]
assert expected_series == pytest.approx(values, rel=0.0000001)
def test_event_control_cosine_interpolation(dummy_timeline):
"""
    Cosine interpolation between control points.
"""
alternator = iso.PSequence([0, 1])
dummy_timeline.ticks_per_beat = 10
dummy_timeline.schedule({
iso.EVENT_CONTROL: 0,
iso.EVENT_VALUE: alternator,
iso.EVENT_CHANNEL: 9
}, interpolate=iso.INTERPOLATION_COSINE, count=3)
dummy_timeline.run()
expected_series = [
0.5 * (1.0 - math.cos(math.pi * n / dummy_timeline.ticks_per_beat))
for n in range(2 * dummy_timeline.ticks_per_beat + 1)
]
values = [event[3] for event in dummy_timeline.output_device.events]
    assert expected_series == pytest.approx(values, rel=0.000001)
|
pase/utils.py | ishine/pase | 428 | 11198595 | import json
import shlex
import subprocess
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
try:
from .losses import *
except ImportError:
from losses import *
from random import shuffle
from pase.models.discriminator import *
import torch.optim as optim
from torch.autograd import Function
def pase_parser(cfg_fname, batch_acum=1, device='cpu', do_losses=True,
frontend=None):
with open(cfg_fname, 'r') as cfg_f:
cfg_all = json.load(cfg_f)
if do_losses:
# change loss section
for i, cfg in enumerate(cfg_all):
loss_name = cfg_all[i]['loss']
if hasattr(nn, loss_name):
# retrieve potential r frames parameter
r_frames = cfg_all[i].get('r', None)
# loss in nn Modules
cfg_all[i]['loss'] = ContextualizedLoss(getattr(nn, loss_name)(),
r=r_frames)
else:
if loss_name == 'LSGAN' or loss_name == 'GAN':
dnet_cfg = {}
if 'DNet_cfg' in cfg_all[i]:
dnet_cfg = cfg_all[i].pop('DNet_cfg')
dnet_cfg['frontend'] = frontend
# make DNet
DNet = RNNDiscriminator(**dnet_cfg)
if 'Dopt_cfg' in cfg_all[i]:
Dopt_cfg = cfg_all[i].pop('Dopt_cfg')
Dopt = optim.Adam(DNet.parameters(),
Dopt_cfg['lr'])
else:
Dopt = optim.Adam(DNet.parameters(), 0.0005)
Dloss = 'L2' if loss_name == 'LSGAN' else 'BCE'
cfg_all[i]['loss'] = WaveAdversarialLoss(DNet, Dopt,
loss=Dloss,
batch_acum=batch_acum,
device=device)
return cfg_all
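# Hedged sketch (inferred from the parsing logic above, not from project docs) of the JSON
# layout pase_parser expects: a list of worker configs, each with a 'loss' name that is
# either a torch.nn loss (optionally with an 'r' context parameter) or 'LSGAN'/'GAN' with
# optional 'DNet_cfg' / 'Dopt_cfg' sections, e.g.:
#   [{"loss": "MSELoss", "r": 1},
#    {"loss": "LSGAN", "DNet_cfg": {}, "Dopt_cfg": {"lr": 0.0002}}]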
def worker_parser(cfg_fname, batch_acum=1, device='cpu', do_losses=True,
frontend=None):
with open(cfg_fname, 'r') as cfg_f:
cfg_list = json.load(cfg_f)
if do_losses:
# change loss section
for type, cfg_all in cfg_list.items():
for i, cfg in enumerate(cfg_all):
loss_name = cfg_all[i]['loss']
if hasattr(nn, loss_name):
# retrieve potential r frames parameter
r_frames = cfg_all[i].get('r', None)
# loss in nn Modules
cfg_all[i]['loss'] = ContextualizedLoss(getattr(nn, loss_name)(),
r=r_frames)
else:
if loss_name == 'LSGAN' or loss_name == 'GAN':
dnet_cfg = {}
if 'DNet_cfg' in cfg_all[i]:
dnet_cfg = cfg_all[i].pop('DNet_cfg')
dnet_cfg['frontend'] = frontend
# make DNet
DNet = RNNDiscriminator(**dnet_cfg)
if 'Dopt_cfg' in cfg_all[i]:
Dopt_cfg = cfg_all[i].pop('Dopt_cfg')
Dopt = optim.Adam(DNet.parameters(),
Dopt_cfg['lr'])
else:
Dopt = optim.Adam(DNet.parameters(), 0.0005)
Dloss = 'L2' if loss_name == 'LSGAN' else 'BCE'
cfg_all[i]['loss'] = WaveAdversarialLoss(DNet, Dopt,
loss=Dloss,
batch_acum=batch_acum,
device=device)
cfg_list[type] = cfg_all
print(cfg_list)
return cfg_list
def build_optimizer(opt_cfg, params):
if isinstance(opt_cfg, str):
with open(opt_cfg, 'r') as cfg_f:
opt_cfg = json.load(cfg_f)
opt_name = opt_cfg.pop('name')
if 'sched' in opt_cfg:
sched_cfg = opt_cfg.pop('sched')
else:
sched_cfg = None
opt_cfg['params'] = params
opt = getattr(optim, opt_name)(**opt_cfg)
if sched_cfg is not None:
sname = sched_cfg.pop('name')
sched_cfg['optimizer'] = opt
sched = getattr(optim.lr_scheduler, sname)(**sched_cfg)
return opt, sched
else:
return opt, None
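# Hedged usage sketch for build_optimizer: the exact schema depends on the project's JSON
# files, but the keys below ('name', 'lr', optional 'sched') follow how the function pops
# them above. The scheduler arguments are standard torch.optim.lr_scheduler.StepLR ones.
def _build_optimizer_example(model):
    opt_cfg = {'name': 'Adam', 'lr': 0.0005,
               'sched': {'name': 'StepLR', 'step_size': 10, 'gamma': 0.5}}
    opt, sched = build_optimizer(opt_cfg, model.parameters())
    return opt, sched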
def chunk_batch_seq(X, seq_range=[90, 1000]):
bsz, nfeats, slen = X.size()
min_seq = seq_range[0]
max_seq = min(slen, seq_range[1])
# sample a random chunk size
chsz = random.choice(list(range(min_seq, max_seq)))
idxs = list(range(slen - chsz))
beg_i = random.choice(idxs)
return X[:, :, beg_i:beg_i + chsz]
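# Usage sketch for chunk_batch_seq (hypothetical shapes): crop a random time window from a
# (batch, feats, time) tensor; with the default seq_range=[90, 1000] the crop length L
# satisfies 90 <= L < min(time, 1000).
def _chunk_batch_seq_example():
    x = torch.randn(4, 40, 500)
    chunk = chunk_batch_seq(x)
    assert chunk.size(0) == 4 and chunk.size(1) == 40
    assert 90 <= chunk.size(2) < 500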
def kfold_data(data_list, utt2class, folds=10, valid_p=0.1):
    # Returns K lists, where the k-th element is composed of 3 sub-lists:
    # [train_split, valid_split, test_split]
#idxs = list(range(len(data_list)))
# shuffle the idxs first
#shuffle(idxs)
# group by class first
classes = set(utt2class.values())
items = dict((k, []) for k in classes)
for data_el in data_list:
items[utt2class[data_el]].append(data_el)
lens = {}
test_splits = {}
for k in items.keys():
shuffle(items[k])
lens[k] = len(items[k])
TEST_SPLIT_K = int((1. / folds) * lens[k])
test_splits[k] = TEST_SPLIT_K
lists = []
beg_i = dict((k, 0) for k in test_splits.keys())
# now slide a window per fold
for fi in range(folds):
test_split = []
train_split = []
valid_split = []
print('-' * 30)
print('Fold {} splits:'.format(fi))
for k, data in items.items():
te_split = data[beg_i[k]:beg_i[k] + test_splits[k]]
test_split += te_split
tr_split = data[:beg_i[k]] + data[beg_i[k] + test_splits[k]:]
            # select valid and train splits; the valid portion is taken before trimming
            # so that validation samples do not also end up in the train split
            va_split = tr_split[:int(valid_p * len(tr_split))]
            tr_split = tr_split[int(valid_p * len(tr_split)):]
train_split += tr_split
valid_split += va_split
print('Split {} train: {}, valid: {}, test: {}'
''.format(k, len(tr_split), len(va_split), len(te_split)))
# build valid split within train_split
lists.append([train_split, valid_split, test_split])
return lists
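# Usage sketch for kfold_data with toy data (hypothetical utterance ids): ten utterances in
# two classes split into 5 folds; each fold is a [train, valid, test] triple of id lists.
def _kfold_data_example():
    data_list = ['utt{}'.format(i) for i in range(10)]
    utt2class = {u: ('class_a' if i < 5 else 'class_b') for i, u in enumerate(data_list)}
    folds = kfold_data(data_list, utt2class, folds=5, valid_p=0.2)
    assert len(folds) == 5
    train, valid, test = folds[0]
    assert len(test) == 2  # one utterance per class in each test fold (10 utts / 5 folds)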
class AuxiliarSuperviser(object):
def __init__(self, cmd_file, save_path='.'):
self.cmd_file = cmd_file
with open(cmd_file, 'r') as cmd_f:
self.cmd = [l.rstrip() for l in cmd_f]
self.save_path = save_path
def __call__(self, iteration, ckpt_path, cfg_path):
assert isinstance(iteration, int)
assert isinstance(ckpt_path, str)
assert isinstance(cfg_path, str)
for cmd in self.cmd:
sub_cmd = cmd.replace('$model', ckpt_path)
sub_cmd = sub_cmd.replace('$iteration', str(iteration))
sub_cmd = sub_cmd.replace('$cfg', cfg_path)
sub_cmd = sub_cmd.replace('$save_path', self.save_path)
print('Executing async command: ', sub_cmd)
#shsub = shlex.split(sub_cmd)
#print(shsub)
p = subprocess.Popen(sub_cmd,
shell=True)
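# Usage sketch for AuxiliarSuperviser (hypothetical file names): the command file contains
# one shell command template per line, with $model, $iteration, $cfg and $save_path
# substituted before the command is launched asynchronously. A line such as
#   python eval.py --ckpt $model --cfg $cfg --step $iteration --out $save_path
# would be triggered with:
#   supervisor = AuxiliarSuperviser('aux_cmds.txt', save_path='ckpts')
#   supervisor(1000, 'ckpts/FE_e1000.ckpt', 'cfg/frontend.cfg')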
def get_grad_norms(model, keys=[]):
grads = {}
for i, (k, param) in enumerate(dict(model.named_parameters()).items()):
accept = False
for key in keys:
# match substring in collection of model keys
if key in k:
accept = True
break
if not accept:
continue
if param.grad is None:
print('WARNING getting grads: {} param grad is None'.format(k))
continue
grads[k] = torch.norm(param.grad).cpu().item()
return grads
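# Usage sketch for get_grad_norms (hypothetical toy model): after a backward pass, collect
# the gradient norms of all parameters whose names contain one of the given substrings.
def _get_grad_norms_example():
    model = nn.Sequential(nn.Linear(8, 4), nn.Linear(4, 1))
    model(torch.randn(2, 8)).sum().backward()
    norms = get_grad_norms(model, keys=['weight'])
    assert all('weight' in name for name in norms)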
def sample_probable(p):
return random.random() < p
def zerospeech(shape, eps=1e-14):
S = np.random.randn(shape) * eps
return S.astype(np.float32)
class ScaleGrad(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x
@staticmethod
def backward(ctx, grad_output):
output = grad_output * ctx.alpha
return output, None
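# Usage sketch for ScaleGrad: the forward pass is an identity, while the backward pass
# multiplies the incoming gradient by `alpha`, e.g.
#   y = ScaleGrad.apply(x, 0.5)  # gradients flowing back through y are scaled by 0.5
# A negative alpha turns this into the classic gradient-reversal layer used in adversarial
# and domain-adaptation setups.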
def log_sum_exp(x):
""" numerically stable log_sum_exp implementation that prevents overflow """
# TF ordering
axis = len(x.size()) - 1
m, _ = torch.max(x, dim=axis)
m2, _ = torch.max(x, dim=axis, keepdim=True)
return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis))
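# Worked check of the stability claim: for x = [1000, 1000] a naive log(sum(exp(x)))
# overflows to inf, while the subtract-max formulation returns 1000 + log(2).
def _log_sum_exp_example():
    x = torch.tensor([[1000.0, 1000.0]])
    expected = torch.tensor([1000.0 + float(np.log(2.0))])
    assert torch.allclose(log_sum_exp(x), expected)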
def discretized_mix_logistic_loss(y_hat, y, num_classes=65536,
log_scale_min=None, reduce=True):
""" https://github.com/fatchord/WaveRNN/blob/master/utils/distribution.py
"""
if log_scale_min is None:
log_scale_min = float(np.log(1e-14))
y_hat = y_hat.permute(0,2,1)
assert y_hat.dim() == 3
assert y_hat.size(1) % 3 == 0
nr_mix = y_hat.size(1) // 3
# (B x T x C)
y_hat = y_hat.transpose(1, 2)
# unpack parameters. (B, T, num_mixtures) x 3
logit_probs = y_hat[:, :, :nr_mix]
means = y_hat[:, :, nr_mix:2 * nr_mix]
log_scales = torch.clamp(y_hat[:, :, 2 * nr_mix:3 * nr_mix], min=log_scale_min)
# B x T x 1 -> B x T x num_mixtures
y = y.expand_as(means)
centered_y = y - means
inv_stdv = torch.exp(-log_scales)
plus_in = inv_stdv * (centered_y + 1. / (num_classes - 1))
cdf_plus = torch.sigmoid(plus_in)
min_in = inv_stdv * (centered_y - 1. / (num_classes - 1))
cdf_min = torch.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
# equivalent: torch.log(F.sigmoid(plus_in))
log_cdf_plus = plus_in - F.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
# equivalent: (1 - F.sigmoid(min_in)).log()
log_one_minus_cdf_min = -F.softplus(min_in)
# probability for all other cases
cdf_delta = cdf_plus - cdf_min
mid_in = inv_stdv * centered_y
    # log probability in the center of the bin, used as a fallback
    # in the extreme case where cdf_delta is too small (see inner_inner_cond below)
log_pdf_mid = mid_in - log_scales - 2. * F.softplus(mid_in)
# tf equivalent
"""
log_probs = tf.where(x < -0.999, log_cdf_plus,
tf.where(x > 0.999, log_one_minus_cdf_min,
tf.where(cdf_delta > 1e-5,
tf.log(tf.maximum(cdf_delta, 1e-12)),
log_pdf_mid - np.log(127.5))))
"""
# TODO: cdf_delta <= 1e-5 actually can happen. How can we choose the value
# for num_classes=65536 case? 1e-7? not sure..
inner_inner_cond = (cdf_delta > 1e-5).float()
inner_inner_out = inner_inner_cond * \
torch.log(torch.clamp(cdf_delta, min=1e-12)) + \
(1. - inner_inner_cond) * (log_pdf_mid - np.log((num_classes - 1) / 2))
inner_cond = (y > 0.999).float()
inner_out = inner_cond * log_one_minus_cdf_min + (1. - inner_cond) * inner_inner_out
cond = (y < -0.999).float()
log_probs = cond * log_cdf_plus + (1. - cond) * inner_out
log_probs = log_probs + F.log_softmax(logit_probs, -1)
if reduce:
return -torch.mean(log_sum_exp(log_probs))
else:
return -log_sum_exp(log_probs).unsqueeze(-1)
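# Shape sketch (assumed shapes, following the permute/expand logic above): y_hat carries
# 3 * nr_mix mixture parameters per timestep and y holds target samples in [-1, 1].
def _discretized_mix_logistic_loss_example():
    batch, timesteps, nr_mix = 2, 16, 10
    y_hat = torch.randn(batch, timesteps, 3 * nr_mix)
    y = torch.empty(batch, timesteps, 1).uniform_(-1, 1)
    loss = discretized_mix_logistic_loss(y_hat, y)
    assert loss.dim() == 0  # scalar when reduce=True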
def sample_from_discretized_mix_logistic(y, log_scale_min=None):
"""
https://github.com/fatchord/WaveRNN/blob/master/utils/distribution.py
Sample from discretized mixture of logistic distributions
Args:
y (Tensor): B x C x T
log_scale_min (float): Log scale minimum value
Returns:
Tensor: sample in range of [-1, 1].
"""
if log_scale_min is None:
log_scale_min = float(np.log(1e-14))
assert y.size(1) % 3 == 0
nr_mix = y.size(1) // 3
# B x T x C
y = y.transpose(1, 2)
logit_probs = y[:, :, :nr_mix]
# sample mixture indicator from softmax
temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5)
temp = logit_probs.data - torch.log(- torch.log(temp))
_, argmax = temp.max(dim=-1)
# (B, T) -> (B, T, nr_mix)
one_hot = F.one_hot(argmax, nr_mix).float()
# select logistic parameters
means = torch.sum(y[:, :, nr_mix:2 * nr_mix] * one_hot, dim=-1)
log_scales = torch.clamp(torch.sum(
y[:, :, 2 * nr_mix:3 * nr_mix] * one_hot, dim=-1), min=log_scale_min)
# sample from logistic & clip to interval
# we don't actually round to the nearest 8bit value when sampling
u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5)
x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u))
x = torch.clamp(torch.clamp(x, min=-1.), max=1.)
return x
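# Sampling sketch (assumed layout, mirroring the docstring above): the network output y is
# (B, C, T) with C = 3 * nr_mix, and the returned samples are clipped to [-1, 1].
def _sample_from_discretized_mix_logistic_example():
    batch, timesteps, nr_mix = 2, 16, 10
    y = torch.randn(batch, 3 * nr_mix, timesteps)
    x = sample_from_discretized_mix_logistic(y)
    assert x.shape == (batch, timesteps)
    assert float(x.min()) >= -1.0 and float(x.max()) <= 1.0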
|