ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 1a3c0fc07e2749d910790b13e76e0bbd8af719a0 |
microcode = '''
def macroop VADDSD_XMM_XMM {
maddf xmm0, xmm0v, xmm0m, size=8, ext=Scalar
movfp dest=xmm1, src1=xmm1v, dataSize=8
vclear dest=xmm2, destVL=16
};
def macroop VADDSD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
maddf xmm0, xmm0v, ufp1, size=8, ext=Scalar
movfp dest=xmm1, src1=xmm1v, dataSize=8
vclear dest=xmm2, destVL=16
};
def macroop VADDSD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
maddf xmm0, xmm0v, ufp1, size=8, ext=Scalar
movfp dest=xmm1, src1=xmm1v, dataSize=8
vclear dest=xmm2, destVL=16
};
'''
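# Note (descriptive, added for clarity): the three macroops above cover the
# register, memory, and RIP-relative operand forms of VADDSD. The scalar
# double-precision add (maddf, size=8, ext=Scalar) writes the low 64 bits,
# movfp copies the upper 64 bits of the destination from the first source
# register, and vclear zeroes the vector lanes above 128 bits (destVL=16).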
|
py | 1a3c0ff0ba10ebd74247baa1e625b0a786a7299c | import segmentation_models_pytorch as smp
import torch.optim
from .losses import CombinedLoss, BinaryFocalLoss
def get_optimizer(config, model):
"""
"""
optimizer_name = config.SOLVER.OPTIMIZER
if optimizer_name == 'adam':
return torch.optim.Adam(
model.parameters(),
lr=config.SOLVER.LR,
weight_decay=config.SOLVER.WEIGHT_DECAY
)
elif optimizer_name == 'adamw':
return torch.optim.AdamW(
model.parameters(),
lr=config.SOLVER.LR,
weight_decay=config.SOLVER.WEIGHT_DECAY
)
else:
raise ValueError(f'Unsupported optimizer: {optimizer_name}')
def get_lr_scheduler(config, optimizer):
"""
"""
scheduler_name = config.SOLVER.LR_SCHEDULER
if scheduler_name == 'multistep':
return torch.optim.lr_scheduler.MultiStepLR(
optimizer,
milestones=config.SOLVER.LR_MULTISTEP_MILESTONES,
gamma=config.SOLVER.LR_MULTISTEP_GAMMA
)
elif scheduler_name == 'annealing':
return torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer,
T_max=config.SOLVER.LR_ANNEALING_T_MAX,
eta_min=config.SOLVER.LR_ANNEALING_ETA_MIN,
)
else:
raise ValueError(f'Unsupported LR scheduler: {scheduler_name}')
def get_loss(config):
"""
"""
def _get_loss(config, loss_name):
if loss_name == 'bce':
return smp.utils.losses.BCELoss()
elif loss_name == 'dice':
return smp.utils.losses.DiceLoss()
elif loss_name == 'focal':
return BinaryFocalLoss(
gamma=config.SOLVER.FOCAL_LOSS_GAMMA
)
else:
raise ValueError(f'Unsupported loss: {loss_name}')
loss_modules = []
for loss_name in config.SOLVER.LOSSES:
loss_modules.append(_get_loss(config, loss_name))
return CombinedLoss(
loss_modules,
config.SOLVER.LOSS_WEIGHTS
)
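# Illustrative usage sketch (assumes a YACS-style config exposing the SOLVER.*
# fields referenced above; names not defined in this file, such as
# config.SOLVER.EPOCHS, are hypothetical):
#
#   model = smp.Unet(encoder_name='resnet34', classes=1)
#   optimizer = get_optimizer(config, model)
#   scheduler = get_lr_scheduler(config, optimizer)
#   criterion = get_loss(config)
#   for epoch in range(config.SOLVER.EPOCHS):
#       ...  # train one epoch, then step the scheduler
#       scheduler.step()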
|
py | 1a3c10b5e32224fed42e44a159389baa600c8841 | import asyncio
import logging
import time
from typing import Callable
from covid.protocols.protocol_message_types import ProtocolMessageTypes
log = logging.getLogger(__name__)
async def time_out_assert_custom_interval(timeout: int, interval, function, value=True, *args, **kwargs):
start = time.time()
while time.time() - start < timeout:
if asyncio.iscoroutinefunction(function):
f_res = await function(*args, **kwargs)
else:
f_res = function(*args, **kwargs)
if value == f_res:
return None
await asyncio.sleep(interval)
assert False, "Timed assertion timed out"
async def time_out_assert(timeout: int, function, value=True, *args, **kwargs):
await time_out_assert_custom_interval(timeout, 0.05, function, value, *args, **kwargs)
async def time_out_assert_not_none(timeout: int, function, *args, **kwargs):
start = time.time()
while time.time() - start < timeout:
if asyncio.iscoroutinefunction(function):
f_res = await function(*args, **kwargs)
else:
f_res = function(*args, **kwargs)
if f_res is not None:
return None
await asyncio.sleep(0.05)
assert False, "Timed assertion timed out"
def time_out_messages(incoming_queue: asyncio.Queue, msg_name: str, count: int = 1) -> Callable:
async def bool_f():
if incoming_queue.qsize() < count:
return False
for _ in range(count):
response = (await incoming_queue.get())[0].type
if ProtocolMessageTypes(response).name != msg_name:
# log.warning(f"time_out_message: found {response} instead of {msg_name}")
return False
return True
return bool_f
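# Illustrative usage inside an async test (object, method, and message names
# below are hypothetical):
#
#   await time_out_assert(30, node.is_synced)
#   await time_out_assert_not_none(15, wallet.get_transaction, tx_id)
#   await time_out_assert(20, time_out_messages(incoming_queue, "new_peak", 2))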
|
py | 1a3c10c15412a8262c1987eb38f7ac521885d29e | # encoding: UTF-8
__author__ = 'CHENXY'
# Mapping from C++ types to Python types
type_dict = {
'int': 'int',
'char': 'string',
'double': 'float',
'short': 'int'
}
def process_line(line):
"""处理每行"""
if '///' in line: # comment line
py_line = process_comment(line)
elif 'typedef' in line: # type declaration
py_line = process_typedef(line)
elif '#define' in line: # constant definition
py_line = process_define(line)
elif line == '\n': # blank line
py_line = line
else:
py_line = ''
return py_line
def process_comment(line):
"""处理注释"""
# if line[3] == '/':
# py_line = ''
# else:
# py_line = '#' + line[3:]
py_line = '#' + line[3:]
return py_line
def process_typedef(line):
"""处理类型申明"""
content = line.split(' ')
type_ = type_dict[content[1]]
keyword = content[2]
if '[' in keyword:
i = keyword.index('[')
keyword = keyword[:i]
else:
keyword = keyword.replace(';\n', '') # strip the trailing semicolon
py_line = 'typedefDict["%s"] = "%s"\n' % (keyword, type_)
return py_line
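# For example (hypothetical input line), 'typedef char TExampleType[9];'
# becomes: typedefDict["TExampleType"] = "string"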
def process_define(line):
"""处理定义常量"""
content = line.split(' ')
constant = content[1]
if len(content)>2:
value = content[-1]
py_line = 'defineDict["%s"] = %s' % (constant, value)
else:
py_line = ''
return py_line
def main():
"""主函数"""
try:
fcpp = open('SecurityFtdcL2MDUserApiDataType.h', 'r', encoding='gbk')
fpy = open('l2_data_type.py', 'w', encoding='utf-8')
fpy.write('# encoding: UTF-8\n')
fpy.write('\n')
fpy.write('defineDict = {}\n')
fpy.write('typedefDict = {}\n')
fpy.write('\n')
for line in fcpp:
py_line = process_line(line)
if py_line:
fpy.write(py_line)
fcpp.close()
fpy.close()
print('data_type.py generation completed')
except Exception:
print('data_type.py generation failed')
if __name__ == '__main__':
main()
|
py | 1a3c112503c74bda0e9f1a0b63f2315436e8569e | # coding=utf-8
# Copyright 2020, The T5 Authors and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" T5 model configuration """
from collections import OrderedDict
from typing import Any, Dict, Iterable, Mapping, Optional
from transformers import PreTrainedTokenizer, TensorType
from ... import is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.T5Model` or a
:class:`~transformers.TFT5Model`. It is used to instantiate a T5 model according to the specified arguments,
defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration
to that of the T5 `t5-small <https://huggingface.co/t5-small>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Arguments:
vocab_size (:obj:`int`, `optional`, defaults to 32128):
Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.T5Model` or :class:`~transformers.TFT5Model`.
d_model (:obj:`int`, `optional`, defaults to 512):
Size of the encoder layers and the pooler layer.
d_kv (:obj:`int`, `optional`, defaults to 64):
Size of the key, query, value projections per attention head. :obj:`d_kv` has to be equal to :obj:`d_model
// num_heads`.
d_ff (:obj:`int`, `optional`, defaults to 2048):
Size of the intermediate feed forward layer in each :obj:`T5Block`.
num_layers (:obj:`int`, `optional`, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_decoder_layers (:obj:`int`, `optional`):
Number of hidden layers in the Transformer decoder. Will use the same value as :obj:`num_layers` if not
set.
num_heads (:obj:`int`, `optional`, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
relative_attention_num_buckets (:obj:`int`, `optional`, defaults to 32):
The number of buckets to use for each attention layer.
dropout_rate (:obj:`float`, `optional`, defaults to 0.1):
The ratio for all dropout layers.
layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-6):
The epsilon used by the layer normalization layers.
initializer_factor (:obj:`float`, `optional`, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
feed_forward_proj (:obj:`string`, `optional`, defaults to :obj:`"relu"`):
Type of feed forward layer to be used. Should be one of :obj:`"relu"` or :obj:`"gated-gelu"`. T5v1.1 uses
the :obj:`"gated-gelu"` feed forward projection. Original T5 uses :obj:`"relu"`.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
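Example (illustrative)::

    >>> from transformers import T5Config, T5Model
    >>> # Initializing a T5 configuration with default (t5-small-like) values
    >>> configuration = T5Config()
    >>> # Initializing a model from that configuration
    >>> model = T5Model(configuration)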
"""
model_type = "t5"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=32128,
d_model=512,
d_kv=64,
d_ff=2048,
num_layers=6,
num_decoder_layers=None,
num_heads=8,
relative_attention_num_buckets=32,
dropout_rate=0.1,
layer_norm_epsilon=1e-6,
initializer_factor=1.0,
feed_forward_proj="relu",
is_encoder_decoder=True,
use_cache=True,
pad_token_id=0,
eos_token_id=1,
gradient_checkpointing=False,
**kwargs
):
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
**kwargs,
)
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
@property
def hidden_size(self):
return self.d_model
@property
def num_attention_heads(self):
return self.num_heads
@property
def num_hidden_layers(self):
return self.num_layers
class T5OnnxConfig(OnnxConfigWithPast):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch"}),
("decoder_attention_mask", {0: "batch"}),
]
)
if self.use_past:
for i in range(0, self._config.num_layers):
common_inputs[f"past_key_values.{i}.decoder.key"] = {0: "batch", 2: "past_sequence"}
common_inputs[f"past_key_values.{i}.decoder.value"] = {0: "batch", 2: "past_sequence"}
common_inputs[f"past_key_values.{i}.encoder.key"] = {0: "batch", 2: "past_sequence"}
common_inputs[f"past_key_values.{i}.encoder.value"] = {0: "batch", 2: "past_sequence"}
return common_inputs
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
common_outputs = super().outputs
if "last_hidden_state" in common_outputs:
common_outputs["last_hidden_state"] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
for i in range(self._config.num_layers):
common_outputs[f"present.{i}.decoder.key"] = {0: "batch", 2: "decoder_sequence"}
common_outputs[f"present.{i}.decoder.value"] = {0: "batch", 2: "decoder_sequence"}
common_outputs[f"present.{i}.encoder.key"] = {0: "batch", 2: "encoder_sequence"}
common_outputs[f"present.{i}.encoder.value"] = {0: "batch", 2: "encoder_sequence"}
if self.task == "default":
common_outputs["encoder_last_hidden_state"] = {0: "batch", 2: "encoder_sequence"}
return common_outputs
def generate_dummy_inputs(
self,
tokenizer: PreTrainedTokenizer,
batch_size: int = -1,
seq_length: int = -1,
is_pair: bool = False,
framework: Optional[TensorType] = None,
) -> Mapping[str, Any]:
# Generate encoder inputs
encoder_inputs = super().generate_dummy_inputs(tokenizer, batch_size, seq_length, is_pair, framework)
# Generate decoder inputs
decoder_inputs = super().generate_dummy_inputs(tokenizer, batch_size, 1, is_pair, framework)
decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
ordered_inputs = dict(**encoder_inputs, **decoder_inputs)
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
batch = encoder_inputs["input_ids"].shape[0]
encoder_seq_length = encoder_inputs["input_ids"].shape[1]
encoder_shape = (
batch,
self._config.num_heads,
encoder_seq_length,
self._config.hidden_size // self._config.num_heads,
)
decoder_shape = (batch, self._config.num_heads, 1, self._config.hidden_size // self._config.num_heads)
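# Each past key/value tensor is (batch, num_heads, seq_len, head_dim), where
# head_dim = hidden_size // num_heads (equal to d_kv for the default T5 sizes).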
ordered_inputs["past_key_values"] = []
for _ in range(self._config.num_layers):
ordered_inputs["past_key_values"].append(
(
torch.zeros(decoder_shape),
torch.zeros(decoder_shape),
torch.zeros(encoder_shape),
torch.zeros(encoder_shape),
)
)
return ordered_inputs
@staticmethod
def flatten_output_collection_property(name: str, field: Iterable[Any]) -> Dict[str, Any]:
if name in ["present", "past_key_values"]:
flatten_output = {}
for idx, t in enumerate(field):
flatten_output[f"{name}.{idx}.decoder.key"] = t[0]
flatten_output[f"{name}.{idx}.decoder.value"] = t[1]
flatten_output[f"{name}.{idx}.encoder.key"] = t[2]
flatten_output[f"{name}.{idx}.encoder.value"] = t[3]
return flatten_output
return super().flatten_output_collection_property(name, field)
|
py | 1a3c12ec84441ece3f0d3f67584e4d817b2c304b | from handroll.composers.mixins import FrontmatterComposerMixin
class FrontmatterExtractor(FrontmatterComposerMixin):
"""Extract frontmatter from a source file."""
def extract(self, source_file):
data, source = self.get_data(source_file)
return data
|
py | 1a3c12ee8f8411ca55c3ab61c0a1707831427aa6 | """
Implement views that render pages here. Remember not to put
business logic in views: only request/response preparation.
If you need complex logic for rendering or processing, create
corresponding classes or modules to implement it.
"""
|
py | 1a3c12feb5f01ef5e1b994e5658fb970a739e261 | # -*- coding: utf-8 -*-
from .export_cli import ExportCli
from .export_html import ExportHtml
from .export_md import ExportMd
from wallabag.format_type import ScreenType
class ExportFactory():
def create(entry, params, type, width):
if type == ScreenType.MARKDOWN:
return ExportMd(entry)
elif type == ScreenType.TERM:
return ExportCli(entry, params, width)
else:
return ExportHtml(entry)
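# Illustrative usage (entry and params come from the surrounding wallabag code):
#   exporter = ExportFactory.create(entry, params, ScreenType.TERM, width)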
|
py | 1a3c1303523c0d1cc251d59a13c14d2303acb53c | import os
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
requires = ['opencv-python', 'numpy', 'gym']
setup(name='Flappy_Bird_with_Segmentation',
version='1.0',
description='Flappy bird environment with ground truth segmentation',
author='Xiangyu Chen',
author_email='[email protected]',
url='https://github.com/cxy1997/Flappy-Bird-with-Segmentation/',
keywords='Flappy Bird',
packages=find_packages(),
license='LICENSE',
install_requires=requires) |
py | 1a3c14049b6bb70f4ebf1e30a85de6d2b813a896 | #!/usr/bin/env python
# coding: utf-8
import argparse
import logging
import os
import sys
import time
import numpy as np
import pandas as pd
import scanpy as sc
import torch
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader, TensorDataset
import DaNN.mmd as mmd
import scanpypip.preprocessing as pp
import trainers as t
import utils as ut
from models import (AEBase, DaNN, PretrainedPredictor,
PretrainedVAEPredictor, VAEBase)
DATA_MAP={
"GSE117872":"data/GSE117872/GSE117872_good_Data_TPM.txt",
"GSE117309":'data/GSE117309/filtered_gene_bc_matrices_HBCx-22/hg19/',
"GSE117309_TAMR":'data/GSE117309/filtered_gene_bc_matrices_HBCx22-TAMR/hg19/',
"GSE121107":'data/GSE121107/GSM3426289_untreated_out_gene_exon_tagged.dge.txt',
"GSE121107_1H":'data/GSE121107/GSM3426290_entinostat_1hr_out_gene_exon_tagged.dge.txt',
"GSE121107_6H":'data/GSE121107/GSM3426291_entinostat_6hr_out_gene_exon_tagged.dge.txt',
"GSE111014":'data/GSE111014/',
"GSE110894":"data/GSE110894/GSE110894.csv",
"GSE122843":"data/GSE122843/GSE122843.txt",
"GSE112274":"data/GSE112274/GSE112274_cell_gene_FPKM.csv",
"GSE116237":"data/GSE116237/GSE116237_bulkRNAseq_expressionMatrix.txt",
"GSE108383":"data/GSE108383/GSE108383_Melanoma_fluidigm.txt",
"GSE140440":"data/GSE140440/GSE140440.csv",
"GSE129730":"data/GSE129730/GSE129730.h5ad",
"GSE149383":"data/GSE149383/erl_total_data_2K.csv",
"GSE110894_small":"data/GSE110894/GSE110894_small.h5ad"
}
def run_main(args):
################################################# START SECTION OF LOADING PARAMETERS #################################################
# Read parameters
t0 = time.time()
epochs = args.epochs
dim_au_out = args.bottleneck #8, 16, 32, 64, 128, 256,512
na = args.missing_value
if args.sc_data in DATA_MAP:
data_path = DATA_MAP[args.sc_data]
else:
data_path = args.sc_data
test_size = args.test_size
select_drug = args.drug
freeze = args.freeze_pretrain
valid_size = args.valid_size
g_disperson = args.var_genes_disp
min_n_genes = args.min_n_genes
max_n_genes = args.max_n_genes
source_model_path = args.bulk_model_path
target_model_path = args.sc_model_path
log_path = args.logging_file
batch_size = args.batch_size
encoder_hdims = args.bulk_h_dims.split(",")
encoder_hdims = list(map(int, encoder_hdims))
source_data_path = args.bulk_data
pretrain = args.pretrain
data_name = args.sc_data
label_path = args.label
reduce_model = args.dimreduce
predict_hdims = args.predictor_h_dims.split(",")
predict_hdims = list(map(int, predict_hdims))
leiden_res = args.cluster_res
load_model = bool(args.load_sc_model)
# Misc
now=time.strftime("%Y-%m-%d-%H-%M-%S")
# Initialize logging and std out
out_path = log_path+now+".err"
log_path = log_path+now+".log"
out=open(out_path,"w")
sys.stderr=out
# Logging information
logging.basicConfig(level=logging.INFO,
filename=log_path,
filemode='a',
format=
'%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'
)
logging.getLogger('matplotlib.font_manager').disabled = True
logging.info(args)
logging.info("Start at " + str(t0))
# Save arguments
args_df = ut.save_arguments(args,now)
################################################# END SECTION OF LOADING PARAMETERS #################################################
################################################# START SECTION OF SINGLE CELL DATA PREPROCESSING #################################################
# Load data and preprocessing
adata = pp.read_sc_file(data_path)
if data_name == 'GSE117872':
adata = ut.specific_process(adata,dataname=data_name,select_origin=args.batch_id)
elif data_name =='GSE122843':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE110894':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE112274':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE116237':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE108383':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE140440':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE129730':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE149383':
adata = ut.specific_process(adata,dataname=data_name)
else:
adata=adata
sc.pp.filter_cells(adata, min_genes=200)
sc.pp.filter_genes(adata, min_cells=3)
adata = pp.cal_ncount_ngenes(adata)
#Preprocess data by filtering
if data_name not in ['GSE112274','GSE140440']:
adata = pp.receipe_my(adata,l_n_genes=min_n_genes,r_n_genes=max_n_genes,filter_mincells=args.min_c,
filter_mingenes=args.min_g,normalize=True,log=True)
else:
adata = pp.receipe_my(adata,l_n_genes=min_n_genes,r_n_genes=max_n_genes,filter_mincells=args.min_c,percent_mito = args.percent_mito,
filter_mingenes=args.min_g,normalize=True,log=True)
# Select highly variable genes
sc.pp.highly_variable_genes(adata,min_disp=g_disperson,max_disp=np.inf,max_mean=6)
adata.raw = adata
adata = adata[:, adata.var.highly_variable]
# Preprocess data if a specific process is required
data=adata.X
# PCA
# Generate neighbor graph
sc.tl.pca(adata,svd_solver='arpack')
sc.pp.neighbors(adata, n_neighbors=10)
# Generate cluster labels
sc.tl.leiden(adata,resolution=leiden_res)
sc.tl.umap(adata)
adata.obs['leiden_origin']= adata.obs['leiden']
adata.obsm['X_umap_origin']= adata.obsm['X_umap']
data_c = adata.obs['leiden'].astype("long").to_list()
################################################# END SECTION OF SINGLE CELL DATA PREPROCESSING #################################################
################################################# START SECTION OF LOADING SC DATA TO THE TENSORS #################################################
# Prepare to normalize and split the target data
mmscaler = preprocessing.MinMaxScaler()
try:
data = mmscaler.fit_transform(data)
except Exception:
logging.warning("MinMax scaling failed on sparse input, converting to dense")
# Process sparse data
data = data.todense()
data = mmscaler.fit_transform(data)
# Split data to train and valid set
# Along with the leiden conditions for CVAE propose
Xtarget_train, Xtarget_valid, Ctarget_train, Ctarget_valid = train_test_split(data,data_c, test_size=valid_size, random_state=42)
# Select the device of gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Assuming that we are on a CUDA machine, this should print a CUDA device:
logging.info(device)
torch.cuda.set_device(device)
# Construct datasets and data loaders
Xtarget_trainTensor = torch.FloatTensor(Xtarget_train).to(device)
Xtarget_validTensor = torch.FloatTensor(Xtarget_valid).to(device)
# Use leiden label if CVAE is applied
Ctarget_trainTensor = torch.LongTensor(Ctarget_train).to(device)
Ctarget_validTensor = torch.LongTensor(Ctarget_valid).to(device)
X_allTensor = torch.FloatTensor(data).to(device)
C_allTensor = torch.LongTensor(data_c).to(device)
train_dataset = TensorDataset(Xtarget_trainTensor, Ctarget_trainTensor)
valid_dataset = TensorDataset(Xtarget_validTensor, Ctarget_validTensor)
Xtarget_trainDataLoader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
Xtarget_validDataLoader = DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=True)
dataloaders_pretrain = {'train':Xtarget_trainDataLoader,'val':Xtarget_validDataLoader}
################################################# START SECTION OF LOADING SC DATA TO THE TENSORS #################################################
################################################# START SECTION OF LOADING BULK DATA #################################################
# Read source data
data_r=pd.read_csv(source_data_path,index_col=0)
label_r=pd.read_csv(label_path,index_col=0)
label_r=label_r.fillna(na)
# Extract labels
selected_idx = label_r.loc[:,select_drug]!=na
label = label_r.loc[selected_idx.index,select_drug]
data_r = data_r.loc[selected_idx.index,:]
label = label.values.reshape(-1,1)
le = preprocessing.LabelEncoder()
label = le.fit_transform(label)
dim_model_out = 2
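# The bulk predictor is a binary classifier: column 0 of its output is the
# "resistant" probability and column 1 the "sensitive" probability (see the
# sens_preds / rest_preds assignments further below).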
# Process source data
mmscaler = preprocessing.MinMaxScaler()
source_data = mmscaler.fit_transform(data_r)
# Split source data
Xsource_train_all, Xsource_test, Ysource_train_all, Ysource_test = train_test_split(source_data,label, test_size=test_size, random_state=42)
Xsource_train, Xsource_valid, Ysource_train, Ysource_valid = train_test_split(Xsource_train_all,Ysource_train_all, test_size=valid_size, random_state=42)
# Transform source data
# Construct datasets and data loaders
Xsource_trainTensor = torch.FloatTensor(Xsource_train).to(device)
Xsource_validTensor = torch.FloatTensor(Xsource_valid).to(device)
Ysource_trainTensor = torch.LongTensor(Ysource_train).to(device)
Ysource_validTensor = torch.LongTensor(Ysource_valid).to(device)
sourcetrain_dataset = TensorDataset(Xsource_trainTensor, Ysource_trainTensor)
sourcevalid_dataset = TensorDataset(Xsource_validTensor, Ysource_validTensor)
Xsource_trainDataLoader = DataLoader(dataset=sourcetrain_dataset, batch_size=batch_size, shuffle=True)
Xsource_validDataLoader = DataLoader(dataset=sourcevalid_dataset, batch_size=batch_size, shuffle=True)
dataloaders_source = {'train':Xsource_trainDataLoader,'val':Xsource_validDataLoader}
################################################# END SECTION OF LOADING BULK DATA #################################################
################################################# START SECTION OF MODEL CONSTRUCTION #################################################
# Construct target encoder
if reduce_model == "AE":
encoder = AEBase(input_dim=data.shape[1],latent_dim=dim_au_out,h_dims=encoder_hdims)
loss_function_e = nn.MSELoss()
elif reduce_model == "VAE":
encoder = VAEBase(input_dim=data.shape[1],latent_dim=dim_au_out,h_dims=encoder_hdims)
if torch.cuda.is_available():
encoder.cuda()
logging.info("Target encoder structure is: ")
logging.info(encoder)
encoder.to(device)
optimizer_e = optim.Adam(encoder.parameters(), lr=1e-2)
loss_function_e = nn.MSELoss()
exp_lr_scheduler_e = lr_scheduler.ReduceLROnPlateau(optimizer_e)
dim_model_out = 2
# Load AE model
if reduce_model == "AE":
source_model = PretrainedPredictor(input_dim=Xsource_train.shape[1],latent_dim=dim_au_out,h_dims=encoder_hdims,
hidden_dims_predictor=predict_hdims,output_dim=dim_model_out,
pretrained_weights=None,freezed=freeze)
source_model.load_state_dict(torch.load(source_model_path))
source_encoder = source_model
# Load VAE model
elif reduce_model in ["VAE"]:
source_model = PretrainedVAEPredictor(input_dim=Xsource_train.shape[1],latent_dim=dim_au_out,h_dims=encoder_hdims,
hidden_dims_predictor=predict_hdims,output_dim=dim_model_out,
pretrained_weights=None,freezed=freeze,z_reparam=bool(args.VAErepram))
source_model.load_state_dict(torch.load(source_model_path))
source_encoder = source_model
logging.info("Load pretrained source model from: "+source_model_path)
source_encoder.to(device)
################################################# END SECTION OF MODEL CONSTRUCTION #################################################
################################################# START SECTION OF SC MODEL PRETRAINING #################################################
# Pretrain target encoder
# Pretrain using an autoencoder if pretrain is not False
if(str(pretrain)!='0'):
# Pretrain the target encoder if there is no stored model file on disk
train_flag = True
pretrain = str(pretrain)
if(os.path.exists(pretrain)==True):
try:
encoder.load_state_dict(torch.load(pretrain))
logging.info("Load pretrained target encoder from "+pretrain)
train_flag = False
except:
logging.warning("Loading failed, procceed to re-train model")
if train_flag == True:
if reduce_model == "AE":
encoder,loss_report_en = t.train_AE_model(net=encoder,data_loaders=dataloaders_pretrain,
optimizer=optimizer_e,loss_function=loss_function_e,
n_epochs=epochs,scheduler=exp_lr_scheduler_e,save_path=pretrain)
elif reduce_model == "VAE":
encoder,loss_report_en = t.train_VAE_model(net=encoder,data_loaders=dataloaders_pretrain,
optimizer=optimizer_e,
n_epochs=epochs,scheduler=exp_lr_scheduler_e,save_path=pretrain)
logging.info("Pretrained finished")
# Before Transfer learning, we test the performance of using no transfer performance:
# Use vae result to predict
embeddings_pretrain = encoder.encode(X_allTensor)
pretrain_prob_prediction = source_model.predict(embeddings_pretrain).detach().cpu().numpy()
adata.obs["sens_preds_pret"] = pretrain_prob_prediction[:,1]
adata.obs["sens_label_pret"] = pretrain_prob_prediction.argmax(axis=1)
# Add embeddings to the adata object
embeddings_pretrain = embeddings_pretrain.detach().cpu().numpy()
adata.obsm["X_pre"] = embeddings_pretrain
################################################# END SECTION OF SC MODEL PRETRAINING #################################################
################################################# START SECTION OF TRANSFER LEARNING TRAINING #################################################
# Using ADDA transfer learning
# DaNN model
# Set predictor loss
loss_d = nn.CrossEntropyLoss()
optimizer_d = optim.Adam(encoder.parameters(), lr=1e-2)
exp_lr_scheduler_d = lr_scheduler.ReduceLROnPlateau(optimizer_d)
# Set DaNN model
DaNN_model = DaNN(source_model=source_encoder,target_model=encoder)
DaNN_model.to(device)
def loss(x,y,GAMMA=args.mmd_GAMMA):
result = mmd.mmd_loss(x,y,GAMMA)
return result
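# The distribution loss is a maximum mean discrepancy (MMD) between the bulk
# and single-cell embeddings; --mmd_GAMMA sets the kernel parameter passed to
# mmd.mmd_loss and --mmd_weight scales its contribution to the total loss.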
loss_disrtibution = loss
# Train the DaNN model
DaNN_model, report_ = t.train_DaNN_model(DaNN_model,
dataloaders_source,dataloaders_pretrain,
# Should here be all optimizer d?
optimizer_d, loss_d,
epochs,exp_lr_scheduler_d,
dist_loss=loss_disrtibution,
load=load_model,
weight = args.mmd_weight,
save_path=target_model_path+"_DaNN.pkl")
encoder = DaNN_model.target_model
source_model = DaNN_model.source_model
logging.info("Transfer DaNN finished")
################################################# END SECTION OF TRANSFER LEARNING TRAINING #################################################
################################################# START SECTION OF PREPROCESSING FEATURES #################################################
# Extract feature embeddings
# Extract prediction probabilities
embedding_tensors = encoder.encode(X_allTensor)
prediction_tensors = source_model.predictor(embedding_tensors)
embeddings = embedding_tensors.detach().cpu().numpy()
predictions = prediction_tensors.detach().cpu().numpy()
# Transform prediction probabilities to 0-1 labels
adata.obs["sens_preds"] = predictions[:,1]
adata.obs["sens_label"] = predictions.argmax(axis=1)
adata.obs["sens_label"] = adata.obs["sens_label"].astype('category')
adata.obs["rest_preds"] = predictions[:,0]
################################################# END SECTION OF PREPROCESSING FEATURES #################################################
################################################# START SECTION OF ANALYSIS AND POST PROCESSING #################################################
################################################# END SECTION OF ANALYSIS AND POST PROCESSING #################################################
################################################# START SECTION OF ANALYSIS FOR BULK DATA #################################################
# Save adata
adata.write("saved/adata/"+data_name+now+".h5ad")
################################################# END SECTION OF ANALYSIS FOR BULK DATA #################################################
t1 = time.time()
logging.info("End at " + str(t1)+", takes :" )
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data
parser.add_argument('--bulk_data', type=str, default='data/GDSC2_expression.csv',help='Path of the bulk RNA-Seq expression profile')
parser.add_argument('--label', type=str, default='data/GDSC2_label_9drugs_binary.csv',help='Path of the processed bulk RNA-Seq drug screening annotation')
parser.add_argument('--sc_data', type=str, default="GSE110894",help='Accession id for testing data, only support pre-built data.')
parser.add_argument('--drug', type=str, default='Cisplatin',help='Name of the selected drug, should be a column name in the input file of --label')
parser.add_argument('--missing_value', type=int, default=1,help='The value filled in the missing entry in the drug screening annotation, default: 1')
parser.add_argument('--test_size', type=float, default=0.2,help='Size of the test set for the bulk model training, default: 0.2')
parser.add_argument('--valid_size', type=float, default=0.2,help='Size of the validation set for the bulk model training, default: 0.2')
parser.add_argument('--var_genes_disp', type=float, default=0,help='Dispersion threshold for highly variable gene selection when pre-processing the data. \
If None, all genes will be selected. Default: None')
parser.add_argument('--min_n_genes', type=int, default=0,help="Minimum number of genes with UMI counts >1 required per cell, for filtering purposes, default: 0 ")
parser.add_argument('--max_n_genes', type=int, default=20000,help="Maximum number of genes with UMI counts >1 allowed per cell, for filtering purposes, default: 20000 ")
parser.add_argument('--min_g', type=int, default=200,help="Minimum number of genes per cell, for filtering purposes, default: 200")
parser.add_argument('--min_c', type=int, default=3,help="Minimum number of cells in which each gene must be expressed, for filtering purposes, default: 3")
parser.add_argument('--percent_mito', type=int, default=100,help="Maximum percentage of expression from mitochondrial genes allowed per cell, for filtering purposes, default: 100")
parser.add_argument('--cluster_res', type=float, default=0.3,help="Resolution of Leiden clustering of scRNA-Seq data, default: 0.3")
parser.add_argument('--mmd_weight', type=float, default=0.25,help="Weight of the MMD loss of the transfer learning, default: 0.25")
parser.add_argument('--mmd_GAMMA', type=int, default=1000,help="Gamma parameter in the kernel of the MMD loss of the transfer learning, default: 1000")
# train
parser.add_argument('--bulk_model_path','-s', type=str, default='saved/models/predictor_bulk.pkl',help='Path of the trained predictor in the bulk level')
parser.add_argument('--sc_model_path', '-p', type=str, default='saved/models/predictor_sc_',help='Path (prefix) of the trained predictor in the single cell level')
parser.add_argument('--pretrain', type=str, default='saved/models/encoder_sc.pkl',help='Path of the pre-trained encoder in the single-cell level')
parser.add_argument('--lr', type=float, default=1e-2,help='Learning rate of model training. Default: 1e-2')
parser.add_argument('--epochs', type=int, default=500,help='Number of training epochs. Default: 500')
parser.add_argument('--batch_size', type=int, default=200,help='Number of batch size when training. Default: 200')
parser.add_argument('--bottleneck', type=int, default=512,help='Size of the bottleneck layer of the model. Default: 512')
parser.add_argument('--dimreduce', type=str, default="AE",help='Encoder model type. Can be AE or VAE. Default: AE')
parser.add_argument('--freeze_pretrain', type=int,default=0,help='Freeze the parameters in the pretrained model. 0: do not freeze, 1: freeze. Default: 0')
parser.add_argument('--bulk_h_dims', type=str, default="512,256",help='Shape of the source encoder. Each number represents the number of neurons in a layer. \
Layers are separated by a comma. Default: 512,256')
parser.add_argument('--sc_h_dims', type=str, default="512,256",help='Shape of the encoder. Each number represents the number of neurons in a layer. \
Layers are separated by a comma. Default: 512,256')
parser.add_argument('--predictor_h_dims', type=str, default="16,8",help='Shape of the predictor. Each number represents the number of neurons in a layer. \
Layers are separated by a comma. Default: 16,8')
parser.add_argument('--VAErepram', type=int, default=1)
parser.add_argument('--batch_id', type=str, default="HN137",help="Batch id only for testing")
parser.add_argument('--load_sc_model', type=int, default=0,help='Load a trained model or not. 0: do not load, 1: load. Default: 0')
# misc
parser.add_argument('--logging_file', '-l', type=str, default='saved/logs/transfer_',help='Path of training log')
#
args, unknown = parser.parse_known_args()
run_main(args)
|
py | 1a3c14b8129c766f5b9e20f9904d1ca8381ec68f | from cellpose import io, models, metrics, plot
from pathlib import Path
from subprocess import check_output, STDOUT
import os, shutil
def test_class_train(data_dir, image_names):
train_dir = str(data_dir.joinpath('2D').joinpath('train'))
model_dir = str(data_dir.joinpath('2D').joinpath('train').joinpath('models'))
shutil.rmtree(model_dir, ignore_errors=True)
output = io.load_train_test_data(train_dir, mask_filter='_cyto_masks')
images, labels, image_names, test_images, test_labels, image_names_test = output
model = models.CellposeModel(pretrained_model=None, diam_mean=30)
cpmodel_path = model.train(images, labels, train_files=image_names,
test_data=test_images, test_labels=test_labels, test_files=image_names_test,
channels=[2,1], save_path=train_dir, n_epochs=10)
print('>>>> model trained and saved to %s'%cpmodel_path)
def test_cli_train(data_dir, image_names):
train_dir = str(data_dir.joinpath('2D').joinpath('train'))
model_dir = str(data_dir.joinpath('2D').joinpath('train').joinpath('models'))
shutil.rmtree(model_dir, ignore_errors=True)
cmd = 'python -m cellpose --train --train_size --n_epochs 10 --dir %s --mask_filter _cyto_masks --pretrained_model cyto --chan 2 --chan2 1 --diameter 30'%train_dir
try:
cmd_stdout = check_output(cmd, stderr=STDOUT, shell=True).decode()
except Exception as e:
print(e)
raise ValueError(e)
|
py | 1a3c16c1acc4c6aa392b9a6458044c104660bcda | import torch
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class Keypoints(object):
def __init__(self, keypoints, size, mode=None):
# FIXME remove check once we have better integration with device
# in my version this would consistently return a CPU tensor
device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device('cpu')
keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
# TODO should I split them?
# self.visibility = keypoints[..., 2]
self.keypoints = keypoints  # [..., :2]
self.size = size
self.mode = mode
self.extra_fields = {}
def crop(self, box):
raise NotImplementedError()
def resize(self, size, *args, **kwargs):
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
ratio_w, ratio_h = ratios
resized_data = self.keypoints.clone()
resized_data[..., 0] *= ratio_w
resized_data[..., 1] *= ratio_h
keypoints = type(self)(resized_data, size, self.mode)
for k, v in self.extra_fields.items():
keypoints.add_field(k, v)
# print("keypoints resize!!")
return keypoints
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT,):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT implemented")
# flip_inds = type(self).FLIP_INDS
# flipped_data = self.keypoints[:, flip_inds]
# width = self.size[0]
# TO_REMOVE = 1
# # Flip x coordinates
# flipped_data[..., 0] = width - flipped_data[..., 0] - TO_REMOVE
# # Maintain COCO convention that if visibility == 0, then x, y = 0
# inds = flipped_data[..., 2] == 0
# flipped_data[inds] = 0
flipped_data=self.keypoints
keypoints = type(self)(flipped_data, self.size, self.mode)
for k, v in self.extra_fields.items():
keypoints.add_field(k, v)
return keypoints
def to(self, *args, **kwargs):
keypoints = type(self)(self.keypoints.to(*args, **kwargs), self.size, self.mode)
for k, v in self.extra_fields.items():
if hasattr(v, "to"):
v = v.to(*args, **kwargs)
keypoints.add_field(k, v)
return keypoints
def __getitem__(self, item):
keypoints = type(self)(self.keypoints[item], self.size, self.mode)
for k, v in self.extra_fields.items():
keypoints.add_field(k, v[item])
return keypoints
def add_field(self, field, field_data):
self.extra_fields[field] = field_data
def get_field(self, field):
return self.extra_fields[field]
def __repr__(self):
s = self.__class__.__name__ + '('
s += 'num_instances={}, '.format(len(self.keypoints))
s += 'image_width={}, '.format(self.size[0])
s += 'image_height={})'.format(self.size[1])
return s
def _create_flip_indices(names, flip_map):
full_flip_map = flip_map.copy()
full_flip_map.update({v: k for k, v in flip_map.items()})
flipped_names = [i if i not in full_flip_map else full_flip_map[i] for i in names]
flip_indices = [names.index(i) for i in flipped_names]
return torch.tensor(flip_indices)
class PersonKeypoints(Keypoints):
NAMES = [
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
FLIP_MAP = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'
}
# TODO this doesn't look great
PersonKeypoints.FLIP_INDS = _create_flip_indices(PersonKeypoints.NAMES, PersonKeypoints.FLIP_MAP)
def kp_connections(keypoints):
kp_lines = [
[keypoints.index('left_eye'), keypoints.index('right_eye')],
[keypoints.index('left_eye'), keypoints.index('nose')],
[keypoints.index('right_eye'), keypoints.index('nose')],
[keypoints.index('right_eye'), keypoints.index('right_ear')],
[keypoints.index('left_eye'), keypoints.index('left_ear')],
[keypoints.index('right_shoulder'), keypoints.index('right_elbow')],
[keypoints.index('right_elbow'), keypoints.index('right_wrist')],
[keypoints.index('left_shoulder'), keypoints.index('left_elbow')],
[keypoints.index('left_elbow'), keypoints.index('left_wrist')],
[keypoints.index('right_hip'), keypoints.index('right_knee')],
[keypoints.index('right_knee'), keypoints.index('right_ankle')],
[keypoints.index('left_hip'), keypoints.index('left_knee')],
[keypoints.index('left_knee'), keypoints.index('left_ankle')],
[keypoints.index('right_shoulder'), keypoints.index('left_shoulder')],
[keypoints.index('right_hip'), keypoints.index('left_hip')],
]
return kp_lines
PersonKeypoints.CONNECTIONS = kp_connections(PersonKeypoints.NAMES)
# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop)
def keypoints_to_heat_map(keypoints, rois, heatmap_size):
if rois.numel() == 0:
return rois.new().long(), rois.new().long()
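# Map each keypoint from image coordinates into an integer bin of a
# heatmap_size x heatmap_size grid aligned with its ROI; keypoints that fall
# outside the ROI or are marked invisible (v == 0) are flagged as invalid.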
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])
offset_x = offset_x[:, None]
offset_y = offset_y[:, None]
scale_x = scale_x[:, None]
scale_y = scale_y[:, None]
x = keypoints[..., 0]
y = keypoints[..., 1]
x_boundary_inds = x == rois[:, 2][:, None]
y_boundary_inds = y == rois[:, 3][:, None]
x = (x - offset_x) * scale_x
x = x.floor().long()
y = (y - offset_y) * scale_y
y = y.floor().long()
x[x_boundary_inds] = heatmap_size - 1
y[y_boundary_inds] = heatmap_size - 1
valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
vis = keypoints[..., 2] > 0
valid = (valid_loc & vis).long()
lin_ind = y * heatmap_size + x
heatmaps = lin_ind * valid
return heatmaps, valid
|
py | 1a3c16d4a6d1eb468670ce38d16dca3600ca0f36 | """
make_bmap.py
Creates an image that can be used as a bump mapping texture.
Mahesh Venkitachalam
shader.in
"""
import numpy as np
from PIL import Image
from math import sqrt
def main():
NX, NY = 256, 256
nmap = np.zeros([NX, NY, 3], np.float32)
r = 32.0
rsq = r*r
centers = [(64, 64), (192, 64), (64, 192), (192, 192)]
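# Build a normal map for bump mapping: four hemispherical bumps of radius r
# are centered at the points above; pixels outside every bump get the flat
# "straight up" normal (0, 0, 1).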
for i in range(NX):
for j in range(NY):
inside = False
for C in centers:
x = (i-C[0])
y = (j-C[1])
if x*x + y*y < rsq :
nmap[i][j][0] = x / r
nmap[i][j][1] = y / r
nmap[i][j][2] = sqrt(rsq - (x*x + y*y))/ r
inside = True
if not inside:
nmap[i][j][0] = 0.0
nmap[i][j][1] = 0.0
nmap[i][j][2] = 1.0
# [-1, 1] to [0, 255]
nmap = 255.0*0.5*(nmap + 1.0)
img = np.array(nmap, np.uint8)
img = Image.fromarray(img)
img.save("bmap.png")
# call main
if __name__ == '__main__':
main()
|
py | 1a3c170736c5adfa95b6294c6a3bd1b950275158 | song = [
[["C3",2],["F1",1],["E4",8]],
[["D3",2],["F1",1],["E4",0]],
[["C3",2],["E1",1],["E4",8]],
[["C3",2],["E1",1],["E4",0]],
[["D3",2],["F1",1],["E4",8]],
[["C3",2],["F1",1],["E4",8]],
[["D3",2],["E1",1],["E4",0]],
[["C3",2],["E1",1],["E4",8]]
] |
py | 1a3c18a47c211d71d07f1e572a3c2c6bf97a979c | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.9 Python SDK
Pure Storage FlashBlade REST 1.9 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CertificateGroupUse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'use': 'FixedReferenceWithRemote'
}
attribute_map = {
'id': 'id',
'name': 'name',
'use': 'use'
}
def __init__(self, id=None, name=None, use=None): # noqa: E501
"""CertificateGroupUse - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self._use = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if use is not None:
self.use = use
@property
def id(self):
"""Gets the id of this CertificateGroupUse. # noqa: E501
A non-modifiable, globally unique ID chosen by the system. # noqa: E501
:return: The id of this CertificateGroupUse. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CertificateGroupUse.
A non-modifiable, globally unique ID chosen by the system. # noqa: E501
:param id: The id of this CertificateGroupUse. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this CertificateGroupUse. # noqa: E501
The name of the object (e.g., a file system or snapshot). # noqa: E501
:return: The name of this CertificateGroupUse. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CertificateGroupUse.
The name of the object (e.g., a file system or snapshot). # noqa: E501
:param name: The name of this CertificateGroupUse. # noqa: E501
:type: str
"""
self._name = name
@property
def use(self):
"""Gets the use of this CertificateGroupUse. # noqa: E501
A reference to an object using this certificate group. # noqa: E501
:return: The use of this CertificateGroupUse. # noqa: E501
:rtype: FixedReferenceWithRemote
"""
return self._use
@use.setter
def use(self, use):
"""Sets the use of this CertificateGroupUse.
A reference to an object using this certificate group. # noqa: E501
:param use: The use of this CertificateGroupUse. # noqa: E501
:type: FixedReferenceWithRemote
"""
self._use = use
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CertificateGroupUse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CertificateGroupUse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a3c18a4f44f3290b824d7ffc5bddbe1a4c1b61e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MicroPython documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 21 11:42:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# The members of the html_context dict are available inside topindex.html
micropy_version = os.getenv('MICROPY_VERSION') or 'latest'
micropy_all_versions = (os.getenv('MICROPY_ALL_VERSIONS') or 'latest').split(',')
url_pattern = '%s/en/%%s' % (os.getenv('MICROPY_URL_PREFIX') or '/',)
html_context = {
'cur_version':micropy_version,
'all_versions':[
(ver, url_pattern % ver) for ver in micropy_all_versions
],
'downloads':[
('PDF', url_pattern % micropy_version + '/micropython-docs.pdf'),
],
}
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'MicroPython'
copyright = '- The MicroPython Documentation is Copyright © 2014-2021, Damien P. George, Paul Sokolovsky, and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# We don't follow "The short X.Y version" vs "The full version, including alpha/beta/rc tags"
# breakdown, so use the same version identifier for both to avoid confusion.
version = release = '1.17'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', '.venv']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Global include files. Sphinx docs suggest using rst_epilog in preference
# of rst_prolog, so we follow. Absolute paths below mean "from the base
# of the doctree".
rst_epilog = """
.. include:: /templates/replace.inc
"""
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '../../logo/trans-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "topindex.html"}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Include 3 levels of headers in PDF ToC
'preamble': '\setcounter{tocdepth}{2}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MicroPython.tex', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'micropython', 'MicroPython Documentation',
['Damien P. George, Paul Sokolovsky, and contributors'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MicroPython', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'MicroPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3.5', None)}
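# Usage sketch (not part of the original configuration): with the mapping above,
# reST sources can cross-reference the CPython docs directly, e.g.
# :class:`python:dict` or :func:`python:len`.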
|
py | 1a3c198c9a2d1d83413145ca796a5644a76edb81 | # -*- encoding: utf-8 -*-
import mock
import unittest
from dateutil import parser as dateparser
import pytz
from postcode_api.downloaders.ftp import FtpDownloader
class FTPDownloaderTest(unittest.TestCase):
def test_login(self):
with mock.patch('ftplib.FTP') as ftp_class:
ftp = ftp_class.return_value
credentials = {
'host': 'example.com',
'user': 'ftpuser',
'pass': 'ftppass',
'path': 'ftppath'}
downloader = FtpDownloader(
credentials['host'],
credentials['user'],
credentials['pass'],
path=credentials['path'])
downloader.ftp
            ftp_class.assert_called_with(credentials['host'])
            ftp.login.assert_called_with(
                credentials['user'], credentials['pass'])
            ftp.cwd.assert_called_with(credentials['path'])
def mock_ftp_dir(self, pattern, num_files=10):
def mock_list(p, fn):
self.assertEqual(pattern, p)
for i in range(num_files):
fn('-rw-r--r-- 1 ftpuser ftp 1024 27 Apr 10:26 %s.zip' % i)
return mock_list
def test_list_files(self):
num_files = 10
pattern = '*.zip'
with mock.patch('ftplib.FTP') as ftp_class:
ftp = ftp_class.return_value
ftp.dir.side_effect = self.mock_ftp_dir(pattern, num_files)
downloader = FtpDownloader('host', 'user', 'pass')
files = downloader.list(pattern)
            self.assertEqual(pattern, ftp.dir.call_args[0][0])
self.assertEqual(num_files, len(files))
for i in range(num_files):
self.assertEqual('%s.zip' % i, files[i])
def mock_open(self):
open_name = 'postcode_api.downloaders.ftp.open'
self.mocked_file = mock.mock_open()
return mock.patch(open_name, self.mocked_file, create=True)
def test_download(self):
with mock.patch('ftplib.FTP') as ftp_class, \
mock.patch('tempfile.mkdtemp') as mkdtemp, \
self.mock_open() as mock_open:
test_dir = 'foo'
ftp = ftp_class.return_value
pattern = '*.zip'
num_files = 10
ftp.dir.side_effect = self.mock_ftp_dir(pattern, num_files)
mkdtemp.return_value = test_dir
downloader = FtpDownloader('host', 'user', 'pass')
files = downloader.download(pattern=pattern, dest_dir=test_dir)
filename = lambda i: '{0}/{1}.zip'.format(test_dir, i)
mock_open.assert_has_calls(
[mock.call(filename(i), 'wb') for i in range(num_files)],
any_order=True)
self.assertEqual([filename(i) for i in range(num_files)], files)
def test_download_file(self):
with mock.patch('ftplib.FTP') as ftp_class, \
mock.patch('postcode_api.downloaders.ftp.log'), \
self.mock_open() as mock_open:
src = 'foo'
dest = '/tmp/foo'
content_length = 1000
def mock_ftp_retr(cmd, fn):
self.assertEqual('RETR %s' % src, cmd)
for data in 'a' * content_length:
fn(data)
ftp = ftp_class.return_value
ftp.retrbinary.side_effect = mock_ftp_retr
downloader = FtpDownloader('host', 'user', 'pass')
downloader._headers[src] = {'content-length': content_length}
downloader.download_file(src, dest)
self.mocked_file().write.assert_has_calls(
[mock.call('a')] * content_length)
def test_last_modified(self):
dt = pytz.UTC.localize(dateparser.parse('27 Apr 10:26'))
with mock.patch('ftplib.FTP') as ftp_class:
ftp = ftp_class.return_value
ftp.dir.side_effect = self.mock_ftp_dir('*.zip', 10)
downloader = FtpDownloader('host', 'user', 'pass')
downloader.list('*.zip')
last_modified = downloader.last_modified('1.zip')
self.assertEqual(dt, last_modified)
def test_find_dir_with_latest_file_matching(self):
mock_files = ['dir-a/file-2014-05-06', 'dir-b/file-2015-06-07', 'dir-c/file-2015-10-29']
with mock.patch('ftplib.FTP'):
downloader = FtpDownloader('host', 'user', 'pass')
downloader.list = mock.MagicMock(return_value=mock_files)
self.assertEqual( downloader.find_dir_with_latest_file_matching('*/file-*'), 'dir-c' )
|
py | 1a3c1a9e37861d100f5e6460bacb269ee26409a2 | from typing import List
from anubis.models import AssignmentRepo, Assignment
from anubis.utils.data import is_debug
from anubis.utils.cache import cache
@cache.memoize(timeout=10, source_check=True, unless=is_debug)
def get_repos(user_id: str):
repos: List[AssignmentRepo] = (
AssignmentRepo.query.join(Assignment)
.filter(AssignmentRepo.owner_id == user_id)
.order_by(Assignment.release_date.desc())
.all()
)
return [repo.data for repo in repos]
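# Usage sketch (hypothetical names: `current_user` and `success_response` are
# not defined in this module):
#
#     repos = get_repos(current_user.id)
#     return success_response({"repos": repos})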
|
py | 1a3c1aa6f7e0131bb2b271a0c5eb44ca43ec5d71 | from concurrent.futures import Future
from functools import wraps
from typing import Callable, Optional, TypeVar, Union
CallableReturnsInt = Callable[..., int]
IntOrBool = TypeVar(
"IntOrBool",
int,
bool,
)
CallableReturnsIntOrBool = TypeVar(
"CallableReturnsIntOrBool",
Callable[..., int],
Callable[..., bool],
)
IntCompatible = TypeVar(
"IntCompatible",
int,
Callable[..., int],
Future,
)
BoolCompatible = TypeVar(
"BoolCompatible",
int,
bool,
Callable[..., int],
Callable[..., bool],
Future,
)
class KiwoomOpenApiPlusError(Exception):
def __init__(self, message: Optional[str] = None):
if message is not None:
super().__init__(message)
else:
super().__init__()
self._message = message
@property
def message(self):
return self._message
@classmethod
def try_or_raise(
cls,
arg: IntCompatible,
message: Optional[str] = None,
except_callback: Optional[Callable] = None,
) -> IntCompatible:
return KiwoomOpenApiPlusNegativeReturnCodeError.try_or_raise(
arg, message, except_callback
)
@classmethod
def try_or_raise_boolean(
cls,
arg: BoolCompatible,
message: str,
except_callback: Optional[Callable] = None,
) -> BoolCompatible:
return KiwoomOpenApiPlusBooleanReturnCodeError.try_or_raise(
arg, message, except_callback
)
@classmethod
def get_error_message_by_code(cls, code: int, default: Optional[str] = None):
return KiwoomOpenApiPlusNegativeReturnCodeError.get_error_message_by_code(
code, default
)
class KiwoomOpenApiPlusNegativeReturnCodeError(KiwoomOpenApiPlusError):
OP_ERR_NONE = 0
OP_ERR_FAIL = -10
OP_ERR_COND_NOTFOUND = -11
OP_ERR_COND_MISMATCH = -12
OP_ERR_COND_OVERFLOW = -13
OP_ERR_TR_FAIL = -22
OP_ERR_LOGIN = -100
OP_ERR_CONNECT = -101
OP_ERR_VERSION = -102
OP_ERR_FIREWALL = -103
OP_ERR_MEMORY = -104
OP_ERR_INPUT = -105
OP_ERR_SOCKET_CLOSED = -106
OP_ERR_SISE_OVERFLOW = -200
OP_ERR_RQ_STRUCT_FAIL = -201
OP_ERR_RQ_STRING_FAIL = -202
OP_ERR_NO_DATA = -203
OP_ERR_OVER_MAX_DATA = -204
OP_ERR_DATA_RCV_FAIL = -205
OP_ERR_OVER_MAX_FID = -206
OP_ERR_REAL_CANCEL = -207
OP_ERR_ORD_WRONG_INPUT = -300
OP_ERR_ORD_WRONG_ACCTNO = -301
OP_ERR_OTHER_ACC_USE = -302
OP_ERR_MIS_2BILL_EXC = -303
OP_ERR_MIS_5BILL_EXC = -304
OP_ERR_MIS_1PER_EXC = -305
OP_ERR_MIS_3PER_EXC = -306
OP_ERR_SEND_FAIL = -307
OP_ERR_ORD_OVERFLOW = -308
OP_ERR_ORD_OVERFLOW2 = -311
OP_ERR_MIS_300CNT_EXC = -309
OP_ERR_MIS_500CNT_EXC = -310
OP_ERR_ORD_WRONG_ACCTINFO = -340
OP_ERR_ORD_SYMCODE_EMPTY = -500
MSG_ERR_NONE = "정상처리"
MSG_ERR_FAIL = "실패"
MSG_ERR_COND_NOTFOUND = "조건번호 없음"
MSG_ERR_COND_MISMATCH = "조건번호와 조건식 틀림"
MSG_ERR_COND_OVERFLOW = "조건검색 조회요청 초과"
MSG_ERR_TR_FAIL = "전문 처리 실패"
MSG_ERR_LOGIN = "사용자정보 교환 실패"
MSG_ERR_CONNECT = "서버접속 실패"
MSG_ERR_VERSION = "버전처리 실패"
MSG_ERR_FIREWALL = "개인방화벽 실패"
MSG_ERR_MEMORY = "메모리보호 실패"
MSG_ERR_INPUT = "함수입력값 오류"
MSG_ERR_SOCKET_CLOSED = "통신 연결종료"
MSG_ERR_SISE_OVERFLOW = "시세조회 과부하"
MSG_ERR_RQ_STRUCT_FAIL = "전문작성 초기화 실패"
MSG_ERR_RQ_STRING_FAIL = "전문작성 입력값 오류"
MSG_ERR_NO_DATA = "데이터 없음"
MSG_ERR_OVER_MAX_DATA = "조회 가능한 종목수 초과"
MSG_ERR_DATA_RCV_FAIL = "데이터수신 실패"
MSG_ERR_OVER_MAX_FID = "조회 가능한 FID수 초과"
MSG_ERR_REAL_CANCEL = "실시간 해제 오류"
MSG_ERR_ORD_WRONG_INPUT = "입력값 오류"
MSG_ERR_ORD_WRONG_ACCTNO = "계좌 비밀번호 없음"
MSG_ERR_OTHER_ACC_USE = "타인계좌사용 오류"
MSG_ERR_MIS_2BILL_EXC = "주문가격이 20억원을 초과"
MSG_ERR_MIS_5BILL_EXC = "주문가격이 50억원을 초과"
MSG_ERR_MIS_1PER_EXC = "주문수량이 총발행주수의 1%초과오류"
MSG_ERR_MIS_3PER_EXC = "주문수량이 총발행주수의 3%초과오류"
MSG_ERR_SEND_FAIL = "주문전송 실패"
MSG_ERR_ORD_OVERFLOW = "주문전송 과부하"
MSG_ERR_ORD_OVERFLOW2 = "주문전송 과부하"
MSG_ERR_MIS_300CNT_EXC = "주문수량 300계약 초과"
MSG_ERR_MIS_500CNT_EXC = "주문수량 500계약 초과"
MSG_ERR_ORD_WRONG_ACCTINFO = "계좌정보없음"
MSG_ERR_ORD_SYMCODE_EMPTY = "종목코드없음"
ERROR_MESSAGE_BY_CODE = {
OP_ERR_NONE: MSG_ERR_NONE,
OP_ERR_FAIL: MSG_ERR_FAIL,
OP_ERR_COND_NOTFOUND: MSG_ERR_COND_NOTFOUND,
OP_ERR_COND_MISMATCH: MSG_ERR_COND_MISMATCH,
OP_ERR_COND_OVERFLOW: MSG_ERR_COND_OVERFLOW,
OP_ERR_TR_FAIL: MSG_ERR_TR_FAIL,
OP_ERR_LOGIN: MSG_ERR_LOGIN,
OP_ERR_CONNECT: MSG_ERR_CONNECT,
OP_ERR_VERSION: MSG_ERR_VERSION,
OP_ERR_FIREWALL: MSG_ERR_FIREWALL,
OP_ERR_MEMORY: MSG_ERR_MEMORY,
OP_ERR_INPUT: MSG_ERR_INPUT,
OP_ERR_SOCKET_CLOSED: MSG_ERR_SOCKET_CLOSED,
OP_ERR_SISE_OVERFLOW: MSG_ERR_SISE_OVERFLOW,
OP_ERR_RQ_STRUCT_FAIL: MSG_ERR_RQ_STRUCT_FAIL,
OP_ERR_RQ_STRING_FAIL: MSG_ERR_RQ_STRING_FAIL,
OP_ERR_NO_DATA: MSG_ERR_NO_DATA,
OP_ERR_OVER_MAX_DATA: MSG_ERR_OVER_MAX_DATA,
OP_ERR_DATA_RCV_FAIL: MSG_ERR_DATA_RCV_FAIL,
OP_ERR_OVER_MAX_FID: MSG_ERR_OVER_MAX_FID,
OP_ERR_REAL_CANCEL: MSG_ERR_REAL_CANCEL,
OP_ERR_ORD_WRONG_INPUT: MSG_ERR_ORD_WRONG_INPUT,
OP_ERR_ORD_WRONG_ACCTNO: MSG_ERR_ORD_WRONG_ACCTNO,
OP_ERR_OTHER_ACC_USE: MSG_ERR_OTHER_ACC_USE,
OP_ERR_MIS_2BILL_EXC: MSG_ERR_MIS_2BILL_EXC,
OP_ERR_MIS_5BILL_EXC: MSG_ERR_MIS_5BILL_EXC,
OP_ERR_MIS_1PER_EXC: MSG_ERR_MIS_1PER_EXC,
OP_ERR_MIS_3PER_EXC: MSG_ERR_MIS_3PER_EXC,
OP_ERR_SEND_FAIL: MSG_ERR_SEND_FAIL,
OP_ERR_ORD_OVERFLOW: MSG_ERR_ORD_OVERFLOW,
OP_ERR_ORD_OVERFLOW2: MSG_ERR_ORD_OVERFLOW2,
OP_ERR_MIS_300CNT_EXC: MSG_ERR_MIS_300CNT_EXC,
OP_ERR_MIS_500CNT_EXC: MSG_ERR_MIS_500CNT_EXC,
OP_ERR_ORD_WRONG_ACCTINFO: MSG_ERR_ORD_WRONG_ACCTINFO,
OP_ERR_ORD_SYMCODE_EMPTY: MSG_ERR_ORD_SYMCODE_EMPTY,
}
@classmethod
def get_error_message_by_code(cls, code: int, default: Optional[str] = None):
return cls.ERROR_MESSAGE_BY_CODE.get(code, default)
@classmethod
def check_code_or_raise(cls, code: int):
if code < 0:
raise cls(code)
return code
@classmethod
def wrap_to_check_code_or_raise(
cls, func: CallableReturnsInt
) -> CallableReturnsInt:
@wraps(func)
def wrapper(*args, **kwargs):
return cls.check_code_or_raise(func(*args, **kwargs))
return wrapper
@classmethod
def try_or_raise(
cls,
arg: IntCompatible,
message: Optional[str] = None,
except_callback: Optional[Callable] = None,
) -> IntCompatible:
if isinstance(arg, Future):
def callback(future):
exc = future.exception()
if exc:
if except_callback:
except_callback(exc)
else:
raise exc
result = future.result()
try:
cls.try_or_raise(result, message)
except cls as e:
if except_callback:
except_callback(e)
else:
raise
arg.add_done_callback(callback)
return arg
elif isinstance(arg, int):
return cls.check_code_or_raise(arg)
elif callable(arg):
return cls.wrap_to_check_code_or_raise(arg)
else:
raise TypeError(
"Expected 'int', 'callable' or 'Future' but %s found" % type(arg)
)
def __init__(self, code: int, message: Optional[str] = None):
if message is None:
message = self.get_error_message_by_code(code)
super().__init__(message)
self._code = code
self._message = message
def __str__(self):
return self._message
def __repr__(self):
return "{}({!r}, {!r})".format(
self.__class__.__name__, self._code, self._message
)
@property
def code(self):
return self._code
class KiwoomOpenApiPlusBooleanReturnCodeError(KiwoomOpenApiPlusError):
OP_ERR_SUCCESS = 1
OP_ERR_FAILURE = 0
@classmethod
def check_code_or_raise(
cls, code: IntOrBool, message: Optional[str] = None
) -> IntOrBool:
if not code:
raise cls(code, message)
return code
@classmethod
def wrap_to_check_code_or_raise(
cls, func: CallableReturnsIntOrBool, message: Optional[str] = None
) -> CallableReturnsIntOrBool:
@wraps(func)
def wrapper(*args, **kwargs):
return cls.check_code_or_raise(func(*args, **kwargs), message)
return wrapper
@classmethod
def try_or_raise(
cls,
arg: BoolCompatible,
message: Optional[str] = None,
except_callback: Optional[Callable] = None,
) -> BoolCompatible:
if isinstance(arg, Future):
def callback(future):
exc = future.exception()
if exc:
if except_callback:
except_callback(exc)
else:
raise exc
result = future.result()
try:
cls.try_or_raise(result, message)
except cls as e:
if except_callback:
except_callback(e)
else:
raise
arg.add_done_callback(callback)
return arg
elif isinstance(arg, (int, bool)):
return cls.check_code_or_raise(arg, message)
elif callable(arg):
return cls.wrap_to_check_code_or_raise(arg, message)
else:
raise TypeError(
"Expected 'int', 'bool', 'callable' or 'Future' but %s found"
% type(arg)
)
def __init__(self, code: Union[int, bool], message: Optional[str] = None):
super().__init__(message)
self._code = code
self._message = message
def __str__(self):
if self._message:
return self._message
else:
return self.__repr__()
def __repr__(self):
return "{}({!r}, {!r})".format(
self.__class__.__name__, self._code, self._message
)
@property
def code(self):
return self._code
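# Usage sketch (`control.CommConnect` stands in for any OpenAPI call that
# returns a negative code on failure; it is not defined in this module):
#
#     ret = control.CommConnect()
#     KiwoomOpenApiPlusError.try_or_raise(ret, "failed to connect")
#
# Passing a callable instead returns a wrapped function that raises whenever
# the wrapped call yields a negative return code:
#
#     safe_connect = KiwoomOpenApiPlusError.try_or_raise(control.CommConnect)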
|
py | 1a3c1ab39b2fb6dcfed5713171d58efd5e45fc0f | import pytest
from mixer.backend.django import mixer
from .. import forms
pytestmark = pytest.mark.django_db
class TestPostForm:
def test_form(self):
form = forms.PostForm(data={})
assert form.is_valid() is False, ('Should be invalid if no data is given')
data = {'body': 'Hello'}
form = forms.PostForm(data=data)
assert form.is_valid() is False, ('Should be invalid if body text is less than 10 characters')
assert 'body' in form.errors, 'Should return field error for `body`'
data = {'body': 'Hello World!'}
form = forms.PostForm(data=data)
assert form.is_valid() is True, 'Should be valid when data is given' |
py | 1a3c1b0160a300fc5eee668fc40355f34a4ec6e6 | """
This file offers the methods to automatically retrieve the graph Janthinobacterium sp. Marseille.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def JanthinobacteriumSpMarseille(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Janthinobacterium sp. Marseille graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Janthinobacterium sp. Marseille graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="JanthinobacteriumSpMarseille",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
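# Usage sketch (not part of the generated module):
#
#     graph = JanthinobacteriumSpMarseille(directed=False)
#     print(graph)  # the Graph repr summarizes the retrieved network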
|
py | 1a3c1b719706f6a3980e91cf4d5e345ea71a5bcb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Measure) on 2019-01-25.
# 2019, SMART Health IT.
##
from . import domainresource
class Measure(domainresource.DomainResource):
""" A quality measure definition.
The Measure resource provides the definition of a quality measure.
"""
resource_type = "Measure"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.approvalDate = None
""" When the measure was approved by publisher.
Type `FHIRDate` (represented as `str` in JSON). """
self.author = None
""" Who authored the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.clinicalRecommendationStatement = None
""" Summary of clinical guidelines.
Type `str`. """
self.compositeScoring = None
""" opportunity | all-or-nothing | linear | weighted.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.contact = None
""" Contact details for the publisher.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.date = None
""" Date last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.definition = None
""" Defined terms used in the measure documentation.
List of `str` items. """
self.description = None
""" Natural language description of the measure.
Type `str`. """
self.disclaimer = None
""" Disclaimer for use of the measure or its referenced content.
Type `str`. """
self.editor = None
""" Who edited the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.effectivePeriod = None
""" When the measure is expected to be used.
Type `Period` (represented as `dict` in JSON). """
self.endorser = None
""" Who endorsed the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.experimental = None
""" For testing purposes, not real usage.
Type `bool`. """
self.group = None
""" Population criteria group.
List of `MeasureGroup` items (represented as `dict` in JSON). """
self.guidance = None
""" Additional guidance for implementers.
Type `str`. """
self.identifier = None
""" Additional identifier for the measure.
List of `Identifier` items (represented as `dict` in JSON). """
self.improvementNotation = None
""" increase | decrease.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.jurisdiction = None
""" Intended jurisdiction for measure (if applicable).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.lastReviewDate = None
""" When the measure was last reviewed.
Type `FHIRDate` (represented as `str` in JSON). """
self.library = None
""" Logic used by the measure.
List of `str` items. """
self.name = None
""" Name for this measure (computer friendly).
Type `str`. """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.purpose = None
""" Why this measure is defined.
Type `str`. """
self.rateAggregation = None
""" How is rate aggregation performed for this measure.
Type `str`. """
self.rationale = None
""" Detailed description of why the measure exists.
Type `str`. """
self.relatedArtifact = None
""" Additional documentation, citations, etc..
List of `RelatedArtifact` items (represented as `dict` in JSON). """
self.reviewer = None
""" Who reviewed the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.riskAdjustment = None
""" How risk adjustment is applied for this measure.
Type `str`. """
self.scoring = None
""" proportion | ratio | continuous-variable | cohort.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.status = None
""" draft | active | retired | unknown.
Type `str`. """
self.subjectCodeableConcept = None
""" E.g. Patient, Practitioner, RelatedPerson, Organization, Location,
Device.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.subjectReference = None
""" E.g. Patient, Practitioner, RelatedPerson, Organization, Location,
Device.
Type `FHIRReference` (represented as `dict` in JSON). """
self.subtitle = None
""" Subordinate title of the measure.
Type `str`. """
self.supplementalData = None
""" What other data should be reported with the measure.
List of `MeasureSupplementalData` items (represented as `dict` in JSON). """
self.title = None
""" Name for this measure (human friendly).
Type `str`. """
self.topic = None
""" The category of the measure, such as Education, Treatment,
Assessment, etc..
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.type = None
""" process | outcome | structure | patient-reported-outcome |
composite.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.url = None
""" Canonical identifier for this measure, represented as a URI
(globally unique).
Type `str`. """
self.usage = None
""" Describes the clinical usage of the measure.
Type `str`. """
self.useContext = None
""" The context that the content is intended to support.
List of `UsageContext` items (represented as `dict` in JSON). """
self.version = None
""" Business version of the measure.
Type `str`. """
super(Measure, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(Measure, self).elementProperties()
js.extend([
("approvalDate", "approvalDate", fhirdate.FHIRDate, False, None, False),
("author", "author", contactdetail.ContactDetail, True, None, False),
("clinicalRecommendationStatement", "clinicalRecommendationStatement", str, False, None, False),
("compositeScoring", "compositeScoring", codeableconcept.CodeableConcept, False, None, False),
("contact", "contact", contactdetail.ContactDetail, True, None, False),
("copyright", "copyright", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("definition", "definition", str, True, None, False),
("description", "description", str, False, None, False),
("disclaimer", "disclaimer", str, False, None, False),
("editor", "editor", contactdetail.ContactDetail, True, None, False),
("effectivePeriod", "effectivePeriod", period.Period, False, None, False),
("endorser", "endorser", contactdetail.ContactDetail, True, None, False),
("experimental", "experimental", bool, False, None, False),
("group", "group", MeasureGroup, True, None, False),
("guidance", "guidance", str, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("improvementNotation", "improvementNotation", codeableconcept.CodeableConcept, False, None, False),
("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
("lastReviewDate", "lastReviewDate", fhirdate.FHIRDate, False, None, False),
("library", "library", str, True, None, False),
("name", "name", str, False, None, False),
("publisher", "publisher", str, False, None, False),
("purpose", "purpose", str, False, None, False),
("rateAggregation", "rateAggregation", str, False, None, False),
("rationale", "rationale", str, False, None, False),
("relatedArtifact", "relatedArtifact", relatedartifact.RelatedArtifact, True, None, False),
("reviewer", "reviewer", contactdetail.ContactDetail, True, None, False),
("riskAdjustment", "riskAdjustment", str, False, None, False),
("scoring", "scoring", codeableconcept.CodeableConcept, False, None, False),
("status", "status", str, False, None, True),
("subjectCodeableConcept", "subjectCodeableConcept", codeableconcept.CodeableConcept, False, "subject", False),
("subjectReference", "subjectReference", fhirreference.FHIRReference, False, "subject", False),
("subtitle", "subtitle", str, False, None, False),
("supplementalData", "supplementalData", MeasureSupplementalData, True, None, False),
("title", "title", str, False, None, False),
("topic", "topic", codeableconcept.CodeableConcept, True, None, False),
("type", "type", codeableconcept.CodeableConcept, True, None, False),
("url", "url", str, False, None, False),
("usage", "usage", str, False, None, False),
("useContext", "useContext", usagecontext.UsageContext, True, None, False),
("version", "version", str, False, None, False),
])
return js
from . import backboneelement
class MeasureGroup(backboneelement.BackboneElement):
""" Population criteria group.
A group of population criteria for the measure.
"""
resource_type = "MeasureGroup"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" Meaning of the group.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.description = None
""" Summary description.
Type `str`. """
self.population = None
""" Population criteria.
List of `MeasureGroupPopulation` items (represented as `dict` in JSON). """
self.stratifier = None
""" Stratifier criteria for the measure.
List of `MeasureGroupStratifier` items (represented as `dict` in JSON). """
super(MeasureGroup, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(MeasureGroup, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("description", "description", str, False, None, False),
("population", "population", MeasureGroupPopulation, True, None, False),
("stratifier", "stratifier", MeasureGroupStratifier, True, None, False),
])
return js
class MeasureGroupPopulation(backboneelement.BackboneElement):
""" Population criteria.
A population criteria for the measure.
"""
resource_type = "MeasureGroupPopulation"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" initial-population | numerator | numerator-exclusion | denominator
| denominator-exclusion | denominator-exception | measure-
population | measure-population-exclusion | measure-observation.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.criteria = None
""" The criteria that defines this population.
Type `Expression` (represented as `dict` in JSON). """
self.description = None
""" The human readable description of this population criteria.
Type `str`. """
super(MeasureGroupPopulation, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(MeasureGroupPopulation, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("criteria", "criteria", expression.Expression, False, None, True),
("description", "description", str, False, None, False),
])
return js
class MeasureGroupStratifier(backboneelement.BackboneElement):
""" Stratifier criteria for the measure.
The stratifier criteria for the measure report, specified as either the
name of a valid CQL expression defined within a referenced library or a
valid FHIR Resource Path.
"""
resource_type = "MeasureGroupStratifier"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" Meaning of the stratifier.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.component = None
""" Stratifier criteria component for the measure.
List of `MeasureGroupStratifierComponent` items (represented as `dict` in JSON). """
self.criteria = None
""" How the measure should be stratified.
Type `Expression` (represented as `dict` in JSON). """
self.description = None
""" The human readable description of this stratifier.
Type `str`. """
super(MeasureGroupStratifier, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(MeasureGroupStratifier, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("component", "component", MeasureGroupStratifierComponent, True, None, False),
("criteria", "criteria", expression.Expression, False, None, False),
("description", "description", str, False, None, False),
])
return js
class MeasureGroupStratifierComponent(backboneelement.BackboneElement):
""" Stratifier criteria component for the measure.
A component of the stratifier criteria for the measure report, specified as
either the name of a valid CQL expression defined within a referenced
library or a valid FHIR Resource Path.
"""
resource_type = "MeasureGroupStratifierComponent"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" Meaning of the stratifier component.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.criteria = None
""" Component of how the measure should be stratified.
Type `Expression` (represented as `dict` in JSON). """
self.description = None
""" The human readable description of this stratifier component.
Type `str`. """
super(MeasureGroupStratifierComponent, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(MeasureGroupStratifierComponent, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("criteria", "criteria", expression.Expression, False, None, True),
("description", "description", str, False, None, False),
])
return js
class MeasureSupplementalData(backboneelement.BackboneElement):
""" What other data should be reported with the measure.
The supplemental data criteria for the measure report, specified as either
the name of a valid CQL expression within a referenced library, or a valid
FHIR Resource Path.
"""
resource_type = "MeasureSupplementalData"
def __init__(self, jsondict=None, strict=True, **kwargs):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" Meaning of the supplemental data.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.criteria = None
""" Expression describing additional data to be reported.
Type `Expression` (represented as `dict` in JSON). """
self.description = None
""" The human readable description of this supplemental data.
Type `str`. """
self.usage = None
""" supplemental-data | risk-adjustment-factor.
List of `CodeableConcept` items (represented as `dict` in JSON). """
super(MeasureSupplementalData, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(MeasureSupplementalData, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("criteria", "criteria", expression.Expression, False, None, True),
("description", "description", str, False, None, False),
("usage", "usage", codeableconcept.CodeableConcept, True, None, False),
])
return js
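# Usage sketch (the dict below is a made-up minimal resource, not a spec
# example): a Measure can be built from a JSON dictionary and read back.
#
#     measure = Measure({"resourceType": "Measure", "status": "draft",
#                        "name": "ExampleMeasure"})
#     measure.status     # "draft"
#     measure.as_json()  # round-trips to a dict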
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import expression
except ImportError:
expression = sys.modules[__package__ + '.expression']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import relatedartifact
except ImportError:
relatedartifact = sys.modules[__package__ + '.relatedartifact']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext'] |
py | 1a3c1c6b6f9b800288e103a9cc6f9675e6e518a3 | import numpy as np
#custom function
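# note: equivalent to np.log1p(x), whose inverse is np.expm1; e.g. (values
# assumed for illustration) log_transform(np.array([0., 1., 9.])) gives
# [0., log(2), log(10)], and np.expm1 maps them back.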
def log_transform(x):
return np.log(x + 1) |
gyp | 1a3c1d6d684f8ca6448d21adbb48f67e0f6b7403 | {
"targets": [{
"target_name": "node-addon-sqlite-backup",
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions" ],
"sources": [
"cppsrc/modules/runBackUpWorker.cc",
"cppsrc/modules/backUpAsyncWorker.cc",
"cppsrc/modules/sqlitebackup.c",
"cppsrc/modules/compress.c"
],
'include_dirs': [
"<!@(node -p \"require('node-addon-api').include\")"
],
'libraries': [],
'dependencies': [
"<!(node -p \"require('node-addon-api').gyp\")"
],
'defines': [ 'NAPI_DISABLE_CPP_EXCEPTIONS' ]
}]
} |
py | 1a3c1e0098ac95e3870a26cd6e38ebce4c6a8339 | import numpy as np
import kernel_py as kp
import scipy.stats as st
import matplotlib.pyplot as plt
import GPy as gpy
from scipy import linalg
import fipy as fp
#np.random.seed(31051985)
def f0(y):
assert y.shape[0] == 3
f = np.zeros((3,1))
f[0,0] = y[1] * y[2]
f[1,0] = y[0] * y[2]
f[2,0] = - 2. * y[0] * y[1]
return f
def f1(y):
assert y.shape[0] == 3
f = np.zeros((3,1))
f[0,0] = y[0] * y[2]
f[1,0] = - y[1] * y[2]
f[2,0] = - y[0]**2 + y[1]**2
return f
#def f(y0, t):
# kappa = np.array([1., 1., -2.])
# r = ode(f0).set_integrator('dopri5')
# r.set_initial_value(y0, 0).set_f_params(kappa)
## y_m = [y0[None, :]]
# for tt in t[1:]:
# r.integrate(tt)
# y_m.append(r.y[None, :])
# y_m = np.vstack(y_m)
# return y_m#.flatten()
def RK4(y0, T, N):
h = T / (N+1)
y = np.zeros((3, N+1))
y[:,0] = y0
for i in range(1,N+1):
k1 = f1(y[:,i-1])
k2 = f1(y[:,i-1] + h*k1.flatten() / 2)
k3 = f1(y[:,i-1] + h*k2.flatten() / 2)
k4 = f1(y[:,i-1] + h*k3.flatten())
y[:,i] = y[:,i-1] + h * (k1 + 2*k2 + 2*k3 + k4).flatten() / 6.
return y
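# Quick sanity check (a sketch, not part of the original script): RK4 integrates
# dy/dt = f1(y) with fixed step h = T/(N+1), so the returned array has shape
# (3, N+1) and its last column is the state at t ~ T.
#
#     traj = RK4(np.array([1.0, 0.1, 0.5]), 10.0, 1000)
#     assert traj.shape == (3, 1001)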
X = 2 * st.uniform.rvs(size = (4,2)) - 1.
Y = np.zeros((X.shape[0],1))
N = 1000
T = 10.
for i in range(X.shape[0]):
y0 = np.zeros(3)
y0[0] = 1.
y0[1] = 0.1 * X[i, 0]
y0[2] = X[i,1]
Y[i,0] = RK4(y0, T, N)[1,-1]# + 0.1 * np.random.normal(size = (3,1))
kern = kp.RBF(2, 1, 1)
ker = gpy.kern.RBF(2, 1, 1)
m = gpy.models.GPRegression(X, Y, ker)
gp = kp.GP(X, Y, kern)
#x = np.linspace(-4., 4., 100).reshape(100,1)
x = np.linspace(-1, 1., 50)
y = np.linspace(-1, 1., 50)
xx, yy = np.meshgrid(x, y)
X_test = np.hstack([xx.flatten().reshape(2500,1), yy.flatten().reshape(2500,1)])
f, var = gp.predict(X_test)
m.optimize(messages = True)
print '-' * 30
print m.kern.lengthscale[0], m.kern.variance[0], m.likelihood.gaussian_variance()[0]
print '-' * 30
#m.plot()
#plt.show()
N_quad = 300
#print gp._kern._iso
gp.optimize()
f, var = gp.predict(X_test)
fig1 = plt.figure()
ax2 = fig1.add_subplot(111)
ax2.contourf(xx, yy, f.reshape(50,50), 30)
#ax2.fill_between(x[:,0], f - 2*np.sqrt(np.diag(var)), f + 2*np.sqrt(np.diag(var)), alpha = 0.5)
ax2.plot(X[:,0], X[:,1], 'wo')
#plt.colorbar()
plt.show()
#
sig = np.zeros(301)
sig_noise = np.zeros(301)
ell = np.zeros(301)
sig[0] = gp._kern._var
sig_noise[0] = gp._noise_var
ell[0] = gp._kern._lengthscale[0]
for i in range(300):
x_new = gp.argmaxvar()
print 'New design :' + str(x_new)
print x_new.shape
y0 = np.zeros(3)
y0[0] = 1.
y0[1] = 0.1 * x_new[0]
y0[2] = x_new[1]
y_new = RK4(y0, T, N)[1,-1].reshape((1,1))
X = np.vstack([X, x_new])
Y = np.vstack([Y, y_new])
gp_new = kp.GP(X, Y, kern)
gp_new.optimize()
#gp_new._kern._lengthscale
sig[i+1] = gp_new._kern._var
sig_noise[i+1] = gp_new._noise_var
ell[i+1] = gp_new._kern._lengthscale[0]
gp = gp_new
#print gp._log_marginal_likelihood
#print m._log_marginal_likelihood
#x = np.linspace(np.min([x.min(), x_new[0]]), np.max([x.max(), x_new[0]]), 100).reshape(100,1)
f, var = gp_new.predict(X_test)
#fig1 = plt.figure(figsize = (11,5))
#ax1 = fig1.add_subplot(121)
#ax1.contourf(xx, yy, f.reshape(50,50), 30)
#ax1.plot(X[:,0], X[:,1], 'wo')
#ax2 = fig1.add_subplot(122)
#ax2.contourf(xx, yy, np.diag(var).reshape(50,50), 30)
#ax2.plot(X[:,0], X[:,1], 'wo')
#plt.show()
if i % 100 == 0:
np.save('sig_batch_'+str(i)+'.npy', sig)
np.save('ell_batch_'+str(i)+'.npy', ell)
np.save('sig_noise_batch_'+str(i)+'.npy', sig_noise)
np.save('X_batch_'+str(i)+'.npy', X)
np.save('Y_batch_'+str(i)+'.npy', Y)
np.save('sig.npy', sig)
np.save('sig_noise.npy', sig_noise)
np.save('ell.npy', ell)
np.save('X.npy', X)
np.save('Y.npy', Y)
#print gp.log_marginal_likelihood(np.array([m.kern.lengthscale[0], m.kern.variance[0], m.likelihood.gaussian_variance()[0]]))
|
py | 1a3c1f1411c3865a82a0b2c7689f5f20a9221bd4 | # coding: utf-8
"""
NiFi Registry REST API
The REST API provides an interface to a registry with operations for saving, versioning, reading NiFi flows and components.
OpenAPI spec version: 0.2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class PoliciesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
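    # Usage sketch (the host URL is a placeholder; how Configuration is shared
    # depends on the generated client):
    #
    #     Configuration().host = 'http://localhost:18080/nifi-registry-api'
    #     policies = PoliciesApi().get_access_policies()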
def create_access_policy(self, body, **kwargs):
"""
Creates an access policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_access_policy(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param AccessPolicy body: The access policy configuration details. (required)
:return: AccessPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_access_policy_with_http_info(body, **kwargs)
else:
(data) = self.create_access_policy_with_http_info(body, **kwargs)
return data
def create_access_policy_with_http_info(self, body, **kwargs):
"""
Creates an access policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_access_policy_with_http_info(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param AccessPolicy body: The access policy configuration details. (required)
:return: AccessPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_access_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_access_policy`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/policies', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccessPolicy',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_access_policies(self, **kwargs):
"""
Gets all access policies
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_access_policies(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[AccessPolicy]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_access_policies_with_http_info(**kwargs)
else:
(data) = self.get_access_policies_with_http_info(**kwargs)
return data
def get_access_policies_with_http_info(self, **kwargs):
"""
Gets all access policies
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_access_policies_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[AccessPolicy]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_access_policies" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/policies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[AccessPolicy]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_access_policy(self, id, **kwargs):
"""
Gets an access policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_access_policy(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The access policy id. (required)
:return: AccessPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_access_policy_with_http_info(id, **kwargs)
else:
(data) = self.get_access_policy_with_http_info(id, **kwargs)
return data
def get_access_policy_with_http_info(self, id, **kwargs):
"""
Gets an access policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_access_policy_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The access policy id. (required)
:return: AccessPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_access_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_access_policy`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/policies/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccessPolicy',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_access_policy_for_resource(self, action, resource, **kwargs):
"""
Gets an access policy for the specified action and resource
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_access_policy_for_resource(action, resource, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str action: The request action. (required)
:param str resource: The resource of the policy. (required)
:return: AccessPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_access_policy_for_resource_with_http_info(action, resource, **kwargs)
else:
(data) = self.get_access_policy_for_resource_with_http_info(action, resource, **kwargs)
return data
def get_access_policy_for_resource_with_http_info(self, action, resource, **kwargs):
"""
Gets an access policy for the specified action and resource
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_access_policy_for_resource_with_http_info(action, resource, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str action: The request action. (required)
:param str resource: The resource of the policy. (required)
:return: AccessPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['action', 'resource']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_access_policy_for_resource" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'action' is set
if ('action' not in params) or (params['action'] is None):
raise ValueError("Missing the required parameter `action` when calling `get_access_policy_for_resource`")
# verify the required parameter 'resource' is set
if ('resource' not in params) or (params['resource'] is None):
raise ValueError("Missing the required parameter `resource` when calling `get_access_policy_for_resource`")
if 'resource' in params and not re.search('.+', params['resource']):
raise ValueError("Invalid value for parameter `resource` when calling `get_access_policy_for_resource`, must conform to the pattern `/.+/`")
collection_formats = {}
path_params = {}
if 'action' in params:
path_params['action'] = params['action']
if 'resource' in params:
path_params['resource'] = params['resource']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/policies/{action}/{resource}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccessPolicy',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_resources(self, **kwargs):
"""
Gets the available resources that support access/authorization policies
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_resources(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Resource]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_resources_with_http_info(**kwargs)
else:
(data) = self.get_resources_with_http_info(**kwargs)
return data
def get_resources_with_http_info(self, **kwargs):
"""
Gets the available resources that support access/authorization policies
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_resources_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Resource]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/policies/resources', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Resource]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def remove_access_policy(self, id, **kwargs):
"""
Deletes an access policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_access_policy(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The access policy id. (required)
:return: AccessPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.remove_access_policy_with_http_info(id, **kwargs)
else:
(data) = self.remove_access_policy_with_http_info(id, **kwargs)
return data
def remove_access_policy_with_http_info(self, id, **kwargs):
"""
Deletes an access policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_access_policy_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The access policy id. (required)
:return: AccessPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method remove_access_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `remove_access_policy`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/policies/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccessPolicy',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_access_policy(self, id, body, **kwargs):
"""
Updates an access policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_access_policy(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The access policy id. (required)
:param AccessPolicy body: The access policy configuration details. (required)
:return: AccessPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_access_policy_with_http_info(id, body, **kwargs)
else:
(data) = self.update_access_policy_with_http_info(id, body, **kwargs)
return data
def update_access_policy_with_http_info(self, id, body, **kwargs):
"""
Updates an access policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_access_policy_with_http_info(id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The access policy id. (required)
:param AccessPolicy body: The access policy configuration details. (required)
:return: AccessPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_access_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_access_policy`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_access_policy`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth', 'Authorization']
return self.api_client.call_api('/policies/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccessPolicy',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
py | 1a3c1f33841c2d05dd2feef3eeba7c28e2afe62f | # -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from json.decoder import JSONDecodeError
from time import time
from typing import Union
from urllib.parse import urlparse
import requests
from multipledispatch import dispatch
from iconsdk.exception import JSONRPCException, URLException
from iconsdk.providers.provider import Provider
from iconsdk.utils import to_dict
class HTTPProvider(Provider):
"""
The HTTPProvider takes the full URI where the server can be found.
For local development this would be something like 'http://localhost:9000'.
"""
@dispatch(str, int, dict=None)
def __init__(self, base_domain_url: str, version: int, request_kwargs: dict = None):
"""
The initializer to be set with base domain URL and version.
:param base_domain_url: base domain URL as like <scheme>://<host>:<port>
:param version: version for RPC server
:param request_kwargs: kwargs for setting to head of request
"""
uri = urlparse(base_domain_url)
if uri.path != '':
raise URLException('Path is not allowed')
self._serverUri = f'{uri.scheme}://{uri.netloc}'
self._channel = ''
self._version = version
self._request_kwargs = request_kwargs or {}
self._generate_url_map()
@dispatch(str, dict=None)
def __init__(self, full_path_url: str, request_kwargs: dict = None):
"""
The initializer to be set with full path url as like <scheme>://<host>:<port>/api/v3.
If you need to use a channel, append it to the path, e.g. <scheme>://<host>:<port>/api/v3/{channel}.
:param full_path_url: full path URL as like <scheme>://<host>:<port>/api/v3
:param request_kwargs: kwargs for setting to head of request
"""
uri = urlparse(full_path_url)
self._serverUri = f'{uri.scheme}://{uri.netloc}'
self._channel = self._get_channel(uri.path)
self._version = 3
self._request_kwargs = request_kwargs or {}
self._generate_url_map()
def _generate_url_map(self):
def _add_channel_path(url: str):
if self._channel:
return f"{url}/{self._channel}"
return url
self._URL_MAP = {
'icx': _add_channel_path(f"{self._serverUri}/api/v{self._version}"),
'debug': _add_channel_path(f"{self._serverUri}/api/debug/v{self._version}")
}
@staticmethod
def _get_channel(path: str):
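# Split off the trailing path segment: "/api/v3/icon_dex" yields the channel
# "icon_dex", while a bare "/api/v3" (or "/api/v3/") yields an empty channel.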
tokens = re.split("/(?=[^/]+$)", path.rstrip('/'))
if tokens[0] == '/api/v3':
return tokens[1]
elif tokens == ['/api', 'v3']:
return ''
raise URLException('Invalid URI path')
def __str__(self):
return "RPC connection to {0}".format(self._serverUri)
@to_dict
def _get_request_kwargs(self) -> dict:
if 'headers' not in self._request_kwargs:
yield 'headers', {'Content-Type': 'application/json'}
for key, value in self._request_kwargs.items():
yield key, value
@staticmethod
def _make_post_request(request_url: str, data: dict, **kwargs) -> requests.Response:
kwargs.setdefault('timeout', 10)
with requests.Session() as session:
response = session.post(url=request_url, data=json.dumps(data), **kwargs)
return response
def _make_id(self) -> int:
return int(time())
def make_request(self, method: str, params=None, full_response: bool = False) -> Union[str, list, dict]:
rpc_dict = {
'jsonrpc': '2.0',
'method': method,
'id': self._make_id()
}
if params:
rpc_dict['params'] = params
req_key = method.split('_')[0]
retry_count = 2
raw_response = ''
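# If a "debug" call comes back as non-JSON (e.g. the node only serves the
# alternative debug path), retry once against "/api/v{version}d/"; for any
# other method a non-JSON body is treated as a hard failure below.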
while retry_count > 0:
request_url = self._URL_MAP.get(req_key)
response = self._make_post_request(request_url, rpc_dict, **self._get_request_kwargs())
try:
return self._return_custom_response(response, full_response)
except JSONDecodeError:
retry_count -= 1
raw_response = response.content.decode()
if req_key == 'debug':
self._URL_MAP['debug'] = "{0}/api/v{1}d/{2}".format(self._serverUri, self._version, self._channel)
else:
break
raise JSONRPCException(f'Unknown response: {raw_response}')
@staticmethod
def _return_custom_response(response: requests.Response, full_response: bool = False) -> Union[str, list, dict]:
content = json.loads(response.content)
if full_response:
return content
if response.ok:
return content['result']
raise JSONRPCException(content["error"])
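# A minimal usage sketch, not part of the original module. It assumes a local
# ICON JSON-RPC node at http://localhost:9000 exposing API v3; "icx_getLastBlock"
# is a standard ICON RPC method, and the "height" key is assumed to be present
# in its result.
if __name__ == "__main__":
    provider = HTTPProvider("http://localhost:9000/api/v3")
    try:
        block = provider.make_request("icx_getLastBlock")
        print("last block height:", block.get("height"))
    except (URLException, JSONRPCException) as exc:
        print("request failed:", exc)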
|
py | 1a3c1fde31364667f232a83a797376ef6cf3101c | """The tests for the MQTT component."""
import asyncio
from datetime import datetime, timedelta
import json
import ssl
from unittest.mock import AsyncMock, MagicMock, call, mock_open, patch
import pytest
import voluptuous as vol
from openpeerpower.components import mqtt, websocket_api
from openpeerpower.components.mqtt import debug_info
from openpeerpower.components.mqtt.mixins import MQTT_ENTITY_DEVICE_INFO_SCHEMA
from openpeerpower.const import (
ATTR_DOMAIN,
ATTR_SERVICE,
EVENT_CALL_SERVICE,
EVENT_OPENPEERPOWER_STOP,
TEMP_CELSIUS,
)
from openpeerpower.core import callback
from openpeerpower.helpers import device_registry as dr
from openpeerpower.setup import async_setup_component
from openpeerpower.util.dt import utcnow
from tests.common import (
MockConfigEntry,
async_fire_mqtt_message,
async_fire_time_changed,
mock_device_registry,
mock_registry,
)
from tests.testing_config.custom_components.test.sensor import DEVICE_CLASSES
@pytest.fixture(autouse=True)
def mock_storage(opp_storage):
"""Autouse opp_storage for the TestCase tests."""
@pytest.fixture
def device_reg(opp):
"""Return an empty, loaded, registry."""
return mock_device_registry(opp)
@pytest.fixture
def entity_reg(opp):
"""Return an empty, loaded, registry."""
return mock_registry(opp)
@pytest.fixture
def mock_mqtt():
"""Make sure connection is established."""
with patch("openpeerpower.components.mqtt.MQTT") as mock_mqtt:
mock_mqtt.return_value.async_connect = AsyncMock(return_value=True)
mock_mqtt.return_value.async_disconnect = AsyncMock(return_value=True)
yield mock_mqtt
@pytest.fixture
def calls():
"""Fixture to record calls."""
return []
@pytest.fixture
def record_calls(calls):
"""Fixture to record calls."""
@callback
def record_calls(*args):
"""Record calls."""
calls.append(args)
return record_calls
async def test_mqtt_connects_on_open_peer_power_mqtt_setup(
opp, mqtt_client_mock, mqtt_mock
):
"""Test if client is connected after mqtt init on bootstrap."""
assert mqtt_client_mock.connect.call_count == 1
async def test_mqtt_disconnects_on_open_peer_power_stop(opp, mqtt_mock):
"""Test if client stops on OPP stop."""
opp.bus.fire(EVENT_OPENPEERPOWER_STOP)
await opp.async_block_till_done()
await opp.async_block_till_done()
assert mqtt_mock.async_disconnect.called
async def test_publish_calls_service(opp, mqtt_mock, calls, record_calls):
"""Test the publishing of call to services."""
opp.bus.async_listen_once(EVENT_CALL_SERVICE, record_calls)
mqtt.async_publish(opp, "test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].data["service_data"][mqtt.ATTR_TOPIC] == "test-topic"
assert calls[0][0].data["service_data"][mqtt.ATTR_PAYLOAD] == "test-payload"
async def test_service_call_without_topic_does_not_publish(opp, mqtt_mock):
"""Test the service call if topic is missing."""
opp.bus.fire(
EVENT_CALL_SERVICE,
{ATTR_DOMAIN: mqtt.DOMAIN, ATTR_SERVICE: mqtt.SERVICE_PUBLISH},
)
await opp.async_block_till_done()
assert not mqtt_mock.async_publish.called
async def test_service_call_with_template_payload_renders_template(opp, mqtt_mock):
"""Test the service call with rendered template.
If 'payload_template' is provided and 'payload' is not, then render it.
"""
mqtt.async_publish_template(opp, "test/topic", "{{ 1+1 }}")
await opp.async_block_till_done()
assert mqtt_mock.async_publish.called
assert mqtt_mock.async_publish.call_args[0][1] == "2"
async def test_service_call_with_payload_doesnt_render_template(opp, mqtt_mock):
"""Test the service call with unrendered template.
If both 'payload' and 'payload_template' are provided then fail.
"""
payload = "not a template"
payload_template = "a template"
with pytest.raises(vol.Invalid):
await opp.services.async_call(
mqtt.DOMAIN,
mqtt.SERVICE_PUBLISH,
{
mqtt.ATTR_TOPIC: "test/topic",
mqtt.ATTR_PAYLOAD: payload,
mqtt.ATTR_PAYLOAD_TEMPLATE: payload_template,
},
blocking=True,
)
assert not mqtt_mock.async_publish.called
async def test_service_call_with_ascii_qos_retain_flags(opp, mqtt_mock):
"""Test the service call with args that can be misinterpreted.
Empty payload message and ascii formatted qos and retain flags.
"""
await opp.services.async_call(
mqtt.DOMAIN,
mqtt.SERVICE_PUBLISH,
{
mqtt.ATTR_TOPIC: "test/topic",
mqtt.ATTR_PAYLOAD: "",
mqtt.ATTR_QOS: "2",
mqtt.ATTR_RETAIN: "no",
},
blocking=True,
)
assert mqtt_mock.async_publish.called
assert mqtt_mock.async_publish.call_args[0][2] == 2
assert not mqtt_mock.async_publish.call_args[0][3]
def test_validate_topic():
"""Test topic name/filter validation."""
# Invalid UTF-8, must not contain U+D800 to U+DFFF.
with pytest.raises(vol.Invalid):
mqtt.util.valid_topic("\ud800")
with pytest.raises(vol.Invalid):
mqtt.util.valid_topic("\udfff")
# Topic MUST NOT be empty
with pytest.raises(vol.Invalid):
mqtt.util.valid_topic("")
# Topic MUST NOT be longer than 65535 encoded bytes.
with pytest.raises(vol.Invalid):
mqtt.util.valid_topic("ü" * 32768)
# UTF-8 MUST NOT include null character
with pytest.raises(vol.Invalid):
mqtt.util.valid_topic("bad\0one")
# Topics "SHOULD NOT" include these special characters
# (not MUST NOT, RFC2119). The receiver MAY close the connection.
mqtt.util.valid_topic("\u0001")
mqtt.util.valid_topic("\u001F")
mqtt.util.valid_topic("\u007F")
mqtt.util.valid_topic("\u009F")
mqtt.util.valid_topic("\uffff")
def test_validate_subscribe_topic():
"""Test invalid subscribe topics."""
mqtt.valid_subscribe_topic("#")
mqtt.valid_subscribe_topic("sport/#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport/#/")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("foo/bar#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("foo/#/bar")
mqtt.valid_subscribe_topic("+")
mqtt.valid_subscribe_topic("+/tennis/#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport+")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport+/")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport/+1")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("sport/+#")
with pytest.raises(vol.Invalid):
mqtt.valid_subscribe_topic("bad+topic")
mqtt.valid_subscribe_topic("sport/+/player1")
mqtt.valid_subscribe_topic("/finance")
mqtt.valid_subscribe_topic("+/+")
mqtt.valid_subscribe_topic("$SYS/#")
def test_validate_publish_topic():
"""Test invalid publish topics."""
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("pub+")
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("pub/+")
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("1#")
with pytest.raises(vol.Invalid):
mqtt.valid_publish_topic("bad+topic")
mqtt.valid_publish_topic("//")
# Topic names beginning with $ SHOULD NOT be used, but can
mqtt.valid_publish_topic("$SYS/")
def test_entity_device_info_schema():
"""Test MQTT entity device info validation."""
# just identifier
MQTT_ENTITY_DEVICE_INFO_SCHEMA({"identifiers": ["abcd"]})
MQTT_ENTITY_DEVICE_INFO_SCHEMA({"identifiers": "abcd"})
# just connection
MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{"connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]]}
)
# full device info
MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{
"identifiers": ["helloworld", "hello"],
"connections": [
[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"],
[dr.CONNECTION_ZIGBEE, "zigbee_id"],
],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
}
)
# full device info with via_device
MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{
"identifiers": ["helloworld", "hello"],
"connections": [
[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"],
[dr.CONNECTION_ZIGBEE, "zigbee_id"],
],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
"via_device": "test-hub",
}
)
# no identifiers
with pytest.raises(vol.Invalid):
MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
}
)
# empty identifiers
with pytest.raises(vol.Invalid):
MQTT_ENTITY_DEVICE_INFO_SCHEMA(
{"identifiers": [], "connections": [], "name": "Beer"}
)
async def test_receiving_non_utf8_message_gets_logged(
opp, mqtt_mock, calls, record_calls, caplog
):
"""Test receiving a non utf8 encoded message."""
await mqtt.async_subscribe(opp, "test-topic", record_calls)
async_fire_mqtt_message(opp, "test-topic", b"\x9a")
await opp.async_block_till_done()
assert (
"Can't decode payload b'\\x9a' on test-topic with encoding utf-8" in caplog.text
)
async def test_all_subscriptions_run_when_decode_fails(
opp, mqtt_mock, calls, record_calls
):
"""Test all other subscriptions still run when decode fails for one."""
await mqtt.async_subscribe(opp, "test-topic", record_calls, encoding="ascii")
await mqtt.async_subscribe(opp, "test-topic", record_calls)
async_fire_mqtt_message(opp, "test-topic", TEMP_CELSIUS)
await opp.async_block_till_done()
assert len(calls) == 1
async def test_subscribe_topic(opp, mqtt_mock, calls, record_calls):
"""Test the subscription of a topic."""
unsub = await mqtt.async_subscribe(opp, "test-topic", record_calls)
async_fire_mqtt_message(opp, "test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "test-topic"
assert calls[0][0].payload == "test-payload"
unsub()
async_fire_mqtt_message(opp, "test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
async def test_subscribe_deprecated(opp, mqtt_mock):
"""Test the subscription of a topic using deprecated callback signature."""
calls = []
@callback
def record_calls(topic, payload, qos):
"""Record calls."""
calls.append((topic, payload, qos))
unsub = await mqtt.async_subscribe(opp, "test-topic", record_calls)
async_fire_mqtt_message(opp, "test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0] == "test-topic"
assert calls[0][1] == "test-payload"
unsub()
async_fire_mqtt_message(opp, "test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
async def test_subscribe_deprecated_async(opp, mqtt_mock):
"""Test the subscription of a topic using deprecated callback signature."""
calls = []
async def record_calls(topic, payload, qos):
"""Record calls."""
calls.append((topic, payload, qos))
unsub = await mqtt.async_subscribe(opp, "test-topic", record_calls)
async_fire_mqtt_message(opp, "test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0] == "test-topic"
assert calls[0][1] == "test-payload"
unsub()
async_fire_mqtt_message(opp, "test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
async def test_subscribe_topic_not_match(opp, mqtt_mock, calls, record_calls):
"""Test if subscribed topic is not a match."""
await mqtt.async_subscribe(opp, "test-topic", record_calls)
async_fire_mqtt_message(opp, "another-test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_level_wildcard(opp, mqtt_mock, calls, record_calls):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(opp, "test-topic/+/on", record_calls)
async_fire_mqtt_message(opp, "test-topic/bier/on", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "test-topic/bier/on"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_level_wildcard_no_subtree_match(
opp, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(opp, "test-topic/+/on", record_calls)
async_fire_mqtt_message(opp, "test-topic/bier", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_level_wildcard_root_topic_no_subtree_match(
opp, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(opp, "test-topic/#", record_calls)
async_fire_mqtt_message(opp, "test-topic-123", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_subtree_wildcard_subtree_topic(
opp, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(opp, "test-topic/#", record_calls)
async_fire_mqtt_message(opp, "test-topic/bier/on", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "test-topic/bier/on"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_subtree_wildcard_root_topic(
opp, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(opp, "test-topic/#", record_calls)
async_fire_mqtt_message(opp, "test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "test-topic"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_subtree_wildcard_no_match(
opp, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(opp, "test-topic/#", record_calls)
async_fire_mqtt_message(opp, "another-test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_level_wildcard_and_wildcard_root_topic(
opp, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(opp, "+/test-topic/#", record_calls)
async_fire_mqtt_message(opp, "hi/test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "hi/test-topic"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_level_wildcard_and_wildcard_subtree_topic(
opp, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(opp, "+/test-topic/#", record_calls)
async_fire_mqtt_message(opp, "hi/test-topic/here-iam", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "hi/test-topic/here-iam"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_level_wildcard_and_wildcard_level_no_match(
opp, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(opp, "+/test-topic/#", record_calls)
async_fire_mqtt_message(opp, "hi/here-iam/test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_level_wildcard_and_wildcard_no_match(
opp, mqtt_mock, calls, record_calls
):
"""Test the subscription of wildcard topics."""
await mqtt.async_subscribe(opp, "+/test-topic/#", record_calls)
async_fire_mqtt_message(opp, "hi/another-test-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 0
async def test_subscribe_topic_sys_root(opp, mqtt_mock, calls, record_calls):
"""Test the subscription of $ root topics."""
await mqtt.async_subscribe(opp, "$test-topic/subtree/on", record_calls)
async_fire_mqtt_message(opp, "$test-topic/subtree/on", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "$test-topic/subtree/on"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_sys_root_and_wildcard_topic(
opp, mqtt_mock, calls, record_calls
):
"""Test the subscription of $ root and wildcard topics."""
await mqtt.async_subscribe(opp, "$test-topic/#", record_calls)
async_fire_mqtt_message(opp, "$test-topic/some-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "$test-topic/some-topic"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_topic_sys_root_and_wildcard_subtree_topic(
opp, mqtt_mock, calls, record_calls
):
"""Test the subscription of $ root and wildcard subtree topics."""
await mqtt.async_subscribe(opp, "$test-topic/subtree/#", record_calls)
async_fire_mqtt_message(opp, "$test-topic/subtree/some-topic", "test-payload")
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == "$test-topic/subtree/some-topic"
assert calls[0][0].payload == "test-payload"
async def test_subscribe_special_characters(opp, mqtt_mock, calls, record_calls):
"""Test the subscription to topics with special characters."""
topic = "/test-topic/$(.)[^]{-}"
payload = "p4y.l[]a|> ?"
await mqtt.async_subscribe(opp, topic, record_calls)
async_fire_mqtt_message(opp, topic, payload)
await opp.async_block_till_done()
assert len(calls) == 1
assert calls[0][0].topic == topic
assert calls[0][0].payload == payload
async def test_subscribe_same_topic(opp, mqtt_client_mock, mqtt_mock):
"""
Test subscribing to the same topic twice and simulate retained messages.
When subscribing to the same topic again, SUBSCRIBE must be sent to the broker again
for it to resend any retained messages.
"""
# Fake that the client is connected
mqtt_mock().connected = True
calls_a = MagicMock()
await mqtt.async_subscribe(opp, "test/state", calls_a)
async_fire_mqtt_message(
opp, "test/state", "online"
) # Simulate a (retained) message
await opp.async_block_till_done()
assert calls_a.called
mqtt_client_mock.subscribe.assert_called()
calls_a.reset_mock()
mqtt_client_mock.reset_mock()
calls_b = MagicMock()
await mqtt.async_subscribe(opp, "test/state", calls_b)
async_fire_mqtt_message(
opp, "test/state", "online"
) # Simulate a (retained) message
await opp.async_block_till_done()
assert calls_a.called
assert calls_b.called
mqtt_client_mock.subscribe.assert_called()
async def test_not_calling_unsubscribe_with_active_subscribers(
opp, mqtt_client_mock, mqtt_mock
):
"""Test not calling unsubscribe() when other subscribers are active."""
# Fake that the client is connected
mqtt_mock().connected = True
unsub = await mqtt.async_subscribe(opp, "test/state", None)
await mqtt.async_subscribe(opp, "test/state", None)
await opp.async_block_till_done()
assert mqtt_client_mock.subscribe.called
unsub()
await opp.async_block_till_done()
assert not mqtt_client_mock.unsubscribe.called
@pytest.mark.parametrize(
"mqtt_config",
[{mqtt.CONF_BROKER: "mock-broker", mqtt.CONF_DISCOVERY: False}],
)
async def test_restore_subscriptions_on_reconnect(opp, mqtt_client_mock, mqtt_mock):
"""Test subscriptions are restored on reconnect."""
# Fake that the client is connected
mqtt_mock().connected = True
await mqtt.async_subscribe(opp, "test/state", None)
await opp.async_block_till_done()
assert mqtt_client_mock.subscribe.call_count == 1
mqtt_mock._mqtt_on_disconnect(None, None, 0)
with patch("openpeerpower.components.mqtt.DISCOVERY_COOLDOWN", 0):
mqtt_mock._mqtt_on_connect(None, None, None, 0)
await opp.async_block_till_done()
assert mqtt_client_mock.subscribe.call_count == 2
@pytest.mark.parametrize(
"mqtt_config",
[{mqtt.CONF_BROKER: "mock-broker", mqtt.CONF_DISCOVERY: False}],
)
async def test_restore_all_active_subscriptions_on_reconnect(
opp, mqtt_client_mock, mqtt_mock
):
"""Test active subscriptions are restored correctly on reconnect."""
# Fake that the client is connected
mqtt_mock().connected = True
unsub = await mqtt.async_subscribe(opp, "test/state", None, qos=2)
await mqtt.async_subscribe(opp, "test/state", None)
await mqtt.async_subscribe(opp, "test/state", None, qos=1)
await opp.async_block_till_done()
expected = [
call("test/state", 2),
call("test/state", 0),
call("test/state", 1),
]
assert mqtt_client_mock.subscribe.mock_calls == expected
unsub()
await opp.async_block_till_done()
assert mqtt_client_mock.unsubscribe.call_count == 0
mqtt_mock._mqtt_on_disconnect(None, None, 0)
with patch("openpeerpower.components.mqtt.DISCOVERY_COOLDOWN", 0):
mqtt_mock._mqtt_on_connect(None, None, None, 0)
await opp.async_block_till_done()
expected.append(call("test/state", 1))
assert mqtt_client_mock.subscribe.mock_calls == expected
async def test_setup_logs_error_if_no_connect_broker(opp, caplog):
"""Test for setup failure if connection to broker is missing."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: "test-broker"})
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().connect = lambda *args: 1
assert await mqtt.async_setup_entry(opp, entry)
assert "Failed to connect to MQTT server:" in caplog.text
async def test_setup_raises_ConfigEntryNotReady_if_no_connect_broker(opp, caplog):
"""Test for setup failure if connection to broker is missing."""
entry = MockConfigEntry(domain=mqtt.DOMAIN, data={mqtt.CONF_BROKER: "test-broker"})
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().connect = MagicMock(side_effect=OSError("Connection error"))
assert await mqtt.async_setup_entry(opp, entry)
assert "Failed to connect to MQTT server due to exception:" in caplog.text
async def test_setup_uses_certificate_on_certificate_set_to_auto(opp):
"""Test setup uses bundled certs when certificate is set to auto."""
calls = []
def mock_tls_set(certificate, certfile=None, keyfile=None, tls_version=None):
calls.append((certificate, certfile, keyfile, tls_version))
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().tls_set = mock_tls_set
entry = MockConfigEntry(
domain=mqtt.DOMAIN,
data={mqtt.CONF_BROKER: "test-broker", "certificate": "auto"},
)
assert await mqtt.async_setup_entry(opp, entry)
assert calls
import certifi
expectedCertificate = certifi.where()
# assert mock_mqtt.mock_calls[0][1][2]["certificate"] == expectedCertificate
assert calls[0][0] == expectedCertificate
async def test_setup_without_tls_config_uses_tlsv1_under_python36(opp):
"""Test setup defaults to TLSv1 under python3.6."""
calls = []
def mock_tls_set(certificate, certfile=None, keyfile=None, tls_version=None):
calls.append((certificate, certfile, keyfile, tls_version))
with patch("paho.mqtt.client.Client") as mock_client:
mock_client().tls_set = mock_tls_set
entry = MockConfigEntry(
domain=mqtt.DOMAIN,
data={"certificate": "auto", mqtt.CONF_BROKER: "test-broker"},
)
assert await mqtt.async_setup_entry(opp, entry)
assert calls
import sys
if sys.hexversion >= 0x03060000:
expectedTlsVersion = ssl.PROTOCOL_TLS # pylint: disable=no-member
else:
expectedTlsVersion = ssl.PROTOCOL_TLSv1
assert calls[0][3] == expectedTlsVersion
@pytest.mark.parametrize(
"mqtt_config",
[
{
mqtt.CONF_BROKER: "mock-broker",
mqtt.CONF_BIRTH_MESSAGE: {
mqtt.ATTR_TOPIC: "birth",
mqtt.ATTR_PAYLOAD: "birth",
},
}
],
)
async def test_custom_birth_message(opp, mqtt_client_mock, mqtt_mock):
"""Test sending birth message."""
birth = asyncio.Event()
async def wait_birth(topic, payload, qos):
"""Handle birth message."""
birth.set()
with patch("openpeerpower.components.mqtt.DISCOVERY_COOLDOWN", 0.1):
await mqtt.async_subscribe(opp, "birth", wait_birth)
mqtt_mock._mqtt_on_connect(None, None, 0, 0)
await opp.async_block_till_done()
await birth.wait()
mqtt_client_mock.publish.assert_called_with("birth", "birth", 0, False)
@pytest.mark.parametrize(
"mqtt_config",
[
{
mqtt.CONF_BROKER: "mock-broker",
mqtt.CONF_BIRTH_MESSAGE: {
mqtt.ATTR_TOPIC: "openpeerpower/status",
mqtt.ATTR_PAYLOAD: "online",
},
}
],
)
async def test_default_birth_message(opp, mqtt_client_mock, mqtt_mock):
"""Test sending birth message."""
birth = asyncio.Event()
async def wait_birth(topic, payload, qos):
"""Handle birth message."""
birth.set()
with patch("openpeerpower.components.mqtt.DISCOVERY_COOLDOWN", 0.1):
await mqtt.async_subscribe(opp, "openpeerpower/status", wait_birth)
mqtt_mock._mqtt_on_connect(None, None, 0, 0)
await opp.async_block_till_done()
await birth.wait()
mqtt_client_mock.publish.assert_called_with(
"openpeerpower/status", "online", 0, False
)
@pytest.mark.parametrize(
"mqtt_config",
[{mqtt.CONF_BROKER: "mock-broker", mqtt.CONF_BIRTH_MESSAGE: {}}],
)
async def test_no_birth_message(opp, mqtt_client_mock, mqtt_mock):
"""Test disabling birth message."""
with patch("openpeerpower.components.mqtt.DISCOVERY_COOLDOWN", 0.1):
mqtt_mock._mqtt_on_connect(None, None, 0, 0)
await opp.async_block_till_done()
await asyncio.sleep(0.2)
mqtt_client_mock.publish.assert_not_called()
@pytest.mark.parametrize(
"mqtt_config",
[
{
mqtt.CONF_BROKER: "mock-broker",
mqtt.CONF_WILL_MESSAGE: {
mqtt.ATTR_TOPIC: "death",
mqtt.ATTR_PAYLOAD: "death",
},
}
],
)
async def test_custom_will_message(opp, mqtt_client_mock, mqtt_mock):
"""Test will message."""
mqtt_client_mock.will_set.assert_called_with(
topic="death", payload="death", qos=0, retain=False
)
async def test_default_will_message(opp, mqtt_client_mock, mqtt_mock):
"""Test will message."""
mqtt_client_mock.will_set.assert_called_with(
topic="openpeerpower/status", payload="offline", qos=0, retain=False
)
@pytest.mark.parametrize(
"mqtt_config",
[{mqtt.CONF_BROKER: "mock-broker", mqtt.CONF_WILL_MESSAGE: {}}],
)
async def test_no_will_message(opp, mqtt_client_mock, mqtt_mock):
"""Test will message."""
mqtt_client_mock.will_set.assert_not_called()
@pytest.mark.parametrize(
"mqtt_config",
[
{
mqtt.CONF_BROKER: "mock-broker",
mqtt.CONF_BIRTH_MESSAGE: {},
mqtt.CONF_DISCOVERY: False,
}
],
)
async def test_mqtt_subscribes_topics_on_connect(opp, mqtt_client_mock, mqtt_mock):
"""Test subscription to topic on connect."""
await mqtt.async_subscribe(opp, "topic/test", None)
await mqtt.async_subscribe(opp, "home/sensor", None, 2)
await mqtt.async_subscribe(opp, "still/pending", None)
await mqtt.async_subscribe(opp, "still/pending", None, 1)
opp.add_job = MagicMock()
mqtt_mock._mqtt_on_connect(None, None, 0, 0)
await opp.async_block_till_done()
assert mqtt_client_mock.disconnect.call_count == 0
expected = {"topic/test": 0, "home/sensor": 2, "still/pending": 1}
calls = {call[1][1]: call[1][2] for call in opp.add_job.mock_calls}
assert calls == expected
async def test_setup_fails_without_config(opp):
"""Test if the MQTT component fails to load with no config."""
assert not await async_setup_component(opp, mqtt.DOMAIN, {})
@pytest.mark.no_fail_on_log_exception
async def test_message_callback_exception_gets_logged(opp, caplog, mqtt_mock):
"""Test exception raised by message handler."""
@callback
def bad_handler(*args):
"""Record calls."""
raise Exception("This is a bad message callback")
await mqtt.async_subscribe(opp, "test-topic", bad_handler)
async_fire_mqtt_message(opp, "test-topic", "test")
await opp.async_block_till_done()
assert (
"Exception in bad_handler when handling msg on 'test-topic':"
" 'test'" in caplog.text
)
async def test_mqtt_ws_subscription(opp, opp_ws_client, mqtt_mock):
"""Test MQTT websocket subscription."""
client = await opp_ws_client(opp)
await client.send_json({"id": 5, "type": "mqtt/subscribe", "topic": "test-topic"})
response = await client.receive_json()
assert response["success"]
async_fire_mqtt_message(opp, "test-topic", "test1")
async_fire_mqtt_message(opp, "test-topic", "test2")
response = await client.receive_json()
assert response["event"]["topic"] == "test-topic"
assert response["event"]["payload"] == "test1"
response = await client.receive_json()
assert response["event"]["topic"] == "test-topic"
assert response["event"]["payload"] == "test2"
# Unsubscribe
await client.send_json({"id": 8, "type": "unsubscribe_events", "subscription": 5})
response = await client.receive_json()
assert response["success"]
async def test_dump_service(opp, mqtt_mock):
"""Test that we can dump a topic."""
mopen = mock_open()
await opp.services.async_call(
"mqtt", "dump", {"topic": "bla/#", "duration": 3}, blocking=True
)
async_fire_mqtt_message(opp, "bla/1", "test1")
async_fire_mqtt_message(opp, "bla/2", "test2")
with patch("openpeerpower.components.mqtt.open", mopen):
async_fire_time_changed(opp, utcnow() + timedelta(seconds=3))
await opp.async_block_till_done()
writes = mopen.return_value.write.mock_calls
assert len(writes) == 2
assert writes[0][1][0] == "bla/1,test1\n"
assert writes[1][1][0] == "bla/2,test2\n"
async def test_mqtt_ws_remove_discovered_device(
opp, device_reg, entity_reg, opp_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal."""
data = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(opp, "openpeerpower/sensor/bla/config", data)
await opp.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert device_entry is not None
client = await opp_ws_client(opp)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
# Verify device entry is cleared
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert device_entry is None
async def test_mqtt_ws_remove_discovered_device_twice(
opp, device_reg, opp_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal."""
data = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(opp, "openpeerpower/sensor/bla/config", data)
await opp.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert device_entry is not None
client = await opp_ws_client(opp)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
await client.send_json(
{"id": 6, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == websocket_api.const.ERR_NOT_FOUND
async def test_mqtt_ws_remove_discovered_device_same_topic(
opp, device_reg, opp_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal."""
data = (
'{ "device":{"identifiers":["0AFFD2"]},'
' "state_topic": "foobar/sensor",'
' "availability_topic": "foobar/sensor",'
' "unique_id": "unique" }'
)
async_fire_mqtt_message(opp, "openpeerpower/sensor/bla/config", data)
await opp.async_block_till_done()
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert device_entry is not None
client = await opp_ws_client(opp)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
await client.send_json(
{"id": 6, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == websocket_api.const.ERR_NOT_FOUND
async def test_mqtt_ws_remove_non_mqtt_device(
opp, device_reg, opp_ws_client, mqtt_mock
):
"""Test MQTT websocket device removal of device belonging to other domain."""
config_entry = MockConfigEntry(domain="test")
config_entry.add_to_opp(opp)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
assert device_entry is not None
client = await opp_ws_client(opp)
await client.send_json(
{"id": 5, "type": "mqtt/device/remove", "device_id": device_entry.id}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == websocket_api.const.ERR_NOT_FOUND
async def test_mqtt_ws_get_device_debug_info(opp, device_reg, opp_ws_client, mqtt_mock):
"""Test MQTT websocket device debug info."""
config = {
"device": {"identifiers": ["0AFFD2"]},
"platform": "mqtt",
"state_topic": "foobar/sensor",
"unique_id": "unique",
}
data = json.dumps(config)
async_fire_mqtt_message(opp, "openpeerpower/sensor/bla/config", data)
await opp.async_block_till_done()
# Verify device entry is created
device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
assert device_entry is not None
client = await opp_ws_client(opp)
await client.send_json(
{"id": 5, "type": "mqtt/device/debug_info", "device_id": device_entry.id}
)
response = await client.receive_json()
assert response["success"]
expected_result = {
"entities": [
{
"entity_id": "sensor.mqtt_sensor",
"subscriptions": [{"topic": "foobar/sensor", "messages": []}],
"discovery_data": {
"payload": config,
"topic": "openpeerpower/sensor/bla/config",
},
}
],
"triggers": [],
}
assert response["result"] == expected_result
async def test_debug_info_multiple_devices(opp, mqtt_mock):
"""Test we get correct debug_info when multiple devices are present."""
devices = [
{
"domain": "sensor",
"config": {
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"state_topic": "test-topic-sensor",
"unique_id": "unique",
},
},
{
"domain": "binary_sensor",
"config": {
"device": {"identifiers": ["0AFFD1"]},
"platform": "mqtt",
"state_topic": "test-topic-binary-sensor",
"unique_id": "unique",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD2"]},
"platform": "mqtt",
"topic": "test-topic1",
"type": "foo",
"subtype": "bar",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD3"]},
"platform": "mqtt",
"topic": "test-topic2",
"type": "ikk",
"subtype": "baz",
},
},
]
registry = dr.async_get(opp)
for d in devices:
data = json.dumps(d["config"])
domain = d["domain"]
id = d["config"]["device"]["identifiers"][0]
async_fire_mqtt_message(opp, f"openpeerpower/{domain}/{id}/config", data)
await opp.async_block_till_done()
for d in devices:
domain = d["domain"]
id = d["config"]["device"]["identifiers"][0]
device = registry.async_get_device({("mqtt", id)})
assert device is not None
debug_info_data = await debug_info.info_for_device(opp, device.id)
if d["domain"] != "device_automation":
assert len(debug_info_data["entities"]) == 1
assert len(debug_info_data["triggers"]) == 0
discovery_data = debug_info_data["entities"][0]["discovery_data"]
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
topic = d["config"]["state_topic"]
assert {"topic": topic, "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
else:
assert len(debug_info_data["entities"]) == 0
assert len(debug_info_data["triggers"]) == 1
discovery_data = debug_info_data["triggers"][0]["discovery_data"]
assert discovery_data["topic"] == f"openpeerpower/{domain}/{id}/config"
assert discovery_data["payload"] == d["config"]
async def test_debug_info_multiple_entities_triggers(opp, mqtt_mock):
"""Test we get correct debug_info for a device with multiple entities and triggers."""
config = [
{
"domain": "sensor",
"config": {
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"state_topic": "test-topic-sensor",
"unique_id": "unique",
},
},
{
"domain": "binary_sensor",
"config": {
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"state_topic": "test-topic-binary-sensor",
"unique_id": "unique",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"topic": "test-topic1",
"type": "foo",
"subtype": "bar",
},
},
{
"domain": "device_automation",
"config": {
"automation_type": "trigger",
"device": {"identifiers": ["0AFFD0"]},
"platform": "mqtt",
"topic": "test-topic2",
"type": "ikk",
"subtype": "baz",
},
},
]
registry = dr.async_get(opp)
for c in config:
data = json.dumps(c["config"])
domain = c["domain"]
# Use topic as discovery_id
id = c["config"].get("topic", c["config"].get("state_topic"))
async_fire_mqtt_message(opp, f"openpeerpower/{domain}/{id}/config", data)
await opp.async_block_till_done()
device_id = config[0]["config"]["device"]["identifiers"][0]
device = registry.async_get_device({("mqtt", device_id)})
assert device is not None
debug_info_data = await debug_info.info_for_device(opp, device.id)
assert len(debug_info_data["entities"]) == 2
assert len(debug_info_data["triggers"]) == 2
for c in config:
# Test we get debug info for each entity and trigger
domain = c["domain"]
# Use topic as discovery_id
id = c["config"].get("topic", c["config"].get("state_topic"))
if c["domain"] != "device_automation":
discovery_data = [e["discovery_data"] for e in debug_info_data["entities"]]
topic = c["config"]["state_topic"]
assert {"topic": topic, "messages": []} in [
t for e in debug_info_data["entities"] for t in e["subscriptions"]
]
else:
discovery_data = [e["discovery_data"] for e in debug_info_data["triggers"]]
assert {
"topic": f"openpeerpower/{domain}/{id}/config",
"payload": c["config"],
} in discovery_data
async def test_debug_info_non_mqtt(opp, device_reg, entity_reg):
"""Test we get empty debug_info for a device with non MQTT entities."""
DOMAIN = "sensor"
platform = getattr(opp.components, f"test.{DOMAIN}")
platform.init()
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_opp(opp)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
for device_class in DEVICE_CLASSES:
entity_reg.async_get_or_create(
DOMAIN,
"test",
platform.ENTITIES[device_class].unique_id,
device_id=device_entry.id,
)
assert await async_setup_component(opp, DOMAIN, {DOMAIN: {"platform": "test"}})
debug_info_data = await debug_info.info_for_device(opp, device_entry.id)
assert len(debug_info_data["entities"]) == 0
assert len(debug_info_data["triggers"]) == 0
async def test_debug_info_wildcard(opp, mqtt_mock):
"""Test debug info."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/#",
"unique_id": "veryunique",
}
registry = dr.async_get(opp)
data = json.dumps(config)
async_fire_mqtt_message(opp, "openpeerpower/sensor/bla/config", data)
await opp.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(opp, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/#", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("openpeerpower.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(opp, "sensor/abc", "123")
debug_info_data = await debug_info.info_for_device(opp, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {
"topic": "sensor/#",
"messages": [
{
"payload": "123",
"qos": 0,
"retain": False,
"time": start_dt,
"topic": "sensor/abc",
}
],
} in debug_info_data["entities"][0]["subscriptions"]
async def test_debug_info_filter_same(opp, mqtt_mock):
"""Test debug info removes messages with same timestamp."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/#",
"unique_id": "veryunique",
}
registry = dr.async_get(opp)
data = json.dumps(config)
async_fire_mqtt_message(opp, "openpeerpower/sensor/bla/config", data)
await opp.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(opp, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/#", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
dt1 = datetime(2019, 1, 1, 0, 0, 0)
dt2 = datetime(2019, 1, 1, 0, 0, 1)
with patch("openpeerpower.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = dt1
async_fire_mqtt_message(opp, "sensor/abc", "123")
async_fire_mqtt_message(opp, "sensor/abc", "123")
dt_utcnow.return_value = dt2
async_fire_mqtt_message(opp, "sensor/abc", "123")
debug_info_data = await debug_info.info_for_device(opp, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert len(debug_info_data["entities"][0]["subscriptions"][0]["messages"]) == 2
assert {
"topic": "sensor/#",
"messages": [
{
"payload": "123",
"qos": 0,
"retain": False,
"time": dt1,
"topic": "sensor/abc",
},
{
"payload": "123",
"qos": 0,
"retain": False,
"time": dt2,
"topic": "sensor/abc",
},
],
} == debug_info_data["entities"][0]["subscriptions"][0]
async def test_debug_info_same_topic(opp, mqtt_mock):
"""Test debug info."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/status",
"availability_topic": "sensor/status",
"unique_id": "veryunique",
}
registry = dr.async_get(opp)
data = json.dumps(config)
async_fire_mqtt_message(opp, "openpeerpower/sensor/bla/config", data)
await opp.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(opp, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/status", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("openpeerpower.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(opp, "sensor/status", "123", qos=0, retain=False)
debug_info_data = await debug_info.info_for_device(opp, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {
"payload": "123",
"qos": 0,
"retain": False,
"time": start_dt,
"topic": "sensor/status",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
config["availability_topic"] = "sensor/availability"
data = json.dumps(config)
async_fire_mqtt_message(opp, "openpeerpower/sensor/bla/config", data)
await opp.async_block_till_done()
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("openpeerpower.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(opp, "sensor/status", "123", qos=0, retain=False)
async def test_debug_info_qos_retain(opp, mqtt_mock):
"""Test debug info."""
config = {
"device": {"identifiers": ["helloworld"]},
"platform": "mqtt",
"name": "test",
"state_topic": "sensor/#",
"unique_id": "veryunique",
}
registry = dr.async_get(opp)
data = json.dumps(config)
async_fire_mqtt_message(opp, "openpeerpower/sensor/bla/config", data)
await opp.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")})
assert device is not None
debug_info_data = await debug_info.info_for_device(opp, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) >= 1
assert {"topic": "sensor/#", "messages": []} in debug_info_data["entities"][0][
"subscriptions"
]
start_dt = datetime(2019, 1, 1, 0, 0, 0)
with patch("openpeerpower.util.dt.utcnow") as dt_utcnow:
dt_utcnow.return_value = start_dt
async_fire_mqtt_message(opp, "sensor/abc", "123", qos=0, retain=False)
async_fire_mqtt_message(opp, "sensor/abc", "123", qos=1, retain=True)
async_fire_mqtt_message(opp, "sensor/abc", "123", qos=2, retain=False)
debug_info_data = await debug_info.info_for_device(opp, device.id)
assert len(debug_info_data["entities"][0]["subscriptions"]) == 1
assert {
"payload": "123",
"qos": 0,
"retain": False,
"time": start_dt,
"topic": "sensor/abc",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
assert {
"payload": "123",
"qos": 1,
"retain": True,
"time": start_dt,
"topic": "sensor/abc",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
assert {
"payload": "123",
"qos": 2,
"retain": False,
"time": start_dt,
"topic": "sensor/abc",
} in debug_info_data["entities"][0]["subscriptions"][0]["messages"]
async def test_publish_json_from_template(opp, mqtt_mock):
"""Test the publishing of call to services."""
test_str = "{'valid': 'python', 'invalid': 'json'}"
test_str_tpl = "{'valid': '{{ \"python\" }}', 'invalid': 'json'}"
await async_setup_component(
opp,
"script",
{
"script": {
"test_script_payload": {
"sequence": {
"service": "mqtt.publish",
"data": {"topic": "test-topic", "payload": test_str_tpl},
}
},
"test_script_payload_template": {
"sequence": {
"service": "mqtt.publish",
"data": {
"topic": "test-topic",
"payload_template": test_str_tpl,
},
}
},
}
},
)
await opp.services.async_call("script", "test_script_payload", blocking=True)
await opp.async_block_till_done()
assert mqtt_mock.async_publish.called
assert mqtt_mock.async_publish.call_args[0][1] == test_str
mqtt_mock.async_publish.reset_mock()
assert not mqtt_mock.async_publish.called
await opp.services.async_call(
"script", "test_script_payload_template", blocking=True
)
await opp.async_block_till_done()
assert mqtt_mock.async_publish.called
assert mqtt_mock.async_publish.call_args[0][1] == test_str
|
py | 1a3c20d10d11d0d301d88054196464c7ccabb22a | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import os
import pytest
import numpy as np
import pandapower as pp
import pandapower.shortcircuit as sc
from pandapower.test.shortcircuit.test_meshing_detection import meshed_grid
@pytest.fixture
def radial_grid():
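# Radial 220/110 kV feeder: external grid at the 220 kV bus, one transformer,
# then two 110 kV lines in series (b1-b2 overhead line, b2-b3 cable).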
net = pp.create_empty_network(sn_mva=2.)
b0 = pp.create_bus(net, 220)
b1 = pp.create_bus(net, 110)
b2 = pp.create_bus(net, 110)
b3 = pp.create_bus(net, 110)
pp.create_ext_grid(net, b0, s_sc_max_mva=100., s_sc_min_mva=80., rx_min=0.4, rx_max=0.4)
pp.create_transformer(net, b0, b1, "100 MVA 220/110 kV")
pp.create_line(net, b1, b2, std_type="305-AL1/39-ST1A 110.0" , length_km=20.)
pp.create_line(net, b2, b3, std_type="N2XS(FL)2Y 1x185 RM/35 64/110 kV" , length_km=15.)
return net
@pytest.fixture
def three_bus_big_sgen_example():
net = pp.create_empty_network()
b1 = pp.create_bus(net, 110)
b2 = pp.create_bus(net, 110)
b3 = pp.create_bus(net, 110)
pp.create_ext_grid(net, b1, s_sc_max_mva=100., s_sc_min_mva=80., rx_min=0.4, rx_max=0.4)
pp.create_line(net, b1, b2, std_type="305-AL1/39-ST1A 110.0" , length_km=20.)
pp.create_line(net, b2, b3, std_type="N2XS(FL)2Y 1x185 RM/35 64/110 kV" , length_km=15.)
net.line["endtemp_degree"] = 80
pp.create_sgen(net, b2, sn_mva=200., p_mw=0, k=1.2)
return net
def test_radial_network(radial_grid):
net = radial_grid
sc_bus = 3
sc.calc_sc(net)
ik = net.res_bus_sc.ikss_ka.at[sc_bus]
sc.calc_sc(net, bus=sc_bus, inverse_y=False, branch_results=True, return_all_currents=True)
assert np.isclose(net.res_bus_sc.ikss_ka.at[sc_bus], ik)
assert np.isclose(net.res_line_sc.ikss_ka.loc[(1, sc_bus)], ik)
assert np.isclose(net.res_line_sc.ikss_ka.loc[(0, sc_bus)], ik)
assert np.isclose(net.res_trafo_sc.ikss_lv_ka.loc[(0, sc_bus)], ik)
trafo_ratio = net.trafo.vn_lv_kv.values / net.trafo.vn_hv_kv.values
assert np.isclose(net.res_trafo_sc.ikss_hv_ka.loc[(0, sc_bus)], ik*trafo_ratio)
sc_bus = 2
sc.calc_sc(net)
ik = net.res_bus_sc.ikss_ka.at[sc_bus]
sc.calc_sc(net, bus=sc_bus, inverse_y=False, branch_results=True, return_all_currents=True)
assert np.isclose(net.res_bus_sc.ikss_ka.at[sc_bus], ik)
assert np.isclose(net.res_line_sc.ikss_ka.loc[(1, sc_bus)], 0)
assert np.isclose(net.res_line_sc.ikss_ka.loc[(0, sc_bus)], ik)
assert np.isclose(net.res_trafo_sc.ikss_lv_ka.loc[(0, sc_bus)], ik)
trafo_ratio = net.trafo.vn_lv_kv.values / net.trafo.vn_hv_kv.values
assert np.isclose(net.res_trafo_sc.ikss_hv_ka.loc[(0, sc_bus)], ik*trafo_ratio)
def test_meshed_network(meshed_grid):
net = meshed_grid
sc.calc_sc(net)
sc_bus = 5
ik = net.res_bus_sc.ikss_ka.at[sc_bus]
sc.calc_sc(net, bus=sc_bus, inverse_y=False, branch_results=True, return_all_currents=True)
assert np.isclose(net.res_bus_sc.ikss_ka.at[sc_bus], ik)
line_ix = net.line[(net.line.to_bus==sc_bus) | (net.line.from_bus==sc_bus)].index
line_flow_into_sc = net.res_line_sc.loc[(line_ix, sc_bus), "ikss_ka"].sum()
assert np.isclose(line_flow_into_sc, ik, atol=2e-3)
def test_big_gen_network(three_bus_big_sgen_example):
net = three_bus_big_sgen_example
sc_bus = [0, 1, 2]
sc.calc_sc(net, bus=sc_bus, branch_results=True, return_all_currents=True, inverse_y=False)
assert np.isclose(net.res_line_sc.loc[(0, 0),"ikss_ka"], 1.25967331, atol=1e-3)
assert np.isclose(net.res_line_sc.loc[(1, 0),"ikss_ka"], 0., atol=2e-3)
assert np.isclose(net.res_line_sc.loc[(0, 2),"ikss_ka"], 0.46221808, atol=1e-3)
assert np.isclose(net.res_line_sc.loc[(1, 2),"ikss_ka"], 1.72233192, atol=1e-3)
def test_big_gen_network_v2(three_bus_big_sgen_example):
net = three_bus_big_sgen_example
sc_bus = [0, 2]
sc.calc_sc(net, bus=sc_bus, branch_results=True, return_all_currents=True, inverse_y=False)
assert np.isclose(net.res_line_sc.loc[(0, 0),"ikss_ka"], 1.25967331, atol=1e-3)
assert np.isclose(net.res_line_sc.loc[(1, 0),"ikss_ka"], 0., atol=2e-3)
assert np.isclose(net.res_line_sc.loc[(0, 2),"ikss_ka"], 0.46221808, atol=1e-3)
assert np.isclose(net.res_line_sc.loc[(1, 2),"ikss_ka"], 1.72233192, atol=1e-3)
if __name__ == '__main__':
pytest.main([__file__]) |
py | 1a3c215b1dccb4afd122860541afed4f751f8409 | import xml.etree.ElementTree as ET
from nltk.tokenize import WordPunctTokenizer
from sentence_splitter import SentenceSplitter
from src.parser import Word, Dataset
class Aspect(object):
def __init__(self, begin=0, end=0, target="", polarity=1, category="", aspect_type=0, mark=0):
self.type_values = {
'explicit': 0,
'implicit': 1,
'fct': 2
}
self.rev_type_values = {value: key for key, value in self.type_values.items()}
self.sentiment_values = {
'positive': 3,
'neutral': 1,
'negative': 0,
'both': 2
}
self.rev_sentiment_values = {value: key for key, value in self.sentiment_values.items()}
self.mark_values = {
'Rel': 0,
'Irr': 1,
'Cmpr': 2,
'Prev': 3,
'Irn': 4
}
self.rev_mark_values = {value: key for key, value in self.mark_values.items()}
self.begin = begin
self.end = end
self.target = target
self.polarity = polarity
self.category = category
self.type = aspect_type
self.mark = mark
self.words = []
def parse(self, node):
self.begin = int(node.get('from'))
self.end = int(node.get('to'))
self.target = node.get('term')
self.polarity = self.sentiment_values[node.get('sentiment')]
self.category = node.get('category')
self.type = self.type_values[node.get('type')]
self.mark = self.mark_values[node.get('mark')]
def is_empty(self):
return self.target == ""
def inflate_target(self):
self.target = " ".join([word.text for word in self.words]).replace('"', "'").replace('&', '#')
def to_xml(self):
return '<aspect mark="{mark}" category="{category}" type="{aspect_type}" from="{begin}" to="{end}" sentiment="{polarity}" term="{term}"/>\n'.format(
begin=self.begin, end=self.end, term=self.target, mark=self.rev_mark_values[self.mark],
aspect_type=self.rev_type_values[self.type], category=self.category,
polarity=self.rev_sentiment_values[self.polarity])
def __repr__(self):
return "<Aspect {begin}:{end} {t} {category} {polarity} at {hid}>".format(
begin=self.begin,
end=self.end,
category=self.category,
polarity=self.polarity,
t=self.type,
hid=hex(id(self))
)
class Review(object):
def __init__(self, text="", rid=0):
self.text = text
self.rid = rid
self.aspects = []
self.sentences = []
self.categories = {}
self.sentiment_values = {
'positive': 3,
'neutral': 1,
'negative': 0,
'both': 2,
'absence': 4
}
self.rev_sentiment_values = {value: key for key, value in self.sentiment_values.items()}
def parse(self, node):
self.text = node.find(".//text").text
self.rid = node.get("id")
self.aspects = []
for aspect_node in node.findall(".//aspect"):
aspect = Aspect()
aspect.parse(aspect_node)
self.aspects.append(aspect)
for category_node in node.findall(".//category"):
category_name = category_node.get('name')
sentiment = category_node.get('sentiment')
self.categories[category_name] = self.sentiment_values[sentiment]
def to_xml(self):
aspects_xml = "".join([aspect.to_xml() for aspect in self.aspects])
categories_xml = ''
for name, sentiment_num in self.categories.items():
categories_xml += '<category name="{name}" sentiment="{sentiment}"/>\n'.format(
name=name,
sentiment=self.rev_sentiment_values[sentiment_num]
)
return '<review id="{rid}">\n<categories>\n{categories}</categories>\n<text>{text}</text>\n<aspects>\n{aspects}</aspects>\n</review>\n'.format(
rid=self.rid,
text=self.text.replace("&", "#"),
aspects=aspects_xml,
categories=categories_xml)
class SentiRuEvalDataset(Dataset):
def __init__(self):
super().__init__()
self.language = "ru"
def parse(self, filename, vectorizer=None, **kwargs):
assert filename.endswith('xml')
tree = ET.parse(filename)
root = tree.getroot()
self.reviews = []
for review_node in root.findall(".//review"):
review = Review()
review.parse(review_node)
self.reviews.append(review)
self.tokenize()
self.pos_tag()
def tokenize(self):
sentence_splitter = SentenceSplitter(language='ru')
for i, review in enumerate(self.reviews):
text = review.text
sentences = sentence_splitter.split(text)
words_borders = list(WordPunctTokenizer().span_tokenize(text))
for sentence in sentences:
tokenized_sentence = []
sentence_begin = text.find(sentence)
sentence_end = sentence_begin + len(sentence)
for word_begin, word_end in words_borders:
if word_begin >= sentence_begin and word_end <= sentence_end:
word_text = text[word_begin: word_end]
word = Word(word_text, word_begin, word_end)
for opinion in review.aspects:
if word.begin >= opinion.begin and word.end <= opinion.end:
word.add_opinion(opinion)
opinion.words.append(word)
tokenized_sentence.append(word)
self.reviews[i].sentences.append(tokenized_sentence)
def print_stat(self):
print("Num of reviews: " + str(len(self.reviews)))
print("Num of opinions: " + str(self.get_opinion_count()))
print("Max review length: " + str(max(self.get_lengths())))
print(self.reviews[0].sentences[0])
print(self.reviews[0].sentences[0])
def get_aspect_categories(self):
categories = set()
for review in self.reviews:
for aspect in review.aspects:
categories.add(aspect.category)
categories = list(sorted(list(categories)))
return {category: i for i, category in enumerate(categories)}
def get_review_categories(self):
categories = set()
for review in self.reviews:
for category in review.categories.keys():
categories.add(category)
categories = list(sorted(list(categories)))
return {category: i for i, category in enumerate(categories)}
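# A minimal usage sketch, assuming a SentiRuEval-format XML file at a hypothetical
# path "sentirueval_train.xml":
#
#     dataset = SentiRuEvalDataset()
#     dataset.parse("sentirueval_train.xml")               # reads reviews, then tokenizes and POS-tags them
#     dataset.print_stat()                                 # review/opinion counts and max review length
#     aspect_categories = dataset.get_aspect_categories()  # {category_name: index}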
|
py | 1a3c233bb308a4905f2dd38b5bb09a3f1b86307e | import json
import pytest
from unittest import mock
from asynctest import patch
from blebox_uniapi.box import Box
from blebox_uniapi import error
pytestmark = pytest.mark.asyncio
@pytest.fixture
def mock_session():
return mock.MagicMock(host="172.1.2.3", port=80)
@pytest.fixture
def data():
return {
"id": "abcd1234ef",
"type": "airSensor",
"deviceName": "foobar",
"fv": "1.23",
"hv": "4.56",
"apiLevel": "20180403",
}
async def test_json_paths(mock_session, data):
box = Box(mock_session, data)
assert "foo" == box.follow(json.loads("""["foo"]"""), "[0]")
assert 4 == box.follow(
json.loads("""[{"foo":"3", "value":4}]"""), "[foo='3']/value"
)
assert 4 == box.follow(json.loads("""[{"foo":3, "value":4}]"""), "[foo=3]/value")
with pytest.raises(error.JPathFailed, match=r"with: foo=bc at .* within .*"):
box.follow(json.loads("""[{"foo":"ab", "value":4}]"""), "[foo='bc']/value")
with pytest.raises(
error.JPathFailed, match=r"with value at index 1 at .* within .*"
):
box.follow(json.loads("""[{"value":4}]"""), "[1]/value")
with pytest.raises(
error.JPathFailed, match=r"with value at index 1 at .* within .*"
):
box.follow(json.loads("""{"value":4}"""), "[1]/value")
with pytest.raises(error.JPathFailed, match=r"with: foo=7 at .* within .*"):
box.follow(json.loads("""[{"foo":3, "value":4}]"""), "[foo=7]/value")
with pytest.raises(
error.JPathFailed, match=r"item 'foo' not among \['value'\] at .* within .*"
):
box.follow(json.loads("""{"value":4}"""), "foo")
with pytest.raises(
error.JPathFailed,
match=r"unexpected item type: 'foo' not in: \[4\] at .* within .*",
):
box.follow(json.loads("""[4]"""), "foo")
with pytest.raises(
error.JPathFailed,
match=r"list expected but got {'foo': \[4\]} at .* within .*",
):
box.follow(json.loads("""{"foo": [4]}"""), "[bar=0]/value")
async def test_without_id(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse, match="Device at 172.1.2.3:80 has no id"
):
del data["id"]
Box(mock_session, data)
async def test_without_type(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match="Device:abcd1234ef at 172.1.2.3:80 has no type",
):
del data["type"]
Box(mock_session, data)
async def test_with_unknown_type(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(unknownBox:abcd1234ef/1.23 at 172.1.2.3:80\) is not a supported type",
):
data["type"] = "unknownBox"
Box(mock_session, data)
async def test_without_name(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match="airSensor:abcd1234ef at 172.1.2.3:80 has no name",
):
del data["deviceName"]
Box(mock_session, data)
async def test_without_firmware_version(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(airSensor:abcd1234ef at 172.1.2.3:80\) has no firmware version",
):
del data["fv"]
Box(mock_session, data)
async def test_without_hardware_version(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(airSensor:abcd1234ef/1.23 at 172.1.2.3:80\) has no hardware version",
):
del data["hv"]
Box(mock_session, data)
async def test_without_api_level(mock_session, data):
with pytest.raises(
error.UnsupportedBoxVersion,
match=r"'foobar' \(airSensor:abcd1234ef/1.23 at 172.1.2.3:80\) has unsupported version",
):
del data["apiLevel"]
Box(mock_session, data)
async def test_with_init_failure(mock_session, data):
with patch(
"blebox_uniapi.box.AirQuality", spec_set=True, autospec=True
) as mock_sensor:
mock_sensor.side_effect = KeyError
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(airSensor:abcd1234ef/1.23 at 172.1.2.3:80\) failed to initialize: ",
):
Box(mock_session, data)
async def test_properties(mock_session, data):
box = Box(mock_session, data)
assert "foobar" == box.name
assert None is box.last_data
assert "airSensor" == box.type
assert "airSensor" == box.model
assert "abcd1234ef" == box.unique_id
assert "1.23" == box.firmware_version
assert "4.56" == box.hardware_version
assert "BleBox" == box.brand
assert 20180403 == box.api_version
async def test_validations(mock_session, data):
box = Box(mock_session, data)
with pytest.raises(
error.BadFieldExceedsMax,
match=r"foobar.field1 is 123 which exceeds max \(100\)",
):
box.check_int_range(123, "field1", 100, 0)
with pytest.raises(
error.BadFieldLessThanMin,
match=r"foobar.field1 is 123 which is less than minimum \(200\)",
):
box.check_int_range(123, "field1", 300, 200)
with pytest.raises(error.BadFieldMissing, match=r"foobar.field1 is missing"):
box.check_int(None, "field1", 300, 200)
with pytest.raises(
error.BadFieldNotANumber, match=r"foobar.field1 is '123' which is not a number"
):
box.check_int("123", "field1", 300, 200)
with pytest.raises(error.BadFieldMissing, match=r"foobar.field1 is missing"):
box.check_hex_str(None, "field1", 300, 200)
with pytest.raises(
error.BadFieldNotAString, match=r"foobar.field1 is 123 which is not a string"
):
box.check_hex_str(123, "field1", 300, 200)
with pytest.raises(error.BadFieldMissing, match=r"foobar.field1 is missing"):
box.check_rgbw(None, "field1")
with pytest.raises(
error.BadFieldNotAString, match=r"foobar.field1 is 123 which is not a string"
):
box.check_rgbw(123, "field1")
with pytest.raises(
error.BadFieldNotRGBW, match=r"foobar.field1 is 123 which is not a rgbw string"
):
box.check_rgbw("123", "field1")
|
py | 1a3c23459ea8004e02e18cbe21dc460ae6435407 | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.BatchNorm2d(mid_channels),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
M.BatchNorm2d(mid_channels),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
M.BatchNorm2d(outputs),
M.ReLU(),
]
self.branch_main = M.Sequential(*branch_main)
if stride == 2:
branch_proj = [
# dw
M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
M.BatchNorm2d(inp),
# pw-linear
M.Conv2d(inp, inp, 1, 1, 0, bias=False),
M.BatchNorm2d(inp),
M.ReLU(),
]
self.branch_proj = M.Sequential(*branch_proj)
else:
self.branch_proj = None
def forward(self, old_x):
if self.stride == 1:
x_proj, x = self.channel_shuffle(old_x)
return F.concat((x_proj, self.branch_main(x)), 1)
elif self.stride == 2:
x_proj = old_x
x = old_x
return F.concat((self.branch_proj(x_proj), self.branch_main(x)), 1)
else:
raise ValueError("use stride 1 or 2, current stride {}".format(self.stride))
def channel_shuffle(self, x):
batchsize, num_channels, height, width = x.shape
# assert (num_channels % 4 == 0)
x = x.reshape(batchsize * num_channels // 2, 2, height * width)
x = F.transpose(x, (1, 0, 2))
x = x.reshape(2, -1, num_channels // 2, height, width)
return x[0], x[1]
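    # Illustrative shape walk-through for channel_shuffle (a sketch, assuming a
    # hypothetical input of shape N=1, C=4, H=W=2):
    #   x.reshape(batchsize * num_channels // 2, 2, height * width) -> (2, 2, 4)
    #   F.transpose(x, (1, 0, 2))                                   -> (2, 2, 4) with the two channel halves interleaved
    #   x.reshape(2, -1, num_channels // 2, height, width)          -> (2, 1, 2, 2, 2)
    #   x[0], x[1]                                                  -> two tensors of shape (N, C // 2, H, W)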
class ShuffleNetV2(M.Module):
def __init__(self, num_classes=1000, model_size="1.5x"):
super().__init__()
self.stage_repeats = [4, 8, 4]
self.model_size = model_size
if model_size == "0.5x":
self.stage_out_channels = [-1, 24, 48, 96, 192, 1024]
elif model_size == "1.0x":
self.stage_out_channels = [-1, 24, 116, 232, 464, 1024]
elif model_size == "1.5x":
self.stage_out_channels = [-1, 24, 176, 352, 704, 1024]
elif model_size == "2.0x":
self.stage_out_channels = [-1, 24, 244, 488, 976, 2048]
else:
raise NotImplementedError
# building first layer
input_channel = self.stage_out_channels[1]
self.first_conv = M.Sequential(
M.Conv2d(3, input_channel, 3, 2, 1, bias=False), M.BatchNorm2d(input_channel), M.ReLU(),
)
self.maxpool = M.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.features = []
for idxstage in range(len(self.stage_repeats)):
numrepeat = self.stage_repeats[idxstage]
output_channel = self.stage_out_channels[idxstage + 2]
for i in range(numrepeat):
if i == 0:
self.features.append(
ShuffleV2Block(
input_channel, output_channel,
mid_channels=output_channel // 2, ksize=3, stride=2,
)
)
else:
self.features.append(
ShuffleV2Block(
input_channel // 2, output_channel,
mid_channels=output_channel // 2, ksize=3, stride=1,
)
)
input_channel = output_channel
self.features = M.Sequential(*self.features)
self.conv_last = M.Sequential(
M.Conv2d(input_channel, self.stage_out_channels[-1], 1, 1, 0, bias=False),
M.BatchNorm2d(self.stage_out_channels[-1]),
M.ReLU(),
)
self.globalpool = M.AvgPool2d(7)
if self.model_size == "2.0x":
self.dropout = M.Dropout(0.2)
self.classifier = M.Sequential(
M.Linear(self.stage_out_channels[-1], num_classes, bias=False)
)
self._initialize_weights()
def forward(self, x):
x = self.first_conv(x)
x = self.maxpool(x)
x = self.features(x)
x = self.conv_last(x)
x = self.globalpool(x)
if self.model_size == "2.0x":
x = self.dropout(x)
x = x.reshape(-1, self.stage_out_channels[-1])
x = self.classifier(x)
return x
def _initialize_weights(self):
for name, m in self.named_modules():
if isinstance(m, M.Conv2d):
if "first" in name:
M.init.normal_(m.weight, 0, 0.01)
else:
M.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
if m.bias is not None:
M.init.fill_(m.bias, 0)
elif isinstance(m, M.BatchNorm2d):
M.init.fill_(m.weight, 1)
if m.bias is not None:
M.init.fill_(m.bias, 0.0001)
M.init.fill_(m.running_mean, 0)
elif isinstance(m, M.BatchNorm1d):
M.init.fill_(m.weight, 1)
if m.bias is not None:
M.init.fill_(m.bias, 0.0001)
M.init.fill_(m.running_mean, 0)
elif isinstance(m, M.Linear):
M.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
M.init.fill_(m.bias, 0)
@hub.pretrained("https://data.megengine.org.cn/models/weights/snetv2_x2_0_75115_497d4601.pkl")
def shufflenet_v2_x2_0(num_classes=1000):
return ShuffleNetV2(num_classes=num_classes, model_size="2.0x")
@hub.pretrained("https://data.megengine.org.cn/models/weights/snetv2_x1_5_72775_38ac4273.pkl")
def shufflenet_v2_x1_5(num_classes=1000):
return ShuffleNetV2(num_classes=num_classes, model_size="1.5x")
@hub.pretrained("https://data.megengine.org.cn/models/weights/snetv2_x1_0_69369_daf9dba0.pkl")
def shufflenet_v2_x1_0(num_classes=1000):
return ShuffleNetV2(num_classes=num_classes, model_size="1.0x")
@hub.pretrained("https://data.megengine.org.cn/models/weights/snetv2_x0_5_60750_c28db1a2.pkl")
def shufflenet_v2_x0_5(num_classes=1000):
return ShuffleNetV2(num_classes=num_classes, model_size="0.5x")
|
py | 1a3c23ab9e31a5cf4a855589172904b774af1b85 | """
Keeps the older BaseController security and fetching methods and also
defines a base ModelManager, ModelSerializer, and ModelDeserializer.
ModelManagers are used for operations on models that occur outside the scope of
a single model object, such as:
- object creation
- object lookup
- interactions between 2+ objects of different model classes
(Since these were to replace model Mixins from
web/framework/base/controller.py the rule of thumb used there also generally
has been applied here: if it uses the trans or sa_session, put it in a manager
and not the model.)
ModelSerializers allow flexible conversion of model objects to dictionaries.
They control what keys are sent, how values are simplified, can remap keys,
and allow both predefined and user controlled key sets.
ModelDeserializers control how a model validates and process an incoming
attribute change to a model object.
"""
# TODO: it may be there's a better way to combine the above three classes
# such as: a single flat class, serializers being singletons in the manager, etc.
# instead of the three separate classes. With no 'apparent' perfect scheme
# I'm opting to just keep them separate.
import datetime
import logging
import re
from typing import (
Any,
Callable,
Dict,
Generic,
List,
NamedTuple,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
import sqlalchemy
from sqlalchemy.orm import Query
from sqlalchemy.orm.scoping import scoped_session
from typing_extensions import Protocol
from galaxy import (
exceptions,
model,
)
from galaxy.model import tool_shed_install
from galaxy.schema import ValueFilterQueryParams
from galaxy.schema.fields import DecodedDatabaseIdField
from galaxy.security.idencoding import IdEncodingHelper
from galaxy.structured_app import (
BasicSharedApp,
MinimalManagerApp,
)
from galaxy.web import url_for as gx_url_for
log = logging.getLogger(__name__)
class ParsedFilter(NamedTuple):
filter_type: str # orm_function, function, or orm
filter: Any
parsed_filter = ParsedFilter
OrmFilterParserType = Union[None, Dict[str, Any], Callable]
OrmFilterParsersType = Dict[str, OrmFilterParserType]
FunctionFilterParserType = Dict[str, Any]
FunctionFilterParsersType = Dict[str, Any]
# ==== accessors from base/controller.py
def security_check(trans, item, check_ownership=False, check_accessible=False):
"""
Security checks for an item: checks if (a) user owns item or (b) item
is accessible to user. This is a generic method for dealing with objects
uniformly from the older controller mixin code - however whenever possible
the managers for a particular model should be used to perform security
checks.
"""
# all items are accessible to an admin
if trans.user_is_admin:
return item
# Verify ownership: there is a current user and that user is the same as the item's
if check_ownership:
if not trans.user:
raise exceptions.ItemOwnershipException("Must be logged in to manage Galaxy items", type="error")
if item.user != trans.user:
raise exceptions.ItemOwnershipException(
f"{item.__class__.__name__} is not owned by the current user", type="error"
)
# Verify accessible:
# if it's part of a lib - can they access via security
# if it's something else (sharable) have they been added to the item's users_shared_with_dot_users
if check_accessible:
if type(item) in (
trans.app.model.LibraryFolder,
trans.app.model.LibraryDatasetDatasetAssociation,
trans.app.model.LibraryDataset,
):
if not trans.app.security_agent.can_access_library_item(trans.get_current_user_roles(), item, trans.user):
raise exceptions.ItemAccessibilityException(
f"{item.__class__.__name__} is not accessible to the current user", type="error"
)
else:
if (
(item.user != trans.user)
and (not item.importable)
and (trans.user not in item.users_shared_with_dot_users)
):
raise exceptions.ItemAccessibilityException(
f"{item.__class__.__name__} is not accessible to the current user", type="error"
)
return item
def get_class(class_name):
"""
Returns the class object that a string denotes. Without this method, we'd have
to do eval(<class_name>).
"""
if class_name == "ToolShedRepository":
item_class = tool_shed_install.ToolShedRepository
else:
if not hasattr(model, class_name):
raise exceptions.MessageException(f"Item class '{class_name}' not available.")
item_class = getattr(model, class_name)
return item_class
def decode_id(app: BasicSharedApp, id: Any):
# note: use str - occasionally a fully numeric id will be placed in post body and parsed as int via JSON
# resulting in error for valid id
if isinstance(id, DecodedDatabaseIdField):
return int(id)
else:
return decode_with_security(app.security, id)
def decode_with_security(security: IdEncodingHelper, id: Any):
return security.decode_id(str(id))
def encode_with_security(security: IdEncodingHelper, id: Any):
return security.encode_id(id)
def get_object(trans, id, class_name, check_ownership=False, check_accessible=False, deleted=None):
"""
Convenience method to get a model object with the specified checks. This is
a generic method for dealing with objects uniformly from the older
controller mixin code - however whenever possible the managers for a
particular model should be used to load objects.
"""
decoded_id = decode_id(trans.app, id)
try:
item_class = get_class(class_name)
assert item_class is not None
item = trans.sa_session.query(item_class).get(decoded_id)
assert item is not None
except Exception:
log.exception(f"Invalid {class_name} id ( {id} ) specified.")
raise exceptions.MessageException(f"Invalid {class_name} id ( {id} ) specified", type="error")
if check_ownership or check_accessible:
security_check(trans, item, check_ownership, check_accessible)
if deleted is True and not item.deleted:
raise exceptions.ItemDeletionException(
f'{class_name} "{getattr(item, "name", id)}" is not deleted', type="warning"
)
elif deleted is False and item.deleted:
raise exceptions.ItemDeletionException(f'{class_name} "{getattr(item, "name", id)}" is deleted', type="warning")
return item
# =============================================================================
def munge_lists(listA, listB):
"""
Combine two lists into a single list.
(While allowing them to be None, non-lists, or lists.)
"""
# TODO: there's nothing specifically filter or model-related here - move to util
if listA is None:
return listB
if listB is None:
return listA
if not isinstance(listA, list):
listA = [listA]
if not isinstance(listB, list):
listB = [listB]
return listA + listB
# -----------------------------------------------------------------------------
class ModelManager:
"""
Base class for all model/resource managers.
Provides common queries and CRUD operations as a (hopefully) light layer
over the ORM.
"""
model_class: Type[model._HasTable]
foreign_key_name: str
app: BasicSharedApp
def __init__(self, app: BasicSharedApp):
self.app = app
def session(self) -> scoped_session:
return self.app.model.context
def _session_setattr(self, item: model._HasTable, attr: str, val: Any, flush: bool = True):
setattr(item, attr, val)
self.session().add(item)
if flush:
self.session().flush()
return item
# .... query foundation wrapper
def query(
self,
eagerloads: bool = True,
filters=None,
order_by=None,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> Query:
"""
Return a basic query from model_class, filters, order_by, and limit and offset.
Set eagerloads to False to disable them for this query.
"""
query = self.session().query(self.model_class)
# joined table loading
if eagerloads is False:
query = query.enable_eagerloads(False)
return self._filter_and_order_query(query, filters=filters, order_by=order_by, limit=limit, offset=offset)
def _filter_and_order_query(
self, query: Query, filters=None, order_by=None, limit: Optional[int] = None, offset: Optional[int] = None
) -> Query:
# TODO: not a lot of functional cohesion here
query = self._apply_orm_filters(query, filters)
query = self._apply_order_by(query, order_by)
query = self._apply_orm_limit_offset(query, limit, offset)
return query
# .... filters
def _apply_orm_filters(self, query: Query, filters) -> Query:
"""
Add any filters to the given query.
"""
if filters is None:
return query
if not isinstance(filters, list):
filters = [filters]
# note: implicit AND
for filter in filters:
query = query.filter(filter)
return query
def _munge_filters(self, filtersA, filtersB):
"""
Combine two lists into a single list.
(While allowing them to be None, non-lists, or lists.)
"""
return munge_lists(filtersA, filtersB)
# .... order, limit, and offset
def _apply_order_by(self, query: Query, order_by) -> Query:
"""
Return the query after adding the order_by clauses.
Use the manager's default_order_by if order_by is None.
"""
if order_by is None:
return query.order_by(*self._default_order_by())
if isinstance(order_by, (list, tuple)):
return query.order_by(*order_by)
return query.order_by(order_by)
def _default_order_by(self):
"""
Returns a tuple of columns for the default order when getting multiple models.
"""
return (self.model_class.table.c.create_time,)
def _apply_orm_limit_offset(self, query: Query, limit: Optional[int], offset: Optional[int]) -> Query:
"""
Return the query after applying the given limit and offset (if not None).
"""
if limit is not None:
query = query.limit(limit)
if offset is not None:
query = query.offset(offset)
return query
# .... query resolution
def one(self, **kwargs):
"""
Sends kwargs to build the query and returns one and only one model.
"""
query = self.query(**kwargs)
return self._one_with_recast_errors(query)
def _one_with_recast_errors(self, query):
"""
Call sqlalchemy's one and recast errors to serializable errors if any.
:raises exceptions.ObjectNotFound: if no model is found
:raises exceptions.InconsistentDatabase: if more than one model is found
"""
# overridden to raise serializable errors
try:
return query.one()
except sqlalchemy.orm.exc.NoResultFound:
raise exceptions.ObjectNotFound(f"{self.model_class.__name__} not found")
except sqlalchemy.orm.exc.MultipleResultsFound:
raise exceptions.InconsistentDatabase(f"found more than one {self.model_class.__name__}")
def _one_or_none(self, query):
"""
Return the object if found, None if it's not.
:raises exceptions.InconsistentDatabase: if more than one model is found
"""
try:
return self._one_with_recast_errors(query)
except exceptions.ObjectNotFound:
return None
# NOTE: at this layer, all ids are expected to be decoded and in int form
def by_id(self, id: int):
"""
Gets a model by primary id.
"""
id_filter = self.model_class.table.c.id == id
return self.one(filters=id_filter)
# .... multirow queries
def list(self, filters=None, order_by=None, limit=None, offset=None, **kwargs):
"""
Returns all objects matching the given filters
"""
# list becomes a way of applying both filters generated in the orm (such as .user ==)
        # and functional filters that aren't currently possible using the orm (such as instance calculated values
# or annotations/tags). List splits those two filters and applies limits/offsets
# only after functional filters (if any) using python.
orm_filters, fn_filters = self._split_filters(filters)
if not fn_filters:
# if no fn_filtering required, we can use the 'all orm' version with limit offset
return self._orm_list(filters=orm_filters, order_by=order_by, limit=limit, offset=offset, **kwargs)
# fn filters will change the number of items returnable by limit/offset - remove them here from the orm query
query = self.query(filters=orm_filters, order_by=order_by, limit=None, offset=None, **kwargs)
items = query.all()
# apply limit, offset after SQL filtering
items = self._apply_fn_filters_gen(items, fn_filters)
return list(self._apply_fn_limit_offset_gen(items, limit, offset))
def _split_filters(self, filters):
"""
Splits `filters` into a tuple of two lists:
a list of filters to be added to the SQL query
and a list of functional filters to be applied after the SQL query.
"""
orm_filters: list = []
fn_filters: list = []
if filters is None:
return (orm_filters, fn_filters)
if not isinstance(filters, list):
filters = [filters]
for filter_ in filters:
if not hasattr(filter_, "filter_type"):
orm_filters.append(filter_)
elif filter_.filter_type == "function":
fn_filters.append(filter_.filter)
elif filter_.filter_type == "orm_function":
orm_filters.append(filter_.filter(self.model_class))
else:
orm_filters.append(filter_.filter)
return (orm_filters, fn_filters)
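    # A sketch of how the split is used (the MyModel name and filters are hypothetical):
    #   orm_f = parsed_filter("orm", MyModel.table.c.deleted == False)
    #   fn_f  = parsed_filter("function", lambda item: item.name.startswith("a"))
    #   _split_filters([orm_f, fn_f]) -> ([<SQL clause>], [<callable>])
    # list() applies the SQL clause in the query and the callable in Python, which is
    # why limit/offset are deferred to Python whenever functional filters are present.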
def _orm_list(self, query=None, **kwargs):
"""
Sends kwargs to build the query return all models found.
"""
query = query or self.query(**kwargs)
return query.all()
def _apply_fn_filters_gen(self, items, filters):
"""
If all the filter functions in `filters` return True for an item in `items`,
yield that item.
"""
# cpu-expensive
for item in items:
filter_results = [f(item) for f in filters]
if all(filter_results):
yield item
def _apply_fn_limit_offset_gen(self, items, limit, offset):
"""
Iterate over `items` and begin yielding items after
`offset` number of items and stop when we've yielded
`limit` number of items.
"""
# change negative limit, offset to None
if limit is not None and limit < 0:
limit = None
if offset is not None and offset < 0:
offset = None
yielded = 0
for i, item in enumerate(items):
if offset is not None and i < offset:
continue
if limit is not None and yielded >= limit:
break
yield item
yielded += 1
def by_ids(self, ids, filters=None, **kwargs):
"""
Returns an in-order list of models with the matching ids in `ids`.
"""
if not ids:
return []
ids_filter = parsed_filter("orm", self.model_class.table.c.id.in_(ids))
found = self.list(filters=self._munge_filters(ids_filter, filters), **kwargs)
# TODO: this does not order by the original 'ids' array
# ...could use get (supposedly since found are in the session, the db won't be hit twice)
# return map( self.session().query( self.model_class ).get, ids )
# ...could implement own version here - slow?
return self._order_items_by_id(ids, found)
def _order_items_by_id(self, ids, items):
"""
Given a list of (unique) ids and a list of items having an 'id' attribute,
return items that have the given ids in that order.
If an id in ids is not found or if an item in items doesn't have a given
id, they will not be in the returned list.
"""
ID_ATTR_NAME = "id"
# TODO:?? aside from sqlalx.get mentioned above, I haven't seen an in-SQL way
# to make this happen. This may not be the most efficient way either.
# NOTE: that this isn't sorting by id - this is matching the order in items to the order in ids
# move items list into dict by id
item_dict = {}
for item in items:
item_id = getattr(item, ID_ATTR_NAME, None)
if item_id:
item_dict[item_id] = item
# pull from map in order of ids
in_order = []
for id in ids:
if id in item_dict:
in_order.append(item_dict[id])
return in_order
def create(self, flush=True, *args, **kwargs):
"""
Generically create a new model.
"""
# override in subclasses
item = self.model_class(*args, **kwargs)
self.session().add(item)
if flush:
self.session().flush()
return item
def copy(self, item, **kwargs):
"""
Clone or copy an item.
"""
raise exceptions.NotImplemented("Abstract method")
def update(self, item, new_values, flush=True, **kwargs):
"""
Given a dictionary of new values, update `item` and return it.
..note: NO validation or deserialization occurs here.
"""
self.session().add(item)
for key, value in new_values.items():
if hasattr(item, key):
setattr(item, key, value)
if flush:
self.session().flush()
return item
def associate(self, associate_with, item, foreign_key_name=None):
"""
Generically associate `item` with `associate_with` based on `foreign_key_name`.
"""
foreign_key_name = foreign_key_name or self.foreign_key_name
setattr(associate_with, foreign_key_name, item)
return item
def _foreign_key(self, associated_model_class, foreign_key_name=None):
foreign_key_name = foreign_key_name or self.foreign_key_name
return getattr(associated_model_class, foreign_key_name)
def query_associated(self, associated_model_class, item, foreign_key_name=None):
"""
Generically query other items that have been associated with this `item`.
"""
foreign_key = self._foreign_key(associated_model_class, foreign_key_name=foreign_key_name)
return self.session().query(associated_model_class).filter(foreign_key == item)
# a rename of sql DELETE to differentiate from the Galaxy notion of mark_as_deleted
# def destroy( self, item, **kwargs ):
# return item
T = TypeVar("T")
# ---- code for classes that use one *main* model manager
# TODO: this may become unnecessary if we can access managers some other way (class var, app, etc.)
class HasAModelManager(Generic[T]):
"""
Mixin used where serializers, deserializers, filter parsers, etc.
need some functionality around the model they're mainly concerned with
and would perform that functionality with a manager.
"""
#: the class used to create this serializer's generically accessible model_manager
model_manager_class: Type[
T
] # ideally this would be Type[ModelManager] but HistoryContentsManager cannot be a ModelManager
# examples where this doesn't really work are ConfigurationSerializer (no manager)
# and contents (2 managers)
app: MinimalManagerApp
def __init__(self, app: MinimalManagerApp, manager=None, **kwargs):
self._manager = manager
self.app = app
@property
def manager(self) -> T:
"""Return an appropriate manager if it exists, instantiate if not."""
# PRECONDITION: assumes self.app is assigned elsewhere
if not self._manager:
# TODO: pass this serializer to it
self._manager = self.app[self.model_manager_class]
# this will error for unset model_manager_class'es
return self._manager
# ==== SERIALIZERS/to_dict,from_dict
class ModelSerializingError(exceptions.InternalServerError):
"""Thrown when request model values can't be serialized"""
class ModelDeserializingError(exceptions.ObjectAttributeInvalidException):
"""Thrown when an incoming value isn't usable by the model
(bad type, out of range, etc.)
"""
class SkipAttribute(Exception):
"""
Raise this inside a serializer to prevent the returned dictionary from having
    the associated key or value for this attribute.
"""
class Serializer(Protocol):
def __call__(self, item: Any, key: str, **context) -> Any:
...
class ModelSerializer(HasAModelManager[T]):
"""
Turns models into JSONable dicts.
Maintains a map of requestable keys and the Callable() serializer functions
that should be called for those keys.
E.g. { 'x' : lambda item, key: item.x, ... }
Note: if a key to serialize is not listed in the Serializer.serializable_keyset
or serializers, it will not be returned.
To serialize call:
my_serializer = MySerializer( app )
...
keys_to_serialize = [ 'id', 'name', 'attr1', 'attr2', ... ]
item_dict = MySerializer.serialize( my_item, keys_to_serialize )
"""
default_view: Optional[str]
views: Dict[str, List[str]]
def __init__(self, app: MinimalManagerApp, **kwargs):
"""
Set up serializer map, any additional serializable keys, and views here.
"""
super().__init__(app, **kwargs)
# a list of valid serializable keys that can use the default (string) serializer
# this allows us to: 'mention' the key without adding the default serializer
# TODO: we may want to eventually error if a key is requested
# that is in neither serializable_keyset or serializers
self.serializable_keyset: Set[str] = set()
# a map of dictionary keys to the functions (often lambdas) that create the values for those keys
self.serializers: Dict[str, Serializer] = {}
# add subclass serializers defined there
self.add_serializers()
# update the keyset by the serializers (removing the responsibility from subclasses)
self.serializable_keyset.update(self.serializers.keys())
# views are collections of serializable attributes (a named array of keys)
# inspired by model.dict_{view}_visible_keys
self.views = {}
self.default_view = None
@staticmethod
def url_for(*args, context=None, **kwargs):
trans = context and context.get("trans")
url_for = trans and trans.url_builder or gx_url_for
return url_for(*args, **kwargs)
def add_serializers(self):
"""
Register a map of attribute keys -> serializing functions that will serialize
the attribute.
"""
self.serializers.update(
{
"id": self.serialize_id,
"create_time": self.serialize_date,
"update_time": self.serialize_date,
}
)
def add_view(self, view_name, key_list, include_keys_from=None):
"""
Add the list of serializable attributes `key_list` to the serializer's
view dictionary under the key `view_name`.
If `include_keys_from` is a proper view name, extend `key_list` by
the list in that view.
"""
key_list = list(set(key_list + self.views.get(include_keys_from, [])))
self.views[view_name] = key_list
self.serializable_keyset.update(key_list)
return key_list
def serialize(self, item, keys, **context):
"""
Serialize the model `item` to a dictionary.
Given model `item` and the list `keys`, create and return a dictionary
built from each key in `keys` that also exists in `serializers` and
values of calling the keyed/named serializers on item.
"""
# TODO: constrain context to current_user/whos_asking when that's all we need (trans)
returned = {}
for key in keys:
# check both serializers and serializable keys
if key in self.serializers:
try:
returned[key] = self.serializers[key](item, key, **context)
except SkipAttribute:
                    # don't add this key if the serializer raised SkipAttribute
pass
elif key in self.serializable_keyset:
returned[key] = self.default_serializer(item, key, **context)
# ignore bad/unreg keys
return returned
def skip(self, msg="skipped"):
"""
To be called from inside a serializer to skip it.
Handy for config checks, information hiding, etc.
"""
raise SkipAttribute(msg)
def _remap_from(self, original_key):
if original_key in self.serializers:
return self.serializers[original_key]
if original_key in self.serializable_keyset:
return lambda i, k, **c: self.default_serializer(i, original_key, **c)
raise KeyError(f"serializer not found for remap: {original_key}")
def default_serializer(self, item, key, **context):
"""
Serialize the `item`'s attribute named `key`.
"""
# TODO:?? point of change but not really necessary?
return getattr(item, key)
# serializers for common galaxy objects
def serialize_date(self, item: Any, key: str, **context):
"""
Serialize a date attribute of `item`.
"""
date = getattr(item, key)
return date.isoformat() if date is not None else None
def serialize_id(self, item: Any, key: str, **context):
"""
Serialize an id attribute of `item`.
"""
id = getattr(item, key)
# Note: it may not be best to encode the id at this layer
return self.app.security.encode_id(id) if id is not None else None
def serialize_type_id(self, item: Any, key: str, **context):
"""
Serialize an type-id for `item`.
"""
TYPE_ID_SEP = "-"
type_id = getattr(item, key)
if type_id is None:
return None
split = type_id.split(TYPE_ID_SEP, 1)
# Note: it may not be best to encode the id at this layer
return TYPE_ID_SEP.join((split[0], self.app.security.encode_id(split[1])))
# serializing to a view where a view is a predefied list of keys to serialize
def serialize_to_view(self, item, view=None, keys=None, default_view=None, **context):
"""
Use a predefined list of keys (the string `view`) and any additional keys
listed in `keys`.
The combinations can be:
`view` only: return those keys listed in the named view
`keys` only: return those keys listed
no `view` or `keys`: use the `default_view` if any
`view` and `keys`: combine both into one list of keys
"""
# TODO: default view + view makes no sense outside the API.index context - move default view there
all_keys = []
keys = keys or []
# chose explicit over concise here
if view:
if keys:
all_keys = self._view_to_keys(view) + keys
else:
all_keys = self._view_to_keys(view)
else:
if keys:
all_keys = keys
else:
all_keys = self._view_to_keys(default_view)
return self.serialize(item, all_keys, **context)
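    # A sketch of the possible call combinations, assuming a hypothetical serializer
    # instance with a registered 'summary' view:
    #   serializer.serialize_to_view(item, view="summary")                  # only the view's keys
    #   serializer.serialize_to_view(item, keys=["id", "name"])             # only the listed keys
    #   serializer.serialize_to_view(item, view="summary", keys=["misc"])   # union of both
    #   serializer.serialize_to_view(item, default_view="summary")          # fallback when neither is given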
def _view_to_keys(self, view=None):
"""
Converts a known view into a list of keys.
:raises RequestParameterInvalidException: if the view is not listed in `self.views`.
"""
if view is None:
view = self.default_view
if view not in self.views:
raise exceptions.RequestParameterInvalidException(
f"unknown view - {view}", view=view, available_views=self.views
)
return self.views[view][:]
class ModelValidator:
"""
An object that inspects a dictionary (generally meant to be a set of
new/updated values for the model) and raises an error if a value is
not acceptable.
"""
@staticmethod
def matches_type(key: str, val: Any, types: Union[type, Tuple[Union[type, Tuple[Any, ...]], ...]]):
"""
Check `val` against the type (or tuple of types) in `types`.
:raises exceptions.RequestParameterInvalidException: if not an instance.
"""
if not isinstance(val, types):
msg = f"must be a type: {types}"
raise exceptions.RequestParameterInvalidException(msg, key=key, val=val)
return val
# validators for primitives and compounds of primitives
@staticmethod
def basestring(key: str, val: Any) -> str:
return ModelValidator.matches_type(key, val, (str,))
@staticmethod
def bool(key: str, val: Any) -> bool:
return ModelValidator.matches_type(key, val, bool)
@staticmethod
def nullable_basestring(key: str, val: Any) -> str:
"""
Must be a basestring or None.
"""
return ModelValidator.matches_type(key, val, ((str,), type(None)))
@staticmethod
def int_range(key: str, val: Any, min: Optional[int] = None, max: Optional[int] = None) -> int:
"""
Must be a int between min and max.
"""
val_ = ModelValidator.matches_type(key, val, int)
if min is not None and val_ < min:
raise exceptions.RequestParameterInvalidException("less than minimum", key=key, val=val_, min=min)
if max is not None and val_ > max:
raise exceptions.RequestParameterInvalidException("greater than maximum", key=key, val=val_, max=max)
return val_
@staticmethod
def basestring_list(key: str, val: Any) -> List[str]:
"""
Must be a list of basestrings.
"""
# TODO: Here's where compound types start becoming a nightmare. Any more or more complex
# and should find a different way.
val_ = ModelValidator.matches_type(key, val, list)
return [ModelValidator.basestring(key, elem) for elem in val_]
# validators for Galaxy
@staticmethod
def genome_build(key: str, val: Any) -> str:
"""
Must be a valid base_string.
Note: no checking against installation's ref list is done as many
data sources consider this an open field.
"""
# TODO: is this correct?
if val is None:
return "?"
# currently, data source sites like UCSC are able to set the genome build to non-local build names
# afterwards, attempting to validate the whole model will choke here
# for genome_build_shortname, longname in self.app.genome_builds.get_genome_build_names( trans=trans ):
# if val == genome_build_shortname:
# return val
# raise exceptions.RequestParameterInvalidException( "invalid reference", key=key, val=val )
# IOW: fallback to string validation
return ModelValidator.basestring(key, val)
# def slug( self, item, key, val ):
# """validate slug"""
# pass
class Deserializer(Protocol):
def __call__(self, item: Any, key: Any, val: Any, **kwargs) -> Any:
...
class ModelDeserializer(HasAModelManager[T]):
"""
An object that converts an incoming serialized dict into values that can be
directly assigned to an item's attributes and assigns them.
"""
validate = ModelValidator()
app: MinimalManagerApp
# TODO:?? a larger question is: which should be first? Deserialize then validate - or - validate then deserialize?
def __init__(self, app: MinimalManagerApp, **kwargs):
"""
Set up deserializers and validator.
"""
super().__init__(app, **kwargs)
self.deserializers: Dict[str, Deserializer] = {}
self.deserializable_keyset: Set[str] = set()
self.add_deserializers()
def add_deserializers(self):
"""
Register a map of attribute keys -> functions that will deserialize data
into attributes to be assigned to the item.
"""
# to be overridden in subclasses
def deserialize(self, item, data, flush=True, **context):
"""
Convert an incoming serialized dict into values that can be
directly assigned to an item's attributes and assign them
"""
# TODO: constrain context to current_user/whos_asking when that's all we need (trans)
sa_session = self.app.model.context
new_dict = {}
for key, val in data.items():
if key in self.deserializers:
new_dict[key] = self.deserializers[key](item, key, val, **context)
# !important: don't error on unreg. keys -- many clients will add weird ass keys onto the model
# TODO:?? add and flush here or in manager?
if flush and len(new_dict):
sa_session.add(item)
sa_session.flush()
return new_dict
# ... common deserializers for primitives
def default_deserializer(self, item, key, val, **context):
"""
If the incoming `val` is different than the `item` value change it
and, in either case, return the value.
"""
# TODO: sets the item attribute to value (this may not work in all instances)
# only do the following if val == getattr( item, key )
if hasattr(item, key) and getattr(item, key) != val:
setattr(item, key, val)
return val
def deserialize_basestring(self, item, key, val, convert_none_to_empty=False, **context):
val = "" if (convert_none_to_empty and val is None) else self.validate.basestring(key, val)
return self.default_deserializer(item, key, val, **context)
def deserialize_bool(self, item, key, val, **context):
val = self.validate.bool(key, val)
return self.default_deserializer(item, key, val, **context)
def deserialize_int(self, item, key, val, min=None, max=None, **context):
val = self.validate.int_range(key, val, min, max)
return self.default_deserializer(item, key, val, **context)
# def deserialize_date( self, item, key, val ):
# #TODO: parse isoformat date into date object
# ... common deserializers for Galaxy
def deserialize_genome_build(self, item, key, val, **context):
"""
Make sure `val` is a valid dbkey and assign it.
"""
val = self.validate.genome_build(key, val)
return self.default_deserializer(item, key, val, **context)
# ==== Building query filters based on model data
class ModelFilterParser(HasAModelManager):
"""
Converts string tuples (partially converted query string params) of
attr, op, val into either:
- ORM based filters (filters that can be applied by the ORM at the SQL
level) or
- functional filters (filters that use derived values or values not
within the SQL tables)
These filters can then be applied to queries.
This abstraction allows 'smarter' application of limit and offset at either the
SQL level or the generator/list level based on the presence of functional
filters. In other words, if no functional filters are present, limit and offset
may be applied at the SQL level. If functional filters are present, limit and
    offset need to be applied at the list level.
These might be safely be replaced in the future by creating SQLAlchemy
hybrid properties or more thoroughly mapping derived values.
"""
# ??: this class kindof 'lives' in both the world of the controllers/param-parsing and to models/orm
# (as the model informs how the filter params are parsed)
# I have no great idea where this 'belongs', so it's here for now
model_class: Type[model._HasTable]
parsed_filter = parsed_filter
orm_filter_parsers: OrmFilterParsersType
fn_filter_parsers: FunctionFilterParsersType
def __init__(self, app: MinimalManagerApp, **kwargs):
"""
Set up serializer map, any additional serializable keys, and views here.
"""
super().__init__(app, **kwargs)
#: regex for testing/dicing iso8601 date strings, with optional time and ms, but allowing only UTC timezone
self.date_string_re = re.compile(
r"^(\d{4}\-\d{2}\-\d{2})[T| ]{0,1}(\d{2}:\d{2}:\d{2}(?:\.\d{1,6}){0,1}){0,1}Z{0,1}$"
)
# dictionary containing parsing data for ORM/SQLAlchemy-based filters
# ..note: although kind of a pain in the ass and verbose, opt-in/allowlisting allows more control
# over potentially expensive queries
self.orm_filter_parsers = {}
#: dictionary containing parsing data for functional filters - applied after a query is made
self.fn_filter_parsers = {}
# set up both of the above
self._add_parsers()
def _add_parsers(self):
"""
Set up, extend, or alter `orm_filter_parsers` and `fn_filter_parsers`.
"""
# note: these are the default filters for all models
self.orm_filter_parsers.update(
{
# (prob.) applicable to all models
"id": {"op": ("in")},
"encoded_id": {"column": "id", "op": ("in"), "val": self.parse_id_list},
# dates can be directly passed through the orm into a filter (no need to parse into datetime object)
"extension": {"op": ("eq", "like", "in")},
"create_time": {"op": ("le", "ge", "lt", "gt"), "val": self.parse_date},
"update_time": {"op": ("le", "ge", "lt", "gt"), "val": self.parse_date},
}
)
def build_filter_params(
self,
query_params: ValueFilterQueryParams,
filter_attr_key: str = "q",
filter_value_key: str = "qv",
attr_op_split_char: str = "-",
) -> List[Tuple[str, str, str]]:
"""
Builds a list of tuples containing filtering information in the form of (attribute, operator, value).
"""
DEFAULT_OP = "eq"
qdict = query_params.dict(exclude_defaults=True)
if filter_attr_key not in qdict:
return []
# precondition: attrs/value pairs are in-order in the qstring
attrs = qdict.get(filter_attr_key)
if not isinstance(attrs, list):
attrs = [attrs]
# ops are strings placed after the attr strings and separated by a split char (e.g. 'create_time-lt')
# ops are optional and default to 'eq'
reparsed_attrs = []
ops = []
for attr in attrs:
op = DEFAULT_OP
if attr_op_split_char in attr:
                # note: only split the last (e.g. q=community-tags-in&qv=rna yields ('community-tags', 'in', 'rna'))
attr, op = attr.rsplit(attr_op_split_char, 1)
ops.append(op)
reparsed_attrs.append(attr)
attrs = reparsed_attrs
values = qdict.get(filter_value_key, [])
if not isinstance(values, list):
values = [values]
# TODO: it may be more helpful to the consumer if we error on incomplete 3-tuples
# (instead of relying on zip to shorten)
return list(zip(attrs, ops, values))
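    # A sketch of the resulting tuples (assuming "q"/"qv" query parameters):
    #   q=name-contains&qv=rna&q=create_time-gt&qv=2021-01-01
    #     -> [("name", "contains", "rna"), ("create_time", "gt", "2021-01-01")]
    #   q=deleted&qv=true (no "-<op>" suffix) falls back to the default "eq":
    #     -> [("deleted", "eq", "true")]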
def parse_query_filters(self, query_filters: ValueFilterQueryParams):
"""Convenience function to parse a ValueFilterQueryParams object into a collection of filtering criteria."""
filter_params = self.build_filter_params(query_filters)
return self.parse_filters(filter_params)
def parse_filters(self, filter_tuple_list):
"""
Parse string 3-tuples (attr, op, val) into orm or functional filters.
"""
# TODO: allow defining the default filter op in this class (and not 'eq' in base/controller.py)
parsed = []
for (attr, op, val) in filter_tuple_list:
filter_ = self.parse_filter(attr, op, val)
parsed.append(filter_)
return parsed
def parse_filter(self, attr, op, val):
"""
Attempt to parse filter as a custom/fn filter, then an orm filter, and
if neither work - raise an error.
:raises exceptions.RequestParameterInvalidException: if no functional or orm
filter can be parsed.
"""
try:
# check for a custom filter
fn_filter = self._parse_fn_filter(attr, op, val)
if fn_filter is not None:
return fn_filter
# if no custom filter found, try to make an ORM filter
# note: have to use explicit is None here, bool( sqlalx.filter ) == False
orm_filter = self._parse_orm_filter(attr, op, val)
if orm_filter is not None:
return orm_filter
# by convention, assume most val parsers raise ValueError
except ValueError as val_err:
raise exceptions.RequestParameterInvalidException(
"unparsable value for filter", column=attr, operation=op, value=val, ValueError=str(val_err)
)
# if neither of the above work, raise an error with how-to info
# TODO: send back all valid filter keys in exception for added user help
raise exceptions.RequestParameterInvalidException("bad filter", column=attr, operation=op)
# ---- fn filters
def _parse_fn_filter(self, attr, op, val):
"""
Attempt to parse a non-ORM filter function.
"""
# fn_filter_list is a dict: fn_filter_list[ attr ] = { 'opname1' : opfn1, 'opname2' : opfn2, etc. }
# attr, op is a nested dictionary pointing to the filter fn
attr_map = self.fn_filter_parsers.get(attr, None)
if not attr_map:
return None
allowed_ops = attr_map["op"]
# allowed ops is a map here, op => fn
filter_fn = allowed_ops.get(op, None)
if not filter_fn:
return None
# parse the val from string using the 'val' parser if present (otherwise, leave as string)
val_parser = attr_map.get("val", None)
if val_parser:
val = val_parser(val)
# curry/partial and fold the val in there now
return self.parsed_filter(filter_type="function", filter=lambda i: filter_fn(i, val))
# ---- ORM filters
def _parse_orm_filter(self, attr, op, val):
"""
Attempt to parse a ORM-based filter.
Using SQLAlchemy, this would yield a sql.elements.BinaryExpression.
"""
# orm_filter_list is a dict: orm_filter_list[ attr ] = <list of allowed ops>
column_map = self.orm_filter_parsers.get(attr, None)
if not column_map:
# no column mapping (not allowlisted)
return None
if callable(column_map):
return self.parsed_filter(filter_type="orm_function", filter=column_map(attr, op, val))
# attr must be an allowlisted column by attr name or by key passed in column_map
# note: column_map[ 'column' ] takes precedence
if "column" in column_map:
attr = column_map["column"]
column = self.model_class.table.columns.get(attr)
if column is None:
# could be a property (hybrid_property, etc.) - assume we can make a filter from it
column = getattr(self.model_class, attr)
if column is None:
# no orm column
return None
# op must be allowlisted: contained in the list orm_filter_list[ attr ][ 'op' ]
allowed_ops = column_map["op"]
if op not in allowed_ops:
return None
op = self._convert_op_string_to_fn(column, op)
if not op:
return None
# parse the val from string using the 'val' parser if present (otherwise, leave as string)
val_parser = column_map.get("val", None)
if val_parser:
val = val_parser(val)
orm_filter = op(val)
return self.parsed_filter(filter_type="orm", filter=orm_filter)
#: these are the easier/shorter string equivalents to the python operator fn names that need '__' around them
UNDERSCORED_OPS = ("lt", "le", "eq", "ne", "ge", "gt")
def _convert_op_string_to_fn(self, column, op_string):
"""
Convert the query string filter op shorthand into actual ORM usable
function names, then return the ORM function.
"""
# correct op_string to usable function key
fn_name = op_string
if op_string in self.UNDERSCORED_OPS:
fn_name = f"__{op_string}__"
elif op_string == "in":
fn_name = "in_"
# get the column fn using the op_string and error if not a callable attr
# TODO: special case 'not in' - or disallow?
op_fn = getattr(column, fn_name, None)
if not op_fn or not callable(op_fn):
return None
return op_fn
# ---- preset fn_filters: dictionaries of standard filter ops for standard datatypes
def string_standard_ops(self, key):
return {
"op": {
"eq": lambda i, v: v == getattr(i, key),
"contains": lambda i, v: v in getattr(i, key),
}
}
# --- more parsers! yay!
# TODO: These should go somewhere central - we've got ~6 parser modules/sections now
def parse_id_list(self, id_list_string, sep=","):
"""
Split `id_list_string` at `sep`.
"""
# TODO: move id decoding out
id_list = [self.app.security.decode_id(id_) for id_ in id_list_string.split(sep)]
return id_list
def parse_int_list(self, int_list_string, sep=","):
"""
Split `int_list_string` at `sep` and parse as ints.
"""
# TODO: move id decoding out
int_list = [int(v) for v in int_list_string.split(sep)]
return int_list
def parse_date(self, date_string):
"""
        Reformat a string containing either seconds from epoch or an ISO 8601 formatted
        date string into a new date string usable within a filter query.
        Seconds from epoch can also be a floating point value (i.e. containing ms).
"""
# assume it's epoch if no date separator is present
try:
epoch = float(date_string)
datetime_obj = datetime.datetime.fromtimestamp(epoch)
return datetime_obj.isoformat(sep=" ")
except ValueError:
pass
match = self.date_string_re.match(date_string)
if match:
date_string = " ".join(group for group in match.groups() if group)
return date_string
raise ValueError("datetime strings must be in the ISO 8601 format and in the UTC")
def contains_non_orm_filter(self, filters: List[ParsedFilter]) -> bool:
"""Whether the list of filters contains any non-orm filter."""
return any(filter.filter_type == "function" for filter in filters)
def parse_bool(bool_string: Union[str, bool]) -> bool:
"""
Parse a boolean from a string.
"""
# Be strict here to remove complexity of options (but allow already parsed).
if bool_string in ("True", "true", True):
return True
if bool_string in ("False", "false", False):
return False
raise ValueError(f"invalid boolean: {bool_string}")
def raise_filter_err(attr, op, val, msg):
raise exceptions.RequestParameterInvalidException(msg, column=attr, operation=op, val=val)
def is_valid_slug(slug):
"""Returns true iff slug is valid."""
VALID_SLUG_RE = re.compile(r"^[a-z0-9\-]+$")
return VALID_SLUG_RE.match(slug)
class SortableManager:
"""A manager interface for parsing order_by strings into actual 'order by' queries."""
def parse_order_by(self, order_by_string, default=None):
"""Return an ORM compatible order_by clause using the given string (i.e.: 'name-dsc,create_time').
This must be implemented by the manager."""
raise NotImplementedError
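

# Illustrative sketch (not part of the original module): one way a concrete manager could
# implement ``parse_order_by``. It assumes a SQLAlchemy-mapped ``model_class`` attribute with
# the named columns; the subclass name and column handling below are hypothetical.
class ExampleSortableManager(SortableManager):
    model_class = None  # assumed to be set to a SQLAlchemy-mapped class by the subclass

    def parse_order_by(self, order_by_string, default=None):
        clauses = []
        for clause in filter(None, order_by_string.split(",")):
            attr, _, direction = clause.partition("-")
            column = getattr(self.model_class, attr)
            clauses.append(column.desc() if direction == "dsc" else column.asc())
        return clauses or default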
|
py | 1a3c23cc7c3bdd5567132e919ceec12bbd59a711 | # qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.y(input_qubit[2]) # number=9
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.x(input_qubit[2]) # number=6
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.y(input_qubit[2]) # number=7
prog.y(input_qubit[2]) # number=8
prog.swap(input_qubit[2],input_qubit[0]) # number=10
prog.swap(input_qubit[2],input_qubit[0]) # number=11
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_noisy500.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
py | 1a3c25672c29e682adce605e9475466c2192042f | import logging
from enum import Enum
import json
from iota import Address, TryteString
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
vendor_address = "OPMGOSBITOTGSZRESXAO9SGPAOOFEQ9OIPEMY9DEHPVOUULUHXIHHWBNFNMKXPEZWIMHB9JPEXSE9SFLA"
class ChargingStationStatus(Enum):
FREE = "free"
OCCUPIED = "occupied"
CLOSED = "closed"
class ChargingStation:
def __init__(self, longitude, latitude, price, id, owner,
status=ChargingStationStatus.FREE.value):
self.longitude = longitude
self.latitude = latitude
self.price = price
self.id = id
self.owner = owner
self.status = status
def get_message(self):
return {
"long": self.longitude,
"lat": self.latitude,
"owner": self.owner,
"price": self.price,
"id": self.id,
"status": self.status
}
# Advertise charging station (FREE or OCCUPIED OR CLOSED)
# Return: bundle hash
# Params:
# iota:IotaWrapper, the instance to use for sending
def advertise(self, iota):
msg = json.dumps(self.get_message())
bundle = iota.send_transfer(
transfers=iota.create_transfers(self.owner, msg, 0),
inputs=[Address(self.owner, key_index=0, security_level=0)]
)
return bundle["bundle"].as_json_compatible()
def __str__(self):
return ("Station %s (%s) at (%f, %f) " %
(self.id, self.status, self.latitude, self.longitude))
# This is a monopoly
five_stations_data = [
[18.5772788, 54.4060541, 3, "ExpensiveStation", vendor_address,
ChargingStationStatus.FREE.value],
[18.5772656, 54.404569, 1, "BestStation", vendor_address,
ChargingStationStatus.OCCUPIED.value],
[18.578795, 54.406126, 1.3, "FriendlyGarage", vendor_address,
ChargingStationStatus.FREE.value],
[18.578126, 54.404454, 1.3, "CoolCharger", vendor_address,
ChargingStationStatus.FREE.value],
[18.577074, 54.405355, 1.3, "Favourite", vendor_address,
ChargingStationStatus.CLOSED.value],
]
FIVE_STATIONS = [ChargingStation(*stnd) for stnd in five_stations_data]
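

# Illustrative usage sketch (not part of the original file). Advertising requires an
# IotaWrapper-like object exposing ``create_transfers`` and ``send_transfer``; the stub
# below only logs instead of talking to the Tangle and is an assumption for demo purposes.
class _LoggingIotaStub:
    def create_transfers(self, address, message, value):
        return [{"address": address, "message": message, "value": value}]

    def send_transfer(self, transfers, inputs):
        logger.info("stub send: %s transfer(s)", len(transfers))

        class _Bundle:
            def as_json_compatible(self):
                return transfers

        return {"bundle": _Bundle()}


if __name__ == "__main__":
    for station in FIVE_STATIONS:
        print(station)
        station.advertise(_LoggingIotaStub())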
|
py | 1a3c2757ab809e0cd78fbc5dc641525ba284097c | # IMPORTATION STANDARD
import gzip
import json
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.cryptocurrency.defi import llama_view
def filter_json_data(response):
"""To reduce cassette size."""
headers = response["headers"]
if "FILTERED" in headers:
return response
limit = 10
content = response["body"]["string"]
if content.decode().startswith("H4sI"):
content = gzip.decompress(content).decode()
content = json.loads(content)
else:
content = json.loads(content)
if isinstance(content, list):
new_content = content[:limit]
elif isinstance(content, dict):
new_content = {k: content[k] for k in list(content)[:limit]}
else:
raise AttributeError(f"Content type not supported : {content}")
new_content_json = json.dumps(new_content)
new_content_gz = gzip.compress(new_content_json.encode())
response["body"]["string"] = new_content_gz
response["headers"]["Content-Encoding"] = ["gzip"]
response["headers"]["FILTERED"] = ["TRUE"]
return response
def gzip_data(response):
"""To reduce cassette size."""
headers = response["headers"]
if "COMPRESSED" in headers:
return response
content = response["body"]["string"].decode()
if content.startswith("H4sI"):
content = gzip.decompress(content)
new_content_gz = gzip.compress(content.encode())
response["body"]["string"] = new_content_gz
response["headers"]["Content-Encoding"] = ["gzip"]
response["headers"]["COMPRESSED"] = ["TRUE"]
return response
@pytest.mark.vcr(before_record_response=filter_json_data)
@pytest.mark.record_stdout
def test_display_defi_protocols():
llama_view.display_defi_protocols(20, "tvl", False, False)
@pytest.mark.vcr(before_record_response=gzip_data)
@pytest.mark.record_stdout
def test_display_defi_tvl(mocker):
# MOCK EXPORT_DATA
mocker.patch(target="openbb_terminal.cryptocurrency.defi.llama_view.export_data")
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
llama_view.display_defi_tvl(20)
@pytest.mark.vcr(before_record_response=filter_json_data)
@pytest.mark.record_stdout
def test_display_grouped_defi_protocols(mocker):
# MOCK EXPORT_DATA
mocker.patch(target="openbb_terminal.cryptocurrency.defi.llama_view.export_data")
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
llama_view.display_grouped_defi_protocols(20)
@pytest.mark.vcr(before_record_response=gzip_data)
@pytest.mark.record_stdout
def test_display_historical_tvl(mocker):
# MOCK EXPORT_DATA
mocker.patch(target="openbb_terminal.cryptocurrency.defi.llama_view.export_data")
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
llama_view.display_historical_tvl("anchor")
|
py | 1a3c275e4de2ff1cffe03f27d71f3686261b7867 | import pandas as pd
#import data csv
df = pd.read_csv('input_data/streeteasy.csv')
boroughs = sorted(list(df["borough"].unique()))
neighborhoods = sorted(list(df["neighborhood"].unique()))
submarkets = sorted(list(df["submarket"].unique()))
|
py | 1a3c27822afc09be3b1011447c03fba30581373b | from bs4 import BeautifulSoup
from inspect import getmembers
import urllib.request
import urllib.error
import urllib.parse
import threading
import requests
import sys
import pprint
import string
import time
import threading
import hashlib
import psycopg2
class Novoterm(threading.Thread):
item_links = list()
category_links = []
i = 0
running = True
conn = None
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.19 (KHTML, like Gecko) Ubuntu/12.04 Chromium/18.0.1025.168 Chrome/18.0.1025.168 Safari/535.19'
def __init__(self, id):
self.id = str(id)
threading.Thread.__init__(self)
@staticmethod
def getCategoryLinks(self):
self.dbConnection(self)
try:
url = "http://novoterm.pl/kerra/"
url = "http://novoterm.pl/loge/"
response = urllib.request.urlopen(urllib.request.Request(url, headers={'User-Agent': self.user_agent}))
soup = BeautifulSoup(response)
x = soup.find('div', {"id":"nasze-produkty"})
cat_name = x.select('div.bottommargin-sm h3')
cat_link = x.select('div.bottommargin-sm > a')
i = 0
for cn in cat_name:
cname = str(cn).lstrip('<h3>').rstrip('</h3>')
# insert category to db
cur = self.conn.cursor()
sql = "INSERT INTO categories (name, description, created_at, updated_at) VALUES ("
sql += "'"+cname +"', 'Desc', now(), now())"
cur.execute(sql)
self.conn.commit()
# get inserted row id
cur.execute("SELECT currval('categories_id_seq')")
response = urllib.request.urlopen(urllib.request.Request(cat_link[i]['href'], headers={'User-Agent': self.user_agent}))
soup = BeautifulSoup(response)
x = soup.find('div', {"id":"isotope-container"}).select('div > a')
i = i + 1
cat_id = cur.fetchone()[0]
j = 0
for link in x:
self.item_links.append((link['href'], int(cat_id)))
j += 1
print(cname+": "+str(j))
except urllib.error.HTTPError:
print('err')
@staticmethod
def getItemLinks(self):
try:
url = 'http://novoterm.pl/kerra/kategoria-produktu/stelaze-podtynkowe-pl-pl/'
items_stelaze_podtynkowe = list()
#response = urllib.request.urlopen(url).read()
response = urllib.request.urlopen(urllib.request.Request(url, headers={'User-Agent': self.user_agent}))
soup = BeautifulSoup(response)
x = soup.find('div', {"id":"isotope-container"}).select('div > a')
for link in x:
self.item_links.append(link['href'])
except urllib.error.HTTPError:
print('err')
def getItemsDataFromNovoterm(self):
        # note: this emptiness check and the pop() below are not atomic across threads;
        # a queue.Queue avoids that race (see the sketch at the end of this file)
        if self.item_links:
items = self.item_links.pop()
item_url = items[0]
try:
response = urllib.request.urlopen(urllib.request.Request(item_url, headers={'User-Agent': self.user_agent}))
soup = BeautifulSoup(response)
                # download the main product image
item = soup.find('a', {"class":"fancybox"})
t = item['href'].split('.');
tl = len(t)
image_name = hashlib.md5(str(time.time()).replace('.', '').encode('utf-8')).hexdigest()+"."+t[tl-1]
urllib.request.urlretrieve(item['href'], '/home/error/kod/hiveware/storage/shop/items/'+image_name)
                # download the schematic (disabled below)
# schema_src = soup.find('div', {"class":"product-content"}).select('div.topmargin-sm > div > img')[0]['src']
# t = schema_src.split('.');
# tl = len(t)
# urllib.request.urlretrieve(schema_src, "schema/"+image_name+"."+t[tl-1])
                # get the product name
item_name = str(soup.find('div', {"class":"product-head-info"}).select('h2')[0]).lstrip('<h2>').rstrip('</h2>')
                # get the description (keeping the HTML - <strong> tags)
item_desc = str(soup.find('div', {"class":"product-content"}).select('div.topmargin-sm > div > p')[0])
self.dbInsert(item_name, item_desc, "items/"+image_name, items[1])
except urllib.error.HTTPError:
print('error in get item')
else:
self.running = False
def dbConnection(self):
conn_string = "host='localhost' dbname='hive' user='postgres' password='123123'"
#conn_string = "host='148.251.156.146' dbname='qwer34_test' user='qwer34_test' password='aWXkNlaDJk'"
self.conn = psycopg2.connect(conn_string)
def dbInsert(self, name, desc, img, cat_id):
cur = self.conn.cursor()
sql = "INSERT INTO items (name, description, image_path, price, weight, count, created_at, updated_at) VALUES ("
sql += "'"+name +"', '"+desc+"', '"+img+"', 1, 1, 1, now(), now())"
cur.execute(sql)
self.conn.commit()
cur.execute("SELECT currval('items_id_seq')")
item_id = cur.fetchone()[0]
sql = "INSERT INTO category_item (item_id, category_id) VALUES ("
sql += str(item_id) + ", " + str(cat_id) + ")"
cur.execute(sql)
self.conn.commit()
def run(self):
while self.running:
self.getItemsDataFromNovoterm()
# ========================================================================================================= #
# ========================================================================================================= #
# collect the item links first, so the threads below can fetch data from those pages concurrently and insert it into the database
Novoterm.getCategoryLinks(Novoterm)
i = 0
threads = [Novoterm(i) for i in range(0, 8)]
for t in threads:
try:
t.start()
except:
exit()
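

# Illustrative sketch (not part of the original script): the shared ``item_links`` list is
# checked and popped from several threads without a lock, which can race. A queue.Queue
# gives the same work distribution without that window. This helper is only a suggested
# alternative and is not invoked above.
import queue


def run_with_queue(links, worker, thread_count=8):
    work = queue.Queue()
    for link in links:
        work.put(link)

    def loop():
        while True:
            try:
                item = work.get_nowait()
            except queue.Empty:
                return
            worker(item)

    workers = [threading.Thread(target=loop) for _ in range(thread_count)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()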
|
py | 1a3c286b7d83dc5c3ec70802ee14ba41c93f7f80 | """Provides RootCauseAnalysis class for computing RCA."""
import warnings
from itertools import combinations
from math import isclose
from textwrap import wrap
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from chaos_genius.core.rca.constants import TIME_RANGES_BY_KEY
from chaos_genius.core.rca.rca_utils.string_helpers import (
convert_df_dims_to_query_strings,
convert_query_string_to_user_string,
)
from chaos_genius.core.rca.rca_utils.waterfall_utils import (
get_best_subgroups_using_superset_algo,
get_waterfall_ylims,
waterfall_plot_mpl,
)
from chaos_genius.core.utils.round import round_df, round_number
SUPPORTED_AGGREGATIONS = ["mean", "sum", "count"]
EPSILON = 1e-8
class RootCauseAnalysis:
"""RCA Processor class which computes the RCA."""
def __init__(
self,
grp1_df: pd.DataFrame,
grp2_df: pd.DataFrame,
dims: List[str],
metric: str,
num_dim_combs: List[int] = None,
agg: str = "mean",
preaggregated: bool = False,
preaggregated_count_col: str = "count",
) -> None:
"""Initialize the RCA class.
:param grp1_df: baseline dataframe
:type grp1_df: pd.DataFrame
:param grp2_df: rca/focus dataframe
:type grp2_df: pd.DataFrame
:param dims: list of dimensions to consider
:type dims: List[str]
:param metric: name of metric column
:type metric: str
:param num_dim_combs: which number of dimension combinations to
consider, defaults to None
:type num_dim_combs: List[int], optional
:param agg: aggregation to use, defaults to "mean"
:type agg: str, optional
:param preaggregated: whether the dataframes are preaggregated,
defaults to False
:type preaggregated: bool, optional
:param preaggregated_count_col: name of the column containing the
count of the aggregated dataframe, defaults to "count"
:type preaggregated_count_col: str, optional
"""
self._grp1_df = grp1_df
self._grp2_df = grp2_df
self._preprocess_rca_dfs()
self._full_df = pd.concat([self._grp1_df, self._grp2_df])
self._check_columns(dims)
self._dims = dims
self._check_columns(metric)
self._metric = metric
self._metric_is_cat = self._full_df[metric].dtype == object
if agg not in SUPPORTED_AGGREGATIONS:
raise ValueError(f"Aggregation {agg} is not supported.")
self._agg = agg
if num_dim_combs is None or not dims:
num_dim_combs = list(range(1, len(dims) + 1))
else:
if max(num_dim_combs) > len(self._dims) or min(num_dim_combs) < 1:
raise ValueError(f"n {num_dim_combs} is out of range.")
if len(set(num_dim_combs)) != len(num_dim_combs):
raise ValueError(f"n {num_dim_combs} has duplicates.")
if len(num_dim_combs) > 4:
warnings.warn(
"Passing more than 4 values for n will take a while."
)
self._num_dim_combs_to_consider = num_dim_combs
self._impact_table = None
self._waterfall_table = None
self._max_waterfall_columns = 5
self._max_subgroups_considered = 100
self._preaggregated = preaggregated
self._preaggregated_count_col = preaggregated_count_col
def _initialize_impact_table(self):
self._create_binned_columns()
dim_combs_list = self._generate_all_dim_combinations()
impacts = []
for dim_comb in dim_combs_list:
dim_comb_impact = self._compare_subgroups(dim_comb)
impacts.append(dim_comb_impact)
impact_table = pd.concat(impacts)
# sort by absolute impact values
impact_table = impact_table.sort_values(
by="impact",
ascending=False,
key=lambda x: x.abs(),
ignore_index=True,
)
# add query string
impact_table.loc[:, "string"] = impact_table[self._dims].apply(
lambda inp: convert_df_dims_to_query_strings(inp), axis=1
)
# keeping only relevant features
# impact_table.drop(self._dims, axis= 1, inplace= True)
metric_columns = [
"impact",
"val_g1",
"val_g2",
"size_g1",
"size_g2",
"count_g1",
"count_g2",
]
impact_table = impact_table[["string"] + self._dims + metric_columns]
return impact_table
def _get_single_dim_impact_table(self, single_dim):
if self._impact_table is None:
self._impact_table = self._initialize_impact_table()
impact_table = self._impact_table.copy()
other_dims = set(self._dims)
other_dims.remove(single_dim)
impact_table = impact_table[
(~impact_table[single_dim].isna())
& (impact_table[other_dims].isna().sum(axis=1) == len(other_dims))
]
impact_table = impact_table.reset_index(drop=True)
return impact_table
def _initialize_waterfall_table(self, single_dim=None):
if self._impact_table is None:
self._impact_table = self._initialize_impact_table()
# get impact values
if single_dim is not None:
impact_table = self._get_single_dim_impact_table(single_dim)
else:
impact_table = self._impact_table.copy()
# getting subgroups for waterfall
best_subgroups = get_best_subgroups_using_superset_algo(
impact_table,
self._max_waterfall_columns,
self._max_subgroups_considered,
)
best_subgroups = best_subgroups[
best_subgroups["ignored"] == False # noqa E712
]
best_subgroups = best_subgroups.merge(
impact_table[["string", "impact"]], how="inner", on="string"
)
best_subgroups["impact_non_overlap"] = best_subgroups["impact"]
best_subgroups.rename(
columns={"impact": "impact_full_group"}, inplace=True
)
best_subgroups[["indices_in_group", "non_overlap_indices"]] = 0
# calculate overlap values
best_subgroups = self._get_overlap_values_for_waterfall(best_subgroups)
return best_subgroups
def _preprocess_rca_dfs(self):
"""Preprocess dataframes for RCA Analysis."""
self._grp1_df = self._grp1_df.reset_index(drop=True)
self._grp2_df = self._grp2_df.reset_index(drop=True)
self._grp2_df.index = self._grp2_df.index + len(self._grp1_df)
def _check_columns(self, cols):
if isinstance(cols, str):
cols = [cols]
for col in cols:
if col not in self._full_df.columns:
raise ValueError(f"Column {col} not in data.")
def _create_binned_columns(self):
non_cat_cols = self._full_df.dtypes[self._dims][
self._full_df.dtypes[self._dims] != object
]
for col in non_cat_cols.index:
binned_values = pd.qcut(
self._full_df[col], 4, duplicates="drop"
).astype(str)
self._full_df[col] = binned_values
self._grp1_df = self._full_df.loc[self._grp1_df.index]
self._grp2_df = self._full_df.loc[self._grp2_df.index]
def _generate_all_dim_combinations(self) -> List[List[str]]:
"""Create a dictionary of all possible combinations of dims.
Returns:
List[List[str]]: Returns a list of all possible subgroups
"""
list_subgroups = []
for i in self._num_dim_combs_to_consider:
list_subgroups_of_level = list(
map(list, combinations(self._dims, i))
)
list_subgroups.extend(list_subgroups_of_level)
return list_subgroups
def _calculate_subgroup_values(self, data, suffix):
agg_name = self._agg + suffix
count_name = "count" + suffix
if self._agg == "mean":
value_numerator = data[agg_name] * data[count_name]
value_denominator = data[count_name].sum() + EPSILON
value = value_numerator / value_denominator
elif self._agg in ["sum", "count"]:
value = data[agg_name]
else:
raise ValueError(f"Aggregation {self._agg} is not defined.")
size = data[count_name] * 100 / (data[count_name].sum() + EPSILON)
return value, size
def _compare_subgroups(self, dim_comb: List[str]) -> pd.DataFrame:
if self._preaggregated:
if self._agg == "count":
# if agg is count, sum across the count column
# to get the correct count
grp1_df = self._grp1_df.groupby(dim_comb)[
self._preaggregated_count_col
].agg(["sum"]).reset_index().rename(columns={"sum": "count"})
grp2_df = self._grp2_df.groupby(dim_comb)[
self._preaggregated_count_col
].agg(["sum"]).reset_index().rename(columns={"sum": "count"})
elif self._agg == "sum":
# if agg is sum, sum across the sum and count column
# to get the correct values
grp1_df = self._grp1_df.groupby(dim_comb)[
[self._metric, self._preaggregated_count_col]
].sum().reset_index().rename(columns={
self._metric: "sum",
self._preaggregated_count_col: "count"
})
grp2_df = self._grp2_df.groupby(dim_comb)[
[self._metric, self._preaggregated_count_col]
].sum().reset_index().rename(columns={
self._metric: "sum",
self._preaggregated_count_col: "count"
})
else:
raise ValueError(
f"Unsupported aggregation: {self._agg} for preaggregated data."
)
else:
agg_list = [self._agg, "count"] if self._agg != "count" else ["count"]
grp1_df = (
self._grp1_df.groupby(dim_comb)[self._metric]
.agg(agg_list)
.reset_index()
)
grp2_df = (
self._grp2_df.groupby(dim_comb)[self._metric]
.agg(agg_list)
.reset_index()
)
combined_df = grp1_df.merge(
grp2_df, how="outer", on=dim_comb, suffixes=["_g1", "_g2"]
).fillna(0)
        # note: the val_*/size_* columns set in this loop are recomputed below via _calculate_subgroup_values
        for i, suffix in enumerate(["_g1", "_g2"]):
agg_name = self._agg + suffix
count_name = "count" + suffix
if self._agg == "mean":
value_numerator = (
combined_df[agg_name] * combined_df[count_name]
)
value_denominator = combined_df[count_name].sum() + EPSILON
value = value_numerator / value_denominator
elif self._agg in ["sum", "count"]:
value = combined_df[agg_name]
else:
raise ValueError(f"Aggregation {self._agg} is not defined.")
combined_df["val" + suffix] = value
combined_df["size" + suffix] = combined_df[count_name] * 100
if i == 0:
combined_df["size" + suffix] /= len(self._grp1_df) + EPSILON
elif i == 1:
combined_df["size" + suffix] /= len(self._grp2_df) + EPSILON
(
combined_df["val_g1"],
combined_df["size_g1"],
) = self._calculate_subgroup_values(combined_df, "_g1")
(
combined_df["val_g2"],
combined_df["size_g2"],
) = self._calculate_subgroup_values(combined_df, "_g2")
combined_df["impact"] = combined_df["val_g2"] - combined_df["val_g1"]
return combined_df
def _get_overlap_values_for_waterfall(
self,
subgroups_df: pd.DataFrame,
):
subgroups_df_output = subgroups_df.copy()
len_d1 = self._grp1_df[self._metric].count()
len_d2 = self._grp2_df[self._metric].count()
for subgroup in subgroups_df_output["string"]:
all_indices = set()
# others are all subgroups minus the current subgroup
other_subgroups = subgroups_df_output["string"].values.tolist()
other_subgroups.remove(subgroup)
other_combinations = {
i: combinations(other_subgroups, i)
for i in range(1, len(subgroups_df_output))
}
d1_idxs = set(self._grp1_df.query(subgroup).index)
d2_idxs = set(self._grp2_df.query(subgroup).index)
overlap_indices_count = 0
curr_loc = 0
for i in range(1, len(subgroups_df_output)):
for combo in other_combinations[i]:
query = " and ".join(combo)
d1_combo = set(self._grp1_df.query(query).index)
d2_combo = set(self._grp2_df.query(query).index)
overlap_points_d1 = (
d1_idxs.intersection(d1_combo) - all_indices
)
overlap_points_d2 = (
d2_idxs.intersection(d2_combo) - all_indices
)
overlap_indices_count += len(overlap_points_d1) + len(
overlap_points_d2
)
t_d1 = self._grp1_df.loc[overlap_points_d1]
t_d2 = self._grp2_df.loc[overlap_points_d2]
if self._agg == "mean":
grp1_val = (
t_d1[self._metric].mean()
* t_d1[self._metric].count()
/ len_d1
)
grp2_val = (
t_d2[self._metric].mean()
* t_d2[self._metric].count()
/ len_d2
)
elif self._agg == "sum":
grp1_val = t_d1[self._metric].sum()
grp2_val = t_d2[self._metric].sum()
elif self._agg == "count":
grp1_val = t_d1[self._metric].count()
grp2_val = t_d2[self._metric].count()
overlap_impact = grp2_val - grp1_val
if np.isnan(overlap_impact):
overlap_impact = 0
curr_loc = subgroups_df_output[
subgroups_df_output["string"] == subgroup
].index[0]
subgroups_df_output.loc[
curr_loc, "impact_non_overlap"
] = subgroups_df_output.loc[
curr_loc, "impact_non_overlap"
] - (
overlap_impact * len(combo) / (len(combo) + 1)
)
all_indices = all_indices.union(overlap_points_d1).union(
overlap_points_d2
)
subgroups_df_output.loc[curr_loc, "indices_in_group"] = len(
d1_idxs
) + len(d2_idxs)
subgroups_df_output.loc[curr_loc, "non_overlap_indices"] = (
len(d1_idxs) + len(d2_idxs) - overlap_indices_count
)
return subgroups_df_output
def _get_waterfall_output_data(
self,
df_subgroups: pd.DataFrame,
word_wrap_num: int,
plot_in_mpl: bool,
) -> Tuple[Tuple[float, float], pd.DataFrame]:
if self._preaggregated:
if self._agg == "count":
d1_agg = self._grp1_df[self._preaggregated_count_col].sum()
d2_agg = self._grp2_df[self._preaggregated_count_col].sum()
elif self._agg == "sum":
d1_agg = self._grp1_df[self._metric].sum()
d2_agg = self._grp2_df[self._metric].sum()
else:
raise ValueError(
f"Unsupported aggregation {self._agg} for preaggregated data."
)
else:
d1_agg = self._grp1_df[self._metric].agg(self._agg)
d2_agg = self._grp2_df[self._metric].agg(self._agg)
d1_agg = 0 if pd.isna(d1_agg) else d1_agg
d2_agg = 0 if pd.isna(d2_agg) else d2_agg
impact = d2_agg - d1_agg
non_overlap_impact = df_subgroups["impact_non_overlap"].sum()
waterfall_df = df_subgroups[["string", "impact_non_overlap"]].copy()
others_impact = impact - non_overlap_impact
# only if impact of others is not close to 0, we add it
if not isclose(others_impact, 0, rel_tol=0.0001, abs_tol=EPSILON):
waterfall_df = waterfall_df.append(
{"string": "others", "impact_non_overlap": others_impact},
ignore_index=True,
)
col_names_for_mpl = [
"start",
*[
"\n".join(wrap(i, word_wrap_num))
for i in waterfall_df["string"].values.tolist()
],
]
col_values = [
d1_agg,
*waterfall_df["impact_non_overlap"].values.tolist(),
]
col_names_for_mpl.append("end")
col_values.append(d2_agg)
y_axis_lims = get_waterfall_ylims(
pd.DataFrame(
data={self._metric: col_values}, index=col_names_for_mpl
),
self._metric,
)
if plot_in_mpl:
print("plot")
waterfall_plot_mpl(
pd.DataFrame(
data={self._metric: col_values}, index=col_names_for_mpl
),
self._metric,
y_axis_lims,
)
plt.show()
# Calculate steps for each subgroup
col_values = (
col_values[:1]
+ [sum(col_values[: i + 1]) for i in range(1, len(col_values) - 1)]
+ col_values[-1:]
)
js_df = pd.DataFrame(
data={
"value": col_values,
"category": ["start"]
+ waterfall_df["string"].values.tolist()
+ ["end"],
"stepValue": col_values,
}
)
js_df["open"] = js_df["value"].shift(1, fill_value=0)
js_df["color"] = [
"#FA5252" if val <= 0 else "#05A677"
for val in [0]
+ waterfall_df["impact_non_overlap"].values.tolist()
+ [0]
]
js_df.loc[[0, len(js_df) - 1], ["open", "color"]] = [
[0, "#778CA3"],
[0, "#778CA3"],
]
js_df["displayValue"] = js_df["value"] - js_df["open"]
return y_axis_lims, js_df
def _get_best_subgroups_waterfall(
self,
single_dim,
max_waterfall_columns,
max_subgroups_considered,
):
recalc = False
if (
max_waterfall_columns is not None
and max_waterfall_columns != self._max_waterfall_columns
):
recalc = True
self._max_waterfall_columns = max_waterfall_columns
if (
max_subgroups_considered is not None
and max_subgroups_considered != self._max_subgroups_considered
):
recalc = True
self._max_subgroups_considered = max_subgroups_considered
if single_dim is None:
if self._waterfall_table is None or recalc:
self._waterfall_table = self._initialize_waterfall_table(
single_dim
)
best_subgroups = self._waterfall_table.copy()
else:
best_subgroups = self._initialize_waterfall_table(single_dim)
best_subgroups.drop("ignored", axis=1, inplace=True)
return best_subgroups
def get_panel_metrics(self) -> Dict[str, float]:
"""Return panel metrics for the KPI.
:return: Dictionary with metrics
:rtype: Dict[str, float]
"""
if self._preaggregated:
if self._agg == "count":
g1_agg = self._grp1_df[self._preaggregated_count_col].sum()
g2_agg = self._grp2_df[self._preaggregated_count_col].sum()
elif self._agg == "sum":
g1_agg = self._grp1_df[self._metric].sum()
g2_agg = self._grp2_df[self._metric].sum()
else:
raise ValueError(
f"Unsupported aggregation: {self._agg} for preaggregated data."
)
else:
g1 = self._grp1_df[self._metric]
g2 = self._grp2_df[self._metric]
# set aggregations to 0 if data is empty
g1_agg = g1.agg(self._agg) if len(g1) > 0 else 0
g2_agg = g2.agg(self._agg) if len(g2) > 0 else 0
impact = g2_agg - g1_agg
perc_diff = (impact / g1_agg) * 100 if g1_agg != 0 else np.inf
panel_metrics = {
"group1_value": round_number(g1_agg),
"group2_value": round_number(g2_agg),
"difference": round_number(impact),
"perc_change": round_number(perc_diff)
if not np.isinf(perc_diff)
else "inf",
}
# Check for None or NaN values in output
for k, v in panel_metrics.items():
if v is None or pd.isna(v):
raise ValueError(f"{k} with value: {v} is either None or NaN")
return panel_metrics
def get_impact_rows(
self, single_dim: str = None
) -> List[Dict[str, object]]:
"""Return impact dataframe as a list.
:param single_dim: dimension to use, defaults to None
:type single_dim: str, optional
:return: list with rows of impact table
:rtype: List[Dict[str, object]]
"""
if self._impact_table is None:
self._impact_table = self._initialize_impact_table()
impact_table = self._impact_table.copy()
if single_dim is not None:
impact_table = impact_table[~impact_table[single_dim].isna()]
impact_table = impact_table.reset_index(drop=True)
impact_table.drop(self._dims, axis=1, inplace=True)
impact_table["string"] = impact_table["string"].apply(
convert_query_string_to_user_string
)
# Check for any nan values in impact values and raise ValueError if found
self._check_nan(
impact_table, f"Impact table for dimension {single_dim}"
)
return round_df(impact_table).to_dict("records")
def get_impact_column_map(
self, timeline: str = "last_30_days"
) -> List[Dict[str, str]]:
"""Return a mapping of column names to values for UI.
:param timeline: timeline to use, defaults to "last_30_days"
:type timeline: str, optional
:return: List of mappings
:rtype: List[Dict[str, str]]
"""
prev_timestr = TIME_RANGES_BY_KEY[timeline]["last_period_name"]
curr_timestr = TIME_RANGES_BY_KEY[timeline]["current_period_name"]
mapping = [
("subgroup", "Subgroup Name"),
("g1_agg", f"{prev_timestr} Value"),
("g1_count", f"{prev_timestr} Count (#)"),
("g1_size", f"{prev_timestr} Size (%)"),
("g2_agg", f"{curr_timestr} Value"),
("g2_count", f"{curr_timestr} Count (#)"),
("g2_size", f"{curr_timestr} Size (%)"),
("impact", "Impact"),
]
mapping = [{"title": v, "field": k} for k, v in mapping]
return mapping
def get_waterfall_table_rows(
self,
single_dim: str = None,
max_waterfall_columns: int = None, # defaults to 5 or last value
max_subgroups_considered: int = None, # defaults to 100 or last value
) -> List[Dict]:
"""Return rows for the waterfall table.
:param single_dim: dimension to use, defaults to None
:type single_dim: str, optional
:param max_waterfall_columns: max columns in waterfall, defaults to
None
:type max_waterfall_columns: int, optional
:return: list of all rows in table
:rtype: List[Dict]
"""
best_subgroups = self._get_best_subgroups_waterfall(
single_dim, max_waterfall_columns, max_subgroups_considered
)
best_subgroups["string"] = best_subgroups["string"].apply(
convert_query_string_to_user_string
)
# Check for any nan values in best subgroups and raise ValueError if found
self._check_nan(
best_subgroups, f"Waterfall table for dimension {single_dim}"
)
return round_df(best_subgroups).to_dict("records")
def get_waterfall_plot_data(
self,
single_dim: str = None,
plot_in_mpl: bool = False,
word_wrap_num: int = 15,
max_waterfall_columns: int = None, # defaults to 5 or last value
max_subgroups_considered: int = None, # defaults to 100 or last value
) -> Tuple[List[Dict], List[float]]:
"""Return plot data for waterfall chart.
:param single_dim: dimension to use, defaults to None
:type single_dim: str, optional
:param plot_in_mpl: flag to plot in matplotlib, defaults to False
:type plot_in_mpl: bool, optional
:param word_wrap_num: wordwrapping for columns, defaults to 15
:type word_wrap_num: int, optional
:param max_waterfall_columns: max columns in waterfall, defaults to
None
:type max_waterfall_columns: int, optional
:return: plot data for waterfall chart
:rtype: Tuple[List[Dict], List[float, float]]
"""
best_subgroups = self._get_best_subgroups_waterfall(
single_dim, max_waterfall_columns, max_subgroups_considered
)
# get waterfall chart data
y_axis_lims, waterfall_df = self._get_waterfall_output_data(
best_subgroups, word_wrap_num, plot_in_mpl
)
# convert query strings to user strings
waterfall_df["category"] = waterfall_df["category"].apply(
convert_query_string_to_user_string
)
# Check for any nan values in waterfall df and raise ValueError if found
self._check_nan(
waterfall_df, f"Waterfall chart for dimension {single_dim}"
)
return (
round_df(waterfall_df).to_dict("records"),
[round_number(i) for i in y_axis_lims],
)
def get_hierarchical_table(
self,
single_dim: str,
max_depth: int = 3,
max_children: int = 5,
max_parents: int = 5,
) -> List[Dict]:
"""Return rows for hierarchical table.
:param single_dim: dimension to use
:type single_dim: str
:param max_depth: maximum depth for the hierarchy, defaults to 3
:type max_depth: int, optional
:param max_children: max children per row, defaults to 5
:type max_children: int, optional
:param max_parents: max first level rows, defaults to 5
:type max_parents: int, optional
:return: list of rows for the table
:rtype: List[Dict]
"""
other_dims = self._dims[:]
other_dims.remove(single_dim)
impact_table = self._initialize_impact_table()
impact_table["parentId"] = None
# impact_table["id"] = impact_table.index
impact_table["depth"] = None
output_table = self._get_single_dim_impact_table(single_dim)
output_table = output_table.iloc[:max_parents]
output_table["depth"] = 1
for depth in range(1, max_depth):
parents = output_table[output_table["depth"] == depth]
for index, row in parents.iterrows():
string = row["string"]
filters = string.split(" and ")
children = impact_table
for filter_string in filters:
children = children[
children["string"].str.contains(
filter_string, regex=False
)
]
children = children[
children[other_dims].isna().sum(axis=1)
== len(other_dims) - depth
]
children = children.iloc[:max_children]
children["depth"] = depth + 1
children["parentId"] = index
output_table = output_table.append(children, ignore_index=True)
output_table.drop(self._dims, axis=1, inplace=True)
output_table = output_table.reset_index().rename(
columns={"index": "id"}
)
output_table["string"] = output_table["string"].apply(
convert_query_string_to_user_string
)
# Check for any nan values in output table and raise ValueError if found
self._check_nan(
output_table.drop("parentId", axis=1),
f"Hierarchical table for dimension {single_dim}",
)
return round_df(output_table).to_dict("records")
def _check_nan(self, df: pd.DataFrame, message: str) -> None:
"""Check if NaN values in dataframe."""
nan_df = df.isna().sum()
nan_dict: dict = nan_df[nan_df > 0].to_dict()
if nan_dict:
raise ValueError(f"{message} contains NaN values. {nan_dict}")
|
py | 1a3c29af0ab32d0ac3d435cc715e577366c9602d | #!/usr/bin/env python
# This script adapted from PyXML's xmlchargen.py
# From 2nd edition
BaseChar = "[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | [#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | [#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | [#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | [#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | [#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | [#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | [#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | [#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | [#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | [#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | [#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | [#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | [#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | [#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | [#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | [#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | [#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | [#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | [#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | [#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | [#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | [#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | [#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | [#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | [#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | [#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | [#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | [#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | #x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | #x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | #x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | [#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | [#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | #x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | [#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | [#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | [#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | [#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | [#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | #x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | [#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | [#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | [#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | [#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"
Ideographic = "[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"
CombiningChar = "[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | [#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | [#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | [#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | #x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | [#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | [#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | #x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | [#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | [#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | #x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | [#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | [#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | [#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | [#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | [#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | #x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | [#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | #x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | [#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | [#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | #x3099 | #x309A"
Digit = "[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | [#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | [#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | [#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"
Extender = "#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | [#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"
import re
intervall = re.compile(r"^\[#x([0-9A-Fa-f]{4})-#x([0-9A-Fa-f]{4})\]$")
single = re.compile("^(#x(?P<hex>[0-9A-F]{4}))|('(?P<chr>.)')$")
class CharacterParser:
def __init__(self, chars=None):
self.contents = {}
if chars is not None:
self._parse(chars)
return
def _parse(self, chars):
alts = chars.split(" | ")
for a in alts:
m = single.match(a)
if m:
val = m.group('hex')
if val:
val = int(val, 16)
else:
val = ord(m.group('chr'))
self.contents[val] = val
continue
m = intervall.match(a)
assert m, "invalid range: %r" % a
lower = int(m.group(1), 16)
upper = int(m.group(2), 16)
assert upper > lower, "invalid range: %r" % a
for i in range(lower, upper+1):
self.contents[i] = i
return
def as_array(self):
res = [0] * 65536
for k in self.contents.keys():
res[k] = 1
return res
def as_charset(self):
charmap = [0]*65536
for k in self.contents.keys():
charmap[k] = 1
comps = {}
mapping = [0]*256
block = 0
bytes = []
for i in xrange(256):
chunk = tuple(charmap[i*256:(i+1)*256])
new = comps.setdefault(chunk, block)
mapping[i] = new
if new == block:
block = block + 1
mask = 1
byte = 0
for bit in chunk:
if bit:
byte += mask
mask += mask
if mask > 255:
bytes.append(byte)
mask = 1
byte = 0
# return the mapping of MSB of the code-point to block number +
# the block data
return mapping + bytes
def union(*args):
result = CharacterParser()
for a in args:
result.contents.update(a.contents)
return result
BaseChar = CharacterParser(BaseChar)
Ideographic = CharacterParser(Ideographic)
CombiningChar = CharacterParser(CombiningChar)
Digit = CharacterParser(Digit)
Extender = CharacterParser(Extender)
# BaseChar | Ideographic
Letter = union(BaseChar, Ideographic)
# Letter | '_'
NCNameStart = union(Letter, CharacterParser("'_'"))
# Letter | Digit | '.' | '-' | '_' | CombiningChar | Extender
NCNameChar = union(Letter, Digit,
CharacterParser("'.' | '-' | '_'"),
CombiningChar, Extender)
# Letter | '_' | ':'
NameStart = union(Letter, CharacterParser("'_' | ':'"))
# Letter | Digit | '.' | '-' | '_' | ':' | CombiningChar | Extender
NameChar = union(Letter, Digit,
CharacterParser("'.' | '-' | '_' | ':'"),
CombiningChar, Extender)
if __name__ == '__main__':
import sys, time
if len(sys.argv) < 2:
f = sys.stdout
else:
f = open(sys.argv[1], 'w')
print >> f, "/* this file was generated on %s using:" % time.asctime()
print >> f, " * %s" % ' '.join(sys.argv)
print >> f, " */"
for name in ('NCNameStart', 'NCNameChar', 'NameStart', 'NameChar'):
print >> f
print >> f, "static unsigned char charset_%s[] = {" % name
charset = locals()[name].as_charset()
width = 0
for item in charset[:-1]:
if width > 70:
print >> f
width = 0
print >> f, '0x%02X,' % item,
width = width + 6
if width > 70:
print >> f
print >> f, '0x%02X' % charset[-1]
print >> f, "};"
|
py | 1a3c2aa0c946f66413e1a8326c2da7912209a6f2 | import torch.nn as nn
import torch.nn.functional as F
import torch
from einops.layers.torch import Rearrange
from einops import rearrange
import numpy as np
from typing import Any, List
import math
import warnings
from collections import OrderedDict
__all__ = ['ConTBlock', 'ConTNet']
r""" The following trunc_normal method is pasted from timm https://github.com/rwightman/pytorch-image-models/tree/master/timm
"""
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def fixed_padding(inputs, kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
class ConvBN(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, groups=1, bn=True):
padding = (kernel_size - 1) // 2
if bn:
super(ConvBN, self).__init__(OrderedDict([
('conv', nn.Conv2d(in_planes, out_planes, kernel_size, stride,
padding=padding, groups=groups, bias=False)),
('bn', nn.BatchNorm2d(out_planes))
]))
else:
super(ConvBN, self).__init__(OrderedDict([
('conv', nn.Conv2d(in_planes, out_planes, kernel_size, stride,
padding=padding, groups=groups, bias=False)),
]))
class MHSA(nn.Module):
r"""
Build a Multi-Head Self-Attention:
- https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
def __init__(self,
planes,
head_num,
dropout,
patch_size,
qkv_bias,
relative):
super(MHSA, self).__init__()
self.head_num = head_num
head_dim = planes // head_num
self.qkv = nn.Linear(planes, 3*planes, bias=qkv_bias)
self.relative = relative
self.patch_size = patch_size
self.scale = head_dim ** -0.5
if self.relative:
# print('### relative position embedding ###')
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * patch_size - 1) * (2 * patch_size - 1), head_num))
coords_w = coords_h = torch.arange(patch_size)
coords = torch.stack(torch.meshgrid([coords_h, coords_w]))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += patch_size - 1
relative_coords[:, :, 1] += patch_size - 1
relative_coords[:, :, 0] *= 2 * patch_size - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.attn_drop = nn.Dropout(p=dropout)
self.proj = nn.Linear(planes, planes)
self.proj_drop = nn.Dropout(p=dropout)
def forward(self, x):
B, N, C, H = *x.shape, self.head_num
# print(x.shape)
        qkv = self.qkv(x).reshape(B, N, 3, H, C // H).permute(2, 0, 3, 1, 4)  # qkv: (3, B, H, N, C//H)
        q, k, v = qkv[0], qkv[1], qkv[2]  # q, k, v: (B, H, N, C//H)
q = q * self.scale
attn = (q @ k.transpose(-2, -1)) # attn: (B, H, N, N)
if self.relative:
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.patch_size ** 2, self.patch_size ** 2, -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attn = attn + relative_position_bias.unsqueeze(0)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class MLP(nn.Module):
r"""
Build a Multi-Layer Perceptron
- https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
def __init__(self,
planes,
mlp_dim,
dropout):
super(MLP, self).__init__()
self.fc1 = nn.Linear(planes, mlp_dim)
self.act = nn.GELU()
self.fc2 = nn.Linear(mlp_dim, planes)
self.drop = nn.Dropout(dropout)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class STE(nn.Module):
r"""
Build a Standard Transformer Encoder(STE)
input: Tensor (b, c, h, w)
output: Tensor (b, c, h, w)
"""
def __init__(self,
planes: int,
mlp_dim: int,
head_num: int,
dropout: float,
patch_size: int,
relative: bool,
qkv_bias: bool,
pre_norm: bool,
**kwargs):
super(STE, self).__init__()
self.patch_size = patch_size
self.pre_norm = pre_norm
self.relative = relative
self.flatten = nn.Sequential(
Rearrange('b c pnh pnw psh psw -> (b pnh pnw) psh psw c'),
)
if not relative:
self.pe = nn.ParameterList(
[nn.Parameter(torch.zeros(1, patch_size, 1, planes//2)), nn.Parameter(torch.zeros(1, 1, patch_size, planes//2))]
)
self.attn = MHSA(planes, head_num, dropout, patch_size, qkv_bias=qkv_bias, relative=relative)
self.mlp = MLP(planes, mlp_dim, dropout=dropout)
self.norm1 = nn.LayerNorm(planes)
self.norm2 = nn.LayerNorm(planes)
def forward(self, x):
bs, c, h, w = x.shape
patch_size = self.patch_size
patch_num_h, patch_num_w = h // patch_size, w // patch_size
x = (
x.unfold(2, self.patch_size, self.patch_size)
.unfold(3, self.patch_size, self.patch_size)
) # x: (b, c, patch_num, patch_num, patch_size, patch_size)
x = self.flatten(x) # x: (b, patch_size, patch_size, c)
### add 2d position embedding ###
if not self.relative:
x_h, x_w = x.split(c // 2, dim=3)
x = torch.cat((x_h + self.pe[0], x_w + self.pe[1]), dim=3) # x: (b, patch_size, patch_size, c)
x = rearrange(x, 'b psh psw c -> b (psh psw) c')
if self.pre_norm:
x = x + self.attn(self.norm1(x))
x = x + self.mlp(self.norm2(x))
else:
x = self.norm1(x + self.attn(x))
x = self.norm2(x + self.mlp(x))
x = rearrange(x, '(b pnh pnw) (psh psw) c -> b c (pnh psh) (pnw psw)', pnh=patch_num_h, pnw=patch_num_w, psh=patch_size, psw=patch_size)
return x
class ConTBlock(nn.Module):
r"""
Build a ConTBlock
"""
def __init__(self,
planes: int,
out_planes: int,
mlp_dim: int,
head_num: int,
dropout: float,
patch_size: List[int],
downsample: nn.Module = None,
stride: int=1,
last_dropout: float=0.3,
**kwargs):
super(ConTBlock, self).__init__()
self.downsample = downsample
self.identity = nn.Identity()
self.dropout = nn.Identity()
self.bn = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.ste1 = STE(planes=planes, mlp_dim=mlp_dim, head_num=head_num, dropout=dropout, patch_size=patch_size[0], **kwargs)
self.ste2 = STE(planes=planes, mlp_dim=mlp_dim, head_num=head_num, dropout=dropout, patch_size=patch_size[1], **kwargs)
if stride == 1 and downsample is not None:
self.dropout = nn.Dropout(p=last_dropout)
kernel_size = 1
else:
kernel_size = 3
self.out_conv = ConvBN(planes, out_planes, kernel_size, stride, bn=False)
def forward(self, x):
x_preact = self.relu(self.bn(x))
identity = self.identity(x)
if self.downsample is not None:
identity = self.downsample(x_preact)
residual = self.ste1(x_preact)
residual = self.ste2(residual)
out = self.out_conv(residual)
#out = self.dropout(residual+identity)
return out
class ConTNet(nn.Module):
r"""
Build a ConTNet backbone
"""
def __init__(self,
block,
layers: List[int],
mlp_dim: List[int],
head_num: List[int],
dropout: List[float],
in_channels: int=3,
inplanes: int=64,
num_classes: int=1000,
init_weights: bool=True,
first_embedding: bool=False,
tweak_C: bool=False,
**kwargs):
r"""
Args:
block: ConT Block
layers: number of blocks at each layer
mlp_dim: dimension of mlp in each stage
head_num: number of head in each stage
dropout: dropout in the last two stage
relative: if True, relative Position Embedding is used
groups: nunmber of group at each conv layer in the Network
depthwise: if True, depthwise convolution is adopted
in_channels: number of channels of input image
inplanes: channel of the first convolution layer
num_classes: number of classes for classification task
only useful when `with_classifier` is True
with_avgpool: if True, an average pooling is added at the end of resnet stage5
with_classifier: if True, FC layer is registered for classification task
first_embedding: if True, a conv layer with both stride and kernel of 7 is placed at the top
tweakC: if true, the first layer of ResNet-C replace the ori layer
"""
super(ConTNet, self).__init__()
self.inplanes = inplanes
self.block = block
# build the top layer
if tweak_C:
self.layer0 = nn.Sequential(OrderedDict([
('conv_bn1', ConvBN(in_channels, inplanes//2, kernel_size=3, stride=2)),
('relu1', nn.ReLU(inplace=True)),
('conv_bn2', ConvBN(inplanes//2, inplanes//2, kernel_size=3, stride=1)),
('relu2', nn.ReLU(inplace=True)),
('conv_bn3', ConvBN(inplanes//2, inplanes, kernel_size=3, stride=1)),
('relu3', nn.ReLU(inplace=True)),
('maxpool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
]))
elif first_embedding:
self.layer0 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(in_channels, inplanes, kernel_size=4, stride=4)),
('norm', nn.LayerNorm(inplanes))
]))
else:
self.layer0 = nn.Sequential(OrderedDict([
('conv', ConvBN(in_channels, inplanes, kernel_size=7, stride=2, bn=False)),
# ('relu', nn.ReLU(inplace=True)),
('maxpool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
]))
# build cont layers
self.cont_layers = []
self.out_channels = OrderedDict()
for i in range(len(layers)):
            stride = 2
patch_size = [7,14]
if i == len(layers)-1:
stride, patch_size[1] = 1, 7 # the last stage does not conduct downsampling
cont_layer = self._make_layer(inplanes * 2**i, layers[i], stride=stride, mlp_dim=mlp_dim[i], head_num=head_num[i], dropout=dropout[i], patch_size=patch_size, **kwargs)
layer_name = 'layer{}'.format(i + 1)
self.add_module(layer_name, cont_layer)
self.cont_layers.append(layer_name)
self.out_channels[layer_name] = 2 * inplanes * 2**i
self.last_out_channels = next(reversed(self.out_channels.values()))
self.fc = nn.Linear(self.last_out_channels, num_classes)
if init_weights:
self._initialize_weights()
def _make_layer(self,
planes: int,
blocks: int,
stride: int,
mlp_dim: int,
head_num: int,
dropout: float,
patch_size: List[int],
use_avgdown: bool=False,
**kwargs):
layers = OrderedDict()
for i in range(0, blocks-1):
layers[f'{self.block.__name__}{i}'] = self.block(
planes, planes, mlp_dim, head_num, dropout, patch_size, **kwargs)
downsample = None
if stride != 1:
if use_avgdown:
downsample = nn.Sequential(OrderedDict([
('avgpool', nn.AvgPool2d(kernel_size=2, stride=2)),
('conv', ConvBN(planes, planes * 2, kernel_size=1, stride=1, bn=False))]))
else:
downsample = ConvBN(planes, planes * 2, kernel_size=1,
stride=2, bn=False)
else:
downsample = ConvBN(planes, planes * 2, kernel_size=1, stride=1, bn=False)
layers[f'{self.block.__name__}{blocks-1}'] = self.block(
planes, planes*2, mlp_dim, head_num, dropout, patch_size, downsample, stride, **kwargs)
return nn.Sequential(layers)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.LayerNorm):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.layer0(x)
for _, layer_name in enumerate(self.cont_layers):
cont_layer = getattr(self, layer_name)
x = cont_layer(x)
x = x.mean([2, 3])
x = self.fc(x)
return x
def create_ConTNet_Ti(kwargs):
return ConTNet(block=ConTBlock,
mlp_dim=[196, 392, 768, 768],
head_num=[1, 2, 4, 8],
dropout=[0,0,0,0],
inplanes=48,
layers=[1,1,1,1],
last_dropout=0,
**kwargs)
def create_ConTNet_S(kwargs):
return ConTNet(block=ConTBlock,
mlp_dim=[256, 512, 1024, 1024],
head_num=[1, 2, 4, 8],
dropout=[0,0,0,0],
inplanes=64,
layers=[1,1,1,1],
last_dropout=0,
**kwargs)
def create_ConTNet_M(kwargs):
return ConTNet(block=ConTBlock,
mlp_dim=[256, 512, 1024, 1024],
head_num=[1, 2, 4, 8],
dropout=[0,0,0,0],
inplanes=64,
layers=[2,2,2,2],
last_dropout=0,
**kwargs)
def create_ConTNet_B(kwargs):
return ConTNet(block=ConTBlock,
mlp_dim=[256, 512, 1024, 1024],
head_num=[1, 2, 4, 8],
dropout=[0,0,0.1,0.1],
inplanes=64,
layers=[3,4,6,3],
last_dropout=0.2,
**kwargs)
def build_model(arch, use_avgdown, relative, qkv_bias, pre_norm):
type = arch.split('-')[-1]
func = eval(f'create_ConTNet_{type}')
kwargs = dict(use_avgdown=use_avgdown, relative=relative, qkv_bias=qkv_bias, pre_norm=pre_norm)
return func(kwargs)
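# Illustrative usage (not part of the original file): a minimal sketch of calling
# build_model. The architecture name 'ConT-Ti' and the flag values below are
# assumptions, and num_classes is assumed to take its default from ConTNet.
if __name__ == '__main__':
    import torch
    model = build_model(
        arch='ConT-Ti', use_avgdown=False, relative=False, qkv_bias=True,
        pre_norm=False)
    logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: (1, num_classes)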
|
py | 1a3c2b2dfb2892f8bf5ef225450c3b962b55e954 | import argparse
import os.path as osp
import numpy as np
import onnx
import os
#import onnxruntime as rt
import torch
from mmdet.core import (build_model_from_cfg, generate_inputs_and_wrap_model,
preprocess_example_input)
#from mmdet.models import build
def pytorch2onnx(config_path,
checkpoint_path,
input_img,
input_shape,
opset_version=11,
show=False,
output_file='tmp.onnx',
verify=False,
simplify = True,
dynamic = True,
normalize_cfg=None,
dataset='coco',
test_img=None):
input_config = {
'input_shape': input_shape,
'input_path': input_img,
'normalize_cfg': normalize_cfg
}
checkpoint = torch.load(checkpoint_path, map_location='cpu')
tmp_ckpt_file = None
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
tmp_ckpt_file = checkpoint_path+"_slim.pth"
torch.save(checkpoint, tmp_ckpt_file)
print('remove optimizer params and save to', tmp_ckpt_file)
checkpoint_path = tmp_ckpt_file
model, tensor_data = generate_inputs_and_wrap_model(
config_path, checkpoint_path, input_config)
if tmp_ckpt_file is not None:
os.remove(tmp_ckpt_file)
if simplify or dynamic:
ori_output_file = output_file.split('.')[0]+"_ori.onnx"
else:
ori_output_file = output_file
output_names=['score_8','score_16','score_32','bbox_8','bbox_16','bbox_32','kps_8','kps_16','kps_32']
torch.onnx.export(
model,
tensor_data,
ori_output_file,
output_names=output_names,
keep_initializers_as_inputs=False,
verbose=False,
opset_version=opset_version)
if simplify or dynamic:
model = onnx.load(ori_output_file)
if dynamic:
model.graph.input[0].type.tensor_type.shape.dim[2].dim_param = '?'
model.graph.input[0].type.tensor_type.shape.dim[3].dim_param = '?'
if simplify:
from onnxsim import simplify
#print(model.graph.input[0])
if dynamic:
input_shapes = {model.graph.input[0].name : list(input_shape)}
model, check = simplify(model, input_shapes=input_shapes, dynamic_input_shape=True)
else:
model, check = simplify(model)
assert check, "Simplified ONNX model could not be validated"
onnx.save(model, output_file)
os.remove(ori_output_file)
print(f'Successfully exported ONNX model: {output_file}')
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMDetection models to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--input-img', type=str, help='Images for input')
parser.add_argument('--show', action='store_true', help='show onnx graph')
parser.add_argument('--output-file', type=str, default='')
parser.add_argument('--opset-version', type=int, default=11)
parser.add_argument(
'--test-img', type=str, default=None, help='Images for test')
parser.add_argument(
'--dataset', type=str, default='coco', help='Dataset name')
parser.add_argument(
'--verify',
action='store_true',
help='verify the onnx model output against pytorch output')
parser.add_argument(
'--shape',
type=int,
nargs='+',
#default=[640, 640],
#default=[384, 384],
default=[-1, -1],
help='input image size')
parser.add_argument(
'--mean',
type=float,
nargs='+',
default=[127.5, 127.5, 127.5],
help='mean value used for preprocess input data')
parser.add_argument(
'--std',
type=float,
nargs='+',
default=[128.0, 128.0, 128.0],
help='variance value used for preprocess input data')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
assert args.opset_version == 11, 'MMDet only support opset 11 now'
if not args.input_img:
args.input_img = osp.join(
osp.dirname(__file__), '../tests/data/t1.jpg')
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (1, 3) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
assert len(args.mean) == 3
assert len(args.std) == 3
simplify = True
dynamic = False
if input_shape[2]<=0 or input_shape[3]<=0:
input_shape = (1,3,640,640)
dynamic = True
#simplify = False
print('set to dynamic input with dummy shape:', input_shape)
normalize_cfg = {'mean': args.mean, 'std': args.std}
if len(args.output_file)==0:
output_dir = osp.join(osp.dirname(__file__), '../onnx')
if not osp.exists(output_dir):
os.makedirs(output_dir)
cfg_name = args.config.split('/')[-1]
pos = cfg_name.rfind('.')
cfg_name = cfg_name[:pos]
if dynamic:
args.output_file = osp.join(output_dir, "%s.onnx"%cfg_name)
else:
args.output_file = osp.join(output_dir, "%s_shape%dx%d.onnx"%(cfg_name,input_shape[2],input_shape[3]))
# convert model to onnx file
pytorch2onnx(
args.config,
args.checkpoint,
args.input_img,
input_shape,
opset_version=args.opset_version,
show=args.show,
output_file=args.output_file,
verify=args.verify,
simplify = simplify,
dynamic = dynamic,
normalize_cfg=normalize_cfg,
dataset=args.dataset,
test_img=args.test_img)
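# Illustrative invocation (not part of the original script); the config and
# checkpoint paths below are assumptions, not files shipped with this tool:
#
#   python <this script> configs/scrfd/scrfd_500m.py work_dirs/scrfd_500m/model.pth \
#       --input-img ../tests/data/t1.jpg --shape 640 640
#
# With the default --shape of (-1, -1) the exporter falls back to a dummy
# 640x640 input and emits a dynamic-shape ONNX graph; an explicit positive
# --shape instead produces a fixed-shape model named <config>_shape<H>x<W>.onnx.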
|
py | 1a3c2bb745b11739cfd69340345157fc087e791a |
# coding: utf-8
# # Predicting Aberrations in The Hippo Signaling Pathway
# ## To do this, we will use Hetnets to query "Signaling by Hippo"
#
# The query is built in the [Cypher Language](https://neo4j.com/developer/cypher-query-language/) and draws data from [Hetionet](https://neo4j.het.io/browser/)
#
# ### How Cognoma could help with Hippo signaling
#
# The Hippo pathway is a highly conserved signaling cascade that controls organ size, cell growth, and cell death ([Zhao et al. 2010](http://doi.org/10.1101/gad.1909210)). It is one of the mechanisms that influences size diversity across eukaryotes; including different sizes across dog breeds ([Dong et al. 2007](http://doi.org/10.1016/j.cell.2007.07.019), [Crickmore and Mann 2008](http://doi.org/10.1002/bies.20806)). Recently, Hippo signaling has also been shown to be important for tumorigenesis, but there are shockingly few recurrent mutations of single genes within the pathway across tissues ([Harvey et al 2013](http://doi.org/10.1038/nrc3458)). Therefore, leveraging cancers from multiple tissues and combining genes associated with the same pathway could aid in the detection of a Hippo signaling specific gene expression signature. Cognoma is situated well to quickly query the list of all pathway associated genes, build a machine learning classifier to detect aberrant pathway activity, and output tissue and gene specific performance.
# In[1]:
import os
import urllib
import random
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest
from statsmodels.robust.scale import mad
# In[2]:
from neo4j.v1 import GraphDatabase
# In[3]:
get_ipython().magic('matplotlib inline')
plt.style.use('seaborn-notebook')
# ## Specify model configuration - Generate genelist
# In[4]:
names = ('label', 'rel_type', 'node_id')
query_params = [
('Pathway', 'PARTICIPATES_GpPW', 'PC7_7459'), # "Signaling by Hippo" - Reactome
('BiologicalProcess', 'PARTICIPATES_GpBP', 'GO:0035329'), # "hippo signaling" - Gene Ontology
('BiologicalProcess', 'PARTICIPATES_GpBP', 'GO:0035330') # "regulation of hippo signaling" - Gene Ontology
]
param_list = [dict(zip(names, qp)) for qp in query_params]
# In[5]:
query = '''
MATCH (node)-[rel]-(gene)
WHERE node.identifier = {node_id}
AND {label} in labels(node)
AND {rel_type} = type(rel)
RETURN
gene.identifier as entrez_gene_id,
gene.name as gene_symbol
ORDER BY gene_symbol
'''
# In[6]:
driver = GraphDatabase.driver("bolt://neo4j.het.io")
full_results_df = pd.DataFrame()
with driver.session() as session:
for parameters in param_list:
result = session.run(query, parameters)
result_df = pd.DataFrame((x.values() for x in result), columns=result.keys())
full_results_df = full_results_df.append(result_df, ignore_index=True)
classifier_genes_df = full_results_df.drop_duplicates().sort_values('gene_symbol').reset_index(drop=True)
classifier_genes_df['entrez_gene_id'] = classifier_genes_df['entrez_gene_id'].astype('str')
# In[7]:
# Here are the genes that participate in the Hippo signaling pathway
classifier_genes_df
# ## Load Data
# In[8]:
get_ipython().run_cell_magic('time', '', "path = os.path.join('download', 'expression-matrix.tsv.bz2')\nX = pd.read_table(path, index_col=0)")
# In[9]:
get_ipython().run_cell_magic('time', '', "path = os.path.join('download', 'mutation-matrix.tsv.bz2')\nY = pd.read_table(path, index_col=0)")
# In[10]:
get_ipython().run_cell_magic('time', '', "path = os.path.join('download', 'samples.tsv')\nclinical = pd.read_table(path, index_col=0)")
# In[11]:
clinical.tail(5)
# In[12]:
# Subset the Y matrix to only the genes to be classified
y_full = Y[classifier_genes_df['entrez_gene_id']]
# In[13]:
y_full.columns = classifier_genes_df['gene_symbol']
y_full = y_full.assign(disease = clinical['disease'])
# This matrix now stores the final y matrix for the classifier (y['indicator'])
y = y_full.assign(indicator = y_full.max(axis=1))
# In[14]:
unique_pos = y.groupby('disease').apply(lambda x: x['indicator'].sum())
heatmap_df = y_full.groupby('disease').sum().assign(TOTAL = unique_pos)
heatmap_df = heatmap_df.divide(y_full.disease.value_counts(sort=False).sort_index(), axis=0)
# In[15]:
# What is the percentage of different mutations across different cancer types?
sns.heatmap(heatmap_df);
# Visualizing the input data here is key. The heterogeneity of the mutations across tissues is apparent for this particular pathway. In comparison with `TP53` mutations, it appears that Hippo signaling impacts different tissues with higher diversity.
#
# Looking closer at the plots above, it is evident that several tissues do not demonstrate aberrations (at least at the mutation level) in Hippo signaling. Specifically, it appears that cancers with gender specificity like testicular cancer and prostate cancer are _not_ impacted. Therefore, because of this artificial imbalance, if Cognoma were to include these cancers in the classifier, it **will** key in on gender-specific signal (i.e. genes that are only on the Y chromosome, or X inactivation genes).
# In[16]:
# How many samples in each tissue that have Hippo signaling aberrations
ind = ['Negatives', 'Positives', 'Positive Prop']
percent = heatmap_df['TOTAL']
neg = y.disease.value_counts() - unique_pos
tissue_summary_df = pd.DataFrame([neg, unique_pos, percent], index=ind,
dtype='object').T.sort_values('Positive Prop', ascending=False)
tissue_summary_df
# ## Filter Data by Tissue
#
# This is a crucial step that is different from previous classifiers
# In[17]:
# Technically, these are hyper parameters, but for simplicity, set here
filter_prop = 0.10
filter_count = 15
tissue_prop_decision = tissue_summary_df['Positive Prop'] >= filter_prop
tissue_count_decision = tissue_summary_df['Positives'] >= filter_count
tissue_decision = tissue_prop_decision & tissue_count_decision
# In[18]:
# This criteria filters out the following tissues
pd.Series(tissue_summary_df.index[~tissue_decision].sort_values())
# In[19]:
# What are the tissues remaining?
tissue_summary_df = tissue_summary_df[tissue_decision]
tissue_summary_df
# In[20]:
# Distribution of mutation counts after filtering
sns.heatmap(heatmap_df.loc[tissue_decision]);
# In[21]:
# Subset data
clinical_sub = clinical[clinical['disease'].isin(tissue_summary_df.index)]
X_sub = X.ix[clinical_sub.index]
y_sub = y['indicator'].ix[clinical_sub.index]
# In[22]:
# Total distribution of positives/negatives
y_sub.value_counts(True)
# In[23]:
y_sub.head(7)
# ## Set aside 10% of the data for testing
# In[24]:
strat = clinical_sub['disease'].str.cat(y_sub.astype(str))
strat.head(6)
# In[25]:
# Make sure the splits have equal tissue and label partitions
X_train, X_test, y_train, y_test = train_test_split(X_sub, y_sub, test_size=0.1, random_state=0,
stratify=strat)
'Size: {:,} features, {:,} training samples, {:,} testing samples'.format(len(X_sub.columns),
len(X_train), len(X_test))
# ## Median absolute deviation feature selection
# In[26]:
def fs_mad(x, y):
"""
Get the median absolute deviation (MAD) for each column of x
"""
scores = mad(x)
return scores, np.array([np.NaN]*len(scores))
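# Illustrative check (not part of the original notebook): fs_mad scores each
# feature column by its median absolute deviation, while the NaN array stands in
# for the p-values that SelectKBest expects but MAD does not produce.
# The toy matrix below is an assumption for demonstration only.
toy_X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
toy_scores, toy_pvalues = fs_mad(toy_X, None)
toy_scores  # one MAD-based score per column; the second column scores 10x the first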
# ## Define pipeline and Cross validation model fitting
# In[27]:
# Parameter Sweep for Hyperparameters
param_grid = {
'select__k': [8000],
'classify__loss': ['log'],
'classify__penalty': ['elasticnet'],
'classify__alpha': [10 ** x for x in range(-3, 1)],
'classify__l1_ratio': [0, 0.2, 0.8, 1],
}
pipeline = Pipeline(steps=[
('select', SelectKBest(fs_mad)),
('standardize', StandardScaler()),
('classify', SGDClassifier(random_state=0, class_weight='balanced'))
])
cv_pipeline = GridSearchCV(estimator=pipeline, param_grid=param_grid, n_jobs=-1, scoring='roc_auc')
# In[28]:
get_ipython().run_cell_magic('time', '', 'cv_pipeline.fit(X=X_train, y=y_train);')
# In[29]:
# Best Params
print('{:.3%}'.format(cv_pipeline.best_score_))
# Best Params
cv_pipeline.best_params_
# ## Visualize hyperparameters performance
# In[30]:
cv_result_df = pd.concat([
pd.DataFrame(cv_pipeline.cv_results_),
pd.DataFrame.from_records(cv_pipeline.cv_results_['params']),
], axis='columns')
cv_result_df.head(2)
# In[31]:
# Cross-validated performance heatmap
cv_score_mat = pd.pivot_table(cv_result_df, values='mean_test_score', index='classify__l1_ratio', columns='classify__alpha')
ax = sns.heatmap(cv_score_mat, annot=True, fmt='.2%')
ax.set_xlabel('Regularization strength multiplier (alpha)')
ax.set_ylabel('Elastic net mixing parameter (l1_ratio)');
# ## Use Optimal Hyperparameters to Output ROC Curve
# In[32]:
y_pred_train = cv_pipeline.decision_function(X_train)
y_pred_test = cv_pipeline.decision_function(X_test)
def get_threshold_metrics(y_true, y_pred, tissue='all'):
roc_columns = ['fpr', 'tpr', 'threshold']
roc_items = zip(roc_columns, roc_curve(y_true, y_pred))
roc_df = pd.DataFrame.from_items(roc_items)
auroc = roc_auc_score(y_true, y_pred)
return {'auroc': auroc, 'roc_df': roc_df, 'tissue': tissue}
metrics_train = get_threshold_metrics(y_train, y_pred_train)
metrics_test = get_threshold_metrics(y_test, y_pred_test)
# In[33]:
# Plot ROC
plt.figure()
for label, metrics in ('Training', metrics_train), ('Testing', metrics_test):
roc_df = metrics['roc_df']
plt.plot(roc_df.fpr, roc_df.tpr,
label='{} (AUROC = {:.1%})'.format(label, metrics['auroc']))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Predicting Hippo Signaling Pathway Aberrations')
plt.legend(loc='lower right');
# ## Tissue specific performance
# In[34]:
tissue_metrics = {}
for tissue in clinical_sub.disease.unique():
sample_sub = clinical_sub[clinical_sub['disease'] == tissue].index.values
y_tissue_train = y_train[y_train.index.isin(sample_sub)]
y_tissue_pred_train = y_pred_train[y_train.index.isin(sample_sub)]
y_tissue_test = y_test[y_test.index.isin(sample_sub)]
y_tissue_pred_test = y_pred_test[y_test.index.isin(sample_sub)]
metrics_train = get_threshold_metrics(y_tissue_train, y_tissue_pred_train, tissue=tissue)
metrics_test = get_threshold_metrics(y_tissue_test, y_tissue_pred_test, tissue=tissue)
tissue_metrics[tissue] = [metrics_train, metrics_test]
# In[35]:
tissue_auroc = {}
plt.figure()
for tissue, metrics_val in tissue_metrics.items():
metrics_train, metrics_test = metrics_val
plt.subplot()
auroc = []
for label, metrics in ('Training', metrics_train), ('Testing', metrics_test):
roc_df = metrics['roc_df']
auroc.append(metrics['auroc'])
plt.plot(roc_df.fpr, roc_df.tpr,
label='{} (AUROC = {:.1%})'.format(label, metrics['auroc']))
tissue_auroc[tissue] = auroc
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Predicting Hippo Signaling in {}'.format(tissue))
plt.legend(loc='lower right');
plt.show()
# In[36]:
tissue_results = pd.DataFrame(tissue_auroc, index=['Train', 'Test']).T
tissue_results = tissue_results.sort_values('Test', ascending=False)
# In[37]:
ax = tissue_results.plot(kind='bar', title='Tissue Specific Prediction of Hippo Signaling')
ax.set_ylabel('AUROC');
# Hippo signaling prediction has highly variable predictions across different tissues. The classifier performs spectacularly in several tissues, but also appears to oppositely predict Hippo signaling in others. In three tissues the `test` set performance is actually _greater_ than the `train` set performance. This is likely a result of variance in samples across tissues and a happenstance in stratified `train_test_split`.
# ## What are the classifier coefficients?
# In[38]:
final_pipeline = cv_pipeline.best_estimator_
final_classifier = final_pipeline.named_steps['classify']
# In[39]:
select_indices = final_pipeline.named_steps['select'].transform(
np.arange(len(X.columns)).reshape(1, -1)
).tolist()
coef_df = pd.DataFrame.from_items([
('feature', X.columns[select_indices]),
('weight', final_classifier.coef_[0]),
])
coef_df['abs'] = coef_df['weight'].abs()
coef_df = coef_df.sort_values('abs', ascending=False)
# In[40]:
'{:.1%} zero coefficients; {:,} negative and {:,} positive coefficients'.format(
(coef_df.weight == 0).mean(),
(coef_df.weight < 0).sum(),
(coef_df.weight > 0).sum()
)
# In[41]:
coef_df.head(10)
# The results are very interesting. First, only 200 genes are used to build a fairly successful classifier. Biologists like sparsity! Second, the genes that fall out at the top are informative:
#
# | Entrez | Symbol | Comments |
# | ------ | ---- | -------- |
# | 399671 | [HEATR4](http://www.ncbi.nlm.nih.gov/gene/399671) | Relatively unstudied gene |
# | 29126 | [CD274](http://www.ncbi.nlm.nih.gov/gene/29126) | Immune cell receptor - inhibits Tcell activation and cytokine production |
# | 2852 | [GPER1](http://www.ncbi.nlm.nih.gov/gene/2852) | Estrogen receptor - implicated in lymphoma |
# | 140730 | [RIMS4](http://www.ncbi.nlm.nih.gov/gene/140730) | Synaptic regulatory protein |
# | 84688 | [C9orf24](http://www.ncbi.nlm.nih.gov/gene/84688) | relatively unknown gene - important for differentiation of bronchial cells |
# | 387628 | [FGF7P6](http://www.ncbi.nlm.nih.gov/gene/387628) | Fibroblast growth factor - implicated in ovarian cancer |
# | 4438 | [MSH4](http://www.ncbi.nlm.nih.gov/gene/4438) | Involved in DNA mismatch repair |
# | 80350 | [LPAL2](http://www.ncbi.nlm.nih.gov/gene/157777) | Pseudogene involved with elevated risk for atherosclerosis |
# | 56892 | [C8orf4](http://www.ncbi.nlm.nih.gov/gene/56892) | Relatively unknown gene product - evidence it is important in WNT signaling and proliferation across cancer types |
# | 22943 | [DKK1](http://www.ncbi.nlm.nih.gov/gene/22943) | Inhibits WNT signaling pathway - implicated in myeloma |
#
#
# ## Investigate the predictions
# In[42]:
predict_df = pd.DataFrame.from_items([
('sample_id', X_sub.index),
('testing', X_sub.index.isin(X_test.index).astype(int)),
('status', y_sub),
('decision_function', cv_pipeline.decision_function(X_sub)),
('probability', cv_pipeline.predict_proba(X_sub)[:, 1]),
])
predict_df['probability_str'] = predict_df['probability'].apply('{:.1%}'.format)
# In[43]:
# Top predictions amongst negatives (potential hidden responders)
predict_df.sort_values('decision_function', ascending=False).query("status == 0").head(10)
# In[44]:
# Ignore numpy warning caused by seaborn
warnings.filterwarnings('ignore', 'using a non-integer number instead of an integer')
ax = sns.distplot(predict_df.query("status == 0").decision_function, hist=False, label='Negatives')
ax = sns.distplot(predict_df.query("status == 1").decision_function, hist=False, label='Positives')
# In[45]:
ax = sns.distplot(predict_df.query("status == 0").probability, hist=False, label='Negatives')
ax = sns.distplot(predict_df.query("status == 1").probability, hist=False, label='Positives')
|
py | 1a3c2bf88102853f28313d7991336f01d5447c71 | # -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Turbinia task."""
from __future__ import unicode_literals
from copy import deepcopy
from datetime import datetime, timedelta
from enum import IntEnum
import getpass
import json
import logging
import os
import pickle
import platform
import pprint
import subprocess
import sys
import tempfile
import traceback
import uuid
import turbinia
import filelock
from turbinia import config
from turbinia.config import DATETIME_FORMAT
from turbinia.evidence import evidence_decode
from turbinia.processors import resource_manager
from turbinia import output_manager
from turbinia import state_manager
from turbinia import task_utils
from turbinia import TurbiniaException
from turbinia import log_and_report
from turbinia.lib import docker_manager
from prometheus_client import Gauge
from prometheus_client import Histogram
METRICS = {}
log = logging.getLogger('turbinia')
turbinia_worker_tasks_started_total = Gauge(
'turbinia_worker_tasks_started_total',
'Total number of started worker tasks')
turbinia_worker_tasks_completed_total = Gauge(
'turbinia_worker_tasks_completed_total',
'Total number of completed worker tasks')
turbinia_worker_tasks_queued_total = Gauge(
'turbinia_worker_tasks_queued_total', 'Total number of queued worker tasks')
turbinia_worker_tasks_failed_total = Gauge(
'turbinia_worker_tasks_failed_total', 'Total number of failed worker tasks')
turbinia_worker_tasks_timeout_total = Gauge(
'turbinia_worker_tasks_timeout_total',
'Total number of worker tasks timed out.')
class Priority(IntEnum):
"""Reporting priority enum to store common values.
Priorities can be anything in the range of 0-100, where 0 is the highest
priority.
"""
LOW = 80
MEDIUM = 50
HIGH = 20
CRITICAL = 10
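# Illustrative only (an assumption about caller usage, not code from this
# module): a task that finds something noteworthy can raise its section in the
# final report with e.g. `result.report_priority = Priority.HIGH`; lower values
# sort first.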
class TurbiniaTaskResult:
"""Object to store task results to be returned by a TurbiniaTask.
Attributes:
base_output_dir: Base path for local output
closed: Boolean indicating whether this result is closed
output_dir: Full path for local output
error: Dict of error data ('error' and 'traceback' are some valid keys)
evidence: List of newly created Evidence objects.
id: Unique Id of result (string of hex)
input_evidence: The evidence this task processed.
job_id (str): The ID of the Job that generated this Task/TaskResult
report_data (string): Markdown data that can be used in a Turbinia report.
report_priority (int): Value between 0-100 (0 is the highest priority) to
be used to order report sections.
request_id: The id of the initial request to process this evidence.
run_time: Length of time the task ran for.
saved_paths: Paths where output has been saved.
start_time: Datetime object of when the task was started
status: A one line descriptive task status.
successful: Bool indicating success status.
task_id: Task ID of the parent task.
task_name: Name of parent task.
requester: The user who requested the task.
state_manager: (DatastoreStateManager|RedisStateManager): State manager
object to handle syncing with storage.
worker_name: Name of worker task executed on.
_log: A list of log messages
"""
# The list of attributes that we will persist into storage
STORED_ATTRIBUTES = [
'worker_name', 'report_data', 'report_priority', 'run_time', 'status',
'saved_paths', 'successful'
]
def __init__(
self, evidence=None, input_evidence=None, base_output_dir=None,
request_id=None, job_id=None, no_output_manager=False):
"""Initialize the TurbiniaTaskResult object."""
self.closed = False
self.evidence = evidence if evidence else []
self.input_evidence = input_evidence
self.id = uuid.uuid4().hex
self.job_id = job_id
self.base_output_dir = base_output_dir
self.request_id = request_id
self.task_id = None
self.task_name = None
self.requester = None
self.output_dir = None
self.report_data = None
self.report_priority = Priority.MEDIUM
self.start_time = datetime.now()
self.run_time = None
self.saved_paths = []
self.successful = None
self.status = None
self.error = {}
self.worker_name = platform.node()
self.state_manager = None
# TODO(aarontp): Create mechanism to grab actual python logging data.
self._log = []
self.no_output_manager = no_output_manager
def __str__(self):
return pprint.pformat(vars(self), depth=3)
def setup(self, task):
"""Handles initializing task based attributes, after object creation.
Args:
task (TurbiniaTask): The calling Task object
Raises:
TurbiniaException: If the Output Manager is not setup.
"""
self.task_id = task.id
self.task_name = task.name
self.requester = task.requester
self.state_manager = state_manager.get_state_manager()
if not self.no_output_manager:
if task.output_manager.is_setup:
ldirs = task.output_manager.get_local_output_dirs()
_, self.output_dir = ldirs
else:
raise TurbiniaException('Output Manager is not setup yet.')
def close(self, task, success, status=None):
"""Handles closing of this result and writing logs.
Normally this should be called by the Run method to make sure that the
status, etc are set correctly, but if there is an exception thrown when the
task executes, then run_wrapper will call this with default arguments
indicating a failure.
Args:
task (TurbiniaTask): The calling Task object
success: Bool indicating task success
status: One line descriptive task status.
"""
if self.closed:
# Don't try to close twice.
return
self.successful = success
self.run_time = datetime.now() - self.start_time
if success:
turbinia_worker_tasks_completed_total.inc()
else:
turbinia_worker_tasks_failed_total.inc()
if not status and self.successful:
status = 'Completed successfully in {0:s} on {1:s}'.format(
str(self.run_time), self.worker_name)
elif not status and not self.successful:
status = 'Run failed in {0:s} on {1:s}'.format(
str(self.run_time), self.worker_name)
self.log(status)
self.status = status
for evidence in self.evidence:
if evidence.source_path:
if os.path.exists(evidence.source_path):
self.saved_paths.append(evidence.source_path)
if evidence.copyable:
task.output_manager.save_evidence(evidence, self)
else:
self.log(
'Evidence {0:s} has missing file at source_path {1!s} so '
'not saving.'.format(evidence.name, evidence.source_path))
else:
self.log(
'Evidence {0:s} has empty source_path so '
'not saving.'.format(evidence.name))
if not evidence.request_id:
evidence.request_id = self.request_id
if self.input_evidence:
try:
self.input_evidence.postprocess(task_id=self.task_id)
# Adding a broad exception here because we want to try post-processing
# to clean things up even after other failures in the task, so this could
# also fail.
# pylint: disable=broad-except
except Exception as exception:
message = 'Evidence post-processing for {0!s} failed: {1!s}'.format(
self.input_evidence.name, exception)
self.log(message, level=logging.ERROR)
with filelock.FileLock(config.RESOURCE_FILE_LOCK):
resource_manager.PostProcessResourceState(
self.input_evidence.resource_id, self.task_id)
else:
self.log(
'No input evidence attached to the result object so post-processing '
'cannot be run. This usually means there were previous failures '
'during Task execution and this may result in resources (e.g. '
'mounted disks) accumulating on the Worker.', level=logging.WARNING)
# Now that we've post-processed the input_evidence, we can unset it
# because we don't need to return it.
self.input_evidence = None
if not self.no_output_manager:
# Write result log info to file
logfile = os.path.join(self.output_dir, 'worker-log.txt')
# Create default log text just so that the worker log is created to
# avoid confusion if it doesn't exist.
if not self._log:
self._log.append('No worker messages were logged.')
if self.output_dir and os.path.exists(self.output_dir):
with open(logfile, 'w') as f:
f.write('\n'.join(self._log))
f.write('\n')
task.output_manager.save_local_file(logfile, self)
self.closed = True
log.debug('Result close successful. Status is [{0:s}]'.format(self.status))
def log(self, message, level=logging.INFO, traceback_=None):
"""Log Task messages.
Logs to both the result and the normal logging mechanism.
Args:
message (string): Message to log.
level (int): Log level as defined by logging enums (e.g. logging.INFO)
traceback (string): Trace message to log
"""
self._log.append(message)
if level == logging.DEBUG:
log.debug(message)
elif level == logging.INFO:
log.info(message)
elif level == logging.WARN:
log.warn(message)
elif level == logging.ERROR:
log.error(message)
elif level == logging.CRITICAL:
log.critical(message)
if traceback_:
      self.set_error(message, traceback_)
def update_task_status(self, task, status=None):
"""Updates the task status and pushes it directly to datastore.
Args:
task (TurbiniaTask): The calling Task object
status (str): Brief word or phrase for Task state. If not supplied, the
existing Task status will be used.
"""
if status:
task.result.status = 'Task {0!s} is {1!s} on {2!s}'.format(
self.task_name, status, self.worker_name)
if self.state_manager:
self.state_manager.update_task(task)
else:
self.log(
'No state_manager initialized, not updating Task info', logging.DEBUG)
def add_evidence(self, evidence, evidence_config):
"""Populate the results list.
Args:
evidence: Evidence object
evidence_config (dict): The evidence config we want to associate with
this object. This will be passed in with the original evidence that
was supplied to the task, so likely the caller will always want to
use evidence_.config for this parameter.
"""
if (evidence.source_path and os.path.exists(evidence.source_path) and
os.path.getsize(evidence.source_path) == 0):
self.log(
'Evidence source path [{0:s}] for [{1:s}] exists but is empty. Not '
'adding empty Evidence.'.format(evidence.source_path, evidence.name),
logging.WARNING)
return
# We want to enforce this here to make sure that any new Evidence objects
# created also contain the config. We could create a closure to do this
# automatically, but the real fix is to attach this to a separate object.
# See https://github.com/google/turbinia/issues/211 for more details.
evidence.config = evidence_config
if evidence.context_dependent:
evidence.set_parent(self.input_evidence)
self.evidence.append(evidence)
def set_error(self, error, traceback_):
"""Add error and traceback.
Args:
error: Short string describing the error.
traceback_: Traceback of the error.
"""
self.error['error'] = str(error)
self.error['traceback'] = str(traceback_)
def serialize(self):
"""Creates serialized result object.
Returns:
dict: Object dictionary that is JSON serializable.
"""
self.state_manager = None
result_copy = deepcopy(self.__dict__)
if self.run_time:
result_copy['run_time'] = self.run_time.total_seconds()
else:
result_copy['run_time'] = None
result_copy['start_time'] = self.start_time.strftime(DATETIME_FORMAT)
if self.input_evidence:
result_copy['input_evidence'] = None
result_copy['evidence'] = [x.serialize() for x in self.evidence]
return result_copy
@classmethod
def deserialize(cls, input_dict):
"""Converts an input dictionary back into a TurbiniaTaskResult object.
Args:
input_dict (dict): TurbiniaTaskResult object dictionary.
Returns:
TurbiniaTaskResult: Deserialized object.
"""
result = TurbiniaTaskResult()
result.__dict__.update(input_dict)
if result.state_manager:
result.state_manager = None
if result.run_time:
result.run_time = timedelta(seconds=result.run_time)
result.start_time = datetime.strptime(result.start_time, DATETIME_FORMAT)
if result.input_evidence:
result.input_evidence = evidence_decode(result.input_evidence)
result.evidence = [evidence_decode(x) for x in result.evidence]
return result
class TurbiniaTask:
"""Base class for Turbinia tasks.
Attributes:
base_output_dir (str): The base directory that output will go into.
Per-task directories will be created under this.
id (str): Unique Id of task (string of hex)
is_finalize_task (bool): Whether this is a finalize Task or not.
job_id (str): Job ID the Task was created by.
job_name (str): The name of the Job.
last_update (datetime): A datetime object with the last time the task was
updated.
name (str): Name of task
output_dir (str): The directory output will go into (including per-task
folder).
output_manager (OutputManager): The object that manages saving output.
result (TurbiniaTaskResult): A TurbiniaTaskResult object.
request_id (str): The id of the initial request to process this evidence.
state_key (str): A key used to manage task state
stub (psq.task.TaskResult|celery.app.Task): The task manager
implementation specific task stub that exists server side to keep a
reference to the remote task objects. For PSQ this is a task result
object, but other implementations have their own stub objects.
tmp_dir (str): Temporary directory for Task to write to.
requester (str): The user who requested the task.
_evidence_config (dict): The config that we want to pass to all new
evidence created from this task.
recipe (dict): Validated recipe to be used as the task configuration.
task_config (dict): Default task configuration, in effect if
no recipe is explicitly provided for the task.
"""
# The list of attributes that we will persist into storage
STORED_ATTRIBUTES = [
'id', 'job_id', 'last_update', 'name', 'request_id', 'requester'
]
# The list of evidence states that are required by a Task in order to run.
# See `evidence.Evidence.preprocess()` docstrings for more details.
REQUIRED_STATES = []
# The default configuration variables used by Tasks. Recipe data will
# override these parameters at run time.
TASK_CONFIG = {}
def __init__(
self, name=None, base_output_dir=None, request_id=None, requester=None):
"""Initialization for TurbiniaTask."""
if base_output_dir:
self.base_output_dir = base_output_dir
else:
self.base_output_dir = config.OUTPUT_DIR
self.id = uuid.uuid4().hex
self.is_finalize_task = False
self.job_id = None
self.job_name = None
self.last_update = datetime.now()
self.name = name if name else self.__class__.__name__
self.output_dir = None
self.output_manager = output_manager.OutputManager()
self.result = None
self.request_id = request_id
self.state_key = None
self.stub = None
self.tmp_dir = None
self.turbinia_version = turbinia.__version__
self.requester = requester if requester else 'user_unspecified'
self._evidence_config = {}
self.recipe = {}
self.task_config = {}
def serialize(self):
"""Converts the TurbiniaTask object into a serializable dict.
Returns:
Dict: Dictionary representing this object, ready to be serialized.
"""
task_copy = deepcopy(self.__dict__)
task_copy['output_manager'] = self.output_manager.__dict__
task_copy['last_update'] = self.last_update.strftime(DATETIME_FORMAT)
return task_copy
@classmethod
def deserialize(cls, input_dict):
"""Converts an input dictionary back into a TurbiniaTask object.
Args:
input_dict (dict): TurbiniaTask object dictionary.
Returns:
TurbiniaTask: Deserialized object.
"""
return task_utils.task_deserialize(input_dict)
@classmethod
def check_worker_role(cls):
"""Checks whether the execution context is within a worker or nosetests.
Returns:
bool: If the current execution is in a worker or nosetests.
"""
if config.TURBINIA_COMMAND in ('celeryworker', 'psqworker'):
return True
for arg in sys.argv:
if 'nosetests' in arg:
return True
return False
def evidence_setup(self, evidence):
"""Validates and processes the evidence.
Args:
evidence(Evidence): The Evidence to setup.
Raises:
TurbiniaException: If the Evidence can't be validated or the current
state does not meet the required state.
"""
evidence.validate()
evidence.preprocess(
self.id, tmp_dir=self.tmp_dir, required_states=self.REQUIRED_STATES)
# Final check to make sure that the required evidence state has been met
# for Evidence types that have those capabilities.
for state in self.REQUIRED_STATES:
if state in evidence.POSSIBLE_STATES and not evidence.state.get(state):
raise TurbiniaException(
'Evidence {0!s} being processed by Task {1:s} requires Evidence '
'to be in state {2:s}, but earlier pre-processors may have '
'failed. Current state is {3:s}. See previous logs for more '
'information.'.format(
evidence, self.name, state.name, evidence.format_state()))
def validate_task_conf(self, proposed_conf):
"""Checks if the provided recipe contains exclusively allowed fields.
Args:
proposed_conf (dict): Dict to override the default dynamic task conf.
Returns:
bool: False if a field not present in the default dynamic task config
is found.
"""
if not proposed_conf:
return False
for k in proposed_conf.keys():
if k == 'task':
continue
if k not in self.TASK_CONFIG:
self.result.log(
'Recipe key "{0:s}" is not found in task {1:s} default config: {2!s}'
.format(k, self.name, self.TASK_CONFIG))
return False
return True
def get_metrics(self):
"""Gets histogram metric for current Task.
Returns:
      prometheus_client.Histogram: For the current task,
or None if they are not initialized.
Raises:
TurbiniaException: If no metric is found for the given Task.
"""
global METRICS
metric = METRICS.get(self.name.lower())
if not metric:
message = (
'No metric found for Task {0:s}. client.TASK_MAP may be out of '
          'date.'.format(self.name.lower()))
raise TurbiniaException(message)
return metric
def execute(
self, cmd, result, save_files=None, log_files=None, new_evidence=None,
close=False, shell=False, stderr_file=None, stdout_file=None,
success_codes=None, cwd=None):
"""Executes a given binary and saves output.
Args:
cmd (list|string): Command arguments to run
result (TurbiniaTaskResult): The result object to put data into.
save_files (list): A list of files to save (files referenced by Evidence
objects are automatically saved, so no need to include them).
log_files (list): A list of files to save even if execution fails.
new_evidence (list): These are new evidence objects created by the task.
If the task is successful, they will be added to the result.
close (bool): Whether to close out the result.
shell (bool): Whether the cmd is in the form of a string or a list.
success_codes (list(int)): Which return codes are considered successful.
stderr_file (str): Path to location to save stderr.
stdout_file (str): Path to location to save stdout.
cwd (str): Sets the current directory before the process is executed.
Returns:
Tuple of the return code, and the TurbiniaTaskResult object
"""
# Avoid circular dependency.
from turbinia.jobs import manager as job_manager
save_files = save_files if save_files else []
log_files = log_files if log_files else []
new_evidence = new_evidence if new_evidence else []
success_codes = success_codes if success_codes else [0]
stdout = None
stderr = None
# Get timeout value.
timeout_limit = job_manager.JobsManager.GetTimeoutValue(self.job_name)
# Execute the job via docker.
docker_image = job_manager.JobsManager.GetDockerImage(self.job_name)
if docker_image:
ro_paths = []
for path in ['local_path', 'source_path', 'device_path', 'mount_path']:
if hasattr(result.input_evidence, path):
path_string = getattr(result.input_evidence, path)
if path_string:
ro_paths.append(path_string)
rw_paths = [self.output_dir, self.tmp_dir]
container_manager = docker_manager.ContainerManager(docker_image)
stdout, stderr, ret = container_manager.execute_container(
cmd, shell, ro_paths=ro_paths, rw_paths=rw_paths,
timeout_limit=timeout_limit)
# Execute the job on the host system.
else:
try:
if shell:
proc = subprocess.Popen(
cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
cwd=cwd)
proc.wait(timeout_limit)
else:
proc = subprocess.Popen(
cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd)
proc.wait(timeout_limit)
except subprocess.TimeoutExpired as exception:
# Log error and close result.
message = (
'Execution of [{0!s}] failed due to job timeout of '
'{1:d} seconds has been reached.'.format(cmd, timeout_limit))
result.log(message)
result.close(self, success=False, status=message)
# Increase timeout metric and raise exception
turbinia_worker_tasks_timeout_total.inc()
raise TurbiniaException(message)
stdout, stderr = proc.communicate()
ret = proc.returncode
result.error['stdout'] = str(stdout)
result.error['stderr'] = str(stderr)
if stderr_file and not stderr:
result.log(
'Attempting to save stderr to {0:s}, but no stderr found during '
'execution'.format(stderr_file))
elif stderr:
if not stderr_file:
_, stderr_file = tempfile.mkstemp(
suffix='.txt', prefix='stderr-', dir=self.output_dir)
result.log(
'Writing stderr to {0:s}'.format(stderr_file), level=logging.DEBUG)
with open(stderr_file, 'wb') as fh:
fh.write(stderr)
log_files.append(stderr_file)
if stdout_file and not stdout:
result.log(
'Attempting to save stdout to {0:s}, but no stdout found during '
'execution'.format(stdout_file))
elif stdout:
if not stdout_file:
_, stdout_file = tempfile.mkstemp(
suffix='.txt', prefix='stdout-', dir=self.output_dir)
result.log(
'Writing stdout to {0:s}'.format(stdout_file), level=logging.DEBUG)
with open(stdout_file, 'wb') as fh:
fh.write(stdout)
log_files.append(stdout_file)
log_files = list(set(log_files))
for file_ in log_files:
if not os.path.exists(file_):
result.log(
'Log file {0:s} does not exist to save'.format(file_),
level=logging.DEBUG)
continue
if os.path.getsize(file_) == 0:
result.log(
'Log file {0:s} is empty. Not saving'.format(file_),
level=logging.DEBUG)
continue
result.log('Output log file found at {0:s}'.format(file_))
self.output_manager.save_local_file(file_, result)
if ret not in success_codes:
message = 'Execution of [{0!s}] failed with status {1:d}'.format(cmd, ret)
result.log(message)
if close:
result.close(self, success=False, status=message)
else:
result.log('Execution of [{0!s}] succeeded'.format(cmd))
for file_ in save_files:
if os.path.getsize(file_) == 0:
result.log(
'Output file {0:s} is empty. Not saving'.format(file_),
level=logging.DEBUG)
continue
result.log('Output save file at {0:s}'.format(file_))
self.output_manager.save_local_file(file_, result)
for evidence in new_evidence:
# If the local path is set in the Evidence, we check to make sure that
# the path exists and is not empty before adding it.
if evidence.source_path and not os.path.exists(evidence.source_path):
message = (
'Evidence {0:s} source_path {1:s} does not exist. Not returning '
'empty Evidence.'.format(evidence.name, evidence.source_path))
result.log(message, level=logging.WARN)
elif (evidence.source_path and os.path.exists(evidence.source_path) and
os.path.getsize(evidence.source_path) == 0):
message = (
'Evidence {0:s} source_path {1:s} is empty. Not returning '
'empty new Evidence.'.format(evidence.name, evidence.source_path))
result.log(message, level=logging.WARN)
else:
result.add_evidence(evidence, self._evidence_config)
if close:
result.close(self, success=True)
return ret, result
def setup(self, evidence):
"""Perform common setup operations and runtime environment.
Even though TurbiniaTasks are initially instantiated by the Jobs under the
Task Manager, this setup method needs to be run from the task on the worker
because it handles setting up the task runtime environment.
Args:
evidence: An Evidence object to process.
Returns:
A TurbiniaTaskResult object.
Raises:
TurbiniaException: If the evidence can not be found.
"""
self.setup_metrics()
self.output_manager.setup(self.name, self.id, self.request_id)
self.tmp_dir, self.output_dir = self.output_manager.get_local_output_dirs()
if not self.result:
self.result = self.create_result(input_evidence=evidence)
if evidence.copyable and not config.SHARED_FILESYSTEM:
self.output_manager.retrieve_evidence(evidence)
if evidence.source_path and not os.path.exists(evidence.source_path):
raise TurbiniaException(
'Evidence source path {0:s} does not exist'.format(
evidence.source_path))
return self.result
def setup_metrics(self, task_list=None):
"""Sets up the application metrics.
Returns early with metrics if they are already setup.
Arguments:
task_list(list): List of Task names
Returns:
Dict: Mapping of task names to metrics objects.
"""
global METRICS
if METRICS:
return METRICS
if not task_list:
task_loader = task_utils.TaskLoader()
task_list = task_loader.get_task_names()
for task_name in task_list:
task_name = task_name.lower()
if task_name in METRICS:
continue
metric = Histogram(
'{0:s}_duration_seconds'.format(task_name),
'Seconds to run {0:s}'.format(task_name))
METRICS[task_name] = metric
log.debug('Registered {0:d} task metrics'.format(len(METRICS)))
return METRICS
def touch(self):
"""Updates the last_update time of the task."""
self.last_update = datetime.now()
def create_result(
self, input_evidence=None, status=None, message=None, trace=None,
no_output_manager=False):
"""Creates a new TurbiniaTaskResults and instantiates the result.
Args:
input_evidence(Evidence): The evidence being processed by this Task.
status(str): A one line descriptive task status.
message(str): An error message to show when returning the result.
trace: Stack traceback for errors.
"""
result = TurbiniaTaskResult(
base_output_dir=self.base_output_dir, request_id=self.request_id,
job_id=self.job_id, input_evidence=input_evidence,
no_output_manager=no_output_manager)
result.setup(self)
if message:
if status:
result.status = '{0:s}. Previous status: [{1:s}]'.format(
message, status)
else:
result.status = message
result.set_error(message, trace)
return result
def validate_result(self, result):
"""Checks to make sure that the result is valid.
We occasionally get something added into a TurbiniaTaskResult that makes
it unpickleable. We don't necessarily know what caused it to be in that
state, so we need to create a new, mostly empty result so that the client
is able to get the error message (otherwise the task will stay pending
indefinitely).
Args:
result (TurbiniaTaskResult): Result object to check
Returns:
The original result object if it is OK, otherwise an empty result object
indicating a failure.
"""
message = None
check_status = 'Successful'
if isinstance(result, TurbiniaTaskResult):
try:
log.debug('Checking TurbiniaTaskResult for pickle serializability')
pickle.dumps(result.serialize())
except (TypeError, pickle.PicklingError) as exception:
message = (
'Error pickling TurbiniaTaskResult object. Returning a new result '
'with the pickling error, and all previous result data will be '
'lost. Pickle Error: {0!s}'.format(exception))
try:
log.debug('Checking TurbiniaTaskResult for JSON serializability')
json.dumps(result.serialize())
except (TypeError) as exception:
message = (
'Error JSON serializing TurbiniaTaskResult object. Returning a new '
'result with the JSON error, and all previous result data will '
'be lost. JSON Error: {0!s}'.format(exception))
else:
message = (
'Task returned type [{0!s}] instead of TurbiniaTaskResult.').format(
type(result))
if message:
log.error(message)
if result and hasattr(result, 'status') and result.status:
status = result.status
else:
status = 'No previous status'
result = self.create_result(
status=status, message=message, trace=traceback.format_exc())
result.close(self, success=False, status=message)
check_status = 'Failed, but replaced with empty result'
log.info('Result check: {0:s}'.format(check_status))
return result
def get_task_recipe(self, recipe):
"""Creates and validates a recipe for the specified task.
Args:
recipe (dict): The full request recipe data.
Returns:
Dict: Recipe data specific to the current Task
"""
recipe_data = deepcopy(self.TASK_CONFIG)
for _, task_recipe in recipe.items():
if isinstance(task_recipe, dict):
task = task_recipe.get('task', None)
if task and task == self.name and self.validate_task_conf(task_recipe):
log.debug(
'Setting recipe data for task {0:s}: {1!s}'.format(
task, task_recipe))
recipe_data.update(task_recipe)
recipe_data.pop('task')
break
recipe_data.update(recipe['globals'])
return recipe_data
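  # Illustrative recipe shape (an assumption, not taken verbatim from Turbinia
  # documentation): the recipe arriving via evidence.config is a dict with a
  # 'globals' section plus named entries carrying a 'task' key, e.g.
  #   {'globals': {...},
  #    'example_entry': {'task': 'ExampleTask', 'some_option': True}}
  # get_task_recipe() above starts from TASK_CONFIG, overlays the entry whose
  # 'task' matches this Task (minus the 'task' key), then overlays 'globals'.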
def run_wrapper(self, evidence):
"""Wrapper to manage TurbiniaTaskResults and exception handling.
This wrapper should be called to invoke the run() methods so it can handle
the management of TurbiniaTaskResults and the exception handling. Otherwise
details from exceptions in the worker cannot be propagated back to the
Turbinia TaskManager.
This method should handle (in no particular order):
- Exceptions thrown from run()
- Verifying valid TurbiniaTaskResult object is returned
- Check for bad results (non TurbiniaTaskResults) returned from run()
- Auto-close results that haven't been closed
      - Verifying that the results are serializable
- Locking to make sure only one task is active at a time
Args:
evidence (dict): To be decoded into Evidence object
Returns:
A TurbiniaTaskResult object
"""
# Avoid circular dependency.
from turbinia.jobs import manager as job_manager
log.debug('Task {0:s} {1:s} awaiting execution'.format(self.name, self.id))
evidence = evidence_decode(evidence)
try:
self.result = self.setup(evidence)
self.result.update_task_status(self, 'queued')
turbinia_worker_tasks_queued_total.inc()
task_runtime_metrics = self.get_metrics()
except TurbiniaException as exception:
message = (
'{0:s} Task setup failed with exception: [{1!s}]'.format(
self.name, exception))
# Logging explicitly here because the result is in an unknown state
trace = traceback.format_exc()
log.error(message)
log.error(trace)
if self.result:
if hasattr(exception, 'message'):
self.result.set_error(exception.message, traceback.format_exc())
else:
self.result.set_error(exception.__class__, traceback.format_exc())
self.result.status = message
else:
self.result = self.create_result(
message=message, trace=traceback.format_exc())
return self.result.serialize()
log.info('Starting Task {0:s} {1:s}'.format(self.name, self.id))
original_result_id = None
turbinia_worker_tasks_started_total.inc()
with task_runtime_metrics.time():
try:
original_result_id = self.result.id
# Check if Task's job is available for the worker.
active_jobs = list(job_manager.JobsManager.GetJobNames())
if self.job_name.lower() not in active_jobs:
message = (
'Task will not run due to the job: {0:s} being disabled '
'on the worker.'.format(self.job_name))
self.result.log(message, level=logging.ERROR)
self.result.status = message
return self.result.serialize()
self.evidence_setup(evidence)
if self.turbinia_version != turbinia.__version__:
message = (
'Worker and Server versions do not match: {0:s} != {1:s}'.format(
self.turbinia_version, turbinia.__version__))
self.result.log(message, level=logging.ERROR)
self.result.status = message
self.result.successful = False
return self.result.serialize()
self.result.update_task_status(self, 'running')
self._evidence_config = evidence.config
self.task_config = self.get_task_recipe(evidence.config)
self.result = self.run(evidence, self.result)
# pylint: disable=broad-except
except Exception as exception:
message = (
'{0:s} Task failed with exception: [{1!s}]'.format(
self.name, exception))
# Logging explicitly here because the result is in an unknown state
trace = traceback.format_exc()
log_and_report(message, trace)
if self.result:
self.result.log(message, level=logging.ERROR)
self.result.log(trace)
if hasattr(exception, 'message'):
self.result.set_error(exception.message, traceback.format_exc())
else:
self.result.set_error(exception.__class__, traceback.format_exc())
self.result.status = message
else:
log.error('No TurbiniaTaskResult object found after task execution.')
self.result = self.validate_result(self.result)
if self.result:
self.result.update_task_status(self)
# Trying to close the result if possible so that we clean up what we can.
# This has a higher likelihood of failing because something must have gone
# wrong as the Task should have already closed this.
if self.result and not self.result.closed:
message = 'Trying last ditch attempt to close result'
log.warning(message)
self.result.log(message)
if self.result.status:
status = self.result.status
else:
status = 'No previous status'
message = (
'Task Result was auto-closed from task executor on {0:s} likely '
'due to previous failures. Previous status: [{1:s}]'.format(
self.result.worker_name, status))
self.result.log(message)
try:
self.result.close(self, False, message)
# Using broad except here because lots can go wrong due to the reasons
# listed above.
# pylint: disable=broad-except
except Exception as exception:
log.error('TurbiniaTaskResult close failed: {0!s}'.format(exception))
if not self.result.status:
self.result.status = message
# Check the result again after closing to make sure it's still good.
self.result = self.validate_result(self.result)
if original_result_id != self.result.id:
log.debug(
'Result object {0:s} is different from original {1!s} after task '
'execution which indicates errors during execution'.format(
self.result.id, original_result_id))
else:
log.debug(
'Returning original result object {0:s} after task execution'.format(
self.result.id))
return self.result.serialize()
def run(self, evidence, result):
"""Entry point to execute the task.
Args:
evidence: Evidence object.
result: A TurbiniaTaskResult object to place task results into.
Returns:
TurbiniaTaskResult object.
"""
raise NotImplementedError
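# Illustrative only (not part of the Turbinia codebase): a minimal sketch of a
# concrete Task, assuming a simple shell command is all the task needs.
class _ExampleUnameTask(TurbiniaTask):
  """Sketch of a Task that runs `uname -a` and saves its stdout."""
  def run(self, evidence, result):
    output_path = os.path.join(self.output_dir, 'uname.txt')
    # execute() runs the command, writes stdout to output_path, saves the file
    # through the output manager, and closes the result based on the return code.
    self.execute(['uname', '-a'], result, stdout_file=output_path, close=True)
    return result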
|
py | 1a3c2cf040bc6ed0ad8e21c7530945830cea9da1 | # coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class TpdmTeacherPreparationProviderProgramGradeLevel(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'grade_level_descriptor': 'str'
}
attribute_map = {
'grade_level_descriptor': 'gradeLevelDescriptor'
}
def __init__(self, grade_level_descriptor=None, _configuration=None): # noqa: E501
"""TpdmTeacherPreparationProviderProgramGradeLevel - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._grade_level_descriptor = None
self.discriminator = None
self.grade_level_descriptor = grade_level_descriptor
@property
def grade_level_descriptor(self):
"""Gets the grade_level_descriptor of this TpdmTeacherPreparationProviderProgramGradeLevel. # noqa: E501
The grade levels served at the TPP Program. # noqa: E501
:return: The grade_level_descriptor of this TpdmTeacherPreparationProviderProgramGradeLevel. # noqa: E501
:rtype: str
"""
return self._grade_level_descriptor
@grade_level_descriptor.setter
def grade_level_descriptor(self, grade_level_descriptor):
"""Sets the grade_level_descriptor of this TpdmTeacherPreparationProviderProgramGradeLevel.
The grade levels served at the TPP Program. # noqa: E501
:param grade_level_descriptor: The grade_level_descriptor of this TpdmTeacherPreparationProviderProgramGradeLevel. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and grade_level_descriptor is None:
raise ValueError("Invalid value for `grade_level_descriptor`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
grade_level_descriptor is not None and len(grade_level_descriptor) > 306):
raise ValueError("Invalid value for `grade_level_descriptor`, length must be less than or equal to `306`") # noqa: E501
self._grade_level_descriptor = grade_level_descriptor
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TpdmTeacherPreparationProviderProgramGradeLevel, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TpdmTeacherPreparationProviderProgramGradeLevel):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TpdmTeacherPreparationProviderProgramGradeLevel):
return True
return self.to_dict() != other.to_dict()
|
py | 1a3c2d3cb52436b7170648b5b1e10f5e9eefc5d0 | from flask import Flask
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
class HelloWorld(Resource):
def get(self):
return {'hello': 'world'}
api.add_resource(HelloWorld, '/')
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0') |
py | 1a3c2d9c813baa9f2d133c10185ee2fea3207b19 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.backends.chrome_inspector import inspector_websocket
from telemetry.internal.platform import system_info
from py_utils import camel_case
class SystemInfoBackend(object):
def __init__(self, devtools_port):
self._port = devtools_port
def GetSystemInfo(self, timeout=10):
req = {'method': 'SystemInfo.getInfo'}
websocket = inspector_websocket.InspectorWebsocket()
try:
websocket.Connect('ws://127.0.0.1:%i/devtools/browser' % self._port,
timeout)
res = websocket.SyncRequest(req, timeout)
finally:
websocket.Disconnect()
if 'error' in res:
return None
return system_info.SystemInfo.FromDict(
camel_case.ToUnderscore(res['result']))
def Close(self):
pass
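# Usage sketch (illustrative; the DevTools port value is an assumption, not part of this module):
#
#   backend = SystemInfoBackend(devtools_port=9222)
#   info = backend.GetSystemInfo(timeout=10)   # returns None if DevTools reports an error
#   backend.Close()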
|
py | 1a3c2f9a19e69200ffcdcd676147203dc5befd2e | from datetime import datetime
from django.db import models
from django.contrib.auth import get_user_model
from goods.models import Goods
User = get_user_model()
# Create your models here.
class ShoppingCart(models.Model):
"""
购物车
"""
user = models.ForeignKey(User, verbose_name=u"用户")
goods = models.ForeignKey(Goods, verbose_name=u"商品")
nums = models.IntegerField(default=0, verbose_name="购买数量")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Meta:
verbose_name = '购物车'
verbose_name_plural = verbose_name
unique_together = ("user", "goods")
def __str__(self):
return "%s(%d)".format(self.goods.name, self.nums)
class OrderInfo(models.Model):
"""
订单
"""
ORDER_STATUS = (
("TRADE_SUCCESS", "成功"),
("TRADE_CLOSED", "超时关闭"),
("WAIT_BUYER_PAY", "交易创建"),
("TRADE_FINISHED", "交易结束"),
("paying", "待支付"),
)
user = models.ForeignKey(User, verbose_name="用户")
order_sn = models.CharField(max_length=30, null=True, blank=True, unique=True, verbose_name="订单号")
trade_no = models.CharField(max_length=100, unique=True, null=True, blank=True, verbose_name=u"交易号")
pay_status = models.CharField(choices=ORDER_STATUS, default="paying", max_length=30, verbose_name="订单状态")
post_script = models.CharField(max_length=200, verbose_name="订单留言")
order_mount = models.FloatField(default=0.0, verbose_name="订单金额")
pay_time = models.DateTimeField(null=True, blank=True, verbose_name="支付时间")
# 用户信息
address = models.CharField(max_length=100, default="", verbose_name="收货地址")
signer_name = models.CharField(max_length=20, default="", verbose_name="签收人")
singer_mobile = models.CharField(max_length=11, verbose_name="联系电话")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = u"订单"
verbose_name_plural = verbose_name
def __str__(self):
return str(self.order_sn)
class OrderGoods(models.Model):
"""
订单的商品详情
"""
order = models.ForeignKey(OrderInfo, verbose_name="订单信息", related_name="goods")
goods = models.ForeignKey(Goods, verbose_name="商品")
goods_num = models.IntegerField(default=0, verbose_name="商品数量")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "订单商品"
verbose_name_plural = verbose_name
def __str__(self):
return str(self.order.order_sn)
|
py | 1a3c2fb577436439e5e8f30758358265b219ef90 | # Generated by Django 3.1.2 on 2022-02-22 20:59
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0007_auto_20220217_1254'),
]
operations = [
migrations.AlterModelOptions(
name='postitem',
options={'ordering': ['-created']},
),
]
|
py | 1a3c30212f40e6580a5b05fbe09a14b44a78bdfd | #!/usr/bin/env python
# coding: UTF-8
'''This script builds the Seafile Debian package.
Some notes:
1. The working directory is always the 'builddir'. 'os.chdir' is only called
to change to the 'builddir'. We make use of the 'cwd' argument in
'subprocess.Popen' to run a command in a specific directory.
'''
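# For example (illustrative only, mirroring the run()/run_argv() helpers defined below),
# a command is executed inside a specific directory without chdir-ing the whole process:
#
#   proc = subprocess.Popen('make -j4', cwd='/tmp/seafile-deb-build', shell=True)
#   proc.wait()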
import sys
####################
### Requires Python 2.6+
####################
if sys.version_info[0] == 3:
print 'Python 3 not supported yet. Quit now.'
sys.exit(1)
if sys.version_info[1] < 6:
print 'Python 2.6 or above is required. Quit now.'
sys.exit(1)
import os
import glob
import tempfile
import shutil
import re
import subprocess
import optparse
import atexit
####################
### Global variables
####################
# command line configuartion
conf = {}
# key names in the conf dictionary.
CONF_VERSION = 'version'
CONF_LIBSEARPC_VERSION = 'libsearpc_version'
CONF_CCNET_VERSION = 'ccnet_version'
CONF_SEAFILE_VERSION = 'seafile_version'
CONF_SEAFILE_CLIENT_VERSION = 'seafile_client_version'
CONF_SRCDIR = 'srcdir'
CONF_KEEP = 'keep'
CONF_BUILDDIR = 'builddir'
CONF_OUTPUTDIR = 'outputdir'
CONF_NO_STRIP = 'nostrip'
####################
### Common helper functions
####################
def highlight(content, is_error=False):
'''Add ANSI color to content to get it highlighted on terminal'''
if is_error:
return '\x1b[1;31m%s\x1b[m' % content
else:
return '\x1b[1;32m%s\x1b[m' % content
def info(msg):
print highlight('[INFO] ') + msg
def exist_in_path(prog):
'''Test whether prog exists in system path'''
dirs = os.environ['PATH'].split(':')
for d in dirs:
if d == '':
continue
path = os.path.join(d, prog)
if os.path.exists(path):
return True
return False
def prepend_env_value(name, value, seperator=':'):
    '''Prepend a value to a separator-delimited environment variable'''
try:
current_value = os.environ[name]
except KeyError:
current_value = ''
new_value = value
if current_value:
new_value += seperator + current_value
os.environ[name] = new_value
def error(msg=None, usage=None):
if msg:
print highlight('[ERROR] ') + msg
if usage:
print usage
sys.exit(1)
def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
    '''Run a program and wait for it to finish, and return its exit code. The
    standard output/error of the program can optionally be suppressed.
    '''
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(argv,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env)
return proc.wait()
def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
'''Like run_argv but specify a command line string instead of argv'''
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(cmdline,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env,
shell=True)
return proc.wait()
def must_mkdir(path):
'''Create a directory, exit on failure'''
try:
os.mkdir(path)
except OSError, e:
error('failed to create directory %s:%s' % (path, e))
def must_copy(src, dst):
'''Copy src to dst, exit on failure'''
try:
shutil.copy(src, dst)
except Exception, e:
error('failed to copy %s to %s: %s' % (src, dst, e))
class Project(object):
'''Base class for a project'''
    # Project name, i.e. libsearpc/ccnet/seafile/seafile-client
name = ''
# A list of shell commands to configure/build the project
build_commands = []
def __init__(self):
self.version = self.get_version()
self.src_tarball = os.path.join(conf[CONF_SRCDIR],
'%s-%s.tar.gz' % (self.name, self.version))
# project dir, like <builddir>/seafile-1.2.2/
self.projdir = os.path.join(conf[CONF_BUILDDIR], '%s-%s' % (self.name, self.version))
def get_version(self):
# libsearpc and ccnet can have different versions from seafile.
raise NotImplementedError
def get_source_commit_id(self):
        '''By convention, we record the commit id of the source code in the
file "<projdir>/latest_commit"
'''
latest_commit_file = os.path.join(self.projdir, 'latest_commit')
with open(latest_commit_file, 'r') as fp:
commit_id = fp.read().strip('\n\r\t ')
return commit_id
def append_cflags(self, macros):
cflags = ' '.join([ '-D%s=%s' % (k, macros[k]) for k in macros ])
prepend_env_value('DEB_CPPFLAGS_APPEND',
cflags,
seperator=' ')
def uncompress(self):
'''Uncompress the source from the tarball'''
info('Uncompressing %s' % self.name)
if run('tar xf %s' % self.src_tarball) < 0:
error('failed to uncompress source of %s' % self.name)
def before_build(self):
'''Hook method to do project-specific stuff before running build commands'''
pass
def build(self):
'''Build the source'''
self.before_build()
info('Building %s' % self.name)
for cmd in self.build_commands:
if run(cmd, cwd=self.projdir) != 0:
error('error when running command:\n\t%s\n' % cmd)
class Libsearpc(Project):
name = 'libsearpc'
def get_version(self):
return conf[CONF_LIBSEARPC_VERSION]
class Ccnet(Project):
name = 'ccnet'
def get_version(self):
return conf[CONF_CCNET_VERSION]
def before_build(self):
macros = {}
# SET CCNET_SOURCE_COMMIT_ID, so it can be printed in the log
macros['CCNET_SOURCE_COMMIT_ID'] = '\\"%s\\"' % self.get_source_commit_id()
self.append_cflags(macros)
class SeafileClient(Project):
name = 'seafile-client'
def get_version(self):
return conf[CONF_SEAFILE_CLIENT_VERSION]
def before_build(self):
pass
class Seafile(Project):
name = 'seafile'
def __init__(self):
Project.__init__(self)
self.build_commands = [
'dpkg-buildpackage -B -nc -uc -us',
]
def get_version(self):
return conf[CONF_SEAFILE_VERSION]
def before_build(self):
macros = {}
# SET SEAFILE_SOURCE_COMMIT_ID, so it can be printed in the log
macros['SEAFILE_SOURCE_COMMIT_ID'] = '\\"%s\\"' % self.get_source_commit_id()
self.append_cflags(macros)
def check_targz_src(proj, version, srcdir):
src_tarball = os.path.join(srcdir, '%s-%s.tar.gz' % (proj, version))
if not os.path.exists(src_tarball):
error('%s not exists' % src_tarball)
def validate_args(usage, options):
required_args = [
CONF_VERSION,
CONF_LIBSEARPC_VERSION,
CONF_CCNET_VERSION,
CONF_SEAFILE_VERSION,
CONF_SEAFILE_CLIENT_VERSION,
CONF_SRCDIR,
]
# fist check required args
for optname in required_args:
if getattr(options, optname, None) == None:
error('%s must be specified' % optname, usage=usage)
def get_option(optname):
return getattr(options, optname)
# [ version ]
def check_project_version(version):
'''A valid version must be like 1.2.2, 1.3'''
        if not re.match(r'^[0-9]+(\.[0-9]+)+$', version):
error('%s is not a valid version' % version, usage=usage)
version = get_option(CONF_VERSION)
libsearpc_version = get_option(CONF_LIBSEARPC_VERSION)
ccnet_version = get_option(CONF_CCNET_VERSION)
seafile_version = get_option(CONF_SEAFILE_VERSION)
seafile_client_version = get_option(CONF_SEAFILE_CLIENT_VERSION)
check_project_version(version)
check_project_version(libsearpc_version)
check_project_version(ccnet_version)
check_project_version(seafile_version)
# [ srcdir ]
srcdir = get_option(CONF_SRCDIR)
check_targz_src('libsearpc', libsearpc_version, srcdir)
check_targz_src('ccnet', ccnet_version, srcdir)
check_targz_src('seafile', seafile_version, srcdir)
check_targz_src('seafile-client', seafile_client_version, srcdir)
# [ builddir ]
builddir = get_option(CONF_BUILDDIR)
if not os.path.exists(builddir):
error('%s does not exist' % builddir, usage=usage)
builddir = os.path.join(builddir, 'seafile-deb-build')
# [ outputdir ]
outputdir = get_option(CONF_OUTPUTDIR)
if outputdir:
if not os.path.exists(outputdir):
error('outputdir %s does not exist' % outputdir, usage=usage)
else:
outputdir = os.getcwd()
# [ keep ]
keep = get_option(CONF_KEEP)
# [ no strip]
nostrip = get_option(CONF_NO_STRIP)
conf[CONF_VERSION] = version
conf[CONF_LIBSEARPC_VERSION] = libsearpc_version
conf[CONF_CCNET_VERSION] = ccnet_version
conf[CONF_SEAFILE_VERSION] = seafile_version
conf[CONF_SEAFILE_CLIENT_VERSION] = seafile_client_version
conf[CONF_BUILDDIR] = builddir
conf[CONF_SRCDIR] = srcdir
conf[CONF_OUTPUTDIR] = outputdir
conf[CONF_KEEP] = keep
conf[CONF_NO_STRIP] = nostrip
prepare_builddir(builddir)
show_build_info()
def show_build_info():
    '''Print all conf information and confirm before continuing.'''
info('------------------------------------------')
info('Seafile debian package: BUILD INFO')
info('------------------------------------------')
info('seafile: %s' % conf[CONF_SEAFILE_VERSION])
info('seafile-client: %s' % conf[CONF_SEAFILE_CLIENT_VERSION])
info('ccnet: %s' % conf[CONF_CCNET_VERSION])
info('libsearpc: %s' % conf[CONF_LIBSEARPC_VERSION])
info('builddir: %s' % conf[CONF_BUILDDIR])
info('outputdir: %s' % conf[CONF_OUTPUTDIR])
info('source dir: %s' % conf[CONF_SRCDIR])
info('strip symbols: %s' % (not conf[CONF_NO_STRIP]))
info('clean on exit: %s' % (not conf[CONF_KEEP]))
info('------------------------------------------')
info('press any key to continue ')
info('------------------------------------------')
dummy = raw_input()
def prepare_builddir(builddir):
must_mkdir(builddir)
if not conf[CONF_KEEP]:
def remove_builddir():
'''Remove the builddir when exit'''
info('remove builddir before exit')
shutil.rmtree(builddir, ignore_errors=True)
atexit.register(remove_builddir)
os.chdir(builddir)
def parse_args():
parser = optparse.OptionParser()
def long_opt(opt):
return '--' + opt
parser.add_option(long_opt(CONF_VERSION),
dest=CONF_VERSION,
nargs=1,
help='the version to build. Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_LIBSEARPC_VERSION),
dest=CONF_LIBSEARPC_VERSION,
nargs=1,
help='the version of libsearpc as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_CCNET_VERSION),
dest=CONF_CCNET_VERSION,
nargs=1,
help='the version of ccnet as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_SEAFILE_VERSION),
dest=CONF_SEAFILE_VERSION,
nargs=1,
                      help='the version of seafile as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_SEAFILE_CLIENT_VERSION),
dest=CONF_SEAFILE_CLIENT_VERSION,
nargs=1,
help='the version of seafile-client. Must be digits delimited by dots, like 1.3.0')
parser.add_option(long_opt(CONF_BUILDDIR),
dest=CONF_BUILDDIR,
nargs=1,
help='the directory to build the source. Defaults to /tmp',
default=tempfile.gettempdir())
parser.add_option(long_opt(CONF_OUTPUTDIR),
dest=CONF_OUTPUTDIR,
nargs=1,
                      help='the output directory to put the generated Debian package. Defaults to the current directory.',
default=os.getcwd())
parser.add_option(long_opt(CONF_SRCDIR),
dest=CONF_SRCDIR,
nargs=1,
help='''Source tarballs must be placed in this directory.''')
parser.add_option(long_opt(CONF_KEEP),
dest=CONF_KEEP,
action='store_true',
help='''keep the build directory after the script exits. By default, the script would delete the build directory at exit.''')
parser.add_option(long_opt(CONF_NO_STRIP),
dest=CONF_NO_STRIP,
action='store_true',
help='''do not strip debug symbols''')
usage = parser.format_help()
options, remain = parser.parse_args()
if remain:
error(usage=usage)
validate_args(usage, options)
def setup_build_env():
    '''Set up environment variables, such as export PATH=$BUILDDIR/bin:$PATH'''
prefix = os.path.join(Seafile().projdir, 'debian', 'seafile', 'usr')
prepend_env_value('DEB_CPPFLAGS_APPEND',
'-DSEAFILE_CLIENT_VERSION=\\"%s\\"' % conf[CONF_VERSION],
seperator=' ')
if conf[CONF_NO_STRIP]:
prepend_env_value('DEB_CPPFLAGS_APPEND',
'-g -O0',
seperator=' ')
prepend_env_value('PATH', os.path.join(prefix, 'bin'))
prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib', 'pkgconfig'))
os.environ['LIBSEARPC_SOURCE_DIR'] = Libsearpc().projdir
os.environ['CCNET_SOURCE_DIR'] = Ccnet().projdir
os.environ['SEAFILE_CLIENT_SOURCE_DIR'] = SeafileClient().projdir
def move_deb():
builddir = conf[CONF_BUILDDIR]
deb_name = glob.glob('*.deb')[0]
src_deb = os.path.join(builddir, deb_name)
dst_deb = os.path.join(conf[CONF_OUTPUTDIR], deb_name)
# move deb to outputdir
try:
shutil.move(src_deb, dst_deb)
except Exception, e:
        error('failed to move %s to %s: %s' % (src_deb, dst_deb, e))
print '---------------------------------------------'
    print 'The build was successful. Output is:\t%s' % dst_deb
print '---------------------------------------------'
def main():
parse_args()
setup_build_env()
libsearpc = Libsearpc()
ccnet = Ccnet()
seafile = Seafile()
seafile_client = SeafileClient()
libsearpc.uncompress()
ccnet.uncompress()
seafile.uncompress()
seafile_client.uncompress()
libsearpc.build()
ccnet.build()
seafile.build()
seafile_client.build()
move_deb()
if __name__ == '__main__':
main() |
py | 1a3c30ae627b77e056968ce1934530a582fc7a57 | import logging
from threading import Thread
from telemetry_f1_2021.listener import TelemetryListener
from kafka.kafka_admin import KafkaAdmin
class TelemetryManager(Thread):
"""Class for adding packets to the packet queue.
Derived from the Thread class, this is run as part of a multithreaded program.
The class initialises a TelemetryListener object and uses this to gather packets
from the UDP stream. These are then added to a separate packet queue by reference.
Methods:
run - called as part of the start method in Thread. Gets packets and adds them to the queue.
"""
def __init__(self, producer):
Thread.__init__(self)
self.producer = producer
self.daemon = True
self.telemetry_listener = TelemetryListener()
self.start()
def run(self):
admin = KafkaAdmin(self.producer.config)
while True:
packet = self.telemetry_listener.get()
topic_name = type(packet).__name__
admin.check_add_topic(topic_name)
self.producer.produce_data(topic_name, packet)
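# Minimal wiring sketch (not part of the original module): the producer passed to
# TelemetryManager is assumed to expose a `config` attribute and a
# `produce_data(topic, packet)` method; the stub below is purely illustrative.
#
#   class _StubProducer:
#       config = {"bootstrap.servers": "localhost:9092"}   # hypothetical config
#       def produce_data(self, topic, packet):
#           print(topic, packet)
#
#   manager = TelemetryManager(_StubProducer())
#   manager.join()   # block while packets are forwarded to Kafka topics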
|
py | 1a3c318904fb11c7f5f230b2bc3df56b23b6486e | import logging
import multiprocessing
import os
from bootleg.utils import train_utils
def get_log_name(args, mode):
log_name = os.path.join(train_utils.get_save_folder(args.run_config), f"log_{mode}")
log_name += train_utils.get_file_suffix(args)
log_name += f'_gpu{args.run_config.gpu}'
return log_name
def create_logger(args, mode):
if args.run_config.distributed:
logger = logging.getLogger("bootleg")
else:
logger = logging.getLogger("bootleg")
# set logging level
numeric_level = getattr(logging, args.run_config.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.run_config.loglevel.upper())
logger.setLevel(numeric_level)
# do not propagate messages to the root logger
logger.propagate = False
log_name = get_log_name(args, mode)
if not os.path.exists(log_name): os.system("touch " + log_name)
if not logger.hasHandlers():
formatter = logging.Formatter('%(asctime)s %(message)s')
fh = logging.FileHandler(log_name, mode='w' if mode == 'train' else 'a')
fh.setFormatter(formatter)
logger.addHandler(fh)
# only print the stream for the first GPU
if args.run_config.gpu == 0:
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger.addHandler(sh)
else:
print('Something went wrong in the logger')
exit()
return logger
def get_logger(args):
if args.run_config.distributed:
return logging.getLogger("bootleg")
else:
return logging.getLogger("bootleg") |
py | 1a3c3217bea2453330534b09051ea27a8e1a8ffc | # -*- coding: utf-8 -*-
# This class implements the AIML pattern-matching algorithm described by Dr. Richard Wallace at: http://www.alicebot.org/documentation/matching.html
from __future__ import print_function
import marshal
import pprint
import re
from .constants import *
class PatternMgr:
# special dictionary keys
_UNDERSCORE = 0
_STAR = 1
_TEMPLATE = 2
_THAT = 3
_TOPIC = 4
_BOT_NAME = 5
def __init__(self):
self._root = {}
self._templateCount = 0
self._botName = u"Nameless"
punctuation = "\"`~!@#$%^&*()-_=+[{]}\|;:',<.>/?"
self._puncStripRE = re.compile("[" + re.escape(punctuation) + "]")
self._whitespaceRE = re.compile("\s+", re.UNICODE)
def numTemplates(self):
"""返回当前存储的模板数量。"""
return self._templateCount
def setBotName(self, name):
"""设置机器人的名称,用于匹配模式中的<bot name =“name”>标签。 名字必须是一个单词! """
# 将多个单词的名字合并为一个单词
self._botName = unicode( ' '.join(name.split()) )
def dump(self):
"""打印所有学习的模式,用于调试目的。"""
pprint.pprint(self._root)
def save(self, filename):
"""将当前模式转储到由filename指定的文件。 要稍后恢复,请使用restore(). """
try:
outFile = open(filename, "wb")
marshal.dump(self._templateCount, outFile)
marshal.dump(self._botName, outFile)
marshal.dump(self._root, outFile)
outFile.close()
except Exception as e:
print( "Error saving PatternMgr to file %s:" % filename )
raise
def restore(self, filename):
"""还原以前保存过的模式集合。"""
try:
inFile = open(filename, "rb")
self._templateCount = marshal.load(inFile)
self._botName = marshal.load(inFile)
self._root = marshal.load(inFile)
inFile.close()
except Exception as e:
print( "Error restoring PatternMgr from file %s:" % filename )
raise
def add(self, data, template):
"""将[pattern / that / topic]元组及其相应的模板添加到节点树中。 """
pattern,that,topic = data
        # TODO: make sure words contains only legal characters! (alphanumerics, *, _)
        # Navigate the node tree to the template's location, adding nodes if necessary.
node = self._root
for word in pattern.split():
key = word
if key == u"_":
key = self._UNDERSCORE
elif key == u"*":
key = self._STAR
elif key == u"BOT_NAME":
key = self._BOT_NAME
if key not in node:
node[key] = {}
node = node[key]
        # If a non-empty "that" pattern was included, navigate further down
if len(that) > 0:
if self._THAT not in node:
node[self._THAT] = {}
node = node[self._THAT]
for word in that.split():
key = word
if key == u"_":
key = self._UNDERSCORE
elif key == u"*":
key = self._STAR
if key not in node:
node[key] = {}
node = node[key]
        # If a non-empty "topic" string was included, navigate further down
if len(topic) > 0:
if self._TOPIC not in node:
node[self._TOPIC] = {}
node = node[self._TOPIC]
for word in topic.split():
key = word
if key == u"_":
key = self._UNDERSCORE
elif key == u"*":
key = self._STAR
if key not in node:
node[key] = {}
node = node[key]
        # Add the template
if self._TEMPLATE not in node:
self._templateCount += 1
node[self._TEMPLATE] = template
def match(self, pattern, that, topic):
""" 返回最接近模式的模板。 'that'参数包含机器人以前的回应。 “topic”参数包含当前的对话主题。
如果没有找到模板,则返回None。 """
if len(pattern) == 0:
return None
        # Mutilate the input.  Remove all punctuation and convert the text to all caps. [Key step!]
input_ = pattern.upper()
input_ = re.sub(self._puncStripRE, " ", input_)
if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
thatInput = that.upper()
thatInput = re.sub(self._puncStripRE, " ", thatInput)
thatInput = re.sub(self._whitespaceRE, " ", thatInput)
if topic.strip() == u"": topic = u"ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
topicInput = topic.upper()
topicInput = re.sub(self._puncStripRE, " ", topicInput)
        # Pass the input off to the recursive call
patMatch, template = self._match(input_.split(), thatInput.split(), topicInput.split(), self._root)
return template
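    # A minimal usage sketch (an assumption about typical use, not part of the original
    # module): patterns are added as (pattern, that, topic) tuples and later matched
    # against normalized input:
    #
    #   mgr = PatternMgr()
    #   mgr.add((u"HELLO *", u"", u""), u"Hi there!")
    #   mgr.match(u"hello bot", u"", u"")   # -> u"Hi there!"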
def star(self, starType, pattern, that, topic, index):
"""返回一个字符串,即由*匹配的模式部分。
'starType'参数指定要找到哪种星型。 合法值是:
- “star”:匹配主要模式中的一个星号。
- “thatstar”:与that模式中的一个星号匹配。
- “topicstar”:与topic模式中的一个星号匹配。 """
        # Mutilate the input.  Remove all punctuation and convert the text to all caps.
input_ = pattern.upper()
input_ = re.sub(self._puncStripRE, " ", input_)
input_ = re.sub(self._whitespaceRE, " ", input_)
if that.strip() == u"": that = u"ULTRABOGUSDUMMYTHAT" # 'that' must never be empty
thatInput = that.upper()
thatInput = re.sub(self._puncStripRE, " ", thatInput)
thatInput = re.sub(self._whitespaceRE, " ", thatInput)
if topic.strip() == u"": topic = u"ULTRABOGUSDUMMYTOPIC" # 'topic' must never be empty
topicInput = topic.upper()
topicInput = re.sub(self._puncStripRE, " ", topicInput)
topicInput = re.sub(self._whitespaceRE, " ", topicInput)
        # Pass the input off to the recursive pattern-matcher
patMatch, template = self._match(input_.split(), thatInput.split(), topicInput.split(), self._root)
if template == None:
return ""
        # Extract the appropriate portion of the pattern, based on the starType argument.
words = None
if starType == 'star':
patMatch = patMatch[:patMatch.index(self._THAT)]
words = input_.split()
elif starType == 'thatstar':
patMatch = patMatch[patMatch.index(self._THAT)+1 : patMatch.index(self._TOPIC)]
words = thatInput.split()
elif starType == 'topicstar':
patMatch = patMatch[patMatch.index(self._TOPIC)+1 :]
words = topicInput.split()
else:
# unknown value
raise ValueError( "starType must be in ['star', 'thatstar', 'topicstar']" )
        # Compare the input string to the matched pattern, word by word.  At the end of this
        # loop, if foundTheRightStar is true, start and end will contain the start and end indices (in "words") of the substring matched by the desired star.
foundTheRightStar = False
start = end = j = numStars = k = 0
for i in range(len(words)):
            # This condition is true after we have processed a star that is not the one we are looking for
if i < k:
continue
            # If we have reached the end of the pattern, we are done.
if j == len(patMatch):
break
if not foundTheRightStar:
if patMatch[j] in [self._STAR, self._UNDERSCORE]: #we got a star
numStars += 1
if numStars == index:
                        # This is the star we care about.
foundTheRightStar = True
start = i
                        # Iterate through the rest of the string.
for k in range (i, len(words)):
                            # If the star is at the end of the pattern, we know exactly where it ends.
if j+1 == len (patMatch):
end = len (words)
break
                            # If the words have begun to match the pattern again, the star has ended.
                            # ======== Unsure; fix: for the pattern "* A B", "A C A B" will match, which is a bug
if patMatch[j+1] == words[k]:
end = k - 1
i = k
break
            # If we just finished processing the star we care about, exit the loop early.
if foundTheRightStar:
break
            # Move to the next element of the pattern.
j += 1
        # Extract the star from the original, unmutilated input.
if foundTheRightStar:
#print( ' '.join(pattern.split()[start:end+1]) )
if starType == 'star': return ' '.join(pattern.split()[start:end+1])
elif starType == 'thatstar': return ' '.join(that.split()[start:end+1])
elif starType == 'topicstar': return ' '.join(topic.split()[start:end+1])
else: return u""
def _match(self, words, thatWords, topicWords, root):
"""返回一个元组(pat,tem),其中pat是节点列表,从根开始并导致匹配的模式,tem是匹配的模板。 """
        # Base case: if the word list is empty, return the current node's template.
if len(words) == 0:
# we're out of words.
pattern = []
template = None
if len(thatWords) > 0:
                # If thatWords is not empty, recursively pattern-match on the _THAT node with thatWords as the words.
try:
pattern, template = self._match(thatWords, [], topicWords, root[self._THAT])
if pattern != None:
pattern = [self._THAT] + pattern
except KeyError:
pattern = []
template = None
elif len(topicWords) > 0:
                # If thatWords is empty and topicWords is not, recursively pattern-match on the _TOPIC node with topicWords as the words.
try:
pattern, template = self._match(topicWords, [], [], root[self._TOPIC])
if pattern != None:
pattern = [self._TOPIC] + pattern
except KeyError:
pattern = []
template = None
if template == None:
                # We are completely out of input.  Grab the template at this node.
pattern = []
try: template = root[self._TEMPLATE]
except KeyError: template = None
return (pattern, template)
first = words[0]
suffix = words[1:]
        # Check underscore.
        # Note: this is a problem in the standard AIML set, and is currently disabled.
if self._UNDERSCORE in root:
            # Must include the case where suf is [] in order to handle the case where a * or _ appears at the end of the pattern.
for j in range(len(suffix)+1):
suf = suffix[j:]
pattern, template = self._match(suf, thatWords, topicWords, root[self._UNDERSCORE])
if template is not None:
newPattern = [self._UNDERSCORE] + pattern
return (newPattern, template)
# Check first
if first in root:
pattern, template = self._match(suffix, thatWords, topicWords, root[first])
if template is not None:
newPattern = [first] + pattern
return (newPattern, template)
# check bot name
if self._BOT_NAME in root and first == self._botName:
pattern, template = self._match(suffix, thatWords, topicWords, root[self._BOT_NAME])
if template is not None:
newPattern = [first] + pattern
return (newPattern, template)
# check star
if self._STAR in root:
            # Must include the case where suf is [] in order to handle the case where a * or _ appears at the end of the pattern.
for j in range(len(suffix)+1):
suf = suffix[j:]
pattern, template = self._match(suf, thatWords, topicWords, root[self._STAR])
if template is not None:
newPattern = [self._STAR] + pattern
return (newPattern, template)
        # No match found.
return (None, None) |
py | 1a3c371294a2058afc34e97341b9d1740a46e4ce | import numpy as np
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import normalize, LabelEncoder
import sys
from process import load_names
from scanorama import *
NAMESPACE = 'hsc'
data_names = [
'data/hsc/hsc_mars',
'data/hsc/hsc_ss2',
]
# Computes the probability that the corrected SS2 dataset
# comes from the original SS2 distribution or from the same
# distribution as the corrected MARS-Seq dataset.
if __name__ == '__main__':
# Load data.
datasets, genes_list, n_cells = load_names(data_names, verbose=False)
datasets, genes = merge_datasets(datasets, genes_list, verbose=False)
datasets, genes = process_data(datasets, genes)
datasets = [ normalize(ds, axis=1) for ds in datasets ]
# Fit initial mixture models.
gm_ss2 = (GaussianMixture(n_components=3, n_init=3)
.fit(datasets[1]))
# Do batch correction.
datasets = assemble(
datasets,
verbose=False, knn=KNN, sigma=SIGMA, approx=APPROX
)
datasets = [ normalize(ds, axis=1) for ds in datasets ]
# Fit mixture models to other dataset.
gm_mars_corrected = (
GaussianMixture(n_components=3, n_init=3)
.fit(datasets[0])
)
# Natural log likelihoods.
ll_ss2 = gm_ss2.score(datasets[1])
ll_mars_corrected = gm_mars_corrected.score(datasets[1])
# Natural log of the likelihood ratio.
print(ll_ss2 - max(ll_ss2, ll_mars_corrected))
|
py | 1a3c373a7914b47a227d9bd524a580179e222ba6 | from pynonymizer.database.provider import DatabaseProvider
from pynonymizer.database.provider import SEED_TABLE_NAME
from pynonymizer.strategy.update_column import UpdateColumnStrategyTypes
from pynonymizer.strategy.table import TableStrategyTypes
from pynonymizer.database.exceptions import (
UnsupportedColumnStrategyError,
UnsupportedTableStrategyError,
DependencyError,
)
from pynonymizer.fake import FakeDataType
import math
import logging
from tqdm import tqdm
from pathlib import PureWindowsPath, PurePosixPath
import re
_FAKE_COLUMN_TYPES = {
FakeDataType.STRING: "VARCHAR(MAX)",
FakeDataType.DATE: "DATE",
FakeDataType.DATETIME: "DATETIME",
FakeDataType.INT: "INT",
}
_LOCAL_SERVER = "127.0.0.1"
_DEFAULT_PORT = "1433"
def _extract_driver_version(driver):
try:
return int(re.findall(r"\d+", driver)[0])
except IndexError:
return 0
class MsSqlProvider(DatabaseProvider):
"""
A pyodbc-based MSSQL provider.
"""
logger = logging.getLogger(__name__)
# stats value for restore/backup command: Report progress every X percent
    # A lower value means MORE result sets / more frequent updates from the backup command.
# Values lower than 5 often yield unreliable results on smaller backups
__STATS = 5
def __init__(
self,
db_host,
db_user,
db_pass,
db_name,
db_port=None,
seed_rows=None,
backup_compression=False,
driver=None,
):
        # import here so we fail fast if pyodbc is missing
import pyodbc
db_host = db_host or _LOCAL_SERVER
db_port = db_port or _DEFAULT_PORT
driver = driver or self.__detect_driver()
self.db_host = db_host
self.db_user = db_user
self.db_pass = db_pass
self.db_name = db_name
self.db_port = db_port
if seed_rows is None:
seed_rows = 150
self.seed_rows = int(seed_rows)
self.__conn = None
self.__db_conn = None
self.__backup_compression = backup_compression
self.__driver = driver
def __detect_driver(self):
import pyodbc
ms_drivers = [i for i in pyodbc.drivers() if "sql server" in i.lower()]
if len(ms_drivers) < 1:
raise DependencyError(
"odbc", "Failed to detect any ODBC drivers on this system."
)
if len(ms_drivers) > 1:
self.logger.debug("multiple drivers detected for mssql: %s", ms_drivers)
# Sort by the highest number (like, ODBC driver 14 for SQL server)
return sorted(ms_drivers, key=_extract_driver_version, reverse=True)[0]
def __require_local_server(self):
if self.db_host != _LOCAL_SERVER:
raise DependencyError(
"db_host",
"This operation does not support remote servers due to backup file "
"location requirements. You must omit db_host from your configuration "
"and run pynonymizer on the same server as the database.",
)
def __connection(self):
import pyodbc
"""a lazy-evaluated connection"""
if self.__conn is None:
self.__conn = pyodbc.connect(
driver=f"{{{self.__driver}}}",
server=f"{self.db_host},{self.db_port}",
uid=self.db_user,
pwd=self.db_pass,
autocommit=True,
)
return self.__conn
def __db_connection(self):
import pyodbc
"""a lazy-evaluated db-specific connection"""
if self.__db_conn is None:
self.__db_conn = pyodbc.connect(
driver=f"{{{self.__driver}}}",
database=self.db_name,
server=f"{self.db_host},{self.db_port}",
uid=self.db_user,
pwd=self.db_pass,
autocommit=True,
)
return self.__db_conn
def __execute(self, *args, **kwargs):
return self.__connection().execute(*args, **kwargs)
def __db_execute(self, *args, **kwargs):
return self.__db_connection().execute(*args, **kwargs)
def __get_path(self, filepath):
if "\\" in filepath:
return PureWindowsPath(filepath)
else:
return PurePosixPath(filepath)
def __get_default_datafolder(self):
"""
Locate the default data folder using the `model` database location
It's possible that the model database is not the currently set default, i.e if it's been changed after install
The solution to this would be to make a new database and then perform the below check on that instead.
See https://blogs.technet.microsoft.com/sqlman/2009/07/19/tsql-script-determining-default-database-file-log-path/
However, this seems like a heavyweight solution for what is essentially a tsql-writeable tempfolder, so
checking the model db seems like a good 'boring' solution
:return: Default data directory e.g. "C:\\DATA"
"""
datafile = self.__execute(
"""
SELECT physical_name
FROM sys.master_files mf
INNER JOIN sys.[databases] d
ON mf.[database_id] = d.[database_id]
WHERE d.[name] = 'model' AND type = 0
"""
).fetchone()[0]
return self.__get_path(datafile).parent
def __get_default_logfolder(self):
"""
Locate the default log folder using the `model` database location
__get_default_datafolder: see for more info
:return:
"""
logfile = self.__execute(
"""
SELECT physical_name
FROM sys.master_files mf
INNER JOIN sys.[databases] d
ON mf.[database_id] = d.[database_id]
WHERE d.[name] = 'model' AND type = 1
"""
).fetchone()[0]
return self.__get_path(logfile).parent
def __get_file_moves(self, input_path):
"""
Using RESTORE FILELISTONLY, get all the files in the backup that need to be moved to the local system for restore
:return: a dict of file name: new destination
"""
datadir = self.__get_default_datafolder()
logdir = self.__get_default_logfolder()
filelist = self.__execute(
f"RESTORE FILELISTONLY FROM DISK = ?;", input_path
).fetchall()
move_file_map = {}
for file in filelist:
name = file[0]
type = file[2].upper()
filepath = self.__get_path(file[1])
# log files can go into the default log directory, everything else can go into the data directory
if type == "L":
target_path = str(logdir.joinpath(f"{self.db_name}_{filepath.name}"))
else:
target_path = str(datadir.joinpath(f"{self.db_name}_{filepath.name}"))
move_file_map[name] = target_path
return move_file_map
def __async_operation_progress(self, desc, cursor):
        # With STATS=x, we should receive 100/x result sets, provided the backup is slow enough.
# With some databases, it will jump from y% to 100, so we'll only get <x nextset calls.
# Even SSMS doesn't get informed (read: it's not my fault, blame microsoft)
with tqdm(desc=desc, total=math.floor(100 / self.__STATS)) as progressbar:
while cursor.nextset():
progressbar.update()
# finish the progress - less confusing than a dangling 40% progressbar
progressbar.update(progressbar.total - progressbar.n)
def __run_scripts(self, script_list, title=""):
import pyodbc
for i, script in enumerate(script_list):
self.logger.info(f'Running {title} script #{i} "{script[:50]}"')
cursor = self.__db_execute(script)
results = None
try:
results = cursor.fetchall()
except pyodbc.Error:
pass
self.logger.info(results)
def __create_seed_table(self, qualifier_map):
seed_column_lines = [
"[{}] {}".format(name, _FAKE_COLUMN_TYPES[col.data_type])
for name, col in qualifier_map.items()
]
create_statement = "CREATE TABLE [{}]({});".format(
SEED_TABLE_NAME, ",".join(seed_column_lines)
)
self.__db_execute(create_statement)
def __drop_seed_table(self):
self.__db_execute("DROP TABLE IF EXISTS [{}];".format(SEED_TABLE_NAME))
def __insert_seed_row(self, qualifier_map):
column_list = ",".join(
["[{}]".format(qualifier) for qualifier in qualifier_map]
)
substitution_list = ",".join(
[" ?".format(qualifier) for qualifier in qualifier_map]
)
value_list = [column.value for qualifier, column in qualifier_map.items()]
statement = "INSERT INTO [{}]({}) VALUES ({});".format(
SEED_TABLE_NAME, column_list, substitution_list
)
self.__db_execute(statement, value_list)
def __seed(self, qualifier_map):
for i in tqdm(
range(0, self.seed_rows), desc="Inserting seed data", unit="rows"
):
self.__insert_seed_row(qualifier_map)
def __get_column_subquery(self, column_strategy, table_name, column_name):
if column_strategy.strategy_type == UpdateColumnStrategyTypes.EMPTY:
return "('')"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.UNIQUE_EMAIL:
return f"( SELECT CONCAT(NEWID(), '@', NEWID(), '.com') )"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.UNIQUE_LOGIN:
return f"( SELECT NEWID() )"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.FAKE_UPDATE:
column = f"[{column_strategy.qualifier}]"
if column_strategy.sql_type:
column = f"CAST({column} AS {column_strategy.sql_type})"
# Add WHERE LIKE % OR NULL to make subquery correlated with outer table, therefore uncachable
return f"( SELECT TOP 1 {column} FROM [{SEED_TABLE_NAME}] WHERE [{table_name}].[{column_name}] LIKE '%' OR [{table_name}].[{column_name}] IS NULL ORDER BY NEWID())"
elif column_strategy.strategy_type == UpdateColumnStrategyTypes.LITERAL:
return column_strategy.value
else:
raise UnsupportedColumnStrategyError(column_strategy)
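    # Illustrative shape of the SQL produced for a FAKE_UPDATE column by the branches
    # above (the table/column names here are hypothetical, and <SEED_TABLE_NAME> stands
    # for the imported seed-table constant):
    #
    #   UPDATE [users] SET [first_name] =
    #     ( SELECT TOP 1 [first_name] FROM [<SEED_TABLE_NAME>]
    #       WHERE [users].[first_name] LIKE '%' OR [users].[first_name] IS NULL
    #       ORDER BY NEWID())
    #
    # The LIKE '%' / IS NULL predicate correlates the subquery with the outer row, so
    # SQL Server cannot cache one random seed value for every row.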
def create_database(self):
self.logger.warning(
"MSSQL: create_database ignored, database will be created when restore_db is run"
)
def drop_database(self):
# force connection close so we can always drop the db: sometimes timing makes a normal drop impossible.
self.__execute(
f"ALTER DATABASE [{self.db_name}] SET SINGLE_USER WITH ROLLBACK IMMEDIATE;"
)
self.__execute(f"DROP DATABASE IF EXISTS [{self.db_name}];")
def anonymize_database(self, database_strategy):
qualifier_map = database_strategy.fake_update_qualifier_map
if len(qualifier_map) > 0:
self.logger.info("creating seed table with %d columns", len(qualifier_map))
self.__create_seed_table(qualifier_map)
self.logger.info("Inserting seed data")
self.__seed(qualifier_map)
self.__run_scripts(database_strategy.before_scripts, "before")
table_strategies = database_strategy.table_strategies
self.logger.info("Anonymizing %d tables", len(table_strategies))
anonymization_errors = []
with tqdm(
desc="Anonymizing database", total=len(table_strategies)
) as progressbar:
for table_strategy in table_strategies:
try:
table_name = table_strategy.table_name
schema_prefix = (
f"[{table_strategy.schema}]." if table_strategy.schema else ""
)
if table_strategy.strategy_type == TableStrategyTypes.TRUNCATE:
progressbar.set_description("Truncating {}".format(table_name))
self.__db_execute(
"TRUNCATE TABLE {}[{}];".format(schema_prefix, table_name)
)
elif table_strategy.strategy_type == TableStrategyTypes.DELETE:
progressbar.set_description("Deleting {}".format(table_name))
self.__db_execute(
"DELETE FROM {}[{}];".format(schema_prefix, table_name)
)
elif (
table_strategy.strategy_type
== TableStrategyTypes.UPDATE_COLUMNS
):
progressbar.set_description("Anonymizing {}".format(table_name))
where_grouping = table_strategy.group_by_where()
total_wheres = len(where_grouping)
for i, (where, column_map) in enumerate(where_grouping.items()):
column_assignments = ",".join(
[
"[{}] = {}".format(
name,
self.__get_column_subquery(
column, table_name, name
),
)
for name, column in column_map.items()
]
)
where_clause = f" WHERE {where}" if where else ""
progressbar.set_description(
"Anonymizing {}: w[{}/{}]".format(
table_name, i + 1, total_wheres
)
)
# Disable ANSI_WARNINGS to allow oversized fake data to be truncated without error
self.__db_execute(
"SET ANSI_WARNINGS off; UPDATE {}[{}] SET {}{}; SET ANSI_WARNINGS on;".format(
schema_prefix,
table_name,
column_assignments,
where_clause,
)
)
else:
raise UnsupportedTableStrategyError(table_strategy)
except Exception as e:
anonymization_errors.append(e)
self.logger.exception(
f"Error while anonymizing table {table_strategy.qualified_name}"
)
progressbar.update()
if len(anonymization_errors) > 0:
raise Exception("Error during anonymization")
self.__run_scripts(database_strategy.after_scripts, "after")
self.logger.info("Dropping seed table")
self.__drop_seed_table()
def restore_database(self, input_path):
self.__require_local_server()
move_files = self.__get_file_moves(input_path)
self.logger.info("Found %d files in %s", len(move_files), input_path)
self.logger.debug(move_files)
# get move statements and flatten pairs out so we can do the 2-param substitution
move_clauses = ", ".join(["MOVE ? TO ?"] * len(move_files))
move_clause_params = [item for pair in move_files.items() for item in pair]
restore_cursor = self.__execute(
f"RESTORE DATABASE ? FROM DISK = ? WITH {move_clauses}, STATS = ?;",
[self.db_name, input_path, *move_clause_params, self.__STATS],
)
self.__async_operation_progress("Restoring Database", restore_cursor)
def dump_database(self, output_path):
self.__require_local_server()
with_options = []
if self.__backup_compression:
with_options.append("COMPRESSION")
with_options_str = (
",".join(with_options) + ", " if len(with_options) > 0 else ""
)
dump_cursor = self.__execute(
f"BACKUP DATABASE ? TO DISK = ? WITH {with_options_str}STATS = ?;",
[self.db_name, output_path, self.__STATS],
)
self.__async_operation_progress("Dumping Database", dump_cursor)
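# A minimal usage sketch (assumptions: the connection details, file paths and the
# database_strategy object are placeholders; a real strategy normally comes from a
# parsed pynonymizer strategyfile rather than being built by hand):
#
#   provider = MsSqlProvider(
#       db_host=None, db_user="sa", db_pass="hunter2", db_name="scratch_db",
#   )
#   provider.restore_database(r"C:\backups\prod.bak")
#   provider.anonymize_database(database_strategy)   # hypothetical strategy object
#   provider.dump_database(r"C:\backups\prod_anonymized.bak")
#   provider.drop_database()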
|
py | 1a3c376a9bf5d79305c20753c4c41957badb8e46 | """Based on BertForTokenClassification, implemented here since it's not in transformers currently."""
from torch import nn
from transformers import AlbertModel, AlbertPreTrainedModel
class AlbertForTokenClassification(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`,
`optional`, defaults to :obj:`None`):
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration
(:class:`~transformers.AlbertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
Classification loss.
scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = nn.CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
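# Usage sketch (illustrative; the "albert-base-v2" checkpoint name and the label count
# are assumptions, not part of this module):
#
#   import torch
#   from transformers import AlbertTokenizer
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   model = AlbertForTokenClassification.from_pretrained("albert-base-v2", num_labels=9)
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   labels = torch.zeros_like(inputs["input_ids"])
#   loss, scores = model(**inputs, labels=labels)[:2]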
|
py | 1a3c38397f840038328bfaa6cae546c2c8a40b0b | '''
Created on June 28, 2019
@author: [email protected]
''' |
py | 1a3c383de794fbfac1c5d114312eaa1275411c43 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import binascii
import calendar
import datetime
import json
import time
import unittest
import mock
import pytest
import six
from six.moves import urllib_parse
from . import _read_local_json
_SERVICE_ACCOUNT_JSON = _read_local_json("url_signer_v4_test_account.json")
_CONFORMANCE_TESTS = _read_local_json("url_signer_v4_test_data.json")["signingV4Tests"]
_BUCKET_TESTS = [
test for test in _CONFORMANCE_TESTS if "bucket" in test and not test.get("object")
]
_BLOB_TESTS = [
test for test in _CONFORMANCE_TESTS if "bucket" in test and test.get("object")
]
def _utc_seconds(when):
return int(calendar.timegm(when.timetuple()))
class Test_get_expiration_seconds_v2(unittest.TestCase):
@staticmethod
def _call_fut(expiration):
from google.cloud.storage._signing import get_expiration_seconds_v2
return get_expiration_seconds_v2(expiration)
def test_w_invalid_expiration_type(self):
with self.assertRaises(TypeError):
self._call_fut(object(), None)
def test_w_expiration_none(self):
with self.assertRaises(TypeError):
self._call_fut(None)
def test_w_expiration_int(self):
self.assertEqual(self._call_fut(123), 123)
def test_w_expiration_long(self):
if not six.PY2:
raise unittest.SkipTest("No long on Python 3+")
self.assertEqual(self._call_fut(long(123)), 123) # noqa: F821
def test_w_expiration_naive_datetime(self):
expiration_no_tz = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
utc_seconds = _utc_seconds(expiration_no_tz)
self.assertEqual(self._call_fut(expiration_no_tz), utc_seconds)
def test_w_expiration_utc_datetime(self):
from google.cloud._helpers import UTC
expiration_utc = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, UTC)
utc_seconds = _utc_seconds(expiration_utc)
self.assertEqual(self._call_fut(expiration_utc), utc_seconds)
def test_w_expiration_other_zone_datetime(self):
from google.cloud._helpers import _UTC
class CET(_UTC):
_tzname = "CET"
_utcoffset = datetime.timedelta(hours=1)
zone = CET()
expiration_other = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, zone)
utc_seconds = _utc_seconds(expiration_other)
cet_seconds = utc_seconds - (60 * 60) # CET one hour earlier than UTC
self.assertEqual(self._call_fut(expiration_other), cet_seconds)
def test_w_expiration_timedelta_seconds(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
utc_seconds = _utc_seconds(dummy_utcnow)
expiration_as_delta = datetime.timedelta(seconds=10)
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_as_delta)
self.assertEqual(result, utc_seconds + 10)
utcnow.assert_called_once_with()
def test_w_expiration_timedelta_days(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
utc_seconds = _utc_seconds(dummy_utcnow)
expiration_as_delta = datetime.timedelta(days=1)
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_as_delta)
self.assertEqual(result, utc_seconds + 86400)
utcnow.assert_called_once_with()
class Test_get_expiration_seconds_v4(unittest.TestCase):
@staticmethod
def _call_fut(expiration):
from google.cloud.storage._signing import get_expiration_seconds_v4
return get_expiration_seconds_v4(expiration)
def test_w_invalid_expiration_type(self):
with self.assertRaises(TypeError):
self._call_fut(object(), None)
def test_w_expiration_none(self):
with self.assertRaises(TypeError):
self._call_fut(None)
def test_w_expiration_int_gt_seven_days(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
delta = datetime.timedelta(days=10)
expiration_utc = dummy_utcnow + delta
expiration_seconds = _utc_seconds(expiration_utc)
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
with self.assertRaises(ValueError):
self._call_fut(expiration_seconds)
utcnow.assert_called_once_with()
def test_w_expiration_int(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
expiration_seconds = 10
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_seconds)
self.assertEqual(result, expiration_seconds)
utcnow.assert_called_once_with()
def test_w_expiration_naive_datetime(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
delta = datetime.timedelta(seconds=10)
expiration_no_tz = dummy_utcnow + delta
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_no_tz)
self.assertEqual(result, delta.seconds)
utcnow.assert_called_once_with()
def test_w_expiration_utc_datetime(self):
from google.cloud._helpers import UTC
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, UTC)
delta = datetime.timedelta(seconds=10)
expiration_utc = dummy_utcnow + delta
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_utc)
self.assertEqual(result, delta.seconds)
utcnow.assert_called_once_with()
def test_w_expiration_other_zone_datetime(self):
from google.cloud._helpers import UTC
from google.cloud._helpers import _UTC
class CET(_UTC):
_tzname = "CET"
_utcoffset = datetime.timedelta(hours=1)
zone = CET()
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0, UTC)
dummy_cetnow = dummy_utcnow.astimezone(zone)
delta = datetime.timedelta(seconds=10)
expiration_other = dummy_cetnow + delta
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_other)
self.assertEqual(result, delta.seconds)
utcnow.assert_called_once_with()
def test_w_expiration_timedelta(self):
dummy_utcnow = datetime.datetime(2004, 8, 19, 0, 0, 0, 0)
expiration_as_delta = datetime.timedelta(seconds=10)
patch = mock.patch(
"google.cloud.storage._signing.NOW", return_value=dummy_utcnow
)
with patch as utcnow:
result = self._call_fut(expiration_as_delta)
self.assertEqual(result, expiration_as_delta.total_seconds())
utcnow.assert_called_once_with()
class Test_get_signed_query_params_v2(unittest.TestCase):
@staticmethod
def _call_fut(credentials, expiration, string_to_sign):
from google.cloud.storage._signing import get_signed_query_params_v2
return get_signed_query_params_v2(credentials, expiration, string_to_sign)
def test_it(self):
sig_bytes = b"DEADBEEF"
account_name = mock.sentinel.service_account_email
credentials = _make_credentials(signer_email=account_name)
credentials.sign_bytes.return_value = sig_bytes
expiration = 100
string_to_sign = "dummy_signature"
result = self._call_fut(credentials, expiration, string_to_sign)
expected = {
"GoogleAccessId": account_name,
"Expires": expiration,
"Signature": base64.b64encode(sig_bytes),
}
self.assertEqual(result, expected)
credentials.sign_bytes.assert_called_once_with(string_to_sign)
class Test_get_canonical_headers(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage._signing import get_canonical_headers
return get_canonical_headers(*args, **kwargs)
def test_w_none(self):
headers = None
expected_canonical = []
expected_ordered = []
canonical, ordered = self._call_fut(headers)
self.assertEqual(canonical, expected_canonical)
self.assertEqual(ordered, expected_ordered)
def test_w_dict(self):
headers = {"foo": "Foo 1.2.3", "Bar": " baz,bam,qux "}
expected_canonical = ["bar:baz,bam,qux", "foo:Foo 1.2.3"]
expected_ordered = [tuple(item.split(":")) for item in expected_canonical]
canonical, ordered = self._call_fut(headers)
self.assertEqual(canonical, expected_canonical)
self.assertEqual(ordered, expected_ordered)
def test_w_list_and_multiples(self):
headers = [
("foo", "Foo 1.2.3"),
("Bar", " baz"),
("Bar", "bam"),
("Bar", "qux "),
]
expected_canonical = ["bar:baz,bam,qux", "foo:Foo 1.2.3"]
expected_ordered = [tuple(item.split(":")) for item in expected_canonical]
canonical, ordered = self._call_fut(headers)
self.assertEqual(canonical, expected_canonical)
self.assertEqual(ordered, expected_ordered)
def test_w_embedded_ws(self):
headers = {"foo": "Foo\n1.2.3", "Bar": " baz bam qux "}
expected_canonical = ["bar:baz bam qux", "foo:Foo 1.2.3"]
expected_ordered = [tuple(item.split(":")) for item in expected_canonical]
canonical, ordered = self._call_fut(headers)
self.assertEqual(canonical, expected_canonical)
self.assertEqual(ordered, expected_ordered)
class Test_canonicalize_v2(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage._signing import canonicalize_v2
return canonicalize_v2(*args, **kwargs)
def test_wo_headers_or_query_parameters(self):
method = "GET"
resource = "/bucket/blob"
canonical = self._call_fut(method, resource, None, None)
self.assertEqual(canonical.method, method)
self.assertEqual(canonical.resource, resource)
self.assertEqual(canonical.query_parameters, [])
self.assertEqual(canonical.headers, [])
def test_w_headers_and_resumable(self):
method = "RESUMABLE"
resource = "/bucket/blob"
headers = [("x-goog-extension", "foobar")]
canonical = self._call_fut(method, resource, None, headers)
self.assertEqual(canonical.method, "POST")
self.assertEqual(canonical.resource, resource)
self.assertEqual(canonical.query_parameters, [])
self.assertEqual(
canonical.headers, ["x-goog-extension:foobar", "x-goog-resumable:start"]
)
def test_w_query_parameters(self):
method = "GET"
resource = "/bucket/blob"
query_parameters = {"foo": "bar", "baz": "qux"}
canonical = self._call_fut(method, resource, query_parameters, None)
self.assertEqual(canonical.method, method)
self.assertEqual(canonical.resource, "{}?baz=qux&foo=bar".format(resource))
self.assertEqual(canonical.query_parameters, [("baz", "qux"), ("foo", "bar")])
self.assertEqual(canonical.headers, [])
class Test_generate_signed_url_v2(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage._signing import generate_signed_url_v2
return generate_signed_url_v2(*args, **kwargs)
def _generate_helper(
self,
api_access_endpoint="",
method="GET",
content_md5=None,
content_type=None,
response_type=None,
response_disposition=None,
generation=None,
headers=None,
query_parameters=None,
):
from six.moves.urllib.parse import urlencode
resource = "/name/path"
credentials = _make_credentials(signer_email="[email protected]")
credentials.sign_bytes.return_value = b"DEADBEEF"
signed = base64.b64encode(credentials.sign_bytes.return_value)
signed = signed.decode("ascii")
expiration = 1000
url = self._call_fut(
credentials,
resource,
expiration=expiration,
api_access_endpoint=api_access_endpoint,
method=method,
content_md5=content_md5,
content_type=content_type,
response_type=response_type,
response_disposition=response_disposition,
generation=generation,
headers=headers,
query_parameters=query_parameters,
service_account_email=None,
access_token=None,
)
# Check the mock was called.
method = method.upper()
if headers is None:
headers = []
elif isinstance(headers, dict):
headers = sorted(headers.items())
elements = []
expected_resource = resource
if method == "RESUMABLE":
elements.append("POST")
headers.append(("x-goog-resumable", "start"))
else:
elements.append(method)
if query_parameters is not None:
normalized_qp = {
key.lower(): value and value.strip() or ""
for key, value in query_parameters.items()
}
expected_qp = urlencode(sorted(normalized_qp.items()))
expected_resource = "{}?{}".format(resource, expected_qp)
elements.append(content_md5 or "")
elements.append(content_type or "")
elements.append(str(expiration))
elements.extend(["{}:{}".format(*header) for header in headers])
elements.append(expected_resource)
string_to_sign = "\n".join(elements)
credentials.sign_bytes.assert_called_once_with(string_to_sign)
scheme, netloc, path, qs, frag = urllib_parse.urlsplit(url)
expected_scheme, expected_netloc, _, _, _ = urllib_parse.urlsplit(
api_access_endpoint
)
self.assertEqual(scheme, expected_scheme)
self.assertEqual(netloc, expected_netloc)
self.assertEqual(path, resource)
self.assertEqual(frag, "")
# Check the URL parameters.
params = dict(urllib_parse.parse_qsl(qs, keep_blank_values=True))
self.assertEqual(params["GoogleAccessId"], credentials.signer_email)
self.assertEqual(params["Expires"], str(expiration))
self.assertEqual(params["Signature"], signed)
if response_type is not None:
self.assertEqual(params["response-content-type"], response_type)
if response_disposition is not None:
self.assertEqual(
params["response-content-disposition"], response_disposition
)
if generation is not None:
self.assertEqual(params["generation"], str(generation))
if query_parameters is not None:
for key, value in query_parameters.items():
value = value.strip() if value else ""
self.assertEqual(params[key].lower(), value)
def test_w_expiration_int(self):
self._generate_helper()
def test_w_endpoint(self):
api_access_endpoint = "https://api.example.com"
self._generate_helper(api_access_endpoint=api_access_endpoint)
def test_w_method(self):
method = "POST"
self._generate_helper(method=method)
def test_w_method_resumable(self):
method = "RESUMABLE"
self._generate_helper(method=method)
def test_w_response_type(self):
response_type = "text/plain"
self._generate_helper(response_type=response_type)
def test_w_response_disposition(self):
response_disposition = "attachment; filename=blob.png"
self._generate_helper(response_disposition=response_disposition)
def test_w_generation(self):
generation = "123"
self._generate_helper(generation=generation)
def test_w_custom_headers_dict(self):
self._generate_helper(headers={"x-goog-foo": "bar"})
def test_w_custom_headers_list(self):
self._generate_helper(headers=[("x-goog-foo", "bar")])
def test_w_custom_query_parameters_w_string_value(self):
self._generate_helper(query_parameters={"bar": "/"})
def test_w_custom_query_parameters_w_none_value(self):
self._generate_helper(query_parameters={"qux": None})
def test_with_google_credentials(self):
resource = "/name/path"
credentials = _make_credentials()
expiration = int(time.time() + 5)
with self.assertRaises(AttributeError):
self._call_fut(credentials, resource=resource, expiration=expiration)
def test_with_access_token(self):
resource = "/name/path"
credentials = _make_credentials()
expiration = int(time.time() + 5)
email = mock.sentinel.service_account_email
with mock.patch(
"google.cloud.storage._signing._sign_message", return_value=b"DEADBEEF"
):
self._call_fut(
credentials,
resource=resource,
expiration=expiration,
service_account_email=email,
access_token="token",
)
class Test_generate_signed_url_v4(unittest.TestCase):
DEFAULT_EXPIRATION = 1000
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage._signing import generate_signed_url_v4
return generate_signed_url_v4(*args, **kwargs)
def _generate_helper(
self,
expiration=DEFAULT_EXPIRATION,
api_access_endpoint="",
method="GET",
content_type=None,
content_md5=None,
response_type=None,
response_disposition=None,
generation=None,
headers=None,
query_parameters=None,
):
now = datetime.datetime(2019, 2, 26, 19, 53, 27)
resource = "/name/path"
signer_email = "[email protected]"
credentials = _make_credentials(signer_email=signer_email)
credentials.sign_bytes.return_value = b"DEADBEEF"
with mock.patch("google.cloud.storage._signing.NOW", lambda: now):
url = self._call_fut(
credentials,
resource,
expiration=expiration,
api_access_endpoint=api_access_endpoint,
method=method,
content_type=content_type,
content_md5=content_md5,
response_type=response_type,
response_disposition=response_disposition,
generation=generation,
headers=headers,
query_parameters=query_parameters,
)
# Check the mock was called.
credentials.sign_bytes.assert_called_once()
scheme, netloc, path, qs, frag = urllib_parse.urlsplit(url)
expected_scheme, expected_netloc, _, _, _ = urllib_parse.urlsplit(
api_access_endpoint
)
self.assertEqual(scheme, expected_scheme)
self.assertEqual(netloc, expected_netloc)
self.assertEqual(path, resource)
self.assertEqual(frag, "")
# Check the URL parameters.
params = dict(urllib_parse.parse_qsl(qs, keep_blank_values=True))
self.assertEqual(params["X-Goog-Algorithm"], "GOOG4-RSA-SHA256")
now_date = now.date().strftime("%Y%m%d")
expected_cred = "{}/{}/auto/storage/goog4_request".format(
signer_email, now_date
)
self.assertEqual(params["X-Goog-Credential"], expected_cred)
now_stamp = now.strftime("%Y%m%dT%H%M%SZ")
self.assertEqual(params["X-Goog-Date"], now_stamp)
self.assertEqual(params["X-Goog-Expires"], str(self.DEFAULT_EXPIRATION))
signed = binascii.hexlify(credentials.sign_bytes.return_value).decode("ascii")
self.assertEqual(params["X-Goog-Signature"], signed)
if response_type is not None:
self.assertEqual(params["response-content-type"], response_type)
if response_disposition is not None:
self.assertEqual(
params["response-content-disposition"], response_disposition
)
if generation is not None:
self.assertEqual(params["generation"], str(generation))
if query_parameters is not None:
for key, value in query_parameters.items():
value = value.strip() if value else ""
self.assertEqual(params[key].lower(), value)
def test_w_expiration_too_long(self):
with self.assertRaises(ValueError):
self._generate_helper(expiration=datetime.timedelta(days=8))
def test_w_defaults(self):
self._generate_helper()
def test_w_api_access_endpoint(self):
self._generate_helper(api_access_endpoint="http://api.example.com")
def test_w_method(self):
self._generate_helper(method="PUT")
def test_w_method_resumable(self):
self._generate_helper(method="RESUMABLE")
def test_w_content_type(self):
self._generate_helper(content_type="text/plain")
def test_w_content_md5(self):
self._generate_helper(content_md5="FACEDACE")
def test_w_response_type(self):
self._generate_helper(response_type="application/octets")
def test_w_response_disposition(self):
self._generate_helper(response_disposition="attachment")
def test_w_generation(self):
self._generate_helper(generation=12345)
def test_w_custom_host_header(self):
self._generate_helper(headers={"Host": "api.example.com"})
def test_w_custom_headers(self):
self._generate_helper(headers={"x-goog-foo": "bar"})
def test_w_custom_payload_hash_goog(self):
self._generate_helper(headers={"x-goog-content-sha256": "DEADBEEF"})
def test_w_custom_query_parameters_w_string_value(self):
self._generate_helper(query_parameters={"bar": "/"})
def test_w_custom_query_parameters_w_none_value(self):
self._generate_helper(query_parameters={"qux": None})
def test_with_access_token(self):
resource = "/name/path"
signer_email = "[email protected]"
credentials = _make_credentials(signer_email=signer_email)
with mock.patch(
"google.cloud.storage._signing._sign_message", return_value=b"DEADBEEF"
):
self._call_fut(
credentials,
resource=resource,
expiration=datetime.timedelta(days=5),
service_account_email=signer_email,
access_token="token",
)
class Test_sign_message(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage._signing import _sign_message
return _sign_message(*args, **kwargs)
def test_sign_bytes(self):
signature = "DEADBEEF"
data = {"signedBlob": signature}
request = make_request(200, data)
with mock.patch("google.auth.transport.requests.Request", return_value=request):
returned_signature = self._call_fut(
"123", service_account_email="[email protected]", access_token="token"
)
assert returned_signature == signature
def test_sign_bytes_failure(self):
from google.auth import exceptions
request = make_request(401)
with mock.patch("google.auth.transport.requests.Request", return_value=request):
with pytest.raises(exceptions.TransportError):
self._call_fut(
"123",
service_account_email="[email protected]",
access_token="token",
)
class TestCustomURLEncoding(unittest.TestCase):
def test_url_encode(self):
from google.cloud.storage._signing import _url_encode
# param1 includes safe symbol ~
# param# includes symbols, which must be encoded
query_params = {"param1": "value~1-2", "param#": "*value+value/"}
self.assertEqual(
_url_encode(query_params), "param%23=%2Avalue%2Bvalue%2F¶m1=value~1-2"
)
class TestQuoteParam(unittest.TestCase):
def test_ascii_symbols(self):
from google.cloud.storage._signing import _quote_param
encoded_param = _quote_param("param")
self.assertIsInstance(encoded_param, str)
self.assertEqual(encoded_param, "param")
def test_quoted_symbols(self):
from google.cloud.storage._signing import _quote_param
encoded_param = _quote_param("!#$%&'()*+,/:;=?@[]")
self.assertIsInstance(encoded_param, str)
self.assertEqual(
encoded_param, "%21%23%24%25%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D"
)
def test_unquoted_symbols(self):
from google.cloud.storage._signing import _quote_param
import string
UNQUOTED = string.ascii_letters + string.digits + ".~_-"
encoded_param = _quote_param(UNQUOTED)
self.assertIsInstance(encoded_param, str)
self.assertEqual(encoded_param, UNQUOTED)
def test_unicode_symbols(self):
from google.cloud.storage._signing import _quote_param
encoded_param = _quote_param("ЁЙЦЯЩЯЩ")
self.assertIsInstance(encoded_param, str)
self.assertEqual(encoded_param, "%D0%81%D0%99%D0%A6%D0%AF%D0%A9%D0%AF%D0%A9")
def test_bytes(self):
from google.cloud.storage._signing import _quote_param
encoded_param = _quote_param(b"bytes")
self.assertIsInstance(encoded_param, str)
self.assertEqual(encoded_param, "bytes")
class TestV4Stamps(unittest.TestCase):
def test_get_v4_now_dtstamps(self):
import datetime
from google.cloud.storage._signing import get_v4_now_dtstamps
with mock.patch(
"google.cloud.storage._signing.NOW",
return_value=datetime.datetime(2020, 3, 12, 13, 14, 15),
) as now_mock:
timestamp, datestamp = get_v4_now_dtstamps()
now_mock.assert_called_once()
self.assertEqual(timestamp, "20200312T131415Z")
self.assertEqual(datestamp, "20200312")
_DUMMY_SERVICE_ACCOUNT = None
def dummy_service_account():
global _DUMMY_SERVICE_ACCOUNT
from google.oauth2.service_account import Credentials
if _DUMMY_SERVICE_ACCOUNT is None:
_DUMMY_SERVICE_ACCOUNT = Credentials.from_service_account_info(
_SERVICE_ACCOUNT_JSON
)
return _DUMMY_SERVICE_ACCOUNT
_API_ACCESS_ENDPOINT = "https://storage.googleapis.com"
def _run_conformance_test(
resource, test_data, api_access_endpoint=_API_ACCESS_ENDPOINT
):
credentials = dummy_service_account()
url = Test_generate_signed_url_v4._call_fut(
credentials,
resource,
expiration=test_data["expiration"],
api_access_endpoint=api_access_endpoint,
method=test_data["method"],
_request_timestamp=test_data["timestamp"],
headers=test_data.get("headers"),
query_parameters=test_data.get("queryParameters"),
)
assert url == test_data["expectedUrl"]
@pytest.mark.parametrize("test_data", _BUCKET_TESTS)
def test_conformance_bucket(test_data):
global _API_ACCESS_ENDPOINT
if "urlStyle" in test_data and test_data["urlStyle"] == "BUCKET_BOUND_HOSTNAME":
_API_ACCESS_ENDPOINT = "{scheme}://{bucket_bound_hostname}".format(
scheme=test_data["scheme"],
bucket_bound_hostname=test_data["bucketBoundHostname"],
)
resource = "/"
_run_conformance_test(resource, test_data, _API_ACCESS_ENDPOINT)
else:
resource = "/{}".format(test_data["bucket"])
_run_conformance_test(resource, test_data)
@pytest.mark.parametrize("test_data", _BLOB_TESTS)
def test_conformance_blob(test_data):
global _API_ACCESS_ENDPOINT
if "urlStyle" in test_data:
if test_data["urlStyle"] == "BUCKET_BOUND_HOSTNAME":
_API_ACCESS_ENDPOINT = "{scheme}://{bucket_bound_hostname}".format(
scheme=test_data["scheme"],
bucket_bound_hostname=test_data["bucketBoundHostname"],
)
# For the VIRTUAL_HOSTED_STYLE
else:
_API_ACCESS_ENDPOINT = "{scheme}://{bucket_name}.storage.googleapis.com".format(
scheme=test_data["scheme"], bucket_name=test_data["bucket"]
)
resource = "/{}".format(test_data["object"])
_run_conformance_test(resource, test_data, _API_ACCESS_ENDPOINT)
else:
resource = "/{}/{}".format(test_data["bucket"], test_data["object"])
_run_conformance_test(resource, test_data)
def _make_credentials(signer_email=None):
import google.auth.credentials
if signer_email:
credentials = mock.Mock(spec=google.auth.credentials.Signing)
credentials.signer_email = signer_email
return credentials
else:
return mock.Mock(spec=google.auth.credentials.Credentials)
def make_request(status, data=None):
from google.auth import transport
response = mock.create_autospec(transport.Response, instance=True)
response.status = status
if data is not None:
response.data = json.dumps(data).encode("utf-8")
request = mock.create_autospec(transport.Request)
request.return_value = response
return request
|
py | 1a3c3888f237709efd685eb4dcafbd8d241c8d80 | #!/usr/bin/env python
# coding: utf-8
# The MIT License (MIT)
# Copyright (c) 2015 Pavel Vomacka
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import argparse
import os
import sys
import re
import subprocess, shlex
from threading import Timer
import sqlite3
manpage_groups = ("1", "8",)
# Name of output file.
db_file = "switch.sqlite3"
# Database path
db_path = "/tmp/switchTest/"
# Database schema
schema_file = "./schema.sql"
opened_db = None
def err_print(*args, **kwargs):
"""
Print to stderr.
"""
print(*args, file=sys.stderr, **kwargs)
def create_empty_db():
"""
Prepare empty database.
"""
global opened_db
database_file = os.path.join(db_path, db_file)
print("\tCreating new database file " + database_file)
if not os.path.exists(db_path):
os.makedirs(db_path)
with sqlite3.connect(os.path.join(db_path, db_file)) as opened_db:
print("\t\tImporting database schema...")
with open(schema_file, 'rt') as schema_f:
schema = schema_f.read()
# Aplly the schema.
opened_db.executescript(schema)
def open_db():
"""
Open DB file.
"""
global opened_db
database_file = os.path.join(db_path, db_file)
print("\tOpening DB file: " + database_file)
opened_db = sqlite3.connect(database_file)
curs = opened_db.cursor()
    # Check whether the expected tables exist in the db
curs.execute("SELECT count(*) FROM sqlite_master WHERE type='table' AND ("
"name=? OR name=? OR name=?);", ('system', 'command', 'switch',))
table_count = curs.fetchone()[0]
if table_count != 3:
raise RuntimeError
def add_system(sys_name):
"""
Add system record.
"""
curs = opened_db.cursor()
curs.execute("INSERT INTO system(name) VALUES(?)", (sys_name,))
opened_db.commit()
return curs.lastrowid
def find_system(sys_name):
"""
Find system id.
"""
curs = opened_db.cursor()
curs.execute("SELECT id FROM system WHERE name=?", (sys_name,))
return curs.fetchone()
def handle_system(sys_name):
"""
Handle system.
"""
system = find_system(sys_name)
if system is None:
system = add_system(sys_name)
else:
system = system[0]
return system
def add_command(manpage_name, command, group, sys_id):
"""
Add command record.
"""
curs = opened_db.cursor()
# Handle situation when we are finding record for command --help output.
if group is not None:
group = str(group)
curs.execute("INSERT INTO command(command, manpage_name, man_group, system_id) "
"VALUES(?,?,?,?)", (command, manpage_name, group, str(sys_id),))
opened_db.commit()
return curs.lastrowid
def find_command(command, group, os_id):
"""
Find command record for correct OS.
"""
curs = opened_db.cursor()
# Handle situation when we are finding record for command --help output.
if group is None:
curs.execute("SELECT id FROM command WHERE command=? AND system_id=?",
(command, os_id,))
else:
curs.execute("SELECT id FROM command WHERE command=? AND "
"man_group=? AND system_id=?",
(command, group, os_id,))
return curs.fetchone()
def handle_command(manpage_name, command, group, os_id):
"""
    Handle adding a command; if the command already exists,
    also remove all switches associated with it.
"""
command_id = find_command(command, group, os_id)
if command_id is None:
# Command is not in database. Add it and use the new ID
command_id = add_command(manpage_name, command, group, os_id)
else:
# Command already exists so use its record id and remove
# all associated switches.
command_id = command_id[0]
delete_associated_switches(command_id)
return command_id
def store_cmds_to_db(cmds, os_id):
"""
    Store all commands reported by 'compgen -c' in the database, even when
    --help is not run for each command. This helps with testing of commands.
"""
for cmd in cmds:
handle_command(None, cmd, None, os_id)
def get_all_commands():
"""
Get all already inserted commands
"""
curs = opened_db.cursor()
curs.execute("SELECT command FROM command;")
return curs.fetchall()
def add_switch(switch, com_id):
"""
Add switch record.
"""
curs = opened_db.cursor()
curs.execute("INSERT INTO switch(switch, command_id) "
"VALUES(?,?)", (switch, str(com_id),))
opened_db.commit()
def delete_associated_switches(command_id):
"""
    Delete all switches associated with the particular command.
"""
curs = opened_db.cursor()
curs.execute("DELETE FROM switch WHERE command_id=?", (command_id,))
opened_db.commit()
def prepare_dir_regex():
"""
    Prepare a regex that matches the man page directories whose group numbers
    are listed in the manpage_groups global variable.
"""
regex_begin = r"^(?:"
regex_end = r")$"
regex = regex_begin
for group_num in manpage_groups:
regex = regex + r"(?:man" + group_num + ")|"
regex = re.sub(r'\|$', '', regex)
regex = regex + regex_end
return regex
def get_directories():
"""
    Fetch the paths of all relevant man page directories.
"""
directories = []
dir_regex = prepare_dir_regex()
# Load all directories and files in /usr/share/man.
for root, dirs, files in os.walk('/usr/share/man'):
# Go through all directory names
for directory in dirs:
            # Prepare a regexp that matches all directories whose names start with 'man'
dirRegexp = re.compile(dir_regex)
if dirRegexp.match(directory) is None:
                # Skip directories that do not match the regexp
continue
            # Join each matching directory name with its root path
            # and save it in the list.
directories.append(os.path.join(root, directory))
# Do not go deeper into subdirectories.
break
# Return list with directories
return directories
def get_file_names(directories):
"""
    Get the names of all files in 'directories'.
"""
files = []
# Go through all directories
for directory in directories:
# Fetch all directories and files in current directory
for r, d, f in os.walk(directory):
# Go through all files.
for ccc in f:
# Add path to the file to the list
files.append(r + "/" + ccc)
# Return filled list.
return files
def parse_name(content):
"""
Finds the name of the man page.
"""
# Create regular expression
name_regex = re.compile(r"^([\w\.-]*)")
# Get name of manual page
just_name = name_regex.search(content)
name_str = ""
if just_name is not None:
name_str = just_name.group(1)
return name_str
def parse_manpage_number(path):
"""
Parse number of man page group.
"""
# Create regular expression
number_regex = re.compile(r".*/man(\d).*")
# Get number of manpage group
number = number_regex.search(path)
only_number = ""
if number is not None:
number = number.group(1)
return number
def parse_one_page(content):
"""
Parse flags from manpage which is in content parameter.
"""
    # Regular expression for extracting flags (switches) from the manpage text
flag_regex = re.compile(r"(?:\n?(?:(?:[^\w\-])|(?:\[))((?:(?:\-{1,2})|(?:\+))[#\?\w\-\+]*)"
"(?:(?:,?\s((?:(?:\-{1,2})|(?:\+))[#\?\w\-\+]+))"
"|(?:.*?\s((?:(?:\-{1,2})|(?:\+))[#\?\w\-\+]+)))?)"
"|(?:[\[\{]((?:(?:\-{1,2})|(?:\+))[^ ]*?)[\|,\]\}]"
"(?:((?:(?:\-{1,2})|(?:\+))}[^ ]*?)[\]\}])?)+")
flag_list = flag_regex.findall(content)
# Prepare empty list.
parsed_flags = []
# Create regex for checking whether flag contains at least one letter
# or '#' or question mark.
check_regexp = re.compile(r"(?:.*?[\w#\?]+.*?)|(?:\-\-)")
# Go through all flags (flags can be in tuple.)
for flags in flag_list:
# Go through each tuple.
for flag in flags:
# Check flag.
if check_regexp.match(flag):
#Add flag into list.
#print(flag)
parsed_flags.append(flag)
# Remove duplicates
parsed_flags = list(set(parsed_flags))
# Return flag which was found.
return parsed_flags
def parse_bash_page(content, command_list, os_id):
"""
    Parse the bash man page, which is special: it documents switches for many builtin commands.
"""
    # Regex for the SHELL BUILTIN COMMANDS section header
shell_builtins = re.compile(r"^SHELL BUILTIN COMMANDS$")
    # Builtin command names are indented by roughly 6-8 spaces.
builtin_reg = re.compile(r"^ {6,8}([a-zA-Z0-9_\-\+]+)")
# Match the end of section
section_end = re.compile(r"^[A-Z]")
man_group = 1
builtins = False
first_line = False
current_builtin = ""
bash_man = ""
mans = {}
for line in content.splitlines():
if not builtins:
if shell_builtins.match(line):
builtins = True
# add bash and so far concatenated manpage to table
mans['bash'] = bash_man
else:
bash_man = bash_man + line
else:
if builtin_reg.match(line):
# builtin command
first_word = builtin_reg.findall(line)[0]
if first_word in command_list:
# first word is correct command
current_builtin = first_word
mans[current_builtin] = first_word
continue
elif section_end.match(line):
# next section end whole for cycle
break
if current_builtin != "":
mans[current_builtin] = mans[current_builtin] + line
# parse mans
for command in mans:
flags = parse_one_page(mans[command])
put_manpage_into_db(os_id, None, command, man_group, flags)
def store_helps(os_id, helps):
"""
Store options from help outputs to DB.
"""
for command, manpage in helps.iteritems():
f_list = parse_one_page(manpage)
put_manpage_into_db(os_id, None, command, None, f_list)
def put_manpage_into_db(os_id, man_name, command, number, flags_list):
"""
Insert manpage into database.
"""
command_id = handle_command(man_name, command, number, os_id)
for flag in flags_list:
add_switch(flag, command_id)
def parse_man_pages(files, builtins, os_id):
"""
    Parse all man pages whose paths are given in the 'files' parameter list.
"""
# Define variables with tools for reading files.
reader = "zcat "
zipped_files = "zcat "
not_zipped_files = "cat "
commands_stored = []
# Open /dev/null/ for output of groff
f_devnull = open(os.devnull, 'w')
# Check all files.
for file_path in files:
# clean vars
flags_list = None
man_name = None
command = None
number = None
""" zcat " + f + " | groff -mandoc -Tutf8
        Some errors (e.g. "ADJUST LINE") occur while groff reads the manpages;
        they are caused by formatting mistakes in the manpages themselves.
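        For reference, a hedged example of the equivalent shell pipeline for a
        single page, matching the groff flags used below (the path is
        illustrative, not taken from this script):
            zcat /usr/share/man/man1/ls.1.gz | groff -E -c -mandoc -Tutf8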
"""
# Check whether the file is zipped or not.
zipped = re.compile(r".*\.gz$")
if zipped.match(file_path):
reader = zipped_files
else:
reader = not_zipped_files
# Check whether there is redirection. If it is then parse name from the path.
file_name_changed = False
check_file = subprocess.Popen(shlex.split(reader + file_path), stdout=subprocess.PIPE).communicate()[0]
if re.match("\.so", check_file):
file_name_changed = True
# Create regex for getting name of file.
reg_name = re.compile(r".*/(.*?)\.\w{1,5}\.gz")
# Parse path.
parsed_path = reg_name.search(file_path)
# Variable for saving name.
man_name = None
# If there is at least one match then save it to the variable.
if parsed_path is not None:
man_name = parsed_path.group(1)
# Create regex which catch new file name.
new_file_regex = re.compile(r".* (.*)")
# Parse file.
n_f_search = new_file_regex.search(check_file)
# Prepare variable.
new_file = None
# If there is at least one match then save it to the prepared variable.
if n_f_search is not None:
new_file = n_f_search.group(1)
# Add .gz extension.
new_file = new_file + ".gz"
# Substitute old file name by new file name.
if re.match(r".*/.*", new_file):
file_path = re.sub(r"/[-\.\w]*/[-\.\w]*$", "/" + new_file, file_path)
elif re.match(r"[^/]*", new_file):
file_path = re.sub(r"/[-\.\w]*$", "/" + new_file, file_path)
p1 = subprocess.Popen(shlex.split(reader + file_path),
stdout=subprocess.PIPE,
universal_newlines=True)
# Run these two commands connected by pipe.
"""
Error output is redirected to /dev/null because of warnings from
        incorrectly formatted manpages
"""
output = subprocess.Popen(shlex.split("groff -E -c -mandoc -Tutf8"),
stdin=p1.stdout,
stdout=subprocess.PIPE,
stderr=f_devnull,
universal_newlines=True).communicate()[0]
number = parse_manpage_number(file_path)
# Parse name of manpage.
if not file_name_changed:
man_name = parse_name(output)
        # \u001B is the ANSI escape character that introduces color codes in man pages; strip them
output = re.sub(u"\u001B\[[^-]*?;?[^-]*?m", "", output)
if man_name == 'BASH':
parse_bash_page(output, builtins, os_id)
continue # manpage is put into db directly in previous function
# Get list of flags for this page
flags_list = parse_one_page(output)
# Consider manpage name as the name of command.
command = man_name.lower()
put_manpage_into_db(os_id, man_name, command, number, flags_list)
commands_stored.append(command)
f_devnull.close()
return commands_stored
def get_os_commands(ctype=None):
"""
    Get the list of available OS commands via 'compgen -c', or only the
    bash builtins when ctype == 'builtin'.
"""
command = "compgen -c"
if (ctype == 'builtin'):
command = 'compgen -b'
p = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
universal_newlines=True
)
output = subprocess.Popen(["sort", "-u"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=p.stdout,
universal_newlines=True
).communicate()[0]
output = output.split('\n')
regex = re.compile(r'[a-zA-Z]')
for o in output:
if not regex.match(o):
output.remove(o)
return output
def remove_already_found_cmds(cmds, cmds_in_db):
"""
Remove commands which are already in database
"""
for cmd in cmds_in_db:
if cmd in cmds:
cmds.remove(cmd)
return cmds
def handle_helps(os_id, cmds):
"""
Call --help on each command which has not been processed yet
"""
help_cont = ''
timeout = 2
helps = {}
for cmd in cmds:
try:
p = subprocess.Popen([cmd, "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
universal_newlines=True
)
kill_proc = lambda p: p.kill()
timer = Timer(timeout, kill_proc, [p])
try:
timer.start()
help_cont = p.communicate()[0]
finally:
timer.cancel()
except OSError:
err_print("ERROR in running '" + cmd + " --help'.")
continue
f_list = parse_one_page(help_cont)
put_manpage_into_db(os_id, None, cmd, None, f_list)
helps[cmd] = help_cont
return helps
def parse_options():
"""
Parse options
"""
parser = argparse.ArgumentParser(description="Generate SQLite3 database "
"with all options and switches for all "
"installed commands.")
    parser.add_argument("--from-help", help="WARNING: Use this parameter only on "
                        "a disposable virtual machine. It runs every command "
                        "found on the system with the '--help' parameter to "
                        "fetch all options from the output. Use this only if "
                        "you know what you are doing.",
action="store_true")
parser.add_argument("--os-name", help="Name of the OS. Whole name will be "
"created by concatenating OS name and OS version.",
required=True)
parser.add_argument("--os-version", help="Version of OS. Whole name will be "
"created by concatenating OS name and OS version.",
required=True)
parser.add_argument("--schema-file", default="./schema.sql",
help="File with database schema. Default file: "
"./schema.sql")
parser.add_argument("--db-file", default="switch.sqlite3",
help="The name of the database file.")
parser.add_argument("--output-db-dir", default="/tmp/switchTest",
help="Directory to write generated database to. "
"Default directory: /tmp/switchTest/")
prog_args = parser.parse_args()
# Name of schema file.
if prog_args.schema_file:
global schema_file
schema_file = prog_args.schema_file
# Name of database file.
if prog_args.output_db_dir:
global db_path
db_path = prog_args.output_db_dir
# DB path
if prog_args.db_file:
global db_file
db_file = prog_args.db_file
return prog_args
def main():
"""
    Main function.
"""
# Parse options
args = parse_options()
# Check Python version
if sys.version_info[0] != 2:
raise Exception("Must be using Python 2")
print("Preparing database file...")
# Create empty database in case that db file does not exists
if os.path.exists(os.path.join(db_path, db_file)):
open_db()
else:
create_empty_db()
print("Searching OS ID...")
current_os_id = handle_system(args.os_name + args.os_version)
print("Fetching directories with manual pages...")
# Get directories with manual pages
directories = get_directories()
# Get names of manpage files.
files = get_file_names(directories)
print("Fetching builtin commands...")
# Get bash builtin functions
builtins = get_os_commands('builtin')
    # Get all runnable commands found on the system
cmds = get_os_commands()
print("Parsing manual pages...")
# Parse man pages
handled_cmds = parse_man_pages(files, builtins, current_os_id)
# Compare list of commands found in OS with all already stored in DB.
# Then remove all commands which are already in DB from list of all commands.
remove_already_found_cmds(cmds, handled_cmds)
print("Storing commands from 'compgen -c' command...")
store_cmds_to_db(cmds, current_os_id)
# Call each command which is not in DB yet with '--help' param to gather
# further data.
if args.from_help:
print("Running commands with --help option...")
helps = handle_helps(current_os_id, cmds)
"""
Run main function.
"""
if __name__ == "__main__":
main()
|
py | 1a3c3be3501b83a539a5391b2bef4bd1ef7ab68d | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the fetch_builds module."""
import errno
import unittest
# The third-party mock module is expected to be available in PYTHONPATH.
import mock
import fetch_build
# The tests below test private functions (W0212).
# Some methods don't reference self because they use the mock module (R0201).
# pylint: disable=R0201,W0212
class FetchBuildTest(unittest.TestCase):
def setUp(self):
        # A mock of the cloud_storage module is used in the methods below.
cloud_storage_patcher = mock.patch('fetch_build.cloud_storage')
self.mock_cloud_storage = cloud_storage_patcher.start()
self.addCleanup(cloud_storage_patcher.stop)
@mock.patch('fetch_build.os.path.exists')
def test_FetchFromCloudStorage_FileFound(self, mock_os_path_exists):
self.mock_cloud_storage.Exists.return_value = True
mock_os_path_exists.return_value = True
local_path = fetch_build.FetchFromCloudStorage(
'my_bucket', 'remote/foo.zip', 'local')
self.assertEqual('local/foo.zip', local_path)
self.mock_cloud_storage.Get.assert_called_with(
'my_bucket', 'remote/foo.zip', 'local/foo.zip')
def test_FetchFromCloudStorage_FileNotFound(self):
self.mock_cloud_storage.Exists.return_value = False
local_path = fetch_build.FetchFromCloudStorage(
'my_bucket', 'remote/foo.zip', 'local')
self.assertIsNone(local_path)
self.assertFalse(self.mock_cloud_storage.Get.called)
class BuildArchiveTest(unittest.TestCase):
def test_CreatePerfBuildArchive(self):
archive = fetch_build.BuildArchive.Create(fetch_build.PERF_BUILDER)
self.assertEqual('chrome-perf', archive.BucketName())
self.assertTrue(isinstance(archive, fetch_build.PerfBuildArchive))
def test_CreateFullBuildArchive(self):
archive = fetch_build.BuildArchive.Create(fetch_build.FULL_BUILDER)
archive._platform = 'linux'
self.assertEqual('chromium-linux-archive', archive.BucketName())
self.assertTrue(isinstance(archive, fetch_build.FullBuildArchive))
def test_BuildArchive_NonExistentType(self):
self.assertRaises(
NotImplementedError, fetch_build.BuildArchive.Create, 'other')
def test_FullBuildArchive_Linux(self):
archive = fetch_build.FullBuildArchive()
archive._platform = 'linux'
self.assertEqual('chromium-linux-archive', archive.BucketName())
self.assertEqual(
'chromium.linux/Linux Builder/full-build-linux_1234567890abcdef.zip',
archive.FilePath('1234567890abcdef'))
def test_FullBuildArchive_Android(self):
archive = fetch_build.FullBuildArchive()
archive._platform = 'android'
self.assertEqual('chromium-android', archive.BucketName())
self.assertEqual('android_main_rel/full-build-linux_1234567890abcdef.zip',
archive.FilePath('1234567890abcdef'))
def test_PerfBuildArchive_Linux(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'linux'
self.assertEqual('chrome-perf', archive.BucketName())
self.assertEqual(
'Linux Builder/full-build-linux_1234567890abcdef.zip',
archive.FilePath('1234567890abcdef'))
def test_PerfBuildArchive_Android(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'android'
self.assertEqual('chrome-perf', archive.BucketName())
self.assertEqual(
'android_perf_rel/full-build-linux_123456.zip',
archive.FilePath('123456'))
def test_PerfBuildArchive_64BitWindows(self):
archive = fetch_build.PerfBuildArchive(target_arch='x64')
archive._platform = 'win64'
self.assertEqual('chrome-perf', archive.BucketName())
self.assertEqual(
'Win x64 Builder/full-build-win32_123456.zip',
archive.FilePath('123456'))
def test_PerfBuildArchive_WithDepsPatchSha(self):
archive = fetch_build.PerfBuildArchive()
archive._platform = 'linux'
self.assertEqual(
'Linux Builder/full-build-linux_123456'
'_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.zip',
archive.FilePath(123456, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'))
class UnzipTest(unittest.TestCase):
def setUp(self):
# Mocks of the os and bisect_utils modules are used in the methods below.
os_patcher = mock.patch('fetch_build.os')
self.mock_os = os_patcher.start()
self.addCleanup(os_patcher.stop)
bisect_utils_patcher = mock.patch('fetch_build.bisect_utils')
self.mock_bisect_utils = bisect_utils_patcher.start()
self.addCleanup(bisect_utils_patcher.stop)
@mock.patch('fetch_build._MakeDirectory')
@mock.patch('fetch_build._UnzipUsingCommand')
def test_Unzip_Linux(self, mock_UnzipUsingCommand, mock_MakeDirectory):
self.mock_bisect_utils.IsLinuxHost.return_value = True
self.mock_bisect_utils.IsMacHost.return_value = False
self.mock_bisect_utils.IsWindowsHost.return_value = False
fetch_build.Unzip('x.zip', 'out_dir', verbose=False)
mock_MakeDirectory.assert_called_with('out_dir')
mock_UnzipUsingCommand.assert_called_with(
['unzip', '-o'], 'x.zip', 'out_dir')
@mock.patch('fetch_build._MakeDirectory')
@mock.patch('fetch_build._UnzipUsingZipFile')
def test_Unzip_Mac_LargeFile(
self, mock_UnzipUsingZipFile, mock_MakeDirectory):
# The zipfile module is used to unzip on mac when the file is > 4GB.
self.mock_bisect_utils.IsLinuxHost.return_value = False
self.mock_bisect_utils.IsMacHost.return_value = True
self.mock_bisect_utils.IsWindowsHost.return_value = False
self.mock_os.path.getsize.return_value = 2 ** 33 # 8GB
fetch_build.Unzip('x.zip', 'out_dir', verbose=False)
mock_MakeDirectory.assert_called_with('out_dir')
mock_UnzipUsingZipFile.assert_called_with('x.zip', 'out_dir', False)
def test_UnzipUsingCommand(self):
# The _UnzipUsingCommand function should move to the output
# directory and run the command with the file's absolute path.
self.mock_os.path.abspath.return_value = '/foo/some/path/x.zip'
self.mock_os.getcwd.return_value = 'curr_dir'
self.mock_bisect_utils.RunProcess.return_value = 0
fetch_build._UnzipUsingCommand(['unzip'], 'x.zip', 'out_dir')
self.mock_os.chdir.assert_has_calls(
[mock.call('out_dir'), mock.call('curr_dir')])
self.mock_bisect_utils.RunProcess.assert_called_with(
['unzip', '/foo/some/path/x.zip'])
def test_MakeDirectory(self):
# _MakeDirectory uses os.makedirs.
fetch_build._MakeDirectory('some/path')
self.mock_os.makedirs.assert_called_with('some/path')
def test_MakeDirectory_RaisesError(self):
self.mock_os.makedirs.side_effect = OSError()
self.assertRaises(OSError, fetch_build._MakeDirectory, 'some/path')
def test_MakeDirectory_NoErrorIfDirectoryAlreadyExists(self):
already_exists = OSError()
already_exists.errno = errno.EEXIST
self.mock_os.makedirs.side_effect = already_exists
fetch_build._MakeDirectory('some/path')
@mock.patch('fetch_build.shutil')
def test_RemoveDirectoryTree(self, mock_shutil):
# _RemoveDirectoryTree uses shutil.rmtree.
fetch_build._RemoveDirectoryTree('some/path')
mock_shutil.rmtree.assert_called_with('some/path')
if __name__ == '__main__':
unittest.main()
|
py | 1a3c3e9bc0bb5f2bc5e149d1dd5e13c2633076a3 | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that we don't leak txs to inbound peers that we haven't yet announced to"""
from test_framework.messages import msg_getdata, CInv, MSG_TX
from test_framework.p2p import p2p_lock, P2PDataStore
from test_framework.test_framework import RuvchainTestFramework
from test_framework.util import (
assert_equal,
)
from test_framework.wallet import MiniWallet
class P2PNode(P2PDataStore):
def on_inv(self, msg):
pass
class P2PLeakTxTest(RuvchainTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
gen_node = self.nodes[0] # The block and tx generating node
miniwallet = MiniWallet(gen_node)
# Add enough mature utxos to the wallet, so that all txs spend confirmed coins
miniwallet.generate(1)
gen_node.generate(100)
inbound_peer = self.nodes[0].add_p2p_connection(P2PNode()) # An "attacking" inbound peer
MAX_REPEATS = 100
self.log.info("Running test up to {} times.".format(MAX_REPEATS))
for i in range(MAX_REPEATS):
self.log.info('Run repeat {}'.format(i + 1))
txid = miniwallet.send_self_transfer(from_node=gen_node)['wtxid']
want_tx = msg_getdata()
want_tx.inv.append(CInv(t=MSG_TX, h=int(txid, 16)))
with p2p_lock:
inbound_peer.last_message.pop('notfound', None)
inbound_peer.send_and_ping(want_tx)
if inbound_peer.last_message.get('notfound'):
self.log.debug('tx {} was not yet announced to us.'.format(txid))
self.log.debug("node has responded with a notfound message. End test.")
assert_equal(inbound_peer.last_message['notfound'].vec[0].hash, int(txid, 16))
with p2p_lock:
inbound_peer.last_message.pop('notfound')
break
else:
self.log.debug('tx {} was already announced to us. Try test again.'.format(txid))
assert int(txid, 16) in [inv.hash for inv in inbound_peer.last_message['inv'].inv]
if __name__ == '__main__':
P2PLeakTxTest().main()
|
py | 1a3c3fce1fb1fd661dfe61119ba8bed80b035662 | # -*- coding: utf-8 -*-
"""
@brief test log(time=10s)
"""
import unittest
import numpy
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from pyquickhelper.pycode import ExtTestCase
from mlinsights.mlmodel.sklearn_text import TraceableTfidfVectorizer, TraceableCountVectorizer
class TestSklearnText(ExtTestCase):
def test_count_vectorizer(self):
corpus = numpy.array([
"This is the first document.",
"This document is the second document.",
"And this is the third one.",
"Is this the first document?",
"",
]).reshape((5, ))
for ng in [(1, 1), (1, 2), (2, 2), (1, 3)]:
mod1 = CountVectorizer(ngram_range=ng)
mod1.fit(corpus)
mod2 = TraceableCountVectorizer(ngram_range=ng)
mod2.fit(corpus)
pred1 = mod1.transform(corpus)
pred2 = mod2.transform(corpus)
self.assertEqualArray(pred1.todense(), pred2.todense())
voc = mod2.vocabulary_
for k in voc:
self.assertIsInstance(k, tuple)
def test_count_vectorizer_regex(self):
corpus = numpy.array([
"This is the first document.",
"This document is the second document.",
"And this is the third one.",
"Is this the first document?",
"",
]).reshape((5, ))
for pattern in ["[a-zA-Z ]{1,4}", "[a-zA-Z]{1,4}"]:
for ng in [(1, 1), (1, 2), (2, 2), (1, 3)]:
mod1 = CountVectorizer(ngram_range=ng, token_pattern=pattern)
mod1.fit(corpus)
mod2 = TraceableCountVectorizer(ngram_range=ng,
token_pattern=pattern)
mod2.fit(corpus)
pred1 = mod1.transform(corpus)
pred2 = mod2.transform(corpus)
self.assertEqualArray(pred1.todense(), pred2.todense())
voc = mod2.vocabulary_
for k in voc:
self.assertIsInstance(k, tuple)
if " ]" in pattern:
spaces = 0
for k in voc:
self.assertIsInstance(k, tuple)
for i in k:
if ' ' in i:
spaces += 1
self.assertGreater(spaces, 1)
def test_tfidf_vectorizer(self):
corpus = numpy.array([
"This is the first document.",
"This document is the second document.",
"And this is the third one.",
"Is this the first document?",
"",
]).reshape((5, ))
for ng in [(1, 1), (1, 2), (2, 2), (1, 3)]:
mod1 = TfidfVectorizer(ngram_range=ng)
mod1.fit(corpus)
mod2 = TraceableTfidfVectorizer(ngram_range=ng)
mod2.fit(corpus)
pred1 = mod1.transform(corpus)
pred2 = mod2.transform(corpus)
self.assertEqualArray(pred1.todense(), pred2.todense())
voc = mod2.vocabulary_
for k in voc:
self.assertIsInstance(k, tuple)
def test_tfidf_vectorizer_regex(self):
corpus = numpy.array([
"This is the first document.",
"This document is the second document.",
"And this is the third one.",
"Is this the first document?",
"",
]).reshape((5, ))
for pattern in ["[a-zA-Z ]{1,4}", "[a-zA-Z]{1,4}"]:
for ng in [(1, 1), (1, 2), (2, 2), (1, 3)]:
mod1 = TfidfVectorizer(ngram_range=ng, token_pattern=pattern)
mod1.fit(corpus)
mod2 = TraceableTfidfVectorizer(ngram_range=ng,
token_pattern=pattern)
mod2.fit(corpus)
pred1 = mod1.transform(corpus)
pred2 = mod2.transform(corpus)
if ' ]' in pattern:
voc = mod2.vocabulary_
spaces = 0
for k in voc:
self.assertIsInstance(k, tuple)
for i in k:
if ' ' in i:
spaces += 1
self.assertGreater(spaces, 1)
self.assertEqualArray(pred1.todense(), pred2.todense())
if __name__ == "__main__":
unittest.main()
|
py | 1a3c3fe40b7744d62482370b298da8d675174747 | #!/usr/bin/env python
from Bio import SeqIO
import argparse as ap
import sys
def read_params():
p = ap.ArgumentParser(description = 'fastq2fasta.py Parameters\n')
p.add_argument('--ifn', required = False, default = None, type = str)
p.add_argument('--ofn', required = False, default = None, type = str)
return vars(p.parse_args())
if __name__ == '__main__':
args = read_params()
    if args['ifn'] is None:
ifile = sys.stdin
else:
ifile = open(args['ifn'], 'r')
    if args['ofn'] is None:
ofile = sys.stdout
else:
ofile = open(args['ofn'], 'w')
for r in SeqIO.parse(ifile, "fastq"):
SeqIO.write(r, ofile, "fasta")
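# Example invocations (a hedged sketch; the input/output file names below are
# assumptions). Without --ifn/--ofn the script reads FASTQ from stdin and
# writes FASTA to stdout:
#   python fastq2fasta.py --ifn reads.fastq --ofn reads.fasta
#   cat reads.fastq | python fastq2fasta.py > reads.fasta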
|
py | 1a3c400966b675be8b221b3752f53f73a231a477 | import pyeccodes.accessors as _
def load(h):
h.alias('mars.number', 'forecastOrSingularVectorNumber')
h.alias('mars.origin', 'centre')
if (h.get_l('class') == 9):
h.alias('mars.method', 'methodNumber')
|
py | 1a3c40ef07047ee4c6eb8b17479c355fbd8d6e0a | """
# lex-ler
Understand the motivation for and the mechanisms of lexical analysis.
* Split source code into tokens and lexemes.
* Identify the different kinds of lexemes.
* Identify lexemes in real programming languages such as Python or C.
----
Attention! This is not a programming exercise, but one about understanding the
concepts related to lexical analysis. Even so, the answer is graded
automatically.
Consider the ruspy code below:
    // Factorial
    fn fat(n: int) {
        r = n
        for i in 1..n {
            r *= i
        }
        r
    }
Split this program into lexemes and save them as a list of strings in the variable:
    FAT_LEXEMAS = ["fn", "fat", ...]
Comments are removed from the list of lexemes, since they are of no interest to
semantic analysis. You may build this list manually, automatically or
semi-automatically. The only important part is getting the correct value in the end.
In the second part, classify each lexeme into its category and save the result
as
    FAT_TOKENS = ["fn FN", "fat ID", ...]
that is, each string contains the lexeme and the non-terminal category separated
by a space. Consider the following categories:
    ID - identifiers
    INT - integers
    OP - binary operators
    LBRACE/RBRACE - braces (open/close)
    LPAR/RPAR - parentheses (open/close)
Each reserved word has its own separate category, such as FN, IF, etc.
"""
import pytest
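# The exercise above asks for FAT_LEXEMAS / FAT_TOKENS, whose exact values are
# checked against fixed hashes, so they are meant to be written out with care.
# As a hedged illustration of the "semi-automatic" route, the hypothetical helper
# below (not part of the graded answer) splits ruspy-like source into candidate
# lexemes with a small regex; classifying them into FN/FOR/IN/ID/INT/OP/LBRACE/
# RBRACE/LPAR/RPAR is then a simple lookup over the resulting list.
def _lexemes_sketch(source):
    """Split ruspy-like source into candidate lexemes (illustrative sketch only)."""
    import re
    code = re.sub(r"//[^\n]*", "", source)  # drop // comments, which are not lexemes
    token_re = r"[A-Za-z_]\w*|\d+|\*=|\.\.|[{}()=:+*/-]"  # identifiers, ints, operators, delimiters
    return re.findall(token_re, code)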
def test_verificações_básicas(var, check_value, fn):
size_hint = 24
lexemas = var("FAT_LEXEMAS")
tokens = var("FAT_TOKENS")
assert len(lexemas) == len(tokens)
tok_class = {tk.split()[1] for tk in tokens}
assert tok_class == {"FN", "FOR", "IN", "ID", "INT", "OP", "LBRACE", "RBRACE", "LPAR", "RPAR"}
check_value(
lexemas,
name="FAT_LEXEMAS",
type=list,
hash="MjQ6AD8NORcHa9YKPMlnYAmp6A==",
check=[fn.size(size_hint)],
)
check_value(
tokens,
name="FAT_TOKENS",
type=list,
hash="MjQ6eDGqAGldx9jyZm2FqVyIJg==",
check=[fn.size(size_hint)],
)
|
py | 1a3c41b48eb977b9bd1a0bc76609ede5865b1c44 | import argparse
import random
import math
from dali.utils import (
set_device_from_args,
add_device_args,
unpickle_as_dict,
)
from dali.data.utils import split_punctuation
from translation import TranslationModel
def parse_args():
parser = argparse.ArgumentParser()
add_device_args(parser)
    parser.add_argument("--path", type=str, required=True, help="Path to saved model")
parser.add_argument("--beam_width", type=int, default=5, help="Beam width used when prediction")
parser.add_argument("--max_output_length", type=int, default=40, help="Maximum number of words in the translation")
parser.add_argument("--show_beams", action='store_true', default=False,
help="If true shows all the beams and probabilities")
return parser.parse_args()
def show_reconstructions(model, example_pair, vocabs, max_sentence_length):
from_words, to_words = example_pair
from_vocab, to_vocab = vocabs
from_with_unk = ' '.join(from_vocab.decode(from_vocab.encode(from_words)))
to_with_unk = ' '.join(to_vocab.decode(to_vocab.encode(to_words)))
print('TRANSLATING: %s' % from_with_unk)
print('REFERENCE: %s' % to_with_unk)
print('')
def main(args):
set_device_from_args(args)
RELEVANT_VARIABLES = ["model", "vocabs"]
loaded = unpickle_as_dict(args.path, RELEVANT_VARIABLES)
model = loaded["model"]
from_vocab, to_vocab = loaded["vocabs"]
while True:
from_sentence = split_punctuation(input()).split(' ')
encoded = from_vocab.encode(list(reversed(from_sentence)), add_eos=False)
beams = model.predict(encoded,
eos_symbol=to_vocab.eos,
max_sequence_length=args.max_output_length + 1,
beam_width=args.beam_width)
if args.show_beams:
for solution, score, _ in beams:
score = math.exp(score.w[0])
                # decode once, revealing the <unk> tokens, then print with the beam's probability
                solution = ' '.join(to_vocab.decode(solution, strip_eos=True))
                print('%f => %s' % (score, solution))
else:
print(' '.join(to_vocab.decode(beams[0].solution, strip_eos=True)))
if __name__ == '__main__':
main(parse_args())
|
py | 1a3c41b8f2f7e09a730a84dfbd8dc323152cb2d7 | # -*- coding: utf-8 -*-
#
# Devito documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 20 13:02:08 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon' # support for numpydoc
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Devito v4.3'
copyright = u'2016-2019, Devito'
author = u'The Devito Community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = u'4.3'
# The full version, including alpha/beta/rc tags.
# release = u'4.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
html_title = u'Devito v4.3'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = '_static/devito_logo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/devito_logo.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Devitodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Devito.tex', u'Devito Documentation',
u'Devito', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'devito', u'Devito Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Devito', u'Devito Documentation',
author, 'Devito', u'Devito Documentation.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
|
py | 1a3c4219e14e88969147f38c1506ed48ec4abd93 | """
Carneades argumentation package
"""
__all__ = ['caes', 'tracecalls']
|
py | 1a3c43495bbf9d302216d7ddf62df75446907a36 | _base_ = './psanet_r50-d8_512x512_20k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
py | 1a3c439ef6960c58744d8611e7ba305dc3cc1c62 | # Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
|
py | 1a3c4467baa75aa7c5ff4ade484c85c80f59ad65 |
from common import FILE_TEMPLATE
#================================================================================================
MAPPER_TEMPLATE = """\
public partial class PythonMapper : PythonApi
{
%s
}"""
MAPPER_FILE_TEMPLATE = FILE_TEMPLATE % MAPPER_TEMPLATE
#================================================================================================
REGISTER_EXCEPTION_TEMPLATE = """\
public override void Register_PyExc_%(name)s(IntPtr addr)
{
CPyMarshal.WritePtr(addr, this.Store(%(source)s.%(name)s));
}"""
#================================================================================================
STOREDISPATCH_TEMPLATE = """\
private IntPtr StoreDispatch(object obj)
{
%s
return this.StoreObject(obj);
}"""
STOREDISPATCH_FILE_TEMPLATE = MAPPER_FILE_TEMPLATE % STOREDISPATCH_TEMPLATE
STOREDISPATCH_TYPE_TEMPLATE = """\
if (obj is %(type)s) { return this.StoreTyped((%(type)s)obj); }"""
#================================================================================================
OPERATOR_TEMPLATE = """\
public override IntPtr
%(name)s(IntPtr arg1ptr, IntPtr arg2ptr)
{
try
{
object result = PythonOperator.%(operator)s(this.scratchContext, this.Retrieve(arg1ptr), this.Retrieve(arg2ptr));
return this.Store(result);
}
catch (Exception e)
{
this.LastException = e;
return IntPtr.Zero;
}
}"""
#================================================================================================
NUMBERS_C2PY_TEMPLATE = """\
public override IntPtr
%(name)s(%(type)s value)
{
return this.Store(%(cast)svalue);
}"""
#================================================================================================
NUMBERS_PY2C_TEMPLATE = """\
public override %(type)s
%(name)s(IntPtr valuePtr)
{
try
{
return %(coerce)sNumberMaker.%(converter)s(this.scratchContext, this.Retrieve(valuePtr));
}
catch (Exception e)
{
this.LastException = e;
return %(default)s;
}
}"""
#================================================================================================
REGISTER_TYPES_TEMPLATE = """\
public override void
Register_%(name)s(IntPtr ptr)
{
CPyMarshal.Zero(ptr, Marshal.SizeOf(typeof(PyTypeObject)));
CPyMarshal.WriteIntField(ptr, typeof(PyTypeObject), "ob_refcnt", 1);
%(extra)s
string name = (string)Builtin.getattr(this.scratchContext, %(type)s, "__name__");
CPyMarshal.WriteCStringField(ptr, typeof(PyTypeObject), "tp_name", name);
this.map.Associate(ptr, %(type)s);
}"""
REGISTER_TYPES_NUMBER_TEMPLATE = """\
this.%(data)s(ptr);"""
REGISTER_TYPES_SIZE_TEMPLATE = """\
CPyMarshal.WriteIntField(ptr, typeof(PyTypeObject), "%(slot)s", Marshal.SizeOf(typeof(%(data)s)));"""
REGISTER_TYPES_DEFAULT_TEMPLATE = """\
CPyMarshal.WritePtrField(ptr, typeof(PyTypeObject), "%(slot)s", this.GetFuncPtr("%(data)s"));"""
REGISTER_TYPES_SLOT_TEMPLATES = {
"tp_as_number": REGISTER_TYPES_NUMBER_TEMPLATE,
"tp_basicsize": REGISTER_TYPES_SIZE_TEMPLATE,
"tp_itemsize": REGISTER_TYPES_SIZE_TEMPLATE,
}
#================================================================================================
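# Illustrative sketch (not part of the original module): the constants above are plain
# '%'-style format strings that a code generator is expected to fill with a mapping.
# The exception name and source used here are hypothetical.
def _example_fill_register_exception_template():
    return REGISTER_EXCEPTION_TEMPLATE % {
        "name": "SystemError",
        "source": "PythonExceptions",
    }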
|
py | 1a3c448f51f78d3a88cefaff6f532fe3c7e5a13b | import os
from pathlib import Path
import pytest
from aqt.archives import QtArchives, SrcDocExamplesArchives
from aqt.helper import Settings
@pytest.fixture(autouse=True)
def setup():
Settings.load_settings(os.path.join(os.path.dirname(__file__), "data", "settings.ini"))
@pytest.mark.parametrize(
"os_name, version, flavor, datafile",
(
("windows", "5.15.2", "doc", "windows-5152-src-doc-example-update.xml"),
("windows", "5.15.2", "src", "windows-5152-src-doc-example-update.xml"),
("windows", "5.15.2", "examples", "windows-5152-src-doc-example-update.xml"),
),
)
def test_parse_update_xml(monkeypatch, os_name, version, flavor, datafile):
def _mock(self, url):
return (Path(__file__).parent / "data" / datafile).read_text("utf-8")
monkeypatch.setattr(QtArchives, "_download_update_xml", _mock)
qt_archives = SrcDocExamplesArchives(flavor, os_name, "desktop", version, Settings.baseurl)
assert qt_archives.archives is not None
# Get packages with all extra modules
qt_archives_all_modules = SrcDocExamplesArchives(
flavor,
os_name,
"desktop",
version,
Settings.baseurl,
all_extra=True,
)
assert qt_archives_all_modules.archives is not None
# Extract all urls
url_list = [item.archive_path for item in qt_archives.archives]
url_all_modules_list = [item.archive_path for item in qt_archives_all_modules.archives]
# Check the difference list contains only extra modules urls for target specified
list_diff = [item for item in url_all_modules_list if item not in url_list]
unwanted_targets = [item for item in list_diff if flavor not in item]
# Assert if list_diff contains urls without target specified
assert unwanted_targets == []
|
py | 1a3c451d36ce174befbb36ac8e51ace8826ba9f2 | import numpy as np
import pandas as pd
import cPickle as pickle
from datetime import datetime
import keras.backend as K
from keras.layers import Layer, RepeatVector, Flatten
from keras.regularizers import Regularizer
if K._backend == 'tensorflow':
import tensorflow as tf
else:
import theano
import theano.tensor as T
####################
# common functions
####################
def get_cur_time():
return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def get_key_from_val(val, dictionary, sanctity_check=False):
''' assuming val is unique '''
if sanctity_check:
        val_len = len(dictionary.values())
        val_set_len = len(set(dictionary.values()))
assert val_len == val_set_len, [val_len, val_set_len]
for key, val_ in dictionary.iteritems():
if val == val_:
return key
def nan_detection(val_name, val):
if np.isnan(val):
assert False, 'ERROR (nan)! {} is {}..'.format(val_name, val)
####################
# math functions
####################
def sigmoid(x):
return 1./(np.exp(-x) + 1.)
def tanh(x):
return 2 * sigmoid(2 * x) - 1
def softplus(x):
return np.log(1 + np.exp(x))
def softmax(x):
x -= np.max(x, axis=-1, keepdims=True)
deno = np.sum(np.exp(x), axis=-1)
return (np.exp(x).T / deno).T
def normalized_embedding(X):
# n x dim
return (1. / np.sqrt(np.sum(X**2, axis=1)) * X.T).T
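# Minimal sanity-check sketch for the helpers above (illustrative only, not part of
# the original module): softmax rows should sum to 1 and normalized embeddings should
# have unit norm.
def _example_math_helpers():
    x = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
    rows_sum_to_one = np.allclose(np.sum(softmax(x), axis=-1), 1.0)
    X = np.array([[3.0, 4.0]])
    unit_norm = np.allclose(np.sum(normalized_embedding(X) ** 2, axis=1), 1.0)
    return rows_sum_to_one, unit_norm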
############################
# Keras backend extensions
############################
def theano_reshape(x, new_shape):
return x.reshape(new_shape)
def tensorflow_reshape(x, new_shape):
return tf.reshape(x, new_shape)
def theano_diag(x):
print "NOT implemented!"
return None
def tensorflow_diag(x, size=None):
''' size: square matrix size
tf.diag_part is very slow!
so when size given, we use faster gather_nd
'''
if size is None:
return tf.diag_part(x)
else:
diag_idx = np.vstack((np.arange(size), np.arange(size))).T
return tf.gather_nd(x, diag_idx.astype(np.int32))
def theano_get_shape(x):
return x.shape
def tensorflow_get_shape(x):
shape = x._shape_as_list()
assert shape.count(None) <= 1, '[Error!] not sure what to do with multiple None in tensor(flow) shape'
shape = [-1 if s is None else s for s in shape]
return shape
def dimshuffle_theano(x, shape):
return x.dimshuffle(shape)
def dimshuffle_tensorflow(x, shape):
# do not support degeneration of shape
dims_to_permute = []
dims_to_expand = []
for i, s in enumerate(shape):
if s == 'x':
dims_to_expand.append(i)
else:
dims_to_permute.append(s)
x = tf.transpose(x, dims_to_permute)
for dim in dims_to_expand:
x = tf.expand_dims(x, dim)
return x
if K._backend == 'tensorflow':
K.dimshuffle = dimshuffle_tensorflow
K.reshape = tensorflow_reshape
K.get_shape = tensorflow_get_shape
K.diag = tensorflow_diag
else:
K.dimshuffle = dimshuffle_theano
K.reshape = theano_reshape
K.get_shape = theano_get_shape
K.diag = theano_diag
class L1L2Act(Regularizer):
''' reg normalized by number of samples '''
def __init__(self, l1=0., l2=0.):
self.l1 = K.cast_to_floatx(l1)
self.l2 = K.cast_to_floatx(l2)
def __call__(self, x):
regularization = 0
if self.l1:
regularization += K.sum(self.l1 * K.mean(K.abs(x), axis=0))
if self.l2:
regularization += K.sum(self.l2 * K.mean(K.square(x), axis=0))
return regularization
def get_config(self):
return {'name': self.__class__.__name__,
'l1': float(self.l1),
'l2': float(self.l2)}
def activity_l1(l=0.01):
return L1L2Act(l1=l)
def activity_l2(l=0.01):
return L1L2Act(l2=l)
def activity_l1l2(l1=0.01, l2=0.01):
return L1L2Act(l1=l1, l2=l2)
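# Illustrative usage sketch (not part of the original module): the factories above plug
# into Keras layers through the standard `activity_regularizer` argument; the layer
# width and penalty below are hypothetical.
def _example_activity_regularizer_usage():
    from keras.layers import Dense
    return Dense(32, activation='relu', activity_regularizer=activity_l2(0.01))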
################################
# Keras model related extensions
################################
def save_pred_result(pred_filename, pred_result):
# save the prediction for test pairs
# pred_result should be a list of tuples in the form of ('user', 'item', 'truth', 'pred')
if pred_filename:
pred_result_df = pd.DataFrame(pred_result, columns=['user', 'item', 'truth', 'pred'])
pred_result_df.to_csv(pred_filename, index=False)
def save_model(model_filename, model):
if model_filename:
model.save_weights(model_filename)
def save_embeddings(emb_filename, get_embeddings, args):
if emb_filename:
emb_dict = get_embeddings(*args)
with open(emb_filename, 'wb') as fp:
pickle.dump(emb_dict, fp, 2)
def get_embeddings(model, conf, data_spec, C):
# return a dictionary of important embeddings, such as user, item, word
# make sure in model.layers, InteractionDot, user_embedding, word_embedding are named
from keras.models import Sequential, Model
interaction_layer = model.get_layer('InteractionDot')
# get user embeddings
user_io_idx = 0
assert model.inputs[user_io_idx]._keras_shape[1] == 1, 'check if this is user dim'
user_model = Model(input=model.inputs[user_io_idx], output=interaction_layer.input[user_io_idx])
user_emb = user_model.predict_on_batch([np.array(range(data_spec.user_count))])
if len(user_emb.shape) == 3:
user_emb = user_emb.reshape((user_emb.shape[0], user_emb.shape[2]))
# user_emb_norm = normalized_embedding(user_emb)
# get content embeddings
content_io_idx = 1
assert model.inputs[content_io_idx]._keras_shape[1] > 1, 'check if this is content dim'
content_model = Model(input=model.inputs[content_io_idx:], output=interaction_layer.input[content_io_idx])
content_emb = content_model.predict_on_batch([np.array(range(C.shape[0]))])
if len(content_emb.shape) == 3:
content_emb = content_emb.reshape((content_emb.shape[0], content_emb.shape[2]))
# content_emb_norm = normalized_embedding(content_emb)
try:
user_emb1 = model.get_layer('user_embedding').get_weights()[0]
except:
user_emb1 = None
try:
word_emb = model.get_layer('word_embedding').get_weights()[0]
except:
word_emb = None
# user_emb_norm = normalized_embedding(user_emb)
# word_emb_norm = normalized_embedding(word_emb)
emb_dict = {'U': user_emb, 'V': content_emb, 'W': word_emb, 'U_1_just4check': user_emb1}
return emb_dict
def pickle_dump(filename, data):
    with open(filename, 'wb') as fp:
pickle.dump(data, fp, 2)
def pickle_load(filename):
    with open(filename, 'rb') as fp:
return pickle.load(fp) |
py | 1a3c456a86973a4624640be9b9c45c9812489279 | import kfp
from kfp import components
from kfp import dsl
sagemaker_hpo_op = components.load_component_from_file(
"../../hyperparameter_tuning/component.yaml"
)
@dsl.pipeline(
name="SageMaker HyperParameter Tuning", description="SageMaker HPO job test"
)
def hpo_pipeline(
region="",
job_name="",
algorithm_name="",
training_input_mode="",
static_parameters="",
integer_parameters="",
channels="",
categorical_parameters="",
early_stopping_type="",
max_parallel_jobs="",
max_num_jobs="",
metric_name="",
metric_type="",
hpo_strategy="",
instance_type="",
instance_count="",
volume_size="",
max_run_time="",
output_location="",
network_isolation="",
max_wait_time="",
role="",
):
sagemaker_hpo_op(
region=region,
job_name=job_name,
algorithm_name=algorithm_name,
training_input_mode=training_input_mode,
static_parameters=static_parameters,
integer_parameters=integer_parameters,
channels=channels,
categorical_parameters=categorical_parameters,
early_stopping_type=early_stopping_type,
max_parallel_jobs=max_parallel_jobs,
max_num_jobs=max_num_jobs,
metric_name=metric_name,
metric_type=metric_type,
strategy=hpo_strategy,
instance_type=instance_type,
instance_count=instance_count,
volume_size=volume_size,
max_run_time=max_run_time,
output_location=output_location,
network_isolation=network_isolation,
max_wait_time=max_wait_time,
role=role,
)
if __name__ == "__main__":
kfp.compiler.Compiler().compile(
hpo_pipeline, "SageMaker_hyperparameter_tuning_pipeline" + ".yaml"
)
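# Illustrative follow-up (not part of the original script): the compiled package can be
# submitted with the KFP SDK client; the pipeline arguments shown here are hypothetical.
#
#     client = kfp.Client()
#     client.create_run_from_pipeline_package(
#         "SageMaker_hyperparameter_tuning_pipeline.yaml",
#         arguments={"region": "us-east-1"},
#     )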
|
py | 1a3c45ebba40798e0fd5293a260bc9c2c73c612d | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/TestReport
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, domainresource, fhirtypes
class TestReport(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Describes the results of a TestScript execution.
A summary of information based on the results of executing a TestScript.
"""
resource_type = Field("TestReport", const=True)
identifier: fhirtypes.IdentifierType = Field(
None,
alias="identifier",
title="External identifier",
description=(
"Identifier for the TestScript assigned for external purposes outside "
"the context of FHIR."
),
# if property is element of this resource.
element_property=True,
)
issued: fhirtypes.DateTime = Field(
None,
alias="issued",
title="When the TestScript was executed and this TestReport was generated",
description=None,
# if property is element of this resource.
element_property=True,
)
issued__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_issued", title="Extension field for ``issued``."
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Informal name of the executed TestScript",
description="A free text natural language name identifying the executed TestScript.",
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
participant: typing.List[fhirtypes.TestReportParticipantType] = Field(
None,
alias="participant",
title=(
"A participant in the test execution, either the execution engine, a "
"client, or a server"
),
description=None,
# if property is element of this resource.
element_property=True,
)
result: fhirtypes.Code = Field(
None,
alias="result",
title="pass | fail | pending",
description="The overall result from the execution of the TestScript.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["pass", "fail", "pending"],
)
result__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_result", title="Extension field for ``result``."
)
score: fhirtypes.Decimal = Field(
None,
alias="score",
title=(
"The final score (percentage of tests passed) resulting from the "
"execution of the TestScript"
),
description=None,
# if property is element of this resource.
element_property=True,
)
score__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_score", title="Extension field for ``score``."
)
setup: fhirtypes.TestReportSetupType = Field(
None,
alias="setup",
title=(
"The results of the series of required setup operations before the "
"tests were executed"
),
description=None,
# if property is element of this resource.
element_property=True,
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="completed | in-progress | waiting | stopped | entered-in-error",
description="The current state of this test report.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=[
"completed",
"in-progress",
"waiting",
"stopped",
"entered-in-error",
],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
teardown: fhirtypes.TestReportTeardownType = Field(
None,
alias="teardown",
title="The results of running the series of required clean up steps",
description=(
"The results of the series of operations required to clean up after all"
" the tests were executed (successfully or otherwise)."
),
# if property is element of this resource.
element_property=True,
)
test: typing.List[fhirtypes.TestReportTestType] = Field(
None,
alias="test",
title="A test executed from the test script",
description=None,
# if property is element of this resource.
element_property=True,
)
testScript: fhirtypes.ReferenceType = Field(
...,
alias="testScript",
title=(
"Reference to the version-specific TestScript that was executed to "
"produce this TestReport"
),
description=(
"Ideally this is an absolute URL that is used to identify the version-"
"specific TestScript that was executed, matching the `TestScript.url`."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["TestScript"],
)
tester: fhirtypes.String = Field(
None,
alias="tester",
title="Name of the tester producing this report (Organization or individual)",
description=None,
# if property is element of this resource.
element_property=True,
)
tester__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_tester", title="Extension field for ``tester``."
)
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_1252(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("result", "result__ext"), ("status", "status__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
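# Illustrative sketch (not part of the original module): constructing a minimal
# TestReport with its required elements; the reference value below is hypothetical.
def _example_minimal_testreport():
    return TestReport(
        status="completed",
        result="pass",
        testScript={"reference": "TestScript/example"},
    )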
class TestReportParticipant(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A participant in the test execution, either the execution engine, a client,
or a server.
"""
resource_type = Field("TestReportParticipant", const=True)
display: fhirtypes.String = Field(
None,
alias="display",
title="The display name of the participant",
description=None,
# if property is element of this resource.
element_property=True,
)
display__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_display", title="Extension field for ``display``."
)
type: fhirtypes.Code = Field(
None,
alias="type",
title="test-engine | client | server",
description="The type of participant.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["test-engine", "client", "server"],
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
uri: fhirtypes.Uri = Field(
None,
alias="uri",
title="The uri of the participant. An absolute URL is preferred",
description=None,
# if property is element of this resource.
element_property=True,
element_required=True,
)
uri__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_uri", title="Extension field for ``uri``."
)
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2403(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("type", "type__ext"), ("uri", "uri__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class TestReportSetup(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
The results of the series of required setup operations before the tests
were executed.
"""
resource_type = Field("TestReportSetup", const=True)
action: typing.List[fhirtypes.TestReportSetupActionType] = Field(
...,
alias="action",
title="A setup operation or assert that was executed",
description="Action would contain either an operation or an assertion.",
# if property is element of this resource.
element_property=True,
)
class TestReportSetupAction(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A setup operation or assert that was executed.
Action would contain either an operation or an assertion.
"""
resource_type = Field("TestReportSetupAction", const=True)
assert_fhir: fhirtypes.TestReportSetupActionAssertType = Field(
None,
alias="assert",
title="The assertion to perform",
description="The results of the assertion performed on the previous operations.",
# if property is element of this resource.
element_property=True,
)
operation: fhirtypes.TestReportSetupActionOperationType = Field(
None,
alias="operation",
title="The operation to perform",
description="The operation performed.",
# if property is element of this resource.
element_property=True,
)
class TestReportSetupActionAssert(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
The assertion to perform.
The results of the assertion performed on the previous operations.
"""
resource_type = Field("TestReportSetupActionAssert", const=True)
detail: fhirtypes.String = Field(
None,
alias="detail",
title="A link to further details on the result",
description=None,
# if property is element of this resource.
element_property=True,
)
detail__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_detail", title="Extension field for ``detail``."
)
message: fhirtypes.Markdown = Field(
None,
alias="message",
title="A message associated with the result",
description="An explanatory message associated with the result.",
# if property is element of this resource.
element_property=True,
)
message__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_message", title="Extension field for ``message``."
)
result: fhirtypes.Code = Field(
None,
alias="result",
title="pass | skip | fail | warning | error",
description="The result of this assertion.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["pass", "skip", "fail", "warning", "error"],
)
result__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_result", title="Extension field for ``result``."
)
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3013(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("result", "result__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class TestReportSetupActionOperation(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
The operation to perform.
The operation performed.
"""
resource_type = Field("TestReportSetupActionOperation", const=True)
detail: fhirtypes.Uri = Field(
None,
alias="detail",
title="A link to further details on the result",
description=None,
# if property is element of this resource.
element_property=True,
)
detail__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_detail", title="Extension field for ``detail``."
)
message: fhirtypes.Markdown = Field(
None,
alias="message",
title="A message associated with the result",
description="An explanatory message associated with the result.",
# if property is element of this resource.
element_property=True,
)
message__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_message", title="Extension field for ``message``."
)
result: fhirtypes.Code = Field(
None,
alias="result",
title="pass | skip | fail | warning | error",
description="The result of this operation.",
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["pass", "skip", "fail", "warning", "error"],
)
result__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_result", title="Extension field for ``result``."
)
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_3326(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("result", "result__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
class TestReportTeardown(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
The results of running the series of required clean up steps.
The results of the series of operations required to clean up after all the
tests were executed (successfully or otherwise).
"""
resource_type = Field("TestReportTeardown", const=True)
action: typing.List[fhirtypes.TestReportTeardownActionType] = Field(
...,
alias="action",
title="One or more teardown operations performed",
description="The teardown action will only contain an operation.",
# if property is element of this resource.
element_property=True,
)
class TestReportTeardownAction(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
One or more teardown operations performed.
The teardown action will only contain an operation.
"""
resource_type = Field("TestReportTeardownAction", const=True)
operation: fhirtypes.TestReportSetupActionOperationType = Field(
...,
alias="operation",
title="The teardown operation performed",
description="An operation would involve a REST request to a server.",
# if property is element of this resource.
element_property=True,
)
class TestReportTest(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A test executed from the test script.
"""
resource_type = Field("TestReportTest", const=True)
action: typing.List[fhirtypes.TestReportTestActionType] = Field(
...,
alias="action",
title="A test operation or assert that was performed",
description="Action would contain either an operation or an assertion.",
# if property is element of this resource.
element_property=True,
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Tracking/reporting short description of the test",
description=(
"A short description of the test used by test engines for tracking and "
"reporting purposes."
),
# if property is element of this resource.
element_property=True,
)
description__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_description", title="Extension field for ``description``."
)
name: fhirtypes.String = Field(
None,
alias="name",
title="Tracking/logging name of this test",
description=(
"The name of this test used for tracking/logging purposes by test "
"engines."
),
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
class TestReportTestAction(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
A test operation or assert that was performed.
Action would contain either an operation or an assertion.
"""
resource_type = Field("TestReportTestAction", const=True)
assert_fhir: fhirtypes.TestReportSetupActionAssertType = Field(
None,
alias="assert",
title="The assertion performed",
description="The results of the assertion performed on the previous operations.",
# if property is element of this resource.
element_property=True,
)
operation: fhirtypes.TestReportSetupActionOperationType = Field(
None,
alias="operation",
title="The operation performed",
description="An operation would involve a REST request to a server.",
# if property is element of this resource.
element_property=True,
)
|
py | 1a3c4612daffc1906b31588e1c67db045cffc924 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import subprocess
def find_shortest_symref(repo_path, sha1):
"""
Find the shortest symbolic reference (branch/tag) to a Git SHA1
:param repo_path: the path of a valid git repository
:type repo_path: str
    :param sha1: the SHA1 of a commit to look up the reference for
:type sha1: str
Returns None if nothing points to the requested SHA1
"""
repo_path = os.path.expanduser(repo_path)
possibles = []
# Can't use git for-each-ref --points-at because it only came in in Git 2.7
# which is not in Ubuntu 14.04 - check by hand instead.
branches = subprocess.check_output(
"git for-each-ref --sort=-committerdate "
"--format='%(objectname:short) %(refname:short)' "
"refs/heads/ refs/remotes/ refs/tags",
universal_newlines=True,
cwd=repo_path, shell=True)
for line in branches.splitlines():
try:
sha1_out, name = line.strip().split()
except ValueError:
continue
if sha1_out[:7] == sha1[:7]:
possibles.append(name)
if not possibles:
return None
return min(possibles, key=len)
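# Illustrative usage sketch (not part of the original module; the repository path and
# SHA1 below are hypothetical):
#
#     ref = find_shortest_symref('~/linux', '0123abc')
#     if ref:
#         print(ref)  # e.g. 'v4.9'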
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
|
py | 1a3c474e6786be4645c2a38d34e0c1f1b8c6539c | import cv2
import numpy as np
#from mtcnn import mtcnn
from MTCNN import MTCNN
import os
import sys
apath = os.path.abspath(os.path.dirname(sys.argv[0]))
img = cv2.imread(apath+'/img/1.jpg')
model = MTCNN()
threshold = [0.5,0.6,0.7]
rectangles = model.detectFace(img, threshold)
draw = img.copy()
for rectangle in rectangles:
if rectangle is not None:
W = -int(rectangle[0]) + int(rectangle[2])
H = -int(rectangle[1]) + int(rectangle[3])
paddingH = 0.01 * W
paddingW = 0.02 * H
crop_img = img[int(rectangle[1]+paddingH):int(rectangle[3]-paddingH), int(rectangle[0]-paddingW):int(rectangle[2]+paddingW)]
if crop_img is None:
continue
if crop_img.shape[0] < 0 or crop_img.shape[1] < 0:
continue
cv2.rectangle(draw, (int(rectangle[0]), int(rectangle[1])), (int(rectangle[2]), int(rectangle[3])), (255, 0, 0), 1)
for i in range(5, 15, 2):
cv2.circle(draw, (int(rectangle[i + 0]), int(rectangle[i + 1])), 2, (0, 255, 0))
cv2.imwrite(apath+"/img/out1.jpg",draw)
cv2.imshow("test", draw)
c = cv2.waitKey(0) |
py | 1a3c475f6d236878757e802cd3ac30c49a64c9ce | #Crie um programa que vai gerar cinco números aleatórios e colocar em uma tupla. Depois disso, mostre a #listagem de números gerados e também indique o menor e o maior valor que estão na tupla
from random import randint
n1=randint(0,10)
n2=randint(0,10)
n3=randint(0,10)
n4=randint(0,10)
n5=randint(0,10)
maior=menor=0
sorteio=(n1,n2,n3,n4,n5)
print(sorteio)
print(f"O maior valor é: {max(sorteio)}.")
print(f"O menor valor é: {min(sorteio)}.") |
py | 1a3c4aa942db0e63f6272ff3f0912910a2b76039 | #
# The Python Imaging Library.
# $Id$
#
# base class for image file handlers
#
# history:
# 1995-09-09 fl Created
# 1996-03-11 fl Fixed load mechanism.
# 1996-04-15 fl Added pcx/xbm decoders.
# 1996-04-30 fl Added encoders.
# 1996-12-14 fl Added load helpers
# 1997-01-11 fl Use encode_to_file where possible
# 1997-08-27 fl Flush output in _save
# 1998-03-05 fl Use memory mapping for some modes
# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B"
# 1999-05-31 fl Added image parser
# 2000-10-12 fl Set readonly flag on memory-mapped images
# 2002-03-20 fl Use better messages for common decoder errors
# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available
# 2003-10-30 fl Added StubImageFile class
# 2004-02-25 fl Made incremental parser more robust
#
# Copyright (c) 1997-2004 by Secret Labs AB
# Copyright (c) 1995-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
from ._util import isPath
import io
import os
import sys
import struct
MAXBLOCK = 65536
SAFEBLOCK = 1024*1024
LOAD_TRUNCATED_IMAGES = False
ERRORS = {
-1: "image buffer overrun error",
-2: "decoding error",
-3: "unknown error",
-8: "bad configuration",
-9: "out of memory error"
}
def raise_ioerror(error):
try:
message = Image.core.getcodecstatus(error)
except AttributeError:
message = ERRORS.get(error)
if not message:
message = "decoder error %d" % error
raise IOError(message + " when reading image file")
#
# --------------------------------------------------------------------
# Helpers
def _tilesort(t):
# sort on offset
return t[2]
#
# --------------------------------------------------------------------
# ImageFile base class
class ImageFile(Image.Image):
"Base class for image file format handlers."
def __init__(self, fp=None, filename=None):
Image.Image.__init__(self)
self._min_frame = 0
self.tile = None
self.readonly = 1 # until we know better
self.decoderconfig = ()
self.decodermaxblock = MAXBLOCK
if isPath(fp):
# filename
self.fp = open(fp, "rb")
self.filename = fp
self._exclusive_fp = True
else:
# stream
self.fp = fp
self.filename = filename
# can be overridden
self._exclusive_fp = None
try:
self._open()
except (IndexError, # end of data
TypeError, # end of data (ord)
KeyError, # unsupported mode
EOFError, # got header but not the first frame
struct.error) as v:
# close the file only if we have opened it this constructor
if self._exclusive_fp:
self.fp.close()
raise SyntaxError(v)
if not self.mode or self.size[0] <= 0:
raise SyntaxError("not identified by this driver")
def draft(self, mode, size):
"Set draft mode"
pass
def verify(self):
"Check file integrity"
# raise exception if something's wrong. must be called
# directly after open, and closes file when finished.
if self._exclusive_fp:
self.fp.close()
self.fp = None
def load(self):
"Load image data based on tile list"
pixel = Image.Image.load(self)
if self.tile is None:
raise IOError("cannot load this image")
if not self.tile:
return pixel
self.map = None
use_mmap = self.filename and len(self.tile) == 1
# As of pypy 2.1.0, memory mapping was failing here.
use_mmap = use_mmap and not hasattr(sys, 'pypy_version_info')
readonly = 0
# look for read/seek overrides
try:
read = self.load_read
# don't use mmap if there are custom read/seek functions
use_mmap = False
except AttributeError:
read = self.fp.read
try:
seek = self.load_seek
use_mmap = False
except AttributeError:
seek = self.fp.seek
if use_mmap:
# try memory mapping
decoder_name, extents, offset, args = self.tile[0]
if decoder_name == "raw" and len(args) >= 3 and args[0] == self.mode \
and args[0] in Image._MAPMODES:
try:
if hasattr(Image.core, "map"):
# use built-in mapper WIN32 only
self.map = Image.core.map(self.filename)
self.map.seek(offset)
self.im = self.map.readimage(
self.mode, self.size, args[1], args[2]
)
else:
# use mmap, if possible
import mmap
fp = open(self.filename, "r")
size = os.path.getsize(self.filename)
self.map = mmap.mmap(fp.fileno(), size, access=mmap.ACCESS_READ)
self.im = Image.core.map_buffer(
self.map, self.size, decoder_name, extents, offset, args
)
readonly = 1
# After trashing self.im, we might need to reload the palette data.
if self.palette:
self.palette.dirty = 1
except (AttributeError, EnvironmentError, ImportError):
self.map = None
self.load_prepare()
err_code = -3 # initialize to unknown error
if not self.map:
# sort tiles in file order
self.tile.sort(key=_tilesort)
try:
# FIXME: This is a hack to handle TIFF's JpegTables tag.
prefix = self.tile_prefix
except AttributeError:
prefix = b""
for decoder_name, extents, offset, args in self.tile:
decoder = Image._getdecoder(self.mode, decoder_name,
args, self.decoderconfig)
try:
seek(offset)
decoder.setimage(self.im, extents)
if decoder.pulls_fd:
decoder.setfd(self.fp)
status, err_code = decoder.decode(b"")
else:
b = prefix
while True:
try:
s = read(self.decodermaxblock)
except (IndexError, struct.error): # truncated png/gif
if LOAD_TRUNCATED_IMAGES:
break
else:
raise IOError("image file is truncated")
if not s: # truncated jpeg
if LOAD_TRUNCATED_IMAGES:
break
else:
self.tile = []
raise IOError("image file is truncated "
"(%d bytes not processed)" % len(b))
b = b + s
n, err_code = decoder.decode(b)
if n < 0:
break
b = b[n:]
finally:
# Need to cleanup here to prevent leaks
decoder.cleanup()
self.tile = []
self.readonly = readonly
self.load_end()
if self._exclusive_fp and self._close_exclusive_fp_after_loading:
self.fp.close()
self.fp = None
if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
# still raised if decoder fails to return anything
raise_ioerror(err_code)
return Image.Image.load(self)
def load_prepare(self):
# create image memory if necessary
if not self.im or\
self.im.mode != self.mode or self.im.size != self.size:
self.im = Image.core.new(self.mode, self.size)
# create palette (optional)
if self.mode == "P":
Image.Image.load(self)
def load_end(self):
# may be overridden
pass
# may be defined for contained formats
# def load_seek(self, pos):
# pass
# may be defined for blocked formats (e.g. PNG)
# def load_read(self, bytes):
# pass
def _seek_check(self, frame):
if (frame < self._min_frame or
# Only check upper limit on frames if additional seek operations
# are not required to do so
(not (hasattr(self, "_n_frames") and self._n_frames is None) and
frame >= self.n_frames+self._min_frame)):
raise EOFError("attempt to seek outside sequence")
return self.tell() != frame
class StubImageFile(ImageFile):
"""
Base class for stub image loaders.
A stub loader is an image loader that can identify files of a
certain format, but relies on external code to load the file.
"""
def _open(self):
raise NotImplementedError(
"StubImageFile subclass must implement _open"
)
def load(self):
loader = self._load()
if loader is None:
raise IOError("cannot find loader for this %s file" % self.format)
image = loader.load(self)
assert image is not None
# become the other object (!)
self.__class__ = image.__class__
self.__dict__ = image.__dict__
def _load(self):
"(Hook) Find actual image loader."
raise NotImplementedError(
"StubImageFile subclass must implement _load"
)
class Parser(object):
"""
Incremental image parser. This class implements the standard
feed/close consumer interface.
"""
incremental = None
image = None
data = None
decoder = None
offset = 0
finished = 0
def reset(self):
"""
(Consumer) Reset the parser. Note that you can only call this
method immediately after you've created a parser; parser
instances cannot be reused.
"""
assert self.data is None, "cannot reuse parsers"
def feed(self, data):
"""
(Consumer) Feed data to the parser.
:param data: A string buffer.
:exception IOError: If the parser failed to parse the image file.
"""
# collect data
if self.finished:
return
if self.data is None:
self.data = data
else:
self.data = self.data + data
# parse what we have
if self.decoder:
if self.offset > 0:
# skip header
skip = min(len(self.data), self.offset)
self.data = self.data[skip:]
self.offset = self.offset - skip
if self.offset > 0 or not self.data:
return
n, e = self.decoder.decode(self.data)
if n < 0:
# end of stream
self.data = None
self.finished = 1
if e < 0:
# decoding error
self.image = None
raise_ioerror(e)
else:
# end of image
return
self.data = self.data[n:]
elif self.image:
# if we end up here with no decoder, this file cannot
# be incrementally parsed. wait until we've gotten all
# available data
pass
else:
# attempt to open this file
try:
with io.BytesIO(self.data) as fp:
im = Image.open(fp)
except IOError:
# traceback.print_exc()
pass # not enough data
else:
flag = hasattr(im, "load_seek") or hasattr(im, "load_read")
if flag or len(im.tile) != 1:
# custom load code, or multiple tiles
                    self.decoder = None
else:
# initialize decoder
im.load_prepare()
d, e, o, a = im.tile[0]
im.tile = []
self.decoder = Image._getdecoder(
im.mode, d, a, im.decoderconfig
)
self.decoder.setimage(im.im, e)
# calculate decoder offset
self.offset = o
if self.offset <= len(self.data):
self.data = self.data[self.offset:]
self.offset = 0
self.image = im
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
"""
(Consumer) Close the stream.
:returns: An image object.
:exception IOError: If the parser failed to parse the image file either
because it cannot be identified or cannot be
decoded.
"""
# finish decoding
if self.decoder:
# get rid of what's left in the buffers
self.feed(b"")
self.data = self.decoder = None
if not self.finished:
raise IOError("image was incomplete")
if not self.image:
raise IOError("cannot parse this image")
if self.data:
# incremental parsing not possible; reopen the file
            # now that we have all data
with io.BytesIO(self.data) as fp:
try:
self.image = Image.open(fp)
finally:
self.image.load()
return self.image
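# Illustrative usage sketch for the feed/close consumer interface above (not part of
# the original module; the file name is hypothetical):
#
#     parser = Parser()
#     with open("image.jpg", "rb") as fp:
#         while True:
#             chunk = fp.read(1024)
#             if not chunk:
#                 break
#             parser.feed(chunk)
#     im = parser.close()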
# --------------------------------------------------------------------
def _save(im, fp, tile, bufsize=0):
"""Helper to save image based on tile list
:param im: Image object.
:param fp: File object.
:param tile: Tile list.
:param bufsize: Optional buffer size
"""
im.load()
if not hasattr(im, "encoderconfig"):
im.encoderconfig = ()
tile.sort(key=_tilesort)
# FIXME: make MAXBLOCK a configuration parameter
# It would be great if we could have the encoder specify what it needs
# But, it would need at least the image size in most cases. RawEncode is
# a tricky case.
bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c
if fp == sys.stdout:
fp.flush()
return
try:
fh = fp.fileno()
fp.flush()
except (AttributeError, io.UnsupportedOperation):
# compress to Python file-compatible object
for e, b, o, a in tile:
e = Image._getencoder(im.mode, e, a, im.encoderconfig)
if o > 0:
fp.seek(o, 0)
e.setimage(im.im, b)
if e.pushes_fd:
e.setfd(fp)
l, s = e.encode_to_pyfd()
else:
while True:
l, s, d = e.encode(bufsize)
fp.write(d)
if s:
break
if s < 0:
raise IOError("encoder error %d when writing image file" % s)
e.cleanup()
else:
# slight speedup: compress to real file object
for e, b, o, a in tile:
e = Image._getencoder(im.mode, e, a, im.encoderconfig)
if o > 0:
fp.seek(o, 0)
e.setimage(im.im, b)
if e.pushes_fd:
e.setfd(fp)
l, s = e.encode_to_pyfd()
else:
s = e.encode_to_file(fh, bufsize)
if s < 0:
raise IOError("encoder error %d when writing image file" % s)
e.cleanup()
if hasattr(fp, "flush"):
fp.flush()
def _safe_read(fp, size):
"""
Reads large blocks in a safe way. Unlike fp.read(n), this function
doesn't trust the user. If the requested size is larger than
SAFEBLOCK, the file is read block by block.
:param fp: File handle. Must implement a <b>read</b> method.
:param size: Number of bytes to read.
:returns: A string containing up to <i>size</i> bytes of data.
"""
if size <= 0:
return b""
if size <= SAFEBLOCK:
return fp.read(size)
data = []
while size > 0:
block = fp.read(min(size, SAFEBLOCK))
if not block:
break
data.append(block)
size -= len(block)
return b"".join(data)
class PyCodecState(object):
def __init__(self):
self.xsize = 0
self.ysize = 0
self.xoff = 0
self.yoff = 0
def extents(self):
return (self.xoff, self.yoff,
self.xoff+self.xsize, self.yoff+self.ysize)
class PyDecoder(object):
"""
Python implementation of a format decoder. Override this class and
add the decoding logic in the `decode` method.
See :ref:`Writing Your Own File Decoder in Python<file-decoders-py>`
"""
_pulls_fd = False
def __init__(self, mode, *args):
self.im = None
self.state = PyCodecState()
self.fd = None
self.mode = mode
self.init(args)
def init(self, args):
"""
Override to perform decoder specific initialization
:param args: Array of args items from the tile entry
:returns: None
"""
self.args = args
@property
def pulls_fd(self):
return self._pulls_fd
def decode(self, buffer):
"""
Override to perform the decoding process.
        :param buffer: A bytes object with the data to be decoded. If `pulls_fd`
            is set, then `buffer` will be empty and `self.fd` will be set.
:returns: A tuple of (bytes consumed, errcode). If finished with decoding
return <0 for the bytes consumed. Err codes are from `ERRORS`
"""
raise NotImplementedError()
def cleanup(self):
"""
Override to perform decoder specific cleanup
:returns: None
"""
pass
def setfd(self, fd):
"""
Called from ImageFile to set the python file-like object
:param fd: A python file-like object
:returns: None
"""
self.fd = fd
def setimage(self, im, extents=None):
"""
Called from ImageFile to set the core output image for the decoder
:param im: A core image object
:param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle
for this tile
:returns: None
"""
# following c code
self.im = im
if extents:
(x0, y0, x1, y1) = extents
else:
(x0, y0, x1, y1) = (0, 0, 0, 0)
if x0 == 0 and x1 == 0:
self.state.xsize, self.state.ysize = self.im.size
else:
self.state.xoff = x0
self.state.yoff = y0
self.state.xsize = x1 - x0
self.state.ysize = y1 - y0
if self.state.xsize <= 0 or self.state.ysize <= 0:
raise ValueError("Size cannot be negative")
if (self.state.xsize + self.state.xoff > self.im.size[0] or
self.state.ysize + self.state.yoff > self.im.size[1]):
raise ValueError("Tile cannot extend outside image")
def set_as_raw(self, data, rawmode=None):
"""
Convenience method to set the internal image from a stream of raw data
:param data: Bytes to be set
:param rawmode: The rawmode to be used for the decoder. If not specified,
it will default to the mode of the image
:returns: None
"""
if not rawmode:
rawmode = self.mode
d = Image._getdecoder(self.mode, 'raw', (rawmode))
d.setimage(self.im, self.state.extents())
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
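# Illustrative sketch (not part of the original module): a minimal PyDecoder subclass
# as described above, pulling the open file object and handing its contents to
# set_as_raw(); the class name is hypothetical.
#
#     class MyRawDecoder(PyDecoder):
#         _pulls_fd = True
#
#         def decode(self, buffer):
#             self.set_as_raw(self.fd.read())
#             return -1, 0  # finished, no error code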
|
py | 1a3c4b0aff37a4376e9550334514c76b23964c5c | import os
import copy
import re
import yaml
from fabric.colors import yellow as _yellow
from ghost_log import log
from .provisioner import FeaturesProvisioner
SALT_PILLAR_TOP = {'base': {'*': ['features']}}
class FeaturesProvisionerSalt(FeaturesProvisioner):
""" Build features with SaltStack """
def __init__(self, log_file, unique_id, options, config, global_config):
FeaturesProvisioner.__init__(self, log_file, 'salt', unique_id, options, config, global_config)
self._salt_state_tree = os.path.join(self.local_repo_path, 'salt')
self._salt_pillar_roots = os.path.join(self.local_repo_path, 'pillar')
self._provisioner_log_level = self.global_config.get('provisioner_log_level', 'info')
self._salt_state_top_path = os.path.join(self._salt_state_tree, 'top.sls')
self._salt_pillar_top_path = os.path.join(self._salt_pillar_roots, 'top.sls')
self._salt_pillar_features_path = os.path.join(self._salt_pillar_roots, 'features.sls')
self._salt_additional_pillar = config.get('salt_additional_pillar', '')
def build_packer_provisioner_config(self, features_config):
features = self._format_provisioner_features(features_config)
provisioner_params = self._format_provisioner_params(features_config)
enabled_packer_salt_config = self._test_not_empty_salt_features(features)
if enabled_packer_salt_config:
self.build_provisioner_features_files(features, provisioner_params)
_provisionner_config = {
'type': 'salt-masterless',
'local_state_tree': self._salt_state_tree,
'local_pillar_roots': self._salt_pillar_roots,
'skip_bootstrap': self._options[0],
'log_level': self._provisioner_log_level,
}
else:
return None
return [_provisionner_config]
def build_provisioner_features_files(self, features, provisioner_params):
""" Build salt files only if features with salt provisioner """
self._build_salt_top(features)
self._build_salt_pillar(provisioner_params)
def build_packer_provisioner_cleanup(self):
return {
'type': 'shell',
'inline': [
"sudo rm -rf /srv/salt || echo 'Salt - no cleanup salt'",
"sudo rm -rf /srv/pillar || echo 'Salt - no cleanup pillar'"
]
}
    def _test_not_empty_salt_features(self, features):
        """ Test whether the features list is non-empty
>>> features = []
>>> import pprint
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._test_not_empty_salt_features(features))
False
>>> features = ['pkg']
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._test_not_empty_salt_features(features))
True
"""
return features != []
def _build_salt_top(self, params):
""" Build salt salt/top.sls file from features """
with open(self._salt_state_top_path, "w") as stream:
log("Salt - Writing Top state to: {0}".format(self._salt_state_top_path), self._log_file)
if os.path.exists(os.path.join(self._salt_state_tree, 'common')):
data = {'base': {'*': ['common'] + params}}
else:
data = {'base': {'*': params}}
log('Salt - state: top.sls: {0}'.format(data), self._log_file)
yaml.dump(data, stream, default_flow_style=False)
def _build_salt_pillar(self, features):
""" Build salt pillar/top.sls and pillar/features.sls """
data_top = copy.deepcopy(SALT_PILLAR_TOP)
with open(self._salt_pillar_top_path, "w") as stream_top:
if self._salt_additional_pillar != '':
data_top['base']['*'].append(self._salt_additional_pillar)
else:
log('Salt - No additional pillar to add', self._log_file)
log('Salt - pillar: top.sls: {0}'.format(data_top), self._log_file)
yaml.dump(data_top, stream_top, default_flow_style=False)
with open(self._salt_pillar_features_path, "w") as stream_features:
log(_yellow('Salt - pillar: features.sls: {0}'.format(features)), self._log_file)
yaml.dump(features, stream_features, default_flow_style=False)
    def _format_provisioner_features(self, features):
        """ Generates the formula dictionary object with all required features
>>> features = [{'name': 'pkg', 'version': 'git_vim'}, {'name': 'pkg', 'version': 'package=lsof'}, {'name': 'pkg', 'version': 'package=curl'}]
>>> FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_features(features)
['pkg']
>>> features = [{'name': 'pkg', 'version': 'git_vim', 'provisioner': 'salt'}, {'name': 'pkg', 'version': 'package=lsof', 'provisioner': 'salt'}, {'name': 'pkg', 'version': 'package=curl', 'provisioner': 'salt'}]
>>> FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_features(features)
['pkg']
>>> features = []
>>> FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_features(features)
[]
"""
top = []
for i in features:
if i.get('provisioner', self._default_provisioner) != self.name:
continue
if re.search('^(php|php5)-(.*)', i['name']):
continue
if re.search('^zabbix-(.*)', i['name']):
continue
if re.search('^gem-(.*)', i['name']):
continue
if not i['name'].encode('utf-8') in top:
top.append(i['name'].encode('utf-8'))
return top
    def _format_provisioner_params(self, features):
        """ Generates the pillar dictionary object with all required features and their options
>>> features = [{'name': 'pkg', 'version': 'git_vim'}, {'name': 'pkg', 'version': 'package=lsof'}, {'name': 'pkg', 'version': 'package=curl'}]
>>> import pprint
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_params(features).items())
[('pkg', {'package': ['lsof', 'curl'], 'version': 'git_vim'})]
>>> features = [{'name': 'pkg', 'version': 'git_vim', 'provisioner': 'salt'}, {'name': 'pkg', 'version': 'package=lsof', 'provisioner': 'salt'}, {'name': 'pkg', 'version': 'package=curl', 'provisioner': 'salt'}]
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_params(features).items())
[('pkg', {'package': ['lsof', 'curl'], 'version': 'git_vim'})]
>>> features = [{'name': 'pkg', 'version': 'git_vim', 'provisioner': 'ansible'}, {'name': 'pkg', 'version': 'package=lsof', 'provisioner': 'salt'}, {'name': 'pkg', 'version': 'package=curl', 'provisioner': 'salt'}]
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_params(features).items())
[('pkg', {'package': ['lsof', 'curl']})]
>>> features = [{'name': 'pkg', 'version': 'git_vim', 'provisioner': 'ansible'}, {'name': 'pkg', 'version': 'package=lsof', 'provisioner': 'ansible'}, {'name': 'pkg', 'version': 'package=curl', 'provisioner': 'ansible'}]
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_params(features).items())
[]
>>> features = []
>>> pprint.pprint(FeaturesProvisionerSalt(None, None, {}, {}, {})._format_provisioner_params(features).items())
[]
"""
pillar = {}
for ft in features:
if ft.get('provisioner', self._default_provisioner) != self.name:
continue
values = ft.get('version', '').split('=', 1) # Split only one time
feature_name = ft['name'].encode('utf-8')
if not feature_name in pillar:
pillar[feature_name] = {}
if len(values) == 2:
ft_param_key = values[0].encode('utf-8')
ft_param_val = values[1].encode('utf-8')
if not ft_param_key in pillar[feature_name]:
pillar[feature_name][ft_param_key] = []
pillar[feature_name][ft_param_key].append(ft_param_val)
else:
pillar[feature_name]['version'] = ft.get('version', '').encode('utf-8')
return pillar
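# Illustrative usage sketch (not part of the original module). The feature list, log path
# and empty option/config dicts below are assumptions made up for the example; in Ghost
# this provisioner is normally driven by the image-baking workflow rather than by hand.
#
#   features = [{'name': 'pkg', 'version': 'package=curl', 'provisioner': 'salt'}]
#   provisioner = FeaturesProvisionerSalt('/tmp/job.log', 'job-1', [False], {}, {})
#   packer_config = provisioner.build_packer_provisioner_config(features)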
|
py | 1a3c4d000a2d2f82f0aa52c1818b80cc5e2da371 | # -*- coding: utf8 -*-
# python >=3.8
import requests,time,re,json,random
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
headers = {
'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 9; MI 6 MIUI/20.6.18)'
}
# Get the login access code
def get_code(location):
code_pattern = re.compile("(?<=access=).*?(?=&)")
code = code_pattern.findall(location)[0]
return code
# Log in
def login(user,password):
url1 = "https://api-user.huami.com/registrations/+86" + user + "/tokens"
headers = {
"Content-Type":"application/x-www-form-urlencoded;charset=UTF-8",
"User-Agent":"MiFit/4.6.0 (iPhone; iOS 14.0.1; Scale/2.00)"
}
data1 = {
"client_id":"HuaMi",
"password":f"{password}",
"redirect_uri":"https://s3-us-west-2.amazonaws.com/hm-registration/successsignin.html",
"token":"access"
}
r1 = requests.post(url1,data=data1,headers=headers,allow_redirects=False)
location = r1.headers["Location"]
try:
code = get_code(location)
except:
return 0,0
    #print("access_code obtained successfully!")
#print(code)
url2 = "https://account.huami.com/v2/client/login"
data2 = {
"app_name":"com.xiaomi.hm.health",
"app_version":"4.6.0",
"code":f"{code}",
"country_code":"CN",
"device_id":"2C8B4939-0CCD-4E94-8CBA-CB8EA6E613A1",
"device_model":"phone",
"grant_type":"access_token",
"third_name":"huami_phone",
}
r2 = requests.post(url2,data=data2,headers=headers).json()
login_token = r2["token_info"]["login_token"]
    #print("login_token obtained successfully!")
#print(login_token)
userid = r2["token_info"]["user_id"]
    #print("userid obtained successfully!")
#print(userid)
return login_token,userid
# Main function
def main(user, passwd, step):
user = str(user)
password = str(passwd)
step = str(step)
if user == '' or password == '':
        print("Username or password is incorrect!")
return
if step == '':
        print("Step count set to a random value (10000-19999)")
step = str(random.randint(10000,19999))
login_token = 0
login_token,userid = login(user,password)
if login_token == 0:
        print("Login failed!")
return "login fail!"
t = get_time()
app_token = get_app_token(login_token)
today = time.strftime("%F")
data_json = '%5B%7B%22data_hr%22%3A%22%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9L%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FVv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0v%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9e%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0n%5C%2Fa%5C%2F%5C%2F%5C%2FS%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0b%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F1FK%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FR%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9PTFFpaf9L%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FR%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0j%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9K%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FOv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fzf%5C%2F%5C%2F%5C%2F86%5C%2Fzr%5C%2FOv88%5C%2Fzf%5C%2FPf%5C%2F%5C%2F%5C%2F0v%5C%2FS%5C%2F8%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FSf%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fz3%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0r%5C%2FOv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FS%5C%2F9L%5C%2Fzb%5C%2FSf9K%5C%2F0v%5C%2FRf9H%5C%2Fzj%5C%2FSf9K%5C%2F0%5C%2F%5C%2FN%5C%2F%5C%2F%5C%2F%5C%2F0D%5C%2FSf83%5C%2Fzr%5C%2FPf9M%5C%2F0v%5C%2FOv9e%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FS%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fzv%5C%2F%5C%2Fz7%5C%2FO%5C%2F83%5C%2Fzv%5C%2FN%5C%2F83%5C%2Fzr%5C%2FN%5C%2F86%5C%2Fz%5C%2F%5C%2FNv83%5C%2Fzn%5C%2FXv84%5C%2Fzr%5C%2FPP84%5C%2Fzj%5C%2FN%5C%2F9e%5C%2Fzr%5C%2FN%5C%2F89%5C%2F03%5C%2FP%5C%2F89%5C%2Fz3%5C%2FQ%5C%2F9N%5C%2F0v%5C%2FTv9C%5C%2F0H%5C%2FOf9D%5C%2Fzz%5C%2FOf88%5C%2Fz%5C%2F%5C%2FPP9A%5C%2Fzr%5C%2FN%5C%2F86%5C%2Fzz%5C%2FNv87%5C%2F0D%5C%2FOv84%5C%2F0v%5C%2FO%5C%2F84%5C%2Fzf%5C%2FMP83%5C%2FzH%5C%2FNv83%5C%2Fzf%5C%2FN%5C%2F84%5C%2Fzf%5C%2FOf82%5C%2Fzf%5C%2FOP83%5C%2Fzb%5C%2FMv81%5C%2FzX%5C%2FR%5C%2F9L%5C%2F0v%5C%2FO%5C%2F9I%5C%2F0T%5C%2FS%5C%2F9A%5C%2Fzn%5C%2FPf89%5C%2Fzn%5C%2FNf9K%5C%2F07%5C%2FN%5C%2F83%5C%2Fzn%5C%2FNv83%5C%2Fzv%5C%2FO%5C%2F9A%5C%2F0H%5C%2FOf8%5C%2F%5C%2Fzj%5C%2FPP83%5C%2Fzj%5C%2FS%5C%2F87%5C%2Fzj%5C%2FNv84%5C%2Fzf%5C%2FOf83%5C%2Fzf%5C%2FOf83%5C%2Fzb%5C%2FNv9L%5C%2Fzj%5C%2FNv82%5C%2Fzb%5C%2FN%5C%2F85%5C%2Fzf%5C%2FN%5C%2F9J%5C%2Fzf%5C%2FNv83%5C%2Fzj%5C%2FNv84%5C%2F0r%5C%2FSv83%5C%2Fzf%5C%2FMP%5C%2F%5C%2F%5C%2Fzb%5C%2FMv82%5C%2Fzb%5C%2FOf85%5C%2Fz7%5C%2FNv8%5C%2F%5C%2F0r%5C%2FS%5C%2F85%5C%2F0H%5C%2FQP9B%5C%2F0D%5C%2FNf89%5C%2Fzj%5C%2FOv83%5C%2Fzv%5C%2FNv8%5C%2F%5C%2F0f%5C%2FSv9O%5C%2F0ZeXv%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F1X%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9B%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2FTP%5C%2F%5C%2F%5C%2F1b%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F0%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F9N%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2F%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%
5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%5C%2Fv7%2B%22%2C%22date%22%3A%222021-08-07%22%2C%22data%22%3A%5B%7B%22start%22%3A0%2C%22stop%22%3A1439%2C%22value%22%3A%22UA8AUBQAUAwAUBoAUAEAYCcAUBkAUB4AUBgAUCAAUAEAUBkAUAwAYAsAYB8AYB0AYBgAYCoAYBgAYB4AUCcAUBsAUB8AUBwAUBIAYBkAYB8AUBoAUBMAUCEAUCIAYBYAUBwAUCAAUBgAUCAAUBcAYBsAYCUAATIPYD0KECQAYDMAYB0AYAsAYCAAYDwAYCIAYB0AYBcAYCQAYB0AYBAAYCMAYAoAYCIAYCEAYCYAYBsAYBUAYAYAYCIAYCMAUB0AUCAAUBYAUCoAUBEAUC8AUB0AUBYAUDMAUDoAUBkAUC0AUBQAUBwAUA0AUBsAUAoAUCEAUBYAUAwAUB4AUAwAUCcAUCYAUCwKYDUAAUUlEC8IYEMAYEgAYDoAYBAAUAMAUBkAWgAAWgAAWgAAWgAAWgAAUAgAWgAAUBAAUAQAUA4AUA8AUAkAUAIAUAYAUAcAUAIAWgAAUAQAUAkAUAEAUBkAUCUAWgAAUAYAUBEAWgAAUBYAWgAAUAYAWgAAWgAAWgAAWgAAUBcAUAcAWgAAUBUAUAoAUAIAWgAAUAQAUAYAUCgAWgAAUAgAWgAAWgAAUAwAWwAAXCMAUBQAWwAAUAIAWgAAWgAAWgAAWgAAWgAAWgAAWgAAWgAAWREAWQIAUAMAWSEAUDoAUDIAUB8AUCEAUC4AXB4AUA4AWgAAUBIAUA8AUBAAUCUAUCIAUAMAUAEAUAsAUAMAUCwAUBYAWgAAWgAAWgAAWgAAWgAAWgAAUAYAWgAAWgAAWgAAUAYAWwAAWgAAUAYAXAQAUAMAUBsAUBcAUCAAWwAAWgAAWgAAWgAAWgAAUBgAUB4AWgAAUAcAUAwAWQIAWQkAUAEAUAIAWgAAUAoAWgAAUAYAUB0AWgAAWgAAUAkAWgAAWSwAUBIAWgAAUC4AWSYAWgAAUAYAUAoAUAkAUAIAUAcAWgAAUAEAUBEAUBgAUBcAWR
YAUA0AWSgAUB4AUDQAUBoAXA4AUA8AUBwAUA8AUA4AUA4AWgAAUAIAUCMAWgAAUCwAUBgAUAYAUAAAUAAAUAAAUAAAUAAAUAAAUAAAUAAAUAAAWwAAUAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAeSEAeQ8AcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBcAcAAAcAAAcCYOcBUAUAAAUAAAUAAAUAAAUAUAUAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcCgAeQAAcAAAcAAAcAAAcAAAcAAAcAYAcAAAcBgAeQAAcAAAcAAAegAAegAAcAAAcAcAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcCkAeQAAcAcAcAAAcAAAcAwAcAAAcAAAcAIAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcCIAeQAAcAAAcAAAcAAAcAAAcAAAeRwAeQAAWgAAUAAAUAAAUAAAUAAAUAAAcAAAcAAAcBoAeScAeQAAegAAcBkAeQAAUAAAUAAAUAAAUAAAUAAAUAAAcAAAcAAAcAAAcAAAcAAAcAAAegAAegAAcAAAcAAAcBgAeQAAcAAAcAAAcAAAcAAAcAAAcAkAegAAegAAcAcAcAAAcAcAcAAAcAAAcAAAcAAAcA8AeQAAcAAAcAAAeRQAcAwAUAAAUAAAUAAAUAAAUAAAUAAAcAAAcBEAcA0AcAAAWQsAUAAAUAAAUAAAUAAAUAAAcAAAcAoAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAYAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBYAegAAcAAAcAAAegAAcAcAcAAAcAAAcAAAcAAAcAAAeRkAegAAegAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAEAcAAAcAAAcAAAcAUAcAQAcAAAcBIAeQAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBsAcAAAcAAAcBcAeQAAUAAAUAAAUAAAUAAAUAAAUBQAcBYAUAAAUAAAUAoAWRYAWTQAWQAAUAAAUAAAUAAAcAAAcAAAcAAAcAAAcAAAcAMAcAAAcAQAcAAAcAAAcAAAcDMAeSIAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcAAAcBQAeQwAcAAAcAAAcAAAcAMAcAAAeSoAcA8AcDMAcAYAeQoAcAwAcFQAcEMAeVIAaTYAbBcNYAsAYBIAYAIAYAIAYBUAYCwAYBMAYDYAYCkAYDcAUCoAUCcAUAUAUBAAWgAAYBoAYBcAYCgAUAMAUAYAUBYAUA4AUBgAUAgAUAgAUAsAUAsAUA4AUAMAUAYAUAQAUBIAASsSUDAAUDAAUBAAYAYAUBAAUAUAUCAAUBoAUCAAUBAAUAoAYAIAUAQAUAgAUCcAUAsAUCIAUCUAUAoAUA4AUB8AUBkAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAf
gAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAAfgAA%22%2C%22tz%22%3A32%2C%22did%22%3A%22DA932FFFFE8816E7%22%2C%22src%22%3A24%7D%5D%2C%22summary%22%3A%22%7B%5C%22v%5C%22%3A6%2C%5C%22slp%5C%22%3A%7B%5C%22st%5C%22%3A1628296479%2C%5C%22ed%5C%22%3A1628296479%2C%5C%22dp%5C%22%3A0%2C%5C%22lt%5C%22%3A0%2C%5C%22wk%5C%22%3A0%2C%5C%22usrSt%5C%22%3A-1440%2C%5C%22usrEd%5C%22%3A-1440%2C%5C%22wc%5C%22%3A0%2C%5C%22is%5C%22%3A0%2C%5C%22lb%5C%22%3A0%2C%5C%22to%5C%22%3A0%2C%5C%22dt%5C%22%3A0%2C%5C%22rhr%5C%22%3A0%2C%5C%22ss%5C%22%3A0%7D%2C%5C%22stp%5C%22%3A%7B%5C%22ttl%5C%22%3A18272%2C%5C%22dis%5C%22%3A10627%2C%5C%22cal%5C%22%3A510%2C%5C%22wk%5C%22%3A41%2C%5C%22rn%5C%22%3A50%2C%5C%22runDist%5C%22%3A7654%2C%5C%22runCal%5C%22%3A397%2C%5C%22stage%5C%22%3A%5B%7B%5C%22start%5C%22%3A327%2C%5C%22stop%5C%22%3A341%2C%5C%22mode%5C%22%3A1%2C%5C%22dis%5C%22%3A481%2C%5C%22cal%5C%22%3A13%2C%5C%22step%5C%22%3A680%7D%2C%7B%5C%22start%5C%22%3A342%2C%5C%22stop%5C%22%3A367%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A2295%2C%5C%22cal%5C%22%3A95%2C%5C%22step%5C%22%3A2874%7D%2C%7B%5C%22start%5C%22%3A368%2C%5C%22stop%5C%22%3A377%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A1592%2C%5C%22cal%5C%22%3A88%2C%5C%22step%5C%22%3A1664%7D%2C%7B%5C%22start%5C%22%3A378%2C%5C%22stop%5C%22%3A386%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1072%2C%5C%22cal%5C%22%3A51%2C%5C%22step%5C%22%3A1245%7D%2C%7B%5C%22start%5C%22%3A387%2C%5C%22stop%5C%22%3A393%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A1036%2C%5C%22cal%5C%22%3A57%2C%5C%22step%5C%22%3A1124%7D%2C%7B%5C%22start%5C%22%3A394%2C%5C%22stop%5C%22%3A398%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A488%2C%5C%22cal%5C%22%3A19%2C%5C%22step%5C%22%3A607%7D%2C%7B%5C%22start%5C%22%3A399%2C%5C%22stop%5C%22%3A414%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A2220%2C%5C%22cal%5C%22%3A120%2C%5C%22step%5C%22%3A2371%7D%2C%7B%5C%22start%5C%22%3A415%2C%5C%22stop%5C%22%3A427%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1268%2C%5C%22cal%5C%22%3A59%2C%5C%22step%5C%22%3A1489%7D%2C%7B%5C%22start%5C%22%3A428%2C%5C%22stop%5C%22%3A433%2C%5C%22mode%5C%22%3A1%2C%5C%22dis%5C%22%3A152%2C%5C%22cal%5C%22%3A4%2C%5C%22step%5C%22%3A238%7D%2C%7B%5C%22start%5C%22%3A434%2C%5C%22stop%5C%22%3A444%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A2295%2C%5C%22cal%5C%22%3A95%2C%5C%22step%5C%22%3A2874%7D%2C%7B%5C%22start%5C%22%3A445%2C%5C%22stop%5C%22%3A455%2C%5C%22mode%5C%22%3A4%2C%5
C%22dis%5C%22%3A1592%2C%5C%22cal%5C%22%3A88%2C%5C%22step%5C%22%3A1664%7D%2C%7B%5C%22start%5C%22%3A456%2C%5C%22stop%5C%22%3A466%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1072%2C%5C%22cal%5C%22%3A51%2C%5C%22step%5C%22%3A1245%7D%2C%7B%5C%22start%5C%22%3A467%2C%5C%22stop%5C%22%3A477%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A1036%2C%5C%22cal%5C%22%3A57%2C%5C%22step%5C%22%3A1124%7D%2C%7B%5C%22start%5C%22%3A478%2C%5C%22stop%5C%22%3A488%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A488%2C%5C%22cal%5C%22%3A19%2C%5C%22step%5C%22%3A607%7D%2C%7B%5C%22start%5C%22%3A489%2C%5C%22stop%5C%22%3A499%2C%5C%22mode%5C%22%3A4%2C%5C%22dis%5C%22%3A2220%2C%5C%22cal%5C%22%3A120%2C%5C%22step%5C%22%3A2371%7D%2C%7B%5C%22start%5C%22%3A500%2C%5C%22stop%5C%22%3A511%2C%5C%22mode%5C%22%3A3%2C%5C%22dis%5C%22%3A1268%2C%5C%22cal%5C%22%3A59%2C%5C%22step%5C%22%3A1489%7D%2C%7B%5C%22start%5C%22%3A512%2C%5C%22stop%5C%22%3A522%2C%5C%22mode%5C%22%3A1%2C%5C%22dis%5C%22%3A152%2C%5C%22cal%5C%22%3A4%2C%5C%22step%5C%22%3A238%7D%5D%7D%2C%5C%22goal%5C%22%3A8000%2C%5C%22tz%5C%22%3A%5C%2228800%5C%22%7D%22%2C%22source%22%3A24%2C%22type%22%3A0%7D%5D'
finddate = re.compile(r'.*?date%22%3A%22(.*?)%22%2C%22data.*?')
findstep = re.compile(r'.*?ttl%5C%22%3A(.*?)%2C%5C%22dis.*?')
data_json = re.sub(finddate.findall(data_json)[0], today, str(data_json))
data_json = re.sub(findstep.findall(data_json)[0], step, str(data_json))
url = f'https://api-mifit-cn.huami.com/v1/data/band_data.json?&t={t}'
head = {
"apptoken": app_token,
"Content-Type": "application/x-www-form-urlencoded"
}
data = f'userid={userid}&last_sync_data_time=1597306380&device_type=0&last_deviceid=DA932FFFFE8816E7&data_json={data_json}'
response = requests.post(url, data=data, headers=head).json()
#print(response)
    result = f"{user[:4]}****{user[-4:]}: [{now}] steps set to ({step}) " + response['message']
print(result)
return result
# Get the timestamp
def get_time():
url = 'http://api.m.taobao.com/rest/api3.do?api=mtop.common.getTimestamp'
response = requests.get(url,headers=headers).json()
t = response['data']['t']
return t
# Get the app_token
def get_app_token(login_token):
url = f"https://account-cn.huami.com/v1/client/app_tokens?app_name=com.xiaomi.hm.health&dn=api-user.huami.com%2Capi-mifit.huami.com%2Capp-analytics.huami.com&login_token={login_token}"
response = requests.get(url,headers=headers).json()
app_token = response['token_info']['app_token']
    #print("app_token obtained successfully!")
#print(app_token)
return app_token
# Push via ServerChan (legacy)
def push_wx(sckey, desp=""):
"""
    Push a message to WeChat
"""
if sckey == '':
        print("[Note] No sckey provided, skipping push!")
else:
server_url = f"https://sc.ftqq.com/{sckey}.send"
        params = {
            "text": 'Mi Fit step update',
"desp": desp
}
response = requests.get(server_url, params=params)
json_data = response.json()
if json_data['errno'] == 0:
            print(f"[{now}] Push succeeded.")
        else:
            print(f"[{now}] Push failed: {json_data['errno']}({json_data['errmsg']})")
# Push via ServerChan Turbo
def push_server(sckey, desp=""):
"""
    Push a message to WeChat
"""
if sckey == '':
        print("[Note] No sckey provided, skipping WeChat push!")
else:
server_url = f"https://sctapi.ftqq.com/{sckey}.send"
        params = {
            "title": 'Mi Fit step update',
"desp": desp
}
response = requests.get(server_url, params=params)
json_data = response.json()
if json_data['code'] == 0:
            print(f"[{now}] Push succeeded.")
        else:
            print(f"[{now}] Push failed: {json_data['code']}({json_data['message']})")
# Push via Telegram
def push_tg(token, chat_id, desp=""):
"""
    Push a message to Telegram
"""
if token == '':
        print("[Note] No token provided, skipping Telegram push!")
    elif chat_id == '':
        print("[Note] No chat_id provided, skipping Telegram push!")
else:
server_url = f"https://api.telegram.org/bot{token}/sendmessage"
        params = {
            "text": 'Mi Fit step update\n\n' + desp,
"chat_id": chat_id
}
response = requests.get(server_url, params=params)
json_data = response.json()
if json_data['ok'] == True:
            print(f"[{now}] Push succeeded.")
        else:
            print(f"[{now}] Push failed: {json_data['error_code']}({json_data['description']})")
if __name__ == "__main__":
# Push Mode
Pm = input()
if Pm == 'wx' or Pm == 'nwx':
# ServerChan
sckey = input()
if str(sckey) == '0':
sckey = ''
elif Pm == 'tg':
token = input()
sl = token.split('-')
if len(sl) != 2:
            print('Invalid Telegram push parameters!')
    # Username (phone number, e.g. 13800138000)
user = input()
    # Login password
passwd = input()
    # Step count to set: enter the desired value directly, or leave empty for a random count
step = input()
user_list = user.split('#')
passwd_list = passwd.split('#')
setp_array = step.split('-')
if len(user_list) == len(passwd_list):
push = ''
for line in range(0,len(user_list)):
if len(setp_array) == 2:
step = str(random.randint(int(setp_array[0]),int(setp_array[1])))
elif str(step) == '0':
step = ''
push += main(user_list[line], passwd_list[line], step) + '\n'
if Pm == 'wx':
push_wx(sckey, push)
elif Pm == 'nwx':
push_server(sckey, push)
elif Pm == 'tg':
push_tg(sl[0], sl[1], push)
else:
        print('The number of usernames and passwords does not match')
|
py | 1a3c4ddb2c51e86b4d38adc47a2f2b04f60c67d7 | import re
from collections import Counter
import numpy as np
def read_txt(data):
lines = []
with open(data, encoding='utf-8') as f:
for line in f:
lines.append(re.sub('\n', '', line))
return lines
def tokenizer(sentence):
tokens = re.findall(r"[\w]+|[^\s\w]", sentence)
return tokens
def build_character(sentences):
word_counter = Counter()
vocab = dict()
reverse_vocab = dict()
for sentence in sentences:
tokens = list(sentence)
word_counter.update(tokens)
vocab['<PAD>'] = 0
vocab['<GO>'] = 1
vocab['<UNK>'] = 2
vocab_idx = 3
for key, value in word_counter.most_common(len(word_counter)):
vocab[key] = vocab_idx
vocab_idx += 1
for key, value in vocab.items():
reverse_vocab[value] = key
vocab_size = len(vocab.keys())
return vocab, reverse_vocab, vocab_size
def build_vocab(sentences):
word_counter = Counter()
vocab = dict()
reverse_vocab = dict()
for sentence in sentences:
tokens = tokenizer(sentence)
word_counter.update(tokens)
vocab['<PAD>'] = 0
vocab['<GO>'] = 1
vocab['<UNK>'] = 2
vocab_idx = 3
for key, value in word_counter.most_common(len(word_counter)):
vocab[key] = vocab_idx
vocab_idx += 1
for key, value in vocab.items():
reverse_vocab[value] = key
vocab_size = len(vocab.keys())
return vocab, reverse_vocab, vocab_size
def sentence_to_char_index(lines, vocab, is_target=False):
tokens = []
indexes = []
max_len = 0
if len(lines) == 1:
tokens = list(lines[0])
for token in tokens:
if token in vocab.keys():
indexes.append(vocab[token])
else:
indexes.append(vocab['<UNK>'])
else:
for sentence in lines:
token = list(sentence)
tokens.append(token)
length = len(token)
if max_len < length:
if is_target == True:
max_len = length + 1
else:
max_len = length
for token in tokens:
temp = token
for _ in range(len(temp), max_len):
temp.append('<PAD>')
index = []
for char in temp:
if char in vocab.keys():
index.append(vocab[char])
else:
index.append(vocab['<UNK>'])
indexes.append(index)
return indexes
def sentence_to_word_index(lines, vocab, is_target=False):
tokens = []
indexes = []
max_len = 0
if type(lines) is str:
tokens = tokenizer(lines)
for token in tokens:
if token in vocab.keys():
indexes.append(vocab[token])
else:
indexes.append(vocab['<UNK>'])
else:
for sentence in lines:
token = tokenizer(sentence)
tokens.append(token)
length = len(token)
if max_len < length:
if is_target == True:
max_len = length + 1
else:
max_len = length
for token in tokens:
temp = token
for _ in range(len(temp), max_len):
temp.append('<PAD>')
index = []
for char in temp:
if char in vocab.keys():
index.append(vocab[char])
else:
index.append(vocab['<UNK>'])
indexes.append(index)
return indexes
def make_dataset(data):
input = []
target = []
for i in range(len(data)-1):
input.append(data[i])
target.append(data[i+1])
return input, target
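# Illustrative helper (not part of the original module): shows how the pieces above fit
# together. The two sample sentences are made up for the example.
def _example_usage():
    corpus = ["the cat sat on the mat", "the dog sat on the rug"]
    vocab, reverse_vocab, vocab_size = build_vocab(corpus)
    # Encode the corpus word by word, padding every sentence to the longest one.
    encoded = sentence_to_word_index(corpus, vocab)
    inputs, targets = make_dataset(encoded)
    # Iterate over shuffled mini-batches for a single epoch.
    for batch in batch_iter(list(zip(inputs, targets)), batch_size=2, num_epochs=1):
        print(batch)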
def batch_iter(data, batch_size, num_epochs, shuffle=True):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data)-1)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index] |
py | 1a3c4f0d1e6a45582f0976a46b7e4a76870cfe30 | # Given a person's height as input, build an algorithm that computes their ideal weight,
# using the formula: (72.7*altura) - 58
altura = float(input('Enter your height: '))
peso_ideal = (72.7 * altura) - 58
print('Your ideal weight is {}'.format(peso_ideal)) |
py | 1a3c51529ea620967ec3c984ca8a8a3019842bd5 | from abc import ABC, abstractmethod
class Observer(ABC):
@abstractmethod
def update(self):
pass
class Observable(ABC):
@abstractmethod
def register_observer(self, observer: Observer):
pass
@abstractmethod
def unregister_observer(self, observer: Observer):
pass
@abstractmethod
def notify_observers(self):
pass
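# Illustrative sketch (not part of the original module): one concrete pair of classes
# built on the two interfaces above. The names NewsFeed and EmailSubscriber are made up
# for the example.
class EmailSubscriber(Observer):
    def __init__(self, name: str):
        self.name = name

    def update(self):
        print(f"{self.name} received a notification")


class NewsFeed(Observable):
    def __init__(self):
        self._observers = []

    def register_observer(self, observer: Observer):
        self._observers.append(observer)

    def unregister_observer(self, observer: Observer):
        self._observers.remove(observer)

    def notify_observers(self):
        # Fan the event out to every registered observer.
        for observer in self._observers:
            observer.update()


# Example wiring:
#   feed = NewsFeed()
#   feed.register_observer(EmailSubscriber("alice"))
#   feed.notify_observers()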
|
py | 1a3c51c4e10ffb9223116f35115249ea88818703 | # Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Producer-consumer parameter pipeline test for the KFP v2 engine."""
from __future__ import annotations
import unittest
from pprint import pprint
import kfp
import kfp_server_api
from .producer_consumer_param import producer_consumer_param_pipeline
from ..test.util import KfpTask, TaskInputs, TaskOutputs, run_pipeline_func, TestCase, KfpMlmdClient
from ml_metadata.proto import Execution
def verify(run: kfp_server_api.ApiRun, mlmd_connection_config, **kwargs):
t = unittest.TestCase()
t.maxDiff = None # we always want to see full diff
t.assertEqual(run.status, 'Succeeded')
client = KfpMlmdClient(mlmd_connection_config=mlmd_connection_config)
tasks = client.get_tasks(run_id=run.id)
pprint(tasks)
t.assertEqual({
'consumer':
KfpTask(
name='consumer',
type='system.ContainerExecution',
state=Execution.State.COMPLETE,
inputs=TaskInputs(
parameters={
'input_value':
'Hello world, this is an output parameter\n'
},
artifacts=[]
),
outputs=TaskOutputs(parameters={}, artifacts=[])
),
'producer':
KfpTask(
name='producer',
type='system.ContainerExecution',
state=Execution.State.COMPLETE,
inputs=TaskInputs(
parameters={'input_text': 'Hello world'}, artifacts=[]
),
outputs=TaskOutputs(
parameters={
'output_value':
'Hello world, this is an output parameter\n'
},
artifacts=[]
)
)
}, tasks)
if __name__ == '__main__':
run_pipeline_func([
TestCase(
pipeline_func=producer_consumer_param_pipeline,
verify_func=verify,
mode=kfp.dsl.PipelineExecutionMode.V2_ENGINE,
),
])
|
py | 1a3c524d8910a2199c39cc4bce8ae0949d9d7533 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
# Given a sentence and an incident id, check their similarity
# Usage:
# sen_incident.py _input_sentence_ -i incident id -v [optional]
# Needs w2v for word2vec, sif for SIF, vec for cached vecs
# if -v is used, need inc_ids.json and inc_sen.json
import argparse
import numpy as np
from fn_machine_learning_nlp.lib.file_manage import FileManage
from fn_machine_learning_nlp.lib.nlp.res_sen2vec import ResSen2Vec
from fn_machine_learning_nlp.lib.nlp.res_sif import ResSIF
from fn_machine_learning_nlp.lib.nlp.res_nlp import ResNLP
from fn_machine_learning_nlp.lib.nlp.word_sentence_utils import WordSentenceUtils
import json
from nltk.corpus import words
setofwords = set(words.words())
SIF_A = 10e-3
parser = argparse.ArgumentParser(description="Find similarity between given sentence and incident")
parser.add_argument("sentence",
help="input sentence")
parser.add_argument("-i", "--incident",
help="incident id")
parser.add_argument("-s", "--sif",
help="SIF file",
default=FileManage.DEFAULT_SIF_FILE)
parser.add_argument("-w", "--w2v",
help="trained word2vec model",
default=FileManage.DEFAULT_NLP_FILE)
parser.add_argument("-v", "--vec",
help="saved vectors for incidents",
default=FileManage.DEFAULT_VEC_FILE)
parser.add_argument("-d", "--debug",
help="print extra debug information",
action="store_true")
parser.add_argument("-a", "--all_ids",
help="json file of list of all incident ids",
default="inc_ids.json")
parser.add_argument("-e", "--inc_sen",
help="json file of list of words for incidents",
default="inc_sen.json")
args, unknow_args = parser.parse_known_args()
sentence = args.sentence
inc_id = int(args.incident)
sif_file = args.sif
w2v_file = args.w2v
vec_file = args.vec
debug = args.debug
all_ids_file = args.all_ids
inc_sen_file = args.inc_sen
s_util = WordSentenceUtils()
sif = ResSIF()
sif.load_sif(sif_file)
w2v = ResNLP()
w2v.load_model(w2v_file)
vec = ResSen2Vec(w2v.word2vec, sif)
vec.load_s2v(vec_file)
inc_vec = vec.get_incident_vec(str(inc_id))
sen_vec = vec.get_vec_for_sentence(sentence)
u = []
with open("vec_en_new_pcs.json", "r") as infile:
u = json.load(infile)
u = np.multiply(u, np.transpose(u))
sub = np.multiply(u, sen_vec)
sen_vec = np.subtract(sen_vec, sub)
inc_vec_norm = np.linalg.norm(inc_vec)
sen_vec_norm = np.linalg.norm(sen_vec)
sim = np.dot(inc_vec, sen_vec)/(np.linalg.norm(inc_vec) * np.linalg.norm(sen_vec))
sim1 = np.dot(inc_vec, sen_vec)/(inc_vec_norm * sen_vec_norm)
print("Similarity between input incident and sentence:")
print("\t\t%-30s %s"%("similarity:", sim))
if debug:
print("Debug information:")
with open(inc_sen_file, "r") as infile:
sentences = json.load(infile)
with open(all_ids_file, "r") as infile:
ids = json.load(infile)
inc_id_index = None
for i in range(len(ids)):
if ids[i] == inc_id:
inc_id_index = i
break
if inc_id_index is not None:
words = sentences[inc_id_index]
inc_v = np.zeros(w2v.word2vec.vector_size)
inv_v_count = 0
for w in words:
if w in setofwords:
wc = sif.get_word_count(w)
if wc < 300:
wc = 300 - (300 - wc) / 3
a_value = SIF_A / (SIF_A + wc)
try:
w_v = w2v.get_word_vec(w)
inc_v += np.multiply(a_value, w_v)
inv_v_count += 1
except:
pass
if inv_v_count > 0:
inc_v /= inv_v_count
#inc_v = vec.get_vec_for_words(sentences[inc_id_index])
sub = np.multiply(u, inc_v)
inc_v = np.subtract(inc_v, sub)
sim1 = np.dot(inc_vec, inc_v)/(np.linalg.norm(inc_vec) * np.linalg.norm(inc_v))
print("\trecompute incident vec, and check with cached one. Sim shall be close to 1:")
print("\t\t%-30s %s" % ("recom sim:", sim1))
wrd1 = None
wrd2 = None
sim_max = 0
words_1 = s_util.get_words(sentence)
words_2 = sentences[inc_id_index]
for w1 in words_1:
for w2 in words_2:
try:
v1 = w2v.get_word_vec(w1)
v2 = w2v.get_word_vec(w2)
v1_norm = np.linalg.norm(v1)
v2_norm = np.linalg.norm(v2)
if v1_norm > 0 and v2_norm > 0:
sim1 = np.dot(v1, v2) / (v1_norm * v2_norm)
if abs(sim1) > sim_max:
sim_max = abs(sim1)
wrd1 = w1
wrd2 = w2
except:
pass
if sim_max != 0:
print("\ttop matching words:")
print("\t\t%-30s %s" % ("sentence:", wrd1))
print("\t\t%-30s %s" % ("incident:", wrd2))
print("\t\t%-30s %s" % ("similarity:", sim_max))
print("\tsentence top 5 word count:")
s_count = [(w, sif.get_word_count(w)) for w in words_1]
s_count.sort(key=lambda u: u[1])
for i in range(min(5, len(s_count))):
print("\t\t%-30s %s" % (s_count[i][0] + ':', s_count[i][1]))
print("\tincident top 5 word count:")
s_count = [(w, sif.get_word_count(w)) for w in words_2]
s_count.sort(key=lambda u: u[1])
s_tmp = [s for s in s_count if s[1] > 0]
for i in range(min(5, len(s_tmp))):
print("\t\t%-30s %s" % (s_tmp[i][0] + ':', s_tmp[i][1]))
count_threshold = 10
v2_high = np.zeros(w2v.word2vec.vector_size)
v2_low = np.zeros(w2v.word2vec.vector_size)
high_count = 0
low_count = 0
v2_all = np.zeros(w2v.word2vec.vector_size)
v2_all_count = 0
sen_vec_norm = np.linalg.norm(sen_vec)
total_wc = 0
for w2 in words_2:
total_wc += sif.get_word_count(w2)
res = []
for w2 in words_2:
wc = sif.get_word_count(w2)
a_value = SIF_A/(SIF_A + wc)
try:
w_v = w2v.get_word_vec(w2)
w_sim = np.dot(w_v, sen_vec)/(np.linalg.norm(w_v) * sen_vec_norm)
res.append((w2, wc, w_sim))
#if wc/total_wc < 0.005:
#if wc < 500 and w_sim < 0.50:
if wc > 10:
v2_high += np.multiply(a_value, w2v.get_word_vec(w2))
high_count += 1
if wc > 100:
v2_low += np.multiply(a_value, w2v.get_word_vec(w2))
low_count += 1
v2_all += np.multiply(a_value, w2v.get_word_vec(w2))
v2_all_count += 1
except:
pass
if high_count > 0:
v2_high /= high_count
if low_count > 0:
v2_low /= low_count
if v2_all_count > 0:
v2_all /= v2_all_count
sim_high = np.dot(v2_high, sen_vec)/(np.linalg.norm(v2_high) * sen_vec_norm)
sim_low = np.dot(v2_low, sen_vec)/(np.linalg.norm(v2_low) * sen_vec_norm)
sim_all = np.dot(v2_all, sen_vec) / (np.linalg.norm(v2_all) * sen_vec_norm)
print("\tLow sim: {}".format(sim_low))
print("\tHigh sim: {}".format(sim_high))
print("\tAll sim: {}".format(sim_all))
res.sort(key=lambda u:u[2])
for w in res:
print("%-20s, %-8s, %s"%(w[0], str(w[1]), str(w[2]))) |
py | 1a3c52887a0dcc4106f2d24a8633def4b5a9f3c8 | import asyncio
from dataclasses import asdict, dataclass, field
from io import BytesIO
import json
import random
from confluent_kafka import Producer
from faker import Faker
from fastavro import parse_schema, writer
faker = Faker()
BROKER_URL = "PLAINTEXT://localhost:9092"
@dataclass
class ClickAttribute:
element: str = field(default_factory=lambda: random.choice(["div", "a", "button"]))
content: str = field(default_factory=faker.bs)
@classmethod
def attributes(self):
return {faker.uri_page(): ClickAttribute() for _ in range(random.randint(1, 5))}
@dataclass
class ClickEvent:
email: str = field(default_factory=faker.email)
timestamp: str = field(default_factory=faker.iso8601)
uri: str = field(default_factory=faker.uri)
number: int = field(default_factory=lambda: random.randint(0, 999))
attributes: dict = field(default_factory=ClickAttribute.attributes)
#
# TODO: Update this Avro schema to include a map of attributes
# See: https://avro.apache.org/docs/1.8.2/spec.html#Maps
#
schema = parse_schema(
{
"type": "record",
"name": "click_event",
"namespace": "com.udacity.lesson3.exercise2",
"fields": [
{"name": "email", "type": "string"},
{"name": "timestamp", "type": "string"},
{"name": "uri", "type": "string"},
{"name": "number", "type": "int"},
{
"name": "attributes",
"type": {
"type": "map",
"values": {
"type": "record",
"name": "attribute",
"fields": [
{"name": "element", "type": "string"},
{"name": "content", "type": "string"},
],
},
},
},
],
}
)
def serialize(self):
"""Serializes the ClickEvent for sending to Kafka"""
out = BytesIO()
writer(out, ClickEvent.schema, [asdict(self)])
return out.getvalue()
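# Illustrative helper (not part of the original exercise): reads back a payload produced
# by ClickEvent.serialize(). Because fastavro's writer() emits an Avro object-container
# file with the schema embedded, reader() needs no extra arguments here.
def deserialize(payload: bytes) -> list:
    from fastavro import reader  # local import to keep the sketch self-contained

    return list(reader(BytesIO(payload)))


# Example round trip:
#   records = deserialize(ClickEvent().serialize())
#   print(records[0]["email"], records[0]["attributes"])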
async def produce(topic_name):
"""Produces data into the Kafka Topic"""
p = Producer({"bootstrap.servers": BROKER_URL})
while True:
p.produce(topic_name, ClickEvent().serialize())
await asyncio.sleep(1.0)
def main():
    """Runs the producer until interrupted"""
try:
asyncio.run(produce_consume("com.udacity.lesson3.solution3.clicks"))
except KeyboardInterrupt as e:
print("shutting down")
async def produce_consume(topic_name):
"""Runs the Producer and Consumer tasks"""
await asyncio.create_task(produce(topic_name))
if __name__ == "__main__":
main()
|
py | 1a3c52b1fcce4fbbf3e2e752a3e84fee45547809 | """ppm cache - manage the resource package cache."""
from pmfp.utils.endpoint import EndPoint
from ..core import ppm
class Cache(EndPoint):
    """Commands related to the resource package cache."""
cache = ppm.regist_sub(Cache)
|
py | 1a3c52f58d08d645400811e6df1d78ac30b35d3f | print('=====Desafio 035=====')
num = int(input('Tell me any number: '))
resultado = num % 2
if resultado == 0:
    print('The number {} is EVEN'.format(num))
else:
    print('The number {} is ODD'.format(num))
|
py | 1a3c53238343bd9f422dcff440bba7f90ddc2df8 | #!/usr/bin/env python3
import os
from pathlib import Path
import shutil
import argparse
import json
from pprint import pprint
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from vfat import Vfat
# ---------------------------------------------------------------
## CONSTANTS
floppy_size = 1536 * 1024 # REVIEW: 1536 or 1440?
# ---------------------------------------------------------------
## ARGS
parser = argparse.ArgumentParser("Extract Akai MPC 2000 floppy files")
parser.add_argument("--src", help="path to disk image file or device (/dev/sd?)", required=True)
parser.add_argument("--floppy", help="virtual floppy id(s), list and ranges supported", required=False)
parser.add_argument("--dest", help="folder to write to", required=False)
parser.add_argument("--out-format", help="output format for listing files", choices=['txt', 'json'], required=False)
parser.add_argument("-v", "--verbose", action = "store_true")
args = parser.parse_args()
sudo_user = ''
if 'SUDO_USER' in os.environ:
sudo_user = os.environ["SUDO_USER"]
if args.src.startswith("~/"):
args.src = args.src.replace("~/", "~"+sudo_user+"/")
args.src = os.path.expanduser(args.src)
if args.dest and args.dest.startswith("~/"):
args.dest = args.dest.replace("~/", "~"+sudo_user+"/")
args.dest = os.path.expanduser(args.dest)
if not args.dest:
args.verbose = True
if not args.out_format:
# NB: default option doesn't seem to work / choices
args.out_format = 'txt'
# print(args.out_format)
floppy_list = []
if args.floppy:
floppy_ranges = args.floppy.split(',')
for frange in floppy_ranges:
split_frange = frange.split('-')
if len(split_frange) == 2:
f, t = split_frange
floppy_list.extend(range(int(f), int(t)+1))
else:
floppy_list.append(int(frange))
floppy_list = list(set(floppy_list))
if args.src.startswith("/dev/sd"):
if not floppy_list:
parser.error("When targeting a Gotek-formated USB drive, please precise `--floppy`, i.e. which virtual floppy to extract.")
## ------------------------------------------------------------------------
## FUNCTIONS: GENERIC
def is_printable_ascii_char(c):
return c >= 0x20 and c <= 0x7e
def bytes_to_ascii(byte_arr):
filtered_arr = bytearray()
for b in byte_arr:
if is_printable_ascii_char(b):
filtered_arr.append(b)
return filtered_arr.decode(u"ASCII")
## ------------------------------------------------------------------------
## FUNCTIONS: FIELD PARSING
def parse_vfat_lfn(r):
lfn_arr = bytearray()
for i in [1, 3, 5, 7, 9]:
lfn_arr.append(r.file_name[i])
for i in [2, 4, 6, 8]:
lfn_arr.append(r.reserved[i])
r_time = r.time.to_bytes(2, 'little')
for i in [0]:
lfn_arr.append(r_time[i])
r_date = r.date.to_bytes(2, 'little')
for i in [0]:
lfn_arr.append(r_date[i])
r_size = r.file_size.to_bytes(4, 'little')
for i in [0, 2]:
lfn_arr.append(r_size[i])
return bytes_to_ascii(lfn_arr)
def parse_mpc_lfn_ext(reserved):
return bytes_to_ascii(reserved[:-2]).replace("[Q", "").rstrip()
## ------------------------------------------------------------------------
## FUNCTIONS: FLOPPY PARSING
def get_floppy_file_list(floppy_bytes, vfloppy_offest=0):
data = Vfat.from_bytes(floppy_bytes)
# those might always the same for FAT12 but whatever...
bytes_per_ls = data.boot_sector.bpb.bytes_per_ls
ls_per_clus = data.boot_sector.bpb.ls_per_clus
clus_size = bytes_per_ls * ls_per_clus
data_start_clus = 33
# cf https://www.eit.lth.se/fileadmin/eit/courses/eitn50/Literature/fat12_description.pdf
start_clus_offset = None
parsed_files = []
if data.boot_sector.is_fat32:
floppy_name = data.boot_sector.ebpb_fat32.partition_volume_label
else:
floppy_name = data.boot_sector.ebpb_fat16.partition_volume_label
current_vfat_lfn = ""
for r in data.root_dir.records:
# NB: the records index is at 0x2600
if r.attribute in [8, 0]: # current dir, empty slot
continue
if r.attribute == 15: # vFAT LFN
current_vfat_lfn = parse_vfat_lfn(r)
continue
if r.file_size == 0: # empty file
if current_vfat_lfn:
current_vfat_lfn = ""
continue
sfn_no_ext = bytes_to_ascii(r.file_name[:-3]).rstrip()
ext = r.file_name[-3:].decode(u"ASCII")
# NB: MPC implementation of LFN uses reserved bytes of a record instead of separate record
mpc_lfn_part = parse_mpc_lfn_ext(r.reserved)
mpc_fn = sfn_no_ext + mpc_lfn_part + "." + ext
if mpc_lfn_part:
fn = mpc_fn
elif current_vfat_lfn:
fn = current_vfat_lfn
else:
fn = mpc_fn
if args.verbose and args.out_format == "txt":
fn_text = mpc_fn
if current_vfat_lfn:
fn_text += " (" + current_vfat_lfn + ")"
print("- " + fn_text)
print(" start cluster: #" + str(r.start_clus))
print(" size: " + str(r.file_size))
if start_clus_offset is None:
start_bytes = data_start_clus * clus_size
start_clus_offset = r.start_clus
else:
start_bytes = (data_start_clus - start_clus_offset + r.start_clus) * clus_size
current_vfat_lfn = ""
if args.verbose and args.out_format == "txt":
print(" start pos in floppy: " + str(start_bytes))
if vfloppy_offest:
print(" start pos in img: " + str(vfloppy_offest + start_bytes))
parsed_files.append({
'name': fn,
'start': vfloppy_offest + start_bytes,
'size': r.file_size,
})
return (floppy_name, parsed_files)
def extract_parsed_files(parsed_files, floppy_id=None):
dest_dir = args.dest
if floppy_id:
dest_dir = args.dest.rstrip("/") + "/" + str(floppy_id) + "/"
Path(dest_dir).mkdir(parents=True, exist_ok=True)
if sudo_user:
shutil.chown(dest_dir, sudo_user, sudo_user)
with open(args.src, 'rb') as f:
for props in parsed_files:
f.seek(props['start'], 0)
file_bytes = f.read(props['size'])
with open(dest_dir + props['name'], "wb") as out_f:
out_f.write(file_bytes)
if sudo_user:
shutil.chown(dest_dir + props['name'], sudo_user, sudo_user)
## ------------------------------------------------------------------------
## PARSE FLOPPY IMAGES
vfloppy_offset = 0
file_bytes = None
f = open(args.src, 'rb')
if floppy_list:
parsed_files = []
for floppy in floppy_list:
if args.verbose and args.out_format == "txt":
print("-"*35)
print("FLOPPY #" + str(floppy))
vfloppy_offset = floppy * 1536 * 1024
f.seek(vfloppy_offset, 0)
file_bytes = f.read(floppy_size)
(name, files) = get_floppy_file_list(file_bytes, vfloppy_offset)
parsed_files.append({
'name': name,
'files': files,
})
else:
file_bytes = f.read(floppy_size)
(name, parsed_files) = get_floppy_file_list(file_bytes, vfloppy_offset)
f.close()
## ------------------------------------------------------------------------
## EXTRACT FILES
if not args.dest:
if args.out_format == "json":
print(json.dumps(parsed_files))
exit(0)
if floppy_list:
    for f_id, props in zip(floppy_list, parsed_files):
        files = props['files']
        if files:
            extract_parsed_files(files, f_id)
else:
extract_parsed_files(parsed_files)
print("Extraction complete!")
|
py | 1a3c53968085e7042d9502e9f80763077403a7dd | # -*- coding: utf-8 -*-
import os
import fnmatch
from django.apps import apps
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import color_style
from django.template.loader import get_template
from django_extensions.compat import get_template_setting
from django_extensions.management.utils import signalcommand
#
# TODO: Render the template with fake request object ?
#
class Command(BaseCommand):
args = ''
help = "Validate templates on syntax and compile errors"
ignores = set([
".DS_Store",
"*.swp",
"*~",
])
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--no-apps', action='store_true', dest='no_apps',
default=False, help="Do not automatically include apps.")
parser.add_argument(
'--break', '-b', action='store_true', dest='break',
default=False, help="Break on first error.")
parser.add_argument(
'--include', '-i', action='append', dest='includes',
default=[], help="Append these paths to TEMPLATE DIRS")
parser.add_argument(
'--ignore-app', action='append', dest='ignore_apps',
default=[], help="Ignore these apps")
def ignore_filename(self, filename):
filename = os.path.basename(filename)
for ignore_pattern in self.ignores:
if fnmatch.fnmatch(filename, ignore_pattern):
return True
return False
@signalcommand
def handle(self, *args, **options):
if hasattr(settings, 'VALIDATE_TEMPLATES_IGNORES'):
self.ignores = getattr(settings, 'VALIDATE_TEMPLATES_IGNORES')
style = color_style()
template_dirs = set(get_template_setting('DIRS'))
template_dirs |= set(options['includes'])
template_dirs |= set(getattr(settings, 'VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS', []))
if not options['no_apps']:
ignore_apps = options['ignore_apps']
if not ignore_apps and hasattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS'):
ignore_apps = getattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS')
for app in apps.get_app_configs():
if app.name in ignore_apps:
continue
app_template_dir = os.path.join(app.path, 'templates')
if os.path.isdir(app_template_dir):
template_dirs.add(app_template_dir)
template_dirs = map(lambda path: os.path.abspath(path), template_dirs)
settings.TEMPLATES[0]['DIRS'] = list(template_dirs)
settings.TEMPLATE_DEBUG = True
verbosity = options["verbosity"]
errors = 0
for template_dir in template_dirs:
for root, dirs, filenames in os.walk(template_dir):
for filename in filenames:
if self.ignore_filename(filename):
continue
filepath = os.path.join(root, filename)
if verbosity > 1:
self.stdout.write(filepath)
try:
get_template(filepath)
except Exception as e:
errors += 1
self.stdout.write("%s: %s" % (filepath, style.ERROR("%s %s" % (e.__class__.__name__, str(e)))))
if errors and options['break']:
raise CommandError("Errors found")
if errors:
raise CommandError("%s errors found" % errors)
self.stdout.write("%s errors found" % errors)
|
py | 1a3c53984bcdfd1e85f59cfda6e43546a9cde0ef | from flask_caching import Cache
from flask_jwt_oidc import JwtManager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from .config import Config
from .helper import Api
db = SQLAlchemy()
migrate = Migrate(compare_type=True)
jwt = JwtManager()
cache = Cache()
api = Api(
prefix=f'{Config.BASE_PATH}',
doc=f'{Config.BASE_PATH}/',
default='nris_api',
default_label='NRIS related operations')
|
py | 1a3c545c8a6d49c466c739406b6a17f7b4cabb27 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Microsoft Public License. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Microsoft Public License, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Microsoft Public License.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
add_clr_assemblies("loadorder_3")
# namespace First {
# public class Generic1<K, V> {
# public static string Flag = typeof(Generic1<,>).FullName;
# }
# }
import First
from First import *
AreEqual(First.Generic1[str, str].Flag, "First.Generic1`2")
add_clr_assemblies("loadorder_3h")
# namespace First {
# public class Generic1<T> {
# public static string Flag = typeof(Generic1<>).FullName;
# }
# }
AreEqual(First.Generic1[str, str].Flag, "First.Generic1`2")
AreEqual(First.Generic1[int].Flag, "First.Generic1`1")
AssertError(ValueError, lambda: Generic1[int]) # !!!
AreEqual(Generic1[str, str].Flag, "First.Generic1`2")
from First import *
AreEqual(Generic1[str, str].Flag, "First.Generic1`2")
AreEqual(Generic1[int].Flag, "First.Generic1`1")
|
py | 1a3c55d0cea89faafab53d01aa73ef31cd77689a | Number_one = float(input("Enter the first number"))
Number_two = float(input("Enter the second number"))
Number_three = float(input("Enter the third number"))
Number_four = float(input("Enter the fourth number"))
Sum_1 = Number_one + Number_two
Sum_2 = Number_three + Number_four
Div = Sum_1/Sum_2
print("% .2f" % Div) |
py | 1a3c5621a635addec488ab78b4bff0c3ed345f5b | # File name: exercise3.py
# Author: Steve Hommy
# Description: Sorting list in ascending order
# Asking user for range of items that will be on list
number_of_elements = int(input("Enter number of elements in list: "))
# Creating lists
number_list = []
word_list = []
# Appending intgeres and strings to the list
for i in range(number_of_elements):
number = int(input("Enter number: "))
number_list.append(number)
for i in range(number_of_elements):
word = input("Type anything: ")
word_list.append(word)
# Sorting lists in ascending order
number_list.sort()
word_list.sort()
# Prints out lists
print(number_list)
print(word_list)
|
py | 1a3c56da59ece5d4db9b3f77f24d9fb1c2a12469 | import frappe
def execute():
frappe.db.sql("""
UPDATE
`tabPrint Format`
SET
`tabPrint Format`.`parent`='',
`tabPrint Format`.`parenttype`='',
`tabPrint Format`.parentfield=''
WHERE
`tabPrint Format`.parent != ''
OR `tabPrint Format`.parenttype != ''
""") |
py | 1a3c575f576872fef5b64f535e7149c5efa1fdfb | from typing import List, Optional
from spacy.language import Language
from spacy.tokens import Doc, Span, Token
from edsnlp.pipelines.qualifiers.base import Qualifier
from edsnlp.pipelines.terminations import termination
from edsnlp.utils.filter import consume_spans, filter_spans, get_spans
from edsnlp.utils.inclusion import check_inclusion
from edsnlp.utils.resources import get_verbs
from .patterns import following, preceding, pseudo, verbs_eds, verbs_hyp
class Hypothesis(Qualifier):
"""
Hypothesis detection with spaCy.
The component looks for five kinds of expressions in the text :
- preceding hypothesis, ie cues that precede a hypothetic expression
- following hypothesis, ie cues that follow a hypothetic expression
- pseudo hypothesis : contain a hypothesis cue, but are not hypothesis
(eg "pas de doute"/"no doubt")
- hypothetic verbs : verbs indicating hypothesis (eg "douter")
- classic verbs conjugated to the conditional, thus indicating hypothesis
Parameters
----------
nlp : Language
spaCy nlp pipeline to use for matching.
pseudo : Optional[List[str]]
List of pseudo hypothesis cues.
preceding : Optional[List[str]]
List of preceding hypothesis cues
following : Optional[List[str]]
List of following hypothesis cues.
verbs_hyp : Optional[List[str]]
List of hypothetic verbs.
verbs_eds : Optional[List[str]]
List of mainstream verbs.
filter_matches : bool
Whether to filter out overlapping matches.
attr : str
spaCy's attribute to use:
        a string with the value "TEXT" or "NORM", or a dict with the key 'term_attr'.
        We can also add a key for each regex.
on_ents_only : bool
Whether to look for matches around detected entities only.
Useful for faster inference in downstream tasks.
within_ents : bool
Whether to consider cues within entities.
explain : bool
Whether to keep track of cues for each entity.
regex : Optional[Dict[str, Union[List[str], str]]]
        A dictionary of regex patterns.
"""
defaults = dict(
following=following,
preceding=preceding,
pseudo=pseudo,
termination=termination,
verbs_eds=verbs_eds,
verbs_hyp=verbs_hyp,
)
def __init__(
self,
nlp: Language,
attr: str,
pseudo: Optional[List[str]],
preceding: Optional[List[str]],
following: Optional[List[str]],
termination: Optional[List[str]],
verbs_eds: Optional[List[str]],
verbs_hyp: Optional[List[str]],
on_ents_only: bool,
within_ents: bool,
explain: bool,
):
terms = self.get_defaults(
pseudo=pseudo,
preceding=preceding,
following=following,
termination=termination,
verbs_eds=verbs_eds,
verbs_hyp=verbs_hyp,
)
terms["verbs"] = self.load_verbs(
verbs_hyp=terms.pop("verbs_hyp"),
verbs_eds=terms.pop("verbs_eds"),
)
super().__init__(
nlp=nlp,
attr=attr,
on_ents_only=on_ents_only,
explain=explain,
**terms,
)
self.within_ents = within_ents
self.set_extensions()
@staticmethod
def set_extensions() -> None:
if not Token.has_extension("hypothesis"):
Token.set_extension("hypothesis", default=False)
if not Token.has_extension("hypothesis_"):
Token.set_extension(
"hypothesis_",
getter=lambda token: "HYP" if token._.hypothesis else "CERT",
)
if not Span.has_extension("hypothesis"):
Span.set_extension("hypothesis", default=False)
if not Span.has_extension("hypothesis_"):
Span.set_extension(
"hypothesis_",
getter=lambda span: "HYP" if span._.hypothesis else "CERT",
)
if not Span.has_extension("hypothesis_cues"):
Span.set_extension("hypothesis_cues", default=[])
if not Doc.has_extension("hypothesis"):
Doc.set_extension("hypothesis", default=[])
def load_verbs(
self,
verbs_hyp: List[str],
verbs_eds: List[str],
) -> List[str]:
"""
Conjugate "classic" verbs to conditional, and add hypothesis
verbs conjugated to all tenses.
Parameters
----------
        verbs_hyp: List of verbs that specifically imply a hypothesis.
verbs_eds: List of general verbs.
Returns
-------
list of hypothesis verbs conjugated at all tenses and classic
verbs conjugated to conditional.
"""
classic_verbs = get_verbs(verbs_eds)
classic_verbs = classic_verbs.loc[classic_verbs["mode"] == "Conditionnel"]
list_classic_verbs = list(classic_verbs["term"].unique())
hypo_verbs = get_verbs(verbs_hyp)
list_hypo_verbs = list(hypo_verbs["term"].unique())
return list_hypo_verbs + list_classic_verbs
def process(self, doc: Doc) -> Doc:
"""
Finds entities related to hypothesis.
Parameters
----------
doc: spaCy Doc object
Returns
-------
doc: spaCy Doc object, annotated for hypothesis
"""
matches = self.get_matches(doc)
terminations = get_spans(matches, "termination")
boundaries = self._boundaries(doc, terminations)
# Removes duplicate matches and pseudo-expressions in one statement
matches = filter_spans(matches, label_to_remove="pseudo")
entities = list(doc.ents) + list(doc.spans.get("discarded", []))
ents = None
for start, end in boundaries:
ents, entities = consume_spans(
entities,
filter=lambda s: check_inclusion(s, start, end),
second_chance=ents,
)
sub_matches, matches = consume_spans(
matches, lambda s: start <= s.start < end
)
if self.on_ents_only and not ents:
continue
sub_preceding = get_spans(sub_matches, "preceding")
sub_following = get_spans(sub_matches, "following")
sub_verbs = get_spans(sub_matches, "verbs")
if not sub_preceding + sub_following + sub_verbs:
continue
if not self.on_ents_only:
for token in doc[start:end]:
token._.hypothesis = any(
m.end <= token.i for m in sub_preceding + sub_verbs
) or any(m.start > token.i for m in sub_following)
for ent in ents:
if self.within_ents:
cues = [m for m in sub_preceding + sub_verbs if m.end <= ent.end]
cues += [m for m in sub_following if m.start >= ent.start]
else:
cues = [m for m in sub_preceding + sub_verbs if m.end <= ent.start]
cues += [m for m in sub_following if m.start >= ent.end]
hypothesis = ent._.hypothesis or bool(cues)
ent._.hypothesis = hypothesis
if self.explain and hypothesis:
ent._.hypothesis_cues += cues
if not self.on_ents_only and hypothesis:
for token in ent:
token._.hypothesis = True
return doc
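
# Usage sketch (illustrative, not part of the original module): this assumes the
# component is exposed as the spaCy factory "eds.hypothesis" and that entities
# are produced upstream, e.g. by an "eds.matcher" pipe -- the factory names and
# the matcher config below are assumptions rather than verified API.
#
#   import spacy
#
#   nlp = spacy.blank("fr")
#   nlp.add_pipe("eds.sentences")
#   nlp.add_pipe("eds.matcher", config=dict(terms=dict(douleur="douleur")))
#   nlp.add_pipe("eds.hypothesis")
#
#   doc = nlp("Suspicion de douleur thoracique.")
#   doc.ents[0]._.hypothesis       # True -> entity qualified as hypothetical
#   doc.ents[0]._.hypothesis_cues  # cue spans (populated when explain=True)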
|
py | 1a3c58134ad1e89013c5d3dad00e1940643ac5b3 | import os
import re
import datetime
def benchmarks_Z3(input_path, output_path, option):
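    """Convert raw Z3 SMT2 outputs into annotated SMT-LIB 2.6 benchmark files.

    Walks the ``linear`` or ``nonlinear`` subfolder of ``input_path`` (chosen
    via ``option``), selects every ``output_SMT_Solver_Z3_*`` SMT2 file that is
    not a ``-Sol`` solution file, and rewrites it under ``output_path`` in the
    QF_LRA/QF_LIA/QF_NRA/QF_NIA/QF_BV folder matching its theory tag. Each copy
    is prefixed with a standard header (logic, source, license, category) and
    the minimum objective value read from the corresponding csv file, if any.
    """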
if option == "linear":
save_path_QF_LRA = output_path + "/linear/QF_LRA"
save_path_QF_LIA = output_path + "/linear/QF_LIA"
save_path_QF_BV = output_path + "/linear/QF_BV"
if option == "nonlinear":
save_path_QF_NRA = output_path + "/nonlinear/QF_NRA"
save_path_QF_NIA = output_path + "/nonlinear/QF_NIA"
save_path_QF_BV = output_path + "/nonlinear/QF_BV"
if option == "linear":
input_path = input_path + "/linear"
if option == "nonlinear":
input_path = input_path + "/nonlinear"
for root, dirs, files in os.walk(input_path, topdown=False):
for name in files:
if "output_SMT_Solver_Z3_" in os.path.join(root, name):
if "SMT2" in os.path.join(root, name):
if "-Sol" not in os.path.join(root, name):
aux = re.search('output_SMT_Solver_Z3_(.*)SMT2', os.path.join(root, name))
type_theory = (aux.group(1))
type_theory = type_theory[:-1]
partial_name = name.replace('.smt2', '')
if (type_theory == "RealReal" or type_theory == "RealBool" or type_theory == "RealPBC" or type_theory == "RealPBCMultiObjectives") and option == "linear" :
completeName_Path = os.path.join(save_path_QF_LRA, partial_name + "_" + type_theory + ".smt2")
if (type_theory == "IntIntOr" or type_theory == "IntIntLessThan") and option == "linear" :
completeName_Path = os.path.join(save_path_QF_LIA, partial_name + "_" + type_theory + ".smt2")
if (type_theory == "BV") and option == "linear" :
completeName_Path = os.path.join(save_path_QF_BV, partial_name + "_" + type_theory + ".smt2")
if (type_theory == "RealReal" or type_theory == "RealBool" or type_theory == "RealPBC" or type_theory == "RealPBCMultiObjectives") and option == "nonlinear":
completeName_Path = os.path.join(save_path_QF_NRA, partial_name + "_" + type_theory + ".smt2")
if (type_theory == "IntIntOr" or type_theory == "IntIntLessThan") and option == "nonlinear":
completeName_Path = os.path.join(save_path_QF_NIA, partial_name + "_" + type_theory + ".smt2")
if (type_theory == "BV") and option == "nonlinear":
completeName_Path = os.path.join(save_path_QF_BV, partial_name + "_" + type_theory + ".smt2")
file = open(completeName_Path, "w")
file.write("(set-info :smt-lib-version 2.6)\n")
if (type_theory == "RealReal" or type_theory == "RealBool" or type_theory == "RealPBC" or type_theory == "RealPBCMultiObjectives") and option == "linear":
file.write("(set-logic QF_LRA)\n")
if (type_theory == "IntIntOr" or type_theory == "IntIntLessThan") and option == "linear":
file.write("(set-logic QF_LIA)\n")
if (type_theory == "BV") and option == "linear":
file.write("(set-logic QF_BV)\n")
if (type_theory == "RealReal" or type_theory == "RealBool" or type_theory == "RealPBC" or type_theory == "RealPBCMultiObjectives") and option == "nonlinear":
file.write("(set-logic QF_NRA)\n")
if (type_theory == "IntIntOr" or type_theory == "IntIntLessThan") and option == "nonlinear":
file.write("(set-logic QF_NIA)\n")
if (type_theory == "BV") and option == "nonlinear":
file.write("(set-logic QF_BV)\n")
file.write("(set-info :source |\n")
file.write("Generated by: Mădălina Erașcu, Răzvan Meteș \n")
file.write("Generated on: " + datetime.date.today().strftime("%Y-%m-%d") + "\n")
file.write("Application: " + name.split('-')[0] + "\n")
file.write("Target solver: Z3\n")
file.write("|)\n")
file.write("(set-info :license \"https://creativecommons.org/licenses/by/4.0/\")\n")
file.write("(set-info :category \"industrial\")\n")
file.write("(set-info :minimum")
aux_path = os.path.join(root, name).split('/SMT2')[0] + "/csv/" + name.split('.smt2')[0] + ".csv"
print(aux_path)
                        if not os.path.isfile(aux_path) or os.stat(aux_path).st_size == 0:
file.write(" unknown)\n")
file.write("\n")
else:
with open(aux_path) as fin:
next(fin)
for line in fin:
min_price = line.split(None, 1)[0]
file.write(" " + min_price.split(',')[0] + ")\n")
file.write("\n")
with open(os.path.join(root, name)) as f:
lines = f.readlines()
file.writelines(lines)
file.close()
if __name__ == "__main__":
benchmarks_Z3("/Users/razvanmetes/Optimization-Modulo-Theory/experimentalResults", "/Users/razvanmetes/Optimization-Modulo-Theory/benchmarks/output_Z3", "linear") |
py | 1a3c5a88e4fbed791de1b81f5543b6fbb0bb9e56 | from . import moduleItem
from ....cfg import bbData
from .... import lib
from typing import List
from ..gameItem import spawnableItem
@spawnableItem
class JumpDriveModule(moduleItem.ModuleItem):
""""A module providing a ship with the ability to jump anywhere within the galaxy, without the need to use jumpgates
"""
def __init__(self, name : str, aliases : List[str], value : int = 0,
wiki : str = "", manufacturer : str = "", icon : str = "",
emoji : lib.emojis.BasedEmoji = lib.emojis.BasedEmoji.EMPTY, techLevel : int = -1,
builtIn : bool = False):
"""
:param str name: The name of the module. Must be unique.
:param list[str] aliases: Alternative names by which this module may be referred to
:param int value: The number of credits this module may be sold or bought or at a shop (Default 0)
:param str wiki: A web page that is displayed as the wiki page for this module. (Default "")
:param str manufacturer: The name of the manufacturer of this module (Default "")
:param str icon: A URL pointing to an image to use for this module's icon (Default "")
:param lib.emojis.BasedEmoji emoji: The emoji to use for the module's small icon (Default lib.emojis.BasedEmoji.EMPTY)
:param int techLevel: A rating from 1 to 10 of this item's technical advancement. Used
as a measure for its effectiveness compared to other modules of the same type (Default -1)
:param bool builtIn: Whether this is a BountyBot standard module (loaded in from bbData) or
a custom spawned module (Default False)
"""
super(JumpDriveModule, self).__init__(name, aliases, value=value, wiki=wiki, manufacturer=manufacturer, icon=icon,
emoji=emoji, techLevel=techLevel, builtIn=builtIn)
def toDict(self, **kwargs) -> dict:
"""Serialize this module into dictionary format, to be saved to file.
        No extra attributes are implemented by this class, so this just uses the base moduleItem toDict method.
:return: A dictionary containing all information needed to reconstruct this module
:rtype: dict
"""
itemDict = super(JumpDriveModule, self).toDict(**kwargs)
return itemDict
@classmethod
def fromDict(cls, moduleDict : dict, **kwargs):
"""Factory function building a new module object from the information in the provided dictionary.
The opposite of this class's toDict function.
:param moduleDict: A dictionary containing all information needed to construct the requested module
:return: The new module object as described in moduleDict
:rtype: dict
"""
if moduleDict.get("builtIn", False):
return bbData.builtInModuleObjs[moduleDict["name"]]
return JumpDriveModule(**cls._makeDefaults(moduleDict, ignores=("type",),
emoji=lib.emojis.BasedEmoji.fromStr(moduleDict["emoji"]) \
if "emoji" in moduleDict else lib.emojis.BasedEmoji.EMPTY))
|
py | 1a3c5a9db6ffeaef7bdef8c40c660c61819d04dc | #!/usr/bin/env python3
# Build and install fmt
import sys
import logging
from pathlib import Path
from subprocess import run, CalledProcessError
import multiprocessing
# Version check
if sys.version_info.minor < 6:
print("Python version is %s, 3.6+ is required." % sys.version)
sys.exit(1)
def build_fmt(fmt_path: Path, fmt_build_path: Path, fmt_install_path: Path):
"""Build fmt from source path into build path"""
# create directory with any intermediate parents, if needed
# similar to Unix: mkdir -p
Path(fmt_build_path).mkdir(parents=True, exist_ok=True)
# We want:
# - build for: Release (default)
# - install (default)
# - no doc with FMT_DOC:BOOL=OFF
# - no test with FMT_TEST=OFF
options = [
"-G", "Unix Makefiles",
f"-DCMAKE_INSTALL_PREFIX={fmt_install_path}",
f"-DFMT_DOC:BOOL=OFF",
f"-DFMT_TEST=OFF",
]
try:
run(["cmake", *options, fmt_path], cwd=fmt_build_path, check=True)
except CalledProcessError as e:
logging.error(f"cmake command failed")
sys.exit(e.returncode)
cpu_count = 1
try:
cpu_count = multiprocessing.cpu_count()
    except NotImplementedError:
        logging.error("multiprocessing.cpu_count() not implemented, defaulting to -j1")
try:
run(["make", f"-j{cpu_count}"], cwd=fmt_build_path, check=True)
except CalledProcessError as e:
logging.error(f"make command failed")
sys.exit(e.returncode)
try:
run(["make", "install"], cwd=fmt_build_path, check=True)
except CalledProcessError as e:
logging.error(f"make install command failed")
sys.exit(e.returncode)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
# get third-party dir from this file's path
third_party_path = Path(__file__).parent.absolute()
fmt_path = third_party_path.joinpath("fmt")
fmt_build_path = third_party_path.joinpath("build/fmt")
fmt_install_path = third_party_path.joinpath("install/fmt")
# build fmt in target path
logging.info(f"Building fmt from {fmt_path} in {fmt_build_path} and installing to {fmt_install_path}...")
build_fmt(fmt_path, fmt_build_path, fmt_install_path)
logging.info("Done.")
|